From 834b394ee759ed31c5371d8093d7cd8cd90014a8 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Wed, 31 Jul 2013 11:26:53 -0700 Subject: Merge remote-tracking branch 'goog/dalvik-dev' into merge-art-to-dalvik-dev Change-Id: I323e9e8c29c3e39d50d9aba93121b26266c52a46 --- build/Android.common.mk | 12 +- build/Android.gtest.mk | 16 +- compiler/Android.mk | 10 +- compiler/dex/dex_to_dex_compiler.cc | 35 +- compiler/dex/quick/arm/call_arm.cc | 8 +- compiler/dex/quick/arm/fp_arm.cc | 16 +- compiler/dex/quick/arm/int_arm.cc | 6 +- compiler/dex/quick/gen_common.cc | 103 +- compiler/dex/quick/gen_invoke.cc | 38 +- compiler/dex/quick/mips/call_mips.cc | 8 +- compiler/dex/quick/mips/fp_mips.cc | 28 +- compiler/dex/quick/mips/int_mips.cc | 4 +- compiler/dex/quick/x86/call_x86.cc | 6 +- compiler/dex/quick/x86/fp_x86.cc | 14 +- compiler/dex/quick/x86/int_x86.cc | 2 +- compiler/driver/compiler_driver.cc | 119 +- compiler/driver/compiler_driver.h | 43 +- compiler/driver/compiler_driver_test.cc | 3 +- compiler/jni/quick/arm/calling_convention_arm.cc | 2 +- compiler/jni/quick/calling_convention.h | 2 +- compiler/jni/quick/jni_compiler.cc | 24 +- compiler/jni/quick/mips/calling_convention_mips.cc | 2 +- compiler/jni/quick/x86/calling_convention_x86.cc | 2 +- compiler/llvm/runtime_support_builder.cc | 4 +- compiler/llvm/runtime_support_llvm_func.h | 5 +- compiler/llvm/runtime_support_llvm_func_list.h | 81 + compiler/sea_ir/code_gen.cc | 12 +- compiler/sea_ir/frontend.cc | 9 +- compiler/sea_ir/instruction_nodes.h | 26 +- compiler/sea_ir/sea.cc | 122 +- compiler/sea_ir/sea.h | 29 +- compiler/sea_ir/sea_node.h | 5 +- compiler/stubs/portable/stubs.cc | 15 +- compiler/stubs/quick/stubs.cc | 26 +- compiler/utils/arm/assembler_arm.cc | 1895 ++++++++++++++++++++ compiler/utils/arm/assembler_arm.h | 659 +++++++ compiler/utils/arm/constants_arm.h | 449 +++++ compiler/utils/arm/managed_register_arm.cc | 113 ++ compiler/utils/arm/managed_register_arm.h | 274 +++ 
compiler/utils/arm/managed_register_arm_test.cc | 767 ++++++++ compiler/utils/assembler.cc | 119 ++ compiler/utils/assembler.h | 459 +++++ compiler/utils/managed_register.h | 72 + compiler/utils/mips/assembler_mips.cc | 999 +++++++++++ compiler/utils/mips/assembler_mips.h | 507 ++++++ compiler/utils/mips/constants_mips.h | 110 ++ compiler/utils/mips/managed_register_mips.cc | 114 ++ compiler/utils/mips/managed_register_mips.h | 228 +++ compiler/utils/x86/assembler_x86.cc | 1847 +++++++++++++++++++ compiler/utils/x86/assembler_x86.h | 646 +++++++ compiler/utils/x86/assembler_x86_test.cc | 32 + compiler/utils/x86/constants_x86.h | 126 ++ compiler/utils/x86/managed_register_x86.cc | 128 ++ compiler/utils/x86/managed_register_x86.h | 218 +++ compiler/utils/x86/managed_register_x86_test.cc | 359 ++++ compiler/vector_output_stream.cc | 48 + compiler/vector_output_stream.h | 65 + dex2oat/dex2oat.cc | 22 +- runtime/Android.mk | 117 +- runtime/arch/arm/asm_support_arm.S | 38 + runtime/arch/arm/asm_support_arm.h | 31 + runtime/arch/arm/context_arm.cc | 103 ++ runtime/arch/arm/context_arm.h | 68 + runtime/arch/arm/entrypoints_init_arm.cc | 241 +++ runtime/arch/arm/jni_entrypoints_arm.S | 65 + runtime/arch/arm/portable_entrypoints_arm.S | 96 + runtime/arch/arm/quick_entrypoints_arm.S | 1288 +++++++++++++ runtime/arch/arm/registers_arm.cc | 47 + runtime/arch/arm/registers_arm.h | 97 + runtime/arch/arm/thread_arm.cc | 29 + runtime/arch/context.cc | 41 + runtime/arch/context.h | 70 + runtime/arch/mips/asm_support_mips.S | 41 + runtime/arch/mips/asm_support_mips.h | 31 + runtime/arch/mips/context_mips.cc | 102 ++ runtime/arch/mips/context_mips.h | 65 + runtime/arch/mips/entrypoints_init_mips.cc | 242 +++ runtime/arch/mips/jni_entrypoints_mips.S | 89 + runtime/arch/mips/portable_entrypoints_mips.S | 73 + runtime/arch/mips/quick_entrypoints_mips.S | 1074 +++++++++++ runtime/arch/mips/registers_mips.cc | 49 + runtime/arch/mips/registers_mips.h | 109 ++ 
runtime/arch/mips/thread_mips.cc | 29 + runtime/arch/x86/asm_support_x86.S | 91 + runtime/arch/x86/asm_support_x86.h | 27 + runtime/arch/x86/context_x86.cc | 97 + runtime/arch/x86/context_x86.h | 69 + runtime/arch/x86/entrypoints_init_x86.cc | 224 +++ runtime/arch/x86/jni_entrypoints_x86.S | 35 + runtime/arch/x86/portable_entrypoints_x86.S | 109 ++ runtime/arch/x86/quick_entrypoints_x86.S | 1041 +++++++++++ runtime/arch/x86/registers_x86.cc | 37 + runtime/arch/x86/registers_x86.h | 47 + runtime/arch/x86/thread_x86.cc | 139 ++ runtime/asm_support.h | 25 - runtime/base/mutex.h | 2 +- runtime/base/timing_logger.cc | 77 +- runtime/base/timing_logger.h | 35 +- runtime/class_linker.cc | 18 +- runtime/class_linker_test.cc | 2 +- runtime/common_test.h | 5 +- runtime/constants_arm.h | 519 ------ runtime/constants_mips.h | 186 -- runtime/constants_x86.h | 140 -- runtime/debugger.cc | 2 +- runtime/dex_instruction.h | 4 +- runtime/entrypoints/entrypoint_utils.cc | 407 +++++ runtime/entrypoints/entrypoint_utils.h | 412 +++++ runtime/entrypoints/jni/jni_entrypoints.cc | 46 + runtime/entrypoints/math_entrypoints.cc | 89 + runtime/entrypoints/math_entrypoints.h | 29 + runtime/entrypoints/math_entrypoints_test.cc | 74 + .../portable/portable_alloc_entrypoints.cc | 69 + .../portable/portable_argument_visitor.h | 136 ++ .../portable/portable_cast_entrypoints.cc | 57 + .../portable/portable_dexcache_entrypoints.cc | 53 + .../entrypoints/portable/portable_entrypoints.h | 44 + .../portable/portable_field_entrypoints.cc | 241 +++ .../portable/portable_fillarray_entrypoints.cc | 50 + .../portable/portable_invoke_entrypoints.cc | 104 ++ .../portable/portable_jni_entrypoints.cc | 98 + .../portable/portable_lock_entrypoints.cc | 38 + .../portable/portable_proxy_entrypoints.cc | 109 ++ .../portable/portable_stub_entrypoints.cc | 145 ++ .../portable/portable_thread_entrypoints.cc | 99 + .../portable/portable_throw_entrypoints.cc | 129 ++ runtime/entrypoints/quick/callee_save_frame.h | 41 + 
.../entrypoints/quick/quick_alloc_entrypoints.cc | 79 + runtime/entrypoints/quick/quick_argument_visitor.h | 138 ++ .../entrypoints/quick/quick_cast_entrypoints.cc | 68 + .../quick/quick_deoptimization_entrypoints.cc | 38 + .../quick/quick_dexcache_entrypoints.cc | 68 + runtime/entrypoints/quick/quick_entrypoints.h | 170 ++ .../entrypoints/quick/quick_field_entrypoints.cc | 273 +++ .../quick/quick_fillarray_entrypoints.cc | 63 + .../quick/quick_instrumentation_entrypoints.cc | 65 + .../quick/quick_interpreter_entrypoints.cc | 128 ++ .../entrypoints/quick/quick_invoke_entrypoints.cc | 226 +++ runtime/entrypoints/quick/quick_jni_entrypoints.cc | 171 ++ .../entrypoints/quick/quick_lock_entrypoints.cc | 42 + .../entrypoints/quick/quick_math_entrypoints.cc | 77 + .../entrypoints/quick/quick_proxy_entrypoints.cc | 126 ++ .../entrypoints/quick/quick_stub_entrypoints.cc | 295 +++ .../entrypoints/quick/quick_thread_entrypoints.cc | 38 + .../entrypoints/quick/quick_throw_entrypoints.cc | 98 + runtime/gc/accounting/atomic_stack.h | 41 +- runtime/gc/collector/garbage_collector.h | 4 +- runtime/gc/collector/mark_sweep.cc | 2 +- runtime/gc/heap.cc | 75 +- runtime/gc/heap.h | 10 +- runtime/gc/space/dlmalloc_space.cc | 14 +- runtime/gc/space/dlmalloc_space.h | 6 +- runtime/gc/space/space_test.cc | 2 +- runtime/image_test.cc | 3 +- runtime/instrumentation.cc | 2 +- runtime/interpreter/interpreter.cc | 619 ++++--- runtime/mirror/abstract_method-inl.h | 2 +- runtime/mirror/abstract_method.cc | 24 +- runtime/mirror/abstract_method.h | 6 +- runtime/mirror/array.h | 2 +- runtime/mirror/object_test.cc | 2 +- runtime/native/dalvik_system_VMDebug.cc | 67 + runtime/oat/runtime/argument_visitor.h | 248 --- runtime/oat/runtime/arm/context_arm.cc | 103 -- runtime/oat/runtime/arm/context_arm.h | 67 - .../oat/runtime/arm/oat_support_entrypoints_arm.cc | 237 --- runtime/oat/runtime/arm/runtime_support_arm.S | 1413 --------------- runtime/oat/runtime/callee_save_frame.h | 41 - 
runtime/oat/runtime/context.cc | 41 - runtime/oat/runtime/context.h | 70 - runtime/oat/runtime/mips/context_mips.cc | 102 -- runtime/oat/runtime/mips/context_mips.h | 64 - .../runtime/mips/oat_support_entrypoints_mips.cc | 238 --- runtime/oat/runtime/mips/runtime_support_mips.S | 1187 ------------ runtime/oat/runtime/oat_support_entrypoints.h | 177 -- runtime/oat/runtime/support_alloc.cc | 79 - runtime/oat/runtime/support_cast.cc | 68 - runtime/oat/runtime/support_deoptimize.cc | 38 - runtime/oat/runtime/support_dexcache.cc | 68 - runtime/oat/runtime/support_field.cc | 273 --- runtime/oat/runtime/support_fillarray.cc | 63 - runtime/oat/runtime/support_instrumentation.cc | 65 - runtime/oat/runtime/support_interpreter.cc | 128 -- runtime/oat/runtime/support_invoke.cc | 226 --- runtime/oat/runtime/support_jni.cc | 171 -- runtime/oat/runtime/support_locks.cc | 42 - runtime/oat/runtime/support_math.cc | 77 - runtime/oat/runtime/support_proxy.cc | 210 --- runtime/oat/runtime/support_stubs.cc | 438 ----- runtime/oat/runtime/support_thread.cc | 38 - runtime/oat/runtime/support_throw.cc | 98 - runtime/oat/runtime/x86/context_x86.cc | 97 - runtime/oat/runtime/x86/context_x86.h | 67 - .../oat/runtime/x86/oat_support_entrypoints_x86.cc | 221 --- runtime/oat/runtime/x86/runtime_support_x86.S | 1211 ------------- runtime/oat/utils/arm/assembler_arm.cc | 1895 -------------------- runtime/oat/utils/arm/assembler_arm.h | 659 ------- runtime/oat/utils/arm/managed_register_arm.cc | 113 -- runtime/oat/utils/arm/managed_register_arm.h | 274 --- runtime/oat/utils/arm/managed_register_arm_test.cc | 767 -------- runtime/oat/utils/assembler.cc | 119 -- runtime/oat/utils/assembler.h | 459 ----- runtime/oat/utils/managed_register.h | 72 - runtime/oat/utils/mips/assembler_mips.cc | 1023 ----------- runtime/oat/utils/mips/assembler_mips.h | 507 ------ runtime/oat/utils/mips/managed_register_mips.cc | 114 -- runtime/oat/utils/mips/managed_register_mips.h | 228 --- 
runtime/oat/utils/x86/assembler_x86.cc | 1859 ------------------- runtime/oat/utils/x86/assembler_x86.h | 646 ------- runtime/oat/utils/x86/assembler_x86_test.cc | 32 - runtime/oat/utils/x86/managed_register_x86.cc | 128 -- runtime/oat/utils/x86/managed_register_x86.h | 218 --- runtime/oat/utils/x86/managed_register_x86_test.cc | 359 ---- runtime/oat_test.cc | 7 +- runtime/runtime.cc | 8 +- runtime/runtime_support.cc | 475 ----- runtime/runtime_support.h | 419 ----- runtime/runtime_support_llvm.cc | 925 ---------- runtime/runtime_support_llvm.h | 27 - runtime/runtime_support_llvm_func_list.h | 82 - runtime/runtime_support_test.cc | 74 - runtime/stack.cc | 1 - runtime/stack.h | 2 +- runtime/thread.cc | 194 +- runtime/thread.h | 8 +- runtime/thread_arm.cc | 29 - runtime/thread_mips.cc | 29 - runtime/thread_x86.cc | 139 -- runtime/trace.cc | 6 +- runtime/vector_output_stream.cc | 48 - runtime/vector_output_stream.h | 65 - runtime/verifier/instruction_flags.cc | 15 +- runtime/verifier/instruction_flags.h | 17 + runtime/verifier/method_verifier.cc | 430 +++-- runtime/verifier/method_verifier.h | 12 +- runtime/verifier/register_line.cc | 27 +- runtime/verifier/register_line.h | 7 + test/ReferenceMap/stack_walk_refmap_jni.cc | 6 +- 238 files changed, 22538 insertions(+), 21425 deletions(-) create mode 100644 compiler/llvm/runtime_support_llvm_func_list.h create mode 100644 compiler/utils/arm/assembler_arm.cc create mode 100644 compiler/utils/arm/assembler_arm.h create mode 100644 compiler/utils/arm/constants_arm.h create mode 100644 compiler/utils/arm/managed_register_arm.cc create mode 100644 compiler/utils/arm/managed_register_arm.h create mode 100644 compiler/utils/arm/managed_register_arm_test.cc create mode 100644 compiler/utils/assembler.cc create mode 100644 compiler/utils/assembler.h create mode 100644 compiler/utils/managed_register.h create mode 100644 compiler/utils/mips/assembler_mips.cc create mode 100644 compiler/utils/mips/assembler_mips.h create mode 
100644 compiler/utils/mips/constants_mips.h create mode 100644 compiler/utils/mips/managed_register_mips.cc create mode 100644 compiler/utils/mips/managed_register_mips.h create mode 100644 compiler/utils/x86/assembler_x86.cc create mode 100644 compiler/utils/x86/assembler_x86.h create mode 100644 compiler/utils/x86/assembler_x86_test.cc create mode 100644 compiler/utils/x86/constants_x86.h create mode 100644 compiler/utils/x86/managed_register_x86.cc create mode 100644 compiler/utils/x86/managed_register_x86.h create mode 100644 compiler/utils/x86/managed_register_x86_test.cc create mode 100644 compiler/vector_output_stream.cc create mode 100644 compiler/vector_output_stream.h create mode 100644 runtime/arch/arm/asm_support_arm.S create mode 100644 runtime/arch/arm/asm_support_arm.h create mode 100644 runtime/arch/arm/context_arm.cc create mode 100644 runtime/arch/arm/context_arm.h create mode 100644 runtime/arch/arm/entrypoints_init_arm.cc create mode 100644 runtime/arch/arm/jni_entrypoints_arm.S create mode 100644 runtime/arch/arm/portable_entrypoints_arm.S create mode 100644 runtime/arch/arm/quick_entrypoints_arm.S create mode 100644 runtime/arch/arm/registers_arm.cc create mode 100644 runtime/arch/arm/registers_arm.h create mode 100644 runtime/arch/arm/thread_arm.cc create mode 100644 runtime/arch/context.cc create mode 100644 runtime/arch/context.h create mode 100644 runtime/arch/mips/asm_support_mips.S create mode 100644 runtime/arch/mips/asm_support_mips.h create mode 100644 runtime/arch/mips/context_mips.cc create mode 100644 runtime/arch/mips/context_mips.h create mode 100644 runtime/arch/mips/entrypoints_init_mips.cc create mode 100644 runtime/arch/mips/jni_entrypoints_mips.S create mode 100644 runtime/arch/mips/portable_entrypoints_mips.S create mode 100644 runtime/arch/mips/quick_entrypoints_mips.S create mode 100644 runtime/arch/mips/registers_mips.cc create mode 100644 runtime/arch/mips/registers_mips.h create mode 100644 
runtime/arch/mips/thread_mips.cc create mode 100644 runtime/arch/x86/asm_support_x86.S create mode 100644 runtime/arch/x86/asm_support_x86.h create mode 100644 runtime/arch/x86/context_x86.cc create mode 100644 runtime/arch/x86/context_x86.h create mode 100644 runtime/arch/x86/entrypoints_init_x86.cc create mode 100644 runtime/arch/x86/jni_entrypoints_x86.S create mode 100644 runtime/arch/x86/portable_entrypoints_x86.S create mode 100644 runtime/arch/x86/quick_entrypoints_x86.S create mode 100644 runtime/arch/x86/registers_x86.cc create mode 100644 runtime/arch/x86/registers_x86.h create mode 100644 runtime/arch/x86/thread_x86.cc delete mode 100644 runtime/constants_arm.h delete mode 100644 runtime/constants_mips.h delete mode 100644 runtime/constants_x86.h create mode 100644 runtime/entrypoints/entrypoint_utils.cc create mode 100644 runtime/entrypoints/entrypoint_utils.h create mode 100644 runtime/entrypoints/jni/jni_entrypoints.cc create mode 100644 runtime/entrypoints/math_entrypoints.cc create mode 100644 runtime/entrypoints/math_entrypoints.h create mode 100644 runtime/entrypoints/math_entrypoints_test.cc create mode 100644 runtime/entrypoints/portable/portable_alloc_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_argument_visitor.h create mode 100644 runtime/entrypoints/portable/portable_cast_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_dexcache_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_entrypoints.h create mode 100644 runtime/entrypoints/portable/portable_field_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_fillarray_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_invoke_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_jni_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_lock_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_proxy_entrypoints.cc create mode 100644 
runtime/entrypoints/portable/portable_stub_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_thread_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_throw_entrypoints.cc create mode 100644 runtime/entrypoints/quick/callee_save_frame.h create mode 100644 runtime/entrypoints/quick/quick_alloc_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_argument_visitor.h create mode 100644 runtime/entrypoints/quick/quick_cast_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_dexcache_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_entrypoints.h create mode 100644 runtime/entrypoints/quick/quick_field_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_fillarray_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_interpreter_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_invoke_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_jni_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_lock_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_math_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_proxy_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_stub_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_thread_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_throw_entrypoints.cc delete mode 100644 runtime/oat/runtime/argument_visitor.h delete mode 100644 runtime/oat/runtime/arm/context_arm.cc delete mode 100644 runtime/oat/runtime/arm/context_arm.h delete mode 100644 runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc delete mode 100644 runtime/oat/runtime/arm/runtime_support_arm.S delete mode 100644 runtime/oat/runtime/callee_save_frame.h delete mode 100644 
runtime/oat/runtime/context.cc delete mode 100644 runtime/oat/runtime/context.h delete mode 100644 runtime/oat/runtime/mips/context_mips.cc delete mode 100644 runtime/oat/runtime/mips/context_mips.h delete mode 100644 runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc delete mode 100644 runtime/oat/runtime/mips/runtime_support_mips.S delete mode 100644 runtime/oat/runtime/oat_support_entrypoints.h delete mode 100644 runtime/oat/runtime/support_alloc.cc delete mode 100644 runtime/oat/runtime/support_cast.cc delete mode 100644 runtime/oat/runtime/support_deoptimize.cc delete mode 100644 runtime/oat/runtime/support_dexcache.cc delete mode 100644 runtime/oat/runtime/support_field.cc delete mode 100644 runtime/oat/runtime/support_fillarray.cc delete mode 100644 runtime/oat/runtime/support_instrumentation.cc delete mode 100644 runtime/oat/runtime/support_interpreter.cc delete mode 100644 runtime/oat/runtime/support_invoke.cc delete mode 100644 runtime/oat/runtime/support_jni.cc delete mode 100644 runtime/oat/runtime/support_locks.cc delete mode 100644 runtime/oat/runtime/support_math.cc delete mode 100644 runtime/oat/runtime/support_proxy.cc delete mode 100644 runtime/oat/runtime/support_stubs.cc delete mode 100644 runtime/oat/runtime/support_thread.cc delete mode 100644 runtime/oat/runtime/support_throw.cc delete mode 100644 runtime/oat/runtime/x86/context_x86.cc delete mode 100644 runtime/oat/runtime/x86/context_x86.h delete mode 100644 runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc delete mode 100644 runtime/oat/runtime/x86/runtime_support_x86.S delete mode 100644 runtime/oat/utils/arm/assembler_arm.cc delete mode 100644 runtime/oat/utils/arm/assembler_arm.h delete mode 100644 runtime/oat/utils/arm/managed_register_arm.cc delete mode 100644 runtime/oat/utils/arm/managed_register_arm.h delete mode 100644 runtime/oat/utils/arm/managed_register_arm_test.cc delete mode 100644 runtime/oat/utils/assembler.cc delete mode 100644 runtime/oat/utils/assembler.h 
delete mode 100644 runtime/oat/utils/managed_register.h delete mode 100644 runtime/oat/utils/mips/assembler_mips.cc delete mode 100644 runtime/oat/utils/mips/assembler_mips.h delete mode 100644 runtime/oat/utils/mips/managed_register_mips.cc delete mode 100644 runtime/oat/utils/mips/managed_register_mips.h delete mode 100644 runtime/oat/utils/x86/assembler_x86.cc delete mode 100644 runtime/oat/utils/x86/assembler_x86.h delete mode 100644 runtime/oat/utils/x86/assembler_x86_test.cc delete mode 100644 runtime/oat/utils/x86/managed_register_x86.cc delete mode 100644 runtime/oat/utils/x86/managed_register_x86.h delete mode 100644 runtime/oat/utils/x86/managed_register_x86_test.cc delete mode 100644 runtime/runtime_support.cc delete mode 100644 runtime/runtime_support.h delete mode 100644 runtime/runtime_support_llvm.cc delete mode 100644 runtime/runtime_support_llvm.h delete mode 100644 runtime/runtime_support_llvm_func_list.h delete mode 100644 runtime/runtime_support_test.cc delete mode 100644 runtime/thread_arm.cc delete mode 100644 runtime/thread_mips.cc delete mode 100644 runtime/thread_x86.cc delete mode 100644 runtime/vector_output_stream.cc delete mode 100644 runtime/vector_output_stream.h diff --git a/build/Android.common.mk b/build/Android.common.mk index bfb1f9b89b..82097250f9 100644 --- a/build/Android.common.mk +++ b/build/Android.common.mk @@ -14,6 +14,9 @@ # limitations under the License. # +ifndef ANDROID_COMMON_MK +ANDROID_COMMON_MK = true + # These can be overridden via the environment or by editing to # enable/disable certain build configuration. # @@ -163,11 +166,8 @@ ifneq ($(filter 4.6 4.6.%, $(TARGET_GCC_VERSION)),) else # Warn if not using GCC 4.6 for target builds when not doing a top-level or 'mma' build. ifneq ($(ONE_SHOT_MAKEFILE),) - ifneq ($(ART_THREAD_SAFETY_CHECK_WARNING),true) - # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6 - $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.) 
- ART_THREAD_SAFETY_CHECK_WARNING := true - endif + # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6 + $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.) endif endif # We build with GCC 4.6 on the host. @@ -219,3 +219,5 @@ ifeq ($(ART_BUILD_HOST_DEBUG),true) ART_BUILD_HOST := true ART_BUILD_DEBUG := true endif + +endif # ANDROID_COMMON_MK diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index ee1115a0dd..e069d88f19 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -17,6 +17,11 @@ LOCAL_PATH := art TEST_COMMON_SRC_FILES := \ + compiler/driver/compiler_driver_test.cc \ + compiler/elf_writer_test.cc \ + compiler/jni/jni_compiler_test.cc \ + compiler/utils/arm/managed_register_arm_test.cc \ + compiler/utils/x86/managed_register_x86_test.cc \ runtime/barrier_test.cc \ runtime/base/histogram_test.cc \ runtime/base/mutex_test.cc \ @@ -29,6 +34,7 @@ TEST_COMMON_SRC_FILES := \ runtime/dex_file_test.cc \ runtime/dex_instruction_visitor_test.cc \ runtime/dex_method_iterator_test.cc \ + runtime/entrypoints/math_entrypoints_test.cc \ runtime/exception_test.cc \ runtime/gc/accounting/space_bitmap_test.cc \ runtime/gc/heap_test.cc \ @@ -42,21 +48,15 @@ TEST_COMMON_SRC_FILES := \ runtime/mem_map_test.cc \ runtime/mirror/dex_cache_test.cc \ runtime/mirror/object_test.cc \ - runtime/oat/utils/arm/managed_register_arm_test.cc \ - runtime/oat/utils/x86/managed_register_x86_test.cc \ runtime/oat_test.cc \ runtime/output_stream_test.cc \ runtime/reference_table_test.cc \ - runtime/runtime_support_test.cc \ runtime/runtime_test.cc \ runtime/thread_pool_test.cc \ runtime/utils_test.cc \ runtime/verifier/method_verifier_test.cc \ runtime/verifier/reg_type_test.cc \ - runtime/zip_archive_test.cc \ - compiler/driver/compiler_driver_test.cc \ - compiler/elf_writer_test.cc \ - compiler/jni/jni_compiler_test.cc + runtime/zip_archive_test.cc ifeq ($(ART_SEA_IR_MODE),true) TEST_COMMON_SRC_FILES += \ @@ -68,7 +68,7 @@ 
TEST_TARGET_SRC_FILES := \ TEST_HOST_SRC_FILES := \ $(TEST_COMMON_SRC_FILES) \ - runtime/oat/utils/x86/assembler_x86_test.cc + compiler/utils/x86/assembler_x86_test.cc ART_HOST_TEST_EXECUTABLES := ART_TARGET_TEST_EXECUTABLES := diff --git a/compiler/Android.mk b/compiler/Android.mk index 9f250225ba..df77853abf 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -76,12 +76,20 @@ LIBART_COMPILER_SRC_FILES := \ llvm/runtime_support_builder_x86.cc \ stubs/portable/stubs.cc \ stubs/quick/stubs.cc \ + utils/arm/assembler_arm.cc \ + utils/arm/managed_register_arm.cc \ + utils/assembler.cc \ + utils/mips/assembler_mips.cc \ + utils/mips/managed_register_mips.cc \ + utils/x86/assembler_x86.cc \ + utils/x86/managed_register_x86.cc \ elf_fixup.cc \ elf_stripper.cc \ elf_writer.cc \ elf_writer_quick.cc \ image_writer.cc \ - oat_writer.cc + oat_writer.cc \ + vector_output_stream.cc ifeq ($(ART_SEA_IR_MODE),true) LIBART_COMPILER_SRC_FILES += \ diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 3c491ce20f..1ee29cbae1 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -36,9 +36,11 @@ const bool kEnableCheckCastEllision = true; class DexCompiler { public: DexCompiler(art::CompilerDriver& compiler, - const DexCompilationUnit& unit) + const DexCompilationUnit& unit, + DexToDexCompilationLevel dex_to_dex_compilation_level) : driver_(compiler), - unit_(unit) {} + unit_(unit), + dex_to_dex_compilation_level_(dex_to_dex_compilation_level) {} ~DexCompiler() {} @@ -55,6 +57,10 @@ class DexCompiler { return *const_cast(unit_.GetDexFile()); } + bool PerformOptimizations() const { + return dex_to_dex_compilation_level_ >= kOptimize; + } + // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where // a barrier is required. 
void CompileReturnVoid(Instruction* inst, uint32_t dex_pc); @@ -84,6 +90,7 @@ class DexCompiler { CompilerDriver& driver_; const DexCompilationUnit& unit_; + const DexToDexCompilationLevel dex_to_dex_compilation_level_; DISALLOW_COPY_AND_ASSIGN(DexCompiler); }; @@ -138,6 +145,7 @@ class ScopedDexWriteAccess { }; void DexCompiler::Compile() { + DCHECK_GE(dex_to_dex_compilation_level_, kRequired); const DexFile::CodeItem* code_item = unit_.GetCodeItem(); const uint16_t* insns = code_item->insns_; const uint32_t insns_size = code_item->insns_size_in_code_units_; @@ -220,7 +228,7 @@ void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) { } Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) { - if (!kEnableCheckCastEllision) { + if (!kEnableCheckCastEllision || !PerformOptimizations()) { return inst; } MethodReference referrer(&GetDexFile(), unit_.GetDexMethodIndex()); @@ -253,7 +261,7 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc, Instruction::Code new_opcode, bool is_put) { - if (!kEnableQuickening) { + if (!kEnableQuickening || !PerformOptimizations()) { return; } uint32_t field_idx = inst->VRegC_22c(); @@ -280,7 +288,7 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc, Instruction::Code new_opcode, bool is_range) { - if (!kEnableQuickening) { + if (!kEnableQuickening || !PerformOptimizations()) { return; } uint32_t method_idx = is_range ? 
inst->VRegB_3rc() : inst->VRegB_35c(); @@ -320,14 +328,15 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, } // namespace optimizer } // namespace art -extern "C" art::CompiledMethod* - ArtCompileDEX(art::CompilerDriver& compiler, const art::DexFile::CodeItem* code_item, +extern "C" void ArtCompileDEX(art::CompilerDriver& compiler, const art::DexFile::CodeItem* code_item, uint32_t access_flags, art::InvokeType invoke_type, uint32_t class_def_idx, uint32_t method_idx, jobject class_loader, - const art::DexFile& dex_file) { - art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(), - dex_file, code_item, class_def_idx, method_idx, access_flags); - art::optimizer::DexCompiler dex_compiler(compiler, unit); - dex_compiler.Compile(); - return NULL; + const art::DexFile& dex_file, + art::DexToDexCompilationLevel dex_to_dex_compilation_level) { + if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) { + art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(), + dex_file, code_item, class_def_idx, method_idx, access_flags); + art::optimizer::DexCompiler dex_compiler(compiler, unit, dex_to_dex_compilation_level); + dex_compiler.Compile(); + } } diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 64ebb6abbd..745e43dc38 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -19,7 +19,7 @@ #include "arm_lir.h" #include "codegen_arm.h" #include "dex/quick/mir_to_lir-inl.h" -#include "oat/runtime/oat_support_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" namespace art { @@ -432,7 +432,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { // Making a call - use explicit registers FlushAllRegs(); /* Everything to home location */ LoadValueDirectFixed(rl_src, r0); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), + LoadWordDisp(rARM_SELF, 
QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rARM_LR); // Materialize a pointer to the fill data image NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast(tab_rec)); @@ -488,7 +488,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { OpRegImm(kOpCmp, r1, 0); OpIT(kCondNe, "T"); // Go expensive route - artLockObjectFromCode(self, obj); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); @@ -519,7 +519,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { OpIT(kCondEq, "EE"); StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3); // Go expensive route - UnlockObjectFromCode(obj); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc index 1bb08c45e3..08d6778129 100644 --- a/compiler/dex/quick/arm/fp_arm.cc +++ b/compiler/dex/quick/arm/fp_arm.cc @@ -49,7 +49,8 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -91,7 +92,8 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - 
CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -140,16 +142,16 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, op = kThumb2VcvtDI; break; case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: - GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(FATAL) << "Unexpected opcode: " << opcode; @@ -315,7 +317,7 @@ bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) { branch = NewLIR2(kThumbBCond, 0, kArmCondEq); ClobberCalleeSave(); LockCallTemps(); // Using fixed registers - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt)); NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg)); NewLIR1(kThumbBlxR, r_tgt); NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1); diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index e1a77daba9..9db1016efa 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -19,8 +19,8 @@ #include "arm_lir.h" #include "codegen_arm.h" #include "dex/quick/mir_to_lir-inl.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "mirror/array.h" -#include 
"oat/runtime/oat_support_entrypoints.h" namespace art { @@ -665,7 +665,7 @@ void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, */ RegLocation rl_result; if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) { - int func_offset = ENTRYPOINT_OFFSET(pLmul); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); FlushAllRegs(); CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false); rl_result = GetReturnWide(false); @@ -956,7 +956,7 @@ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 40db2c69d9..ebe10bb57e 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -17,8 +17,8 @@ #include "dex/compiler_ir.h" #include "dex/compiler_internals.h" #include "dex/quick/mir_to_lir-inl.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "mirror/array.h" -#include "oat/runtime/oat_support_entrypoints.h" #include "verifier/method_verifier.h" namespace art { @@ -211,9 +211,9 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode); } else { - func_offset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck); + func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck); } CallRuntimeHelperImmMethodRegLocation(func_offset, 
type_idx, rl_src, true); RegLocation rl_result = GetReturn(false); @@ -233,9 +233,9 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); } else { - func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck); } CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true); FreeTemp(TargetReg(kArg2)); @@ -375,7 +375,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do // TUNING: fast path should fall through LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL); LoadConstant(TargetReg(kArg0), ssb_index); - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); if (cu_->instruction_set == kMips) { // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy OpRegCopy(rBase, TargetReg(kRet0)); @@ -408,9 +408,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do FreeTemp(rBase); } else { FlushAllRegs(); // Everything to home locations - int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) : - (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic) - : ENTRYPOINT_OFFSET(pSet32Static)); + int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pSet32Static)); CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true); } } @@ -455,7 +455,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, // or NULL if not initialized. Check for NULL and call helper if NULL. 
// TUNING: fast path should fall through LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL); - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); if (cu_->instruction_set == kMips) { // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy OpRegCopy(rBase, TargetReg(kRet0)); @@ -483,9 +483,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, } } else { FlushAllRegs(); // Everything to home locations - int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) : - (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic) - : ENTRYPOINT_OFFSET(pGet32Static)); + int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pGet32Static)); CallRuntimeHelperImm(getterOffset, field_idx, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -499,7 +499,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, void Mir2Lir::HandleSuspendLaunchPads() { int num_elems = suspend_launchpads_.Size(); - int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode); + int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode); for (int i = 0; i < num_elems; i++) { ResetRegPool(); ResetDefTracking(); @@ -545,7 +545,7 @@ void Mir2Lir::HandleThrowLaunchPads() { bool target_x86 = (cu_->instruction_set == kX86); switch (lab->operands[0]) { case kThrowNullPointer: - func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode); break; case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index // v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads. @@ -557,7 +557,7 @@ void Mir2Lir::HandleThrowLaunchPads() { // Make sure the following LoadConstant doesn't mess with kArg1. 
LockTemp(TargetReg(kArg1)); LoadConstant(TargetReg(kArg0), v2); - func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); break; case kThrowArrayBounds: // Move v1 (array index) to kArg0 and v2 (array length) to kArg1 @@ -590,18 +590,18 @@ void Mir2Lir::HandleThrowLaunchPads() { OpRegCopy(TargetReg(kArg0), v1); } } - func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); break; case kThrowDivZero: - func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode); break; case kThrowNoSuchMethod: OpRegCopy(TargetReg(kArg0), v1); func_offset = - ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode); + QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode); break; case kThrowStackOverflow: - func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode); // Restore stack alignment if (target_x86) { OpRegImm(kOpAdd, TargetReg(kSp), frame_size_); @@ -664,9 +664,9 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, StoreValue(rl_dest, rl_result); } } else { - int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) : - (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance) - : ENTRYPOINT_OFFSET(pGet32Instance)); + int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pGet32Instance)); CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -719,9 +719,9 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, } } } else { - int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) : - (is_object ? 
ENTRYPOINT_OFFSET(pSetObjInstance) - : ENTRYPOINT_OFFSET(pSet32Instance)); + int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pSet32Instance)); CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true); } } @@ -735,7 +735,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { type_idx)) { // Call out to helper which resolves type and verifies access. // Resolved type returned in kRet0. - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -764,7 +764,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // TUNING: move slow path to end & remove unconditional branch LIR* target1 = NewLIR0(kPseudoTargetLabel); // Call out to helper, which will return resolved type in kArg0 - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -797,7 +797,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { LoadWordDisp(TargetReg(kArg2), mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0)); // Might call out to helper, which will return resolved string in kRet0 - int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode)); + int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode)); LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0)); LoadConstant(TargetReg(kArg1), string_idx); if (cu_->instruction_set == kThumb2) { @@ -821,7 +821,8 @@ void 
Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { branch->target = target; } else { DCHECK_EQ(cu_->instruction_set, kX86); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true); + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), + TargetReg(kArg1), true); } GenBarrier(); StoreValue(rl_dest, GetReturn(false)); @@ -847,9 +848,9 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { int func_offset; if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks( cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode); } else { - func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); } CallRuntimeHelperImmMethod(func_offset, type_idx, true); RegLocation rl_result = GetReturn(false); @@ -858,7 +859,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { void Mir2Lir::GenThrow(RegLocation rl_src) { FlushAllRegs(); - CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); + CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); } // For final classes there are no sub-classes to check and so we can answer the instance-of @@ -928,7 +929,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kArg0 - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref @@ 
-950,7 +951,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL); // Not resolved // Call out to helper, which will return resolved type in kRet0 - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true); OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */ // Rejoin code paths @@ -985,7 +986,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know } } else { if (cu_->instruction_set == kThumb2) { - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); if (!type_known_abstract) { /* Uses conditional nullification */ OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? 
@@ -1002,13 +1003,13 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); } if (cu_->instruction_set != kX86) { - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) FreeTemp(r_tgt); } else { OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); } } } @@ -1068,7 +1069,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kRet0 // InitializeTypeAndVerifyAccess(idx, method) - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path } else if (use_declaring_class) { @@ -1088,8 +1089,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Not resolved // Call out to helper, which will return resolved type in kArg0 // InitializeTypeFromCode(idx, method) - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1), - true); + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path // Rejoin code paths LIR* hop_target = NewLIR0(kPseudoTargetLabel); @@ -1108,8 +1109,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ if 
(!type_known_abstract) { branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL); } - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2), - true); + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), + TargetReg(kArg2), true); /* branch target here */ LIR* target = NewLIR0(kPseudoTargetLabel); branch1->target = target; @@ -1172,15 +1173,15 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, switch (opcode) { case Instruction::SHL_LONG: case Instruction::SHL_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pShlLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong); break; case Instruction::SHR_LONG: case Instruction::SHR_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pShrLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong); break; case Instruction::USHR_LONG: case Instruction::USHR_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pUshrLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong); break; default: LOG(FATAL) << "Unexpected case"; @@ -1302,7 +1303,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, } rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv); } else { - int func_offset = ENTRYPOINT_OFFSET(pIdivmod); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); FlushAllRegs(); /* Send everything to home location */ LoadValueDirectFixed(rl_src2, TargetReg(kArg1)); int r_tgt = CallHelperSetup(func_offset); @@ -1557,7 +1558,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re FlushAllRegs(); /* Everything to home location */ LoadValueDirectFixed(rl_src, TargetReg(kArg0)); Clobber(TargetReg(kArg0)); - int func_offset = ENTRYPOINT_OFFSET(pIdivmod); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false); if (is_div) rl_result = GetReturn(false); @@ -1634,7 +1635,7 @@ void 
Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, } else { call_out = true; ret_reg = TargetReg(kRet0); - func_offset = ENTRYPOINT_OFFSET(pLmul); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); } break; case Instruction::DIV_LONG: @@ -1642,13 +1643,13 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, call_out = true; check_zero = true; ret_reg = TargetReg(kRet0); - func_offset = ENTRYPOINT_OFFSET(pLdiv); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv); break; case Instruction::REM_LONG: case Instruction::REM_LONG_2ADDR: call_out = true; check_zero = true; - func_offset = ENTRYPOINT_OFFSET(pLdivmod); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod); /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */ ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0); break; diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index cae13190ec..1b34e99a72 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -16,11 +16,11 @@ #include "dex/compiler_ir.h" #include "dex_file-inl.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "invoke_type.h" #include "mirror/array.h" #include "mirror/string.h" #include "mir_to_lir-inl.h" -#include "oat/runtime/oat_support_entrypoints.h" #include "x86/codegen_x86.h" namespace art { @@ -471,7 +471,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, direct_method = 0; } int trampoline = (cu->instruction_set == kX86) ? 
0 - : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); + : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); if (direct_method != 0) { switch (state) { @@ -555,7 +555,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -563,7 +563,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -571,7 +571,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -579,7 +579,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -589,7 +589,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* 
cu, uint32_t unused, uintptr_t unused2, uintptr_t unused3, InvokeType unused4) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -773,14 +773,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Generate memcpy OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); - CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); } else { if (info->num_arg_words >= 20) { // Generate memcpy OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); - CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); } else { // Use vldm/vstm pair using kArg3 as a temp @@ -1047,7 +1047,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { } else { LoadValueDirectFixed(rl_start, reg_start); } - int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0; + int r_tgt = (cu_->instruction_set != kX86) ? 
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0; GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags); LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast(info)); intrinsic_launchpads_.Insert(launch_pad); @@ -1056,7 +1056,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { if (cu_->instruction_set != kX86) { OpReg(kOpBlx, r_tgt); } else { - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf)); } LIR* resume_tgt = NewLIR0(kPseudoTargetLabel); launch_pad->operands[2] = reinterpret_cast(resume_tgt); @@ -1084,7 +1084,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { LoadValueDirectFixed(rl_this, reg_this); LoadValueDirectFixed(rl_cmp, reg_cmp); int r_tgt = (cu_->instruction_set != kX86) ? - LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0; + LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0; GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags); // TUNING: check if rl_cmp.s_reg_low is already null checked LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast(info)); @@ -1094,7 +1094,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { if (cu_->instruction_set != kX86) { OpReg(kOpBlx, r_tgt); } else { - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)); } launch_pad->operands[2] = 0; // No return possible // Record that we've already inlined & null checked @@ -1409,20 +1409,20 @@ void Mir2Lir::GenInvoke(CallInfo* info) { int trampoline = 0; switch (info->type) { case kInterface: - trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) - : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); + trampoline = fast_path ? 
QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) + : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); break; case kDirect: - trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); break; case kStatic: - trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); break; case kSuper: - trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); break; case kVirtual: - trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); break; default: LOG(FATAL) << "Unexpected invoke type"; diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index 8b375ea2f0..846c055ac2 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -18,8 +18,8 @@ #include "codegen_mips.h" #include "dex/quick/mir_to_lir-inl.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "mips_lir.h" -#include "oat/runtime/oat_support_entrypoints.h" namespace art { @@ -247,7 +247,7 @@ void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { GenBarrier(); NewLIR0(kMipsCurrPC); // Really a jal to .+8 // Now, fill the branch delay slot with the helper load - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode)); GenBarrier(); // Scheduling barrier // Construct BaseLabel and set up table base register @@ -272,7 +272,7 @@ void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - 
artLockObjectFromCode(self, obj); - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode)); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); @@ -287,7 +287,7 @@ void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - UnlockObjectFromCode(obj); - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode)); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 6cd9acc099..320301726b 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -16,8 +16,8 @@ #include "codegen_mips.h" #include "dex/quick/mir_to_lir-inl.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "mips_lir.h" -#include "oat/runtime/oat_support_entrypoints.h" namespace art { @@ -50,7 +50,8 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -92,7 +93,8 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, 
rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -133,22 +135,22 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, op = kMipsFcvtdw; break; case Instruction::FLOAT_TO_INT: - GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_INT: - GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src); return; case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: - GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(FATAL) << "Unexpected opcode: " << opcode; @@ -178,18 +180,18 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, switch (opcode) { case Instruction::CMPL_FLOAT: - offset = ENTRYPOINT_OFFSET(pCmplFloat); + offset = QUICK_ENTRYPOINT_OFFSET(pCmplFloat); wide = false; break; case Instruction::CMPG_FLOAT: - offset = ENTRYPOINT_OFFSET(pCmpgFloat); + offset = QUICK_ENTRYPOINT_OFFSET(pCmpgFloat); wide = false; break; case Instruction::CMPL_DOUBLE: - offset = ENTRYPOINT_OFFSET(pCmplDouble); + offset = QUICK_ENTRYPOINT_OFFSET(pCmplDouble); break; case Instruction::CMPG_DOUBLE: - offset = ENTRYPOINT_OFFSET(pCmpgDouble); + offset = QUICK_ENTRYPOINT_OFFSET(pCmpgDouble); break; default: LOG(FATAL) << 
"Unexpected opcode: " << opcode; diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index ea7da6030e..bd044c66bd 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -18,9 +18,9 @@ #include "codegen_mips.h" #include "dex/quick/mir_to_lir-inl.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "mips_lir.h" #include "mirror/array.h" -#include "oat/runtime/oat_support_entrypoints.h" namespace art { @@ -579,7 +579,7 @@ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index d530a1c644..1c395def55 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -148,7 +148,7 @@ void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { NewLIR1(kX86StartOfMethod, rX86_ARG2); NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast(tab_rec)); NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, rX86_ARG1, true); } @@ -165,7 +165,7 @@ void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX); LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq); // If lock is held, go the expensive route - artLockObjectFromCode(self, obj); - CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), 
rCX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true); branch->target = NewLIR0(kPseudoTargetLabel); } @@ -185,7 +185,7 @@ void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LIR* branch2 = NewLIR1(kX86Jmp8, 0); branch->target = NewLIR0(kPseudoTargetLabel); // Otherwise, go the expensive route - UnlockObjectFromCode(obj); - CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true); branch2->target = NewLIR0(kPseudoTargetLabel); } diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc index cc6f374488..f736b5e28f 100644 --- a/compiler/dex/quick/x86/fp_x86.cc +++ b/compiler/dex/quick/x86/fp_x86.cc @@ -49,7 +49,8 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -99,7 +100,8 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -196,17 +198,17 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, return; } case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; 
case Instruction::LONG_TO_FLOAT: // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers. - GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(INFO) << "Unexpected opcode: " << opcode; diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 3be24df565..0b4b4be04e 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -532,7 +532,7 @@ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. 
LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 038957e128..38d00a0804 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -369,7 +369,7 @@ CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet compiler_ = reinterpret_cast(ArtQuickCompileMethod); } - dex_to_dex_compiler_ = reinterpret_cast(ArtCompileDEX); + dex_to_dex_compiler_ = reinterpret_cast(ArtCompileDEX); #ifdef ART_SEA_IR_MODE sea_ir_compiler_ = NULL; @@ -495,7 +495,7 @@ const std::vector* CompilerDriver::CreateInterpreterToQuickEntry() cons void CompilerDriver::CompileAll(jobject class_loader, const std::vector& dex_files, - TimingLogger& timings) { + base::TimingLogger& timings) { DCHECK(!Runtime::Current()->IsStarted()); UniquePtr thread_pool(new ThreadPool(thread_count_)); PreCompile(class_loader, dex_files, *thread_pool.get(), timings); @@ -505,16 +505,10 @@ void CompilerDriver::CompileAll(jobject class_loader, } } -static bool IsDexToDexCompilationAllowed(mirror::ClassLoader* class_loader, - const DexFile& dex_file, - const DexFile::ClassDef& class_def) +static DexToDexCompilationLevel GetDexToDexCompilationlevel(mirror::ClassLoader* class_loader, + const DexFile& dex_file, + const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Do not allow DEX-to-DEX compilation of image classes. This is to prevent the - // verifier from passing on "quick" instruction at compilation time. It must - // only pass on quick instructions at runtime. 
- if (class_loader == NULL) { - return false; - } const char* descriptor = dex_file.GetClassDescriptor(class_def); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); mirror::Class* klass = class_linker->FindClass(descriptor, class_loader); @@ -522,13 +516,30 @@ static bool IsDexToDexCompilationAllowed(mirror::ClassLoader* class_loader, Thread* self = Thread::Current(); CHECK(self->IsExceptionPending()); self->ClearException(); - return false; + return kDontDexToDexCompile; + } + // The verifier can only run on "quick" instructions at runtime (see usage of + // FindAccessedFieldAtDexPc and FindInvokedMethodAtDexPc in ThrowNullPointerExceptionFromDexPC + // function). Since image classes can be verified again while compiling an application, + // we must prevent the DEX-to-DEX compiler from introducing them. + // TODO: find a way to enable "quick" instructions for image classes and remove this check. + bool compiling_image_classes = (class_loader == NULL); + if (compiling_image_classes) { + return kRequired; + } else if (klass->IsVerified()) { + // Class is verified so we can enable DEX-to-DEX compilation for performance. + return kOptimize; + } else if (klass->IsCompileTimeVerified()) { + // Class verification has soft-failed. Anyway, ensure at least correctness. + DCHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime); + return kRequired; + } else { + // Class verification has failed: do not run DEX-to-DEX compilation. + return kDontDexToDexCompile; } - // DEX-to-DEX compilation is only allowed on preverified classes. 
- return klass->IsVerified(); } -void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings) { +void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, base::TimingLogger& timings) { DCHECK(!Runtime::Current()->IsStarted()); Thread* self = Thread::Current(); jobject jclass_loader; @@ -556,15 +567,15 @@ void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, TimingLogg uint32_t method_idx = method->GetDexMethodIndex(); const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); // Can we run DEX-to-DEX compiler on this class ? - bool allow_dex_compilation; + DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; { ScopedObjectAccess soa(Thread::Current()); const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx); mirror::ClassLoader* class_loader = soa.Decode(jclass_loader); - allow_dex_compilation = IsDexToDexCompilationAllowed(class_loader, *dex_file, class_def); + dex_to_dex_compilation_level = GetDexToDexCompilationlevel(class_loader, *dex_file, class_def); } CompileMethod(code_item, method->GetAccessFlags(), method->GetInvokeType(), - class_def_idx, method_idx, jclass_loader, *dex_file, allow_dex_compilation); + class_def_idx, method_idx, jclass_loader, *dex_file, dex_to_dex_compilation_level); self->GetJniEnv()->DeleteGlobalRef(jclass_loader); @@ -572,7 +583,7 @@ void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, TimingLogg } void CompilerDriver::Resolve(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -581,7 +592,7 @@ void CompilerDriver::Resolve(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, 
base::TimingLogger& timings) { LoadImageClasses(timings); Resolve(class_loader, dex_files, thread_pool, timings); @@ -666,12 +677,13 @@ static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg) } // Make a list of descriptors for classes to include in the image -void CompilerDriver::LoadImageClasses(TimingLogger& timings) +void CompilerDriver::LoadImageClasses(base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_) { if (image_classes_.get() == NULL) { return; } + timings.NewSplit("LoadImageClasses"); // Make a first class to load all classes explicitly listed in the file Thread* self = Thread::Current(); ScopedObjectAccess soa(self); @@ -726,7 +738,6 @@ void CompilerDriver::LoadImageClasses(TimingLogger& timings) class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get()); CHECK_NE(image_classes_->size(), 0U); - timings.AddSplit("LoadImageClasses"); } static void MaybeAddToImageClasses(mirror::Class* klass, CompilerDriver::DescriptorSet* image_classes) @@ -758,11 +769,13 @@ void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get()); } -void CompilerDriver::UpdateImageClasses(TimingLogger& timings) { +void CompilerDriver::UpdateImageClasses(base::TimingLogger& timings) { if (image_classes_.get() == NULL) { return; } + timings.NewSplit("UpdateImageClasses"); + // Update image_classes_ with classes for objects created by methods. 
Thread* self = Thread::Current(); const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter"); @@ -772,7 +785,6 @@ void CompilerDriver::UpdateImageClasses(TimingLogger& timings) { heap->FlushAllocStack(); heap->GetLiveBitmap()->Walk(FindClinitImageClassesCallback, this); self->EndAssertNoThreadSuspension(old_cause); - timings.AddSplit("UpdateImageClasses"); } void CompilerDriver::RecordClassStatus(ClassReference ref, CompiledClass* compiled_class) { @@ -1551,22 +1563,22 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i } void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); // TODO: we could resolve strings here, although the string table is largely filled with class // and method names. + timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " Types").c_str())); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_); - timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types"); + timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " MethodsAndFields").c_str())); context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_); - timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields"); } void CompilerDriver::Verify(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -1620,11 +1632,11 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_ } void CompilerDriver::VerifyDexFile(jobject 
class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { + timings.NewSplit(strdup(("Verify " + dex_file.GetLocation()).c_str())); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_); - timings.AddSplit("Verify " + dex_file.GetLocation()); } static const char* class_initializer_black_list[] = { @@ -2116,7 +2128,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl } void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { + timings.NewSplit(strdup(("InitializeNoClinit " + dex_file.GetLocation()).c_str())); #ifndef NDEBUG for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) { const char* descriptor = class_initializer_black_list[i]; @@ -2126,12 +2139,11 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count_); - timings.AddSplit("InitializeNoClinit " + dex_file.GetLocation()); } void CompilerDriver::InitializeClasses(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -2140,7 +2152,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader, } void CompilerDriver::Compile(jobject class_loader, const std::vector& dex_files, - 
ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != NULL); @@ -2170,11 +2182,11 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz return; } // Can we run DEX-to-DEX compiler on this class ? - bool allow_dex_compilation; + DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; { ScopedObjectAccess soa(Thread::Current()); mirror::ClassLoader* class_loader = soa.Decode(jclass_loader); - allow_dex_compilation = IsDexToDexCompilationAllowed(class_loader, dex_file, class_def); + dex_to_dex_compilation_level = GetDexToDexCompilationlevel(class_loader, dex_file, class_def); } ClassDataItemIterator it(dex_file, class_data); // Skip fields @@ -2197,7 +2209,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz previous_direct_method_idx = method_idx; manager->GetCompiler()->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(), it.GetMethodInvokeType(class_def), class_def_index, - method_idx, jclass_loader, dex_file, allow_dex_compilation); + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level); it.Next(); } // Compile virtual methods @@ -2213,24 +2225,24 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz previous_virtual_method_idx = method_idx; manager->GetCompiler()->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(), it.GetMethodInvokeType(class_def), class_def_index, - method_idx, jclass_loader, dex_file, allow_dex_compilation); + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level); it.Next(); } DCHECK(!it.HasNext()); } void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) { + ThreadPool& thread_pool, base::TimingLogger& timings) { + 
timings.NewSplit(strdup(("Compile " + dex_file.GetLocation()).c_str())); ParallelCompilationManager context(NULL, class_loader, this, &dex_file, thread_pool); context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_); - timings.AddSplit("Compile " + dex_file.GetLocation()); } void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t class_def_idx, uint32_t method_idx, jobject class_loader, const DexFile& dex_file, - bool allow_dex_to_dex_compilation) { + DexToDexCompilationLevel dex_to_dex_compilation_level) { CompiledMethod* compiled_method = NULL; uint64_t start_ns = NanoTime(); @@ -2239,18 +2251,8 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t CHECK(compiled_method != NULL); } else if ((access_flags & kAccAbstract) != 0) { } else { - // In small mode we only compile image classes. - bool dont_compile = (Runtime::Current()->IsSmallMode() && - ((image_classes_.get() == NULL) || (image_classes_->size() == 0))); - - // Don't compile class initializers, ever. - if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { - dont_compile = true; - } else if (code_item->insns_size_in_code_units_ < Runtime::Current()->GetSmallModeMethodDexSizeLimit()) { - // Do compile small methods. 
- dont_compile = false; - } - if (!dont_compile) { + bool compile = verifier::MethodVerifier::IsCandidateForCompilation(code_item, access_flags); + if (compile) { CompilerFn compiler = compiler_; #ifdef ART_SEA_IR_MODE bool use_sea = Runtime::Current()->IsSeaIRMode(); @@ -2262,13 +2264,12 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t compiled_method = (*compiler)(*this, code_item, access_flags, invoke_type, class_def_idx, method_idx, class_loader, dex_file); CHECK(compiled_method != NULL) << PrettyMethod(method_idx, dex_file); - } else if (allow_dex_to_dex_compilation) { + } else if (dex_to_dex_compilation_level != kDontDexToDexCompile) { // TODO: add a mode to disable DEX-to-DEX compilation ? - compiled_method = (*dex_to_dex_compiler_)(*this, code_item, access_flags, - invoke_type, class_def_idx, - method_idx, class_loader, dex_file); - // No native code is generated. - CHECK(compiled_method == NULL) << PrettyMethod(method_idx, dex_file); + (*dex_to_dex_compiler_)(*this, code_item, access_flags, + invoke_type, class_def_idx, + method_idx, class_loader, dex_file, + dex_to_dex_compilation_level); } } uint64_t duration_ns = NanoTime() - start_ns; diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index f3f72dd3c7..18f852dc6f 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -48,6 +48,12 @@ enum CompilerBackend { kNoBackend }; +enum DexToDexCompilationLevel { + kDontDexToDexCompile, // Only meaning wrt image time interpretation. + kRequired, // Dex-to-dex compilation required for correctness. + kOptimize // Perform required transformation and peep-hole optimizations. 
+}; + // Thread-local storage compiler worker threads class CompilerTls { public: @@ -78,11 +84,11 @@ class CompilerDriver { ~CompilerDriver(); void CompileAll(jobject class_loader, const std::vector& dex_files, - TimingLogger& timings) + base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); // Compile a single Method - void CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings) + void CompileOne(const mirror::AbstractMethod* method, base::TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); InstructionSet GetInstructionSet() const { @@ -284,47 +290,47 @@ class CompilerDriver { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PreCompile(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); - void LoadImageClasses(TimingLogger& timings); + void LoadImageClasses(base::TimingLogger& timings); // Attempt to resolve all type, methods, fields, and strings // referenced from code in the dex file following PathClassLoader // ordering semantics. 
void Resolve(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void ResolveDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void Verify(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings); + ThreadPool& thread_pool, base::TimingLogger& timings); void VerifyDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_); - void UpdateImageClasses(TimingLogger& timings); + void UpdateImageClasses(base::TimingLogger& timings); static void FindClinitImageClassesCallback(mirror::Object* object, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Compile(jobject class_loader, const std::vector& dex_files, - ThreadPool& thread_pool, TimingLogger& timings); + ThreadPool& thread_pool, base::TimingLogger& timings); void CompileDexFile(jobject class_loader, const DexFile& dex_file, - ThreadPool& thread_pool, TimingLogger& timings) + ThreadPool& thread_pool, base::TimingLogger& timings) LOCKS_EXCLUDED(Locks::mutator_lock_); void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t class_def_idx, 
uint32_t method_idx, jobject class_loader, const DexFile& dex_file, - bool allow_dex_to_dex_compilation) + DexToDexCompilationLevel dex_to_dex_compilation_level) LOCKS_EXCLUDED(compiled_methods_lock_); static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index) @@ -375,12 +381,19 @@ class CompilerDriver { uint32_t access_flags, InvokeType invoke_type, uint32_t class_dex_idx, uint32_t method_idx, jobject class_loader, const DexFile& dex_file); + + typedef void (*DexToDexCompilerFn)(CompilerDriver& driver, + const DexFile::CodeItem* code_item, + uint32_t access_flags, InvokeType invoke_type, + uint32_t class_dex_idx, uint32_t method_idx, + jobject class_loader, const DexFile& dex_file, + DexToDexCompilationLevel dex_to_dex_compilation_level); CompilerFn compiler_; #ifdef ART_SEA_IR_MODE CompilerFn sea_ir_compiler_; #endif - CompilerFn dex_to_dex_compiler_; + DexToDexCompilerFn dex_to_dex_compiler_; void* compiler_context_; diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index 78cacaf08e..8ee9cf6442 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -36,7 +36,8 @@ namespace art { class CompilerDriverTest : public CommonTest { protected: void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) { - TimingLogger timings("CompilerDriverTest::CompileAll", false); + base::TimingLogger timings("CompilerDriverTest::CompileAll", false, false); + timings.StartSplit("CompileAll"); compiler_driver_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader), timings); diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc index e9b09c5bba..9778293ee7 100644 --- a/compiler/jni/quick/arm/calling_convention_arm.cc +++ b/compiler/jni/quick/arm/calling_convention_arm.cc @@ -16,7 +16,7 @@ #include "base/logging.h" #include "calling_convention_arm.h" -#include 
"oat/utils/arm/managed_register_arm.h" +#include "utils/arm/managed_register_arm.h" namespace art { namespace arm { diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h index d492b42237..f2b7fd9a4a 100644 --- a/compiler/jni/quick/calling_convention.h +++ b/compiler/jni/quick/calling_convention.h @@ -18,9 +18,9 @@ #define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_ #include -#include "oat/utils/managed_register.h" #include "stack_indirect_reference_table.h" #include "thread.h" +#include "utils/managed_register.h" namespace art { diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index fa227f7fbb..b069fbd4a1 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -25,13 +25,13 @@ #include "dex_file-inl.h" #include "driver/compiler_driver.h" #include "disassembler.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "jni_internal.h" -#include "oat/runtime/oat_support_entrypoints.h" -#include "oat/utils/assembler.h" -#include "oat/utils/managed_register.h" -#include "oat/utils/arm/managed_register_arm.h" -#include "oat/utils/mips/managed_register_mips.h" -#include "oat/utils/x86/managed_register_x86.h" +#include "utils/assembler.h" +#include "utils/managed_register.h" +#include "utils/arm/managed_register_arm.h" +#include "utils/mips/managed_register_mips.h" +#include "utils/x86/managed_register_x86.h" #include "thread.h" #include "UniquePtr.h" @@ -172,8 +172,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, // can occur. The result is the saved JNI local state that is restored by the exit call. We // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer // arguments. - uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodStart); + uintptr_t jni_start = is_synchronized ? 
QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); FrameOffset locked_object_sirt_offset(0); if (is_synchronized) { @@ -304,13 +304,13 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, uintptr_t jni_end; if (reference_return) { // Pass result. - jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodEndWithReference); + jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReference); SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister()); end_jni_conv->Next(); } else { - jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodEnd); + jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodEnd); } // Pass saved local reference state. 
if (end_jni_conv->IsCurrentParamOnStack()) { diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc index 053ab44eb4..0a48500380 100644 --- a/compiler/jni/quick/mips/calling_convention_mips.cc +++ b/compiler/jni/quick/mips/calling_convention_mips.cc @@ -17,7 +17,7 @@ #include "calling_convention_mips.h" #include "base/logging.h" -#include "oat/utils/mips/managed_register_mips.h" +#include "utils/mips/managed_register_mips.h" namespace art { namespace mips { diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc index 45dd42960c..8b5c86d683 100644 --- a/compiler/jni/quick/x86/calling_convention_x86.cc +++ b/compiler/jni/quick/x86/calling_convention_x86.cc @@ -17,7 +17,7 @@ #include "calling_convention_x86.h" #include "base/logging.h" -#include "oat/utils/x86/managed_register_x86.h" +#include "utils/x86/managed_register_x86.h" #include "utils.h" namespace art { diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc index 729980309d..24e283d309 100644 --- a/compiler/llvm/runtime_support_builder.cc +++ b/compiler/llvm/runtime_support_builder.cc @@ -20,6 +20,7 @@ #include "ir_builder.h" #include "monitor.h" #include "mirror/object.h" +#include "runtime_support_llvm_func_list.h" #include "thread.h" #include @@ -47,10 +48,7 @@ RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context, runtime_support_func_decls_[runtime_support::ID] = fn; \ } while (0); -#include "runtime_support_llvm_func_list.h" RUNTIME_SUPPORT_FUNC_LIST(GET_RUNTIME_SUPPORT_FUNC_DECL) -#undef RUNTIME_SUPPORT_FUNC_LIST -#undef GET_RUNTIME_SUPPORT_FUNC_DECL } diff --git a/compiler/llvm/runtime_support_llvm_func.h b/compiler/llvm/runtime_support_llvm_func.h index 2634c683f1..a5ad852b49 100644 --- a/compiler/llvm/runtime_support_llvm_func.h +++ b/compiler/llvm/runtime_support_llvm_func.h @@ -17,16 +17,15 @@ #ifndef 
ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_H_ #define ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_H_ +#include "runtime_support_llvm_func_list.h" + namespace art { namespace llvm { namespace runtime_support { enum RuntimeId { #define DEFINE_RUNTIME_SUPPORT_FUNC_ID(ID, NAME) ID, -#include "runtime_support_llvm_func_list.h" RUNTIME_SUPPORT_FUNC_LIST(DEFINE_RUNTIME_SUPPORT_FUNC_ID) -#undef RUNTIME_SUPPORT_FUNC_LIST -#undef DEFINE_RUNTIME_SUPPORT_FUNC_ID MAX_ID }; diff --git a/compiler/llvm/runtime_support_llvm_func_list.h b/compiler/llvm/runtime_support_llvm_func_list.h new file mode 100644 index 0000000000..b5ac1ffe63 --- /dev/null +++ b/compiler/llvm/runtime_support_llvm_func_list.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ +#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ + +#define RUNTIME_SUPPORT_FUNC_LIST(V) \ + V(LockObject, art_portable_lock_object_from_code) \ + V(UnlockObject, art_portable_unlock_object_from_code) \ + V(GetCurrentThread, art_portable_get_current_thread_from_code) \ + V(SetCurrentThread, art_portable_set_current_thread_from_code) \ + V(PushShadowFrame, art_portable_push_shadow_frame_from_code) \ + V(PopShadowFrame, art_portable_pop_shadow_frame_from_code) \ + V(TestSuspend, art_portable_test_suspend_from_code) \ + V(ThrowException, art_portable_throw_exception_from_code) \ + V(ThrowStackOverflowException, art_portable_throw_stack_overflow_from_code) \ + V(ThrowNullPointerException, art_portable_throw_null_pointer_exception_from_code) \ + V(ThrowDivZeroException, art_portable_throw_div_zero_from_code) \ + V(ThrowIndexOutOfBounds, art_portable_throw_array_bounds_from_code) \ + V(InitializeTypeAndVerifyAccess, art_portable_initialize_type_and_verify_access_from_code) \ + V(InitializeType, art_portable_initialize_type_from_code) \ + V(IsAssignable, art_portable_is_assignable_from_code) \ + V(CheckCast, art_portable_check_cast_from_code) \ + V(CheckPutArrayElement, art_portable_check_put_array_element_from_code) \ + V(AllocObject, art_portable_alloc_object_from_code) \ + V(AllocObjectWithAccessCheck, art_portable_alloc_object_from_code_with_access_check) \ + V(AllocArray, art_portable_alloc_array_from_code) \ + V(AllocArrayWithAccessCheck, art_portable_alloc_array_from_code_with_access_check) \ + V(CheckAndAllocArray, art_portable_check_and_alloc_array_from_code) \ + V(CheckAndAllocArrayWithAccessCheck, art_portable_check_and_alloc_array_from_code_with_access_check) \ + V(FindStaticMethodWithAccessCheck, art_portable_find_static_method_from_code_with_access_check) \ + V(FindDirectMethodWithAccessCheck, art_portable_find_direct_method_from_code_with_access_check) \ + 
V(FindVirtualMethodWithAccessCheck, art_portable_find_virtual_method_from_code_with_access_check) \ + V(FindSuperMethodWithAccessCheck, art_portable_find_super_method_from_code_with_access_check) \ + V(FindInterfaceMethodWithAccessCheck, art_portable_find_interface_method_from_code_with_access_check) \ + V(FindInterfaceMethod, art_portable_find_interface_method_from_code) \ + V(ResolveString, art_portable_resolve_string_from_code) \ + V(Set32Static, art_portable_set32_static_from_code) \ + V(Set64Static, art_portable_set64_static_from_code) \ + V(SetObjectStatic, art_portable_set_obj_static_from_code) \ + V(Get32Static, art_portable_get32_static_from_code) \ + V(Get64Static, art_portable_get64_static_from_code) \ + V(GetObjectStatic, art_portable_get_obj_static_from_code) \ + V(Set32Instance, art_portable_set32_instance_from_code) \ + V(Set64Instance, art_portable_set64_instance_from_code) \ + V(SetObjectInstance, art_portable_set_obj_instance_from_code) \ + V(Get32Instance, art_portable_get32_instance_from_code) \ + V(Get64Instance, art_portable_get64_instance_from_code) \ + V(GetObjectInstance, art_portable_get_obj_instance_from_code) \ + V(InitializeStaticStorage, art_portable_initialize_static_storage_from_code) \ + V(FillArrayData, art_portable_fill_array_data_from_code) \ + V(GetAndClearException, art_portable_get_and_clear_exception) \ + V(IsExceptionPending, art_portable_is_exception_pending_from_code) \ + V(FindCatchBlock, art_portable_find_catch_block_from_code) \ + V(MarkGCCard, art_portable_mark_gc_card_from_code) \ + V(ProxyInvokeHandler, art_portable_proxy_invoke_handler_from_code) \ + V(art_d2l, art_d2l) \ + V(art_d2i, art_d2i) \ + V(art_f2l, art_f2l) \ + V(art_f2i, art_f2i) \ + V(JniMethodStart, art_portable_jni_method_start) \ + V(JniMethodStartSynchronized, art_portable_jni_method_start_synchronized) \ + V(JniMethodEnd, art_portable_jni_method_end) \ + V(JniMethodEndSynchronized, art_portable_jni_method_end_synchronized) \ + 
V(JniMethodEndWithReference, art_portable_jni_method_end_with_reference) \ + V(JniMethodEndWithReferenceSynchronized, art_portable_jni_method_end_with_reference_synchronized) + +#endif // ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ diff --git a/compiler/sea_ir/code_gen.cc b/compiler/sea_ir/code_gen.cc index f359849d4d..a513907b38 100644 --- a/compiler/sea_ir/code_gen.cc +++ b/compiler/sea_ir/code_gen.cc @@ -66,7 +66,8 @@ void CodeGenPrepassVisitor::Visit(SeaGraph* graph) { std::vector parameter_types(parameters->size(), llvm::Type::getInt32Ty(*llvm_data_->context_)); // Build llvm function name. - std::string function_name = art::StringPrintf("class=%d_method=%d", graph->class_def_idx_, graph->method_idx_); + std::string function_name = art::StringPrintf( + "class=%d_method=%d", graph->class_def_idx_, graph->method_idx_); // Build llvm function type and parameters. llvm::FunctionType *function_type = llvm::FunctionType::get( @@ -259,15 +260,18 @@ void CodeGenPostpassVisitor::Visit(PhiInstructionNode* phi) { void CodeGenVisitor::Visit(SignatureNode* signature) { std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl; - DCHECK_EQ(signature->GetDefinitions().size(), 1u) << "Signature nodes must correspond to a single parameter register."; + DCHECK_EQ(signature->GetDefinitions().size(), 1u) << + "Signature nodes must correspond to a single parameter register."; } void CodeGenPrepassVisitor::Visit(SignatureNode* signature) { std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl; - DCHECK_EQ(signature->GetDefinitions().size(), 1u) << "Signature nodes must correspond to a single parameter register."; + DCHECK_EQ(signature->GetDefinitions().size(), 1u) << + "Signature nodes must correspond to a single parameter register."; } void CodeGenPostpassVisitor::Visit(SignatureNode* signature) { std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl; - DCHECK_EQ(signature->GetDefinitions().size(), 1u) << 
"Signature nodes must correspond to a single parameter register."; + DCHECK_EQ(signature->GetDefinitions().size(), 1u) << + "Signature nodes must correspond to a single parameter register."; } } // namespace sea_ir diff --git a/compiler/sea_ir/frontend.cc b/compiler/sea_ir/frontend.cc index 8fc1cf8315..5843388c42 100644 --- a/compiler/sea_ir/frontend.cc +++ b/compiler/sea_ir/frontend.cc @@ -40,7 +40,7 @@ static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler, // NOTE: Instead of keeping the convention from the Dalvik frontend.cc // and silencing the cpplint.py warning, I just corrected the formatting. VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "..."; - sea_ir::SeaGraph* sg = sea_ir::SeaGraph::GetCurrentGraph(); + sea_ir::SeaGraph* sg = sea_ir::SeaGraph::GetCurrentGraph(dex_file); sg->CompileMethod(code_item, class_def_idx, method_idx, dex_file); sg->DumpSea("/tmp/temp.dot"); CHECK(0 && "No SEA compiled function exists yet."); @@ -57,8 +57,8 @@ CompiledMethod* SeaIrCompileOneMethod(CompilerDriver& compiler, jobject class_loader, const DexFile& dex_file, llvm::LlvmCompilationUnit* llvm_compilation_unit) { - return CompileMethodWithSeaIr(compiler, backend, code_item, access_flags, invoke_type, class_def_idx, - method_idx, class_loader, dex_file + return CompileMethodWithSeaIr(compiler, backend, code_item, access_flags, invoke_type, + class_def_idx, method_idx, class_loader, dex_file #if defined(ART_USE_PORTABLE_COMPILER) , llvm_compilation_unit #endif @@ -71,7 +71,8 @@ extern "C" art::CompiledMethod* uint32_t access_flags, art::InvokeType invoke_type, uint32_t class_def_idx, uint32_t method_idx, jobject class_loader, const art::DexFile& dex_file) { - // TODO: check method fingerprint here to determine appropriate backend type. Until then, use build default + // TODO: Check method fingerprint here to determine appropriate backend type. 
+ // Until then, use build default art::CompilerBackend backend = compiler.GetCompilerBackend(); return art::SeaIrCompileOneMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx, method_idx, class_loader, dex_file, diff --git a/compiler/sea_ir/instruction_nodes.h b/compiler/sea_ir/instruction_nodes.h index 5c9cfe19dc..6f9bdddf77 100644 --- a/compiler/sea_ir/instruction_nodes.h +++ b/compiler/sea_ir/instruction_nodes.h @@ -50,13 +50,14 @@ class InstructionNode: public SeaNode { // Returns the set of register numbers that are used by the instruction. virtual std::vector GetUses(); // Appends to @result the .dot string representation of the instruction. - virtual void ToDot(std::string& result) const; + virtual void ToDot(std::string& result, const art::DexFile& dex_file) const; // Mark the current instruction as a downward exposed definition. void MarkAsDEDef(); // Rename the use of @reg_no to refer to the instruction @definition, // essentially creating SSA form. void RenameToSSA(int reg_no, InstructionNode* definition) { definition_edges_.insert(std::pair(reg_no, definition)); + definition->AddSSAUse(this); } // Returns the ordered set of Instructions that define the input operands of this instruction. // Precondition: SeaGraph.ConvertToSSA(). @@ -69,6 +70,10 @@ class InstructionNode: public SeaNode { return ssa_uses; } + virtual void AddSSAUse(InstructionNode* use) { + used_in_.push_back(use); + } + void Accept(IRVisitor* v) { v->Visit(this); v->Traverse(this); @@ -85,11 +90,14 @@ class InstructionNode: public SeaNode { protected: explicit InstructionNode(const art::Instruction* in): - SeaNode(), instruction_(in), de_def_(false), region_(NULL) { } + SeaNode(), instruction_(in), used_in_(), de_def_(false), region_(NULL) { } + void ToDotSSAEdges(std::string& result) const; protected: const art::Instruction* const instruction_; std::map definition_edges_; + // Stores pointers to instructions that use the result of the current instruction. 
+ std::vector used_in_; bool de_def_; Region* region_; }; @@ -126,7 +134,7 @@ class UnnamedConstInstructionNode: public ConstInstructionNode { return value_; } - void ToDot(std::string& result) const { + void ToDot(std::string& result, const art::DexFile& dex_file) const { std::ostringstream sstream; sstream << GetConstValue(); const std::string value_as_string(sstream.str()); @@ -136,17 +144,7 @@ class UnnamedConstInstructionNode: public ConstInstructionNode { result += "style=bold"; } result += "];\n"; - // SSA definitions: - for (std::map::const_iterator def_it = definition_edges_.begin(); - def_it != definition_edges_.end(); def_it++) { - if (NULL != def_it->second) { - result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\""; - std::stringstream ss; - ss << def_it->first; - result.append(ss.str()); - result += "\"] ; // ssa edge\n"; - } - } + ToDotSSAEdges(result); } private: diff --git a/compiler/sea_ir/sea.cc b/compiler/sea_ir/sea.cc index 3488afd5be..99b21f8771 100644 --- a/compiler/sea_ir/sea.cc +++ b/compiler/sea_ir/sea.cc @@ -27,7 +27,6 @@ namespace sea_ir { -SeaGraph SeaGraph::graph_; int SeaNode::current_max_node_id_ = 0; void IRVisitor::Traverse(Region* region) { @@ -51,16 +50,16 @@ void IRVisitor::Traverse(SeaGraph* graph) { } } -SeaGraph* SeaGraph::GetCurrentGraph() { - return &sea_ir::SeaGraph::graph_; +SeaGraph* SeaGraph::GetCurrentGraph(const art::DexFile& dex_file) { + return new SeaGraph(dex_file); } void SeaGraph::DumpSea(std::string filename) const { LOG(INFO) << "Starting to write SEA string to file."; std::string result; - result += "digraph seaOfNodes {\n"; + result += "digraph seaOfNodes {\ncompound=true\n"; for (std::vector::const_iterator cit = regions_.begin(); cit != regions_.end(); cit++) { - (*cit)->ToDot(result); + (*cit)->ToDot(result, dex_file_); } result += "}\n"; art::File* file = art::OS::OpenFile(filename.c_str(), true, true); @@ -238,7 +237,8 @@ void SeaGraph::BuildMethodSeaGraph(const 
art::DexFile::CodeItem* code_item, sea_ir::InstructionNode* node = NULL; while (i < size_in_code_units) { const art::Instruction* inst = art::Instruction::At(&code[i]); - std::vector sea_instructions_for_dalvik = sea_ir::InstructionNode::Create(inst); + std::vector sea_instructions_for_dalvik = + sea_ir::InstructionNode::Create(inst); for (std::vector::const_iterator cit = sea_instructions_for_dalvik.begin(); sea_instructions_for_dalvik.end() != cit; ++cit) { last_node = node; @@ -250,7 +250,6 @@ void SeaGraph::BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item, DCHECK(it != target_regions.end()); AddEdge(r, it->second); // Add edge to branch target. } - std::map::iterator it = target_regions.find(&code[i]); if (target_regions.end() != it) { // Get the already created region because this is a branch target. @@ -332,7 +331,8 @@ void SeaGraph::ConvertToSSA() { int global = *globals_it; // Copy the set, because we will modify the worklist as we go. std::set worklist((*(blocks.find(global))).second); - for (std::set::const_iterator b_it = worklist.begin(); b_it != worklist.end(); b_it++) { + for (std::set::const_iterator b_it = worklist.begin(); + b_it != worklist.end(); b_it++) { std::set* df = (*b_it)->GetDominanceFrontier(); for (std::set::const_iterator df_it = df->begin(); df_it != df->end(); df_it++) { if ((*df_it)->InsertPhiFor(global)) { @@ -490,53 +490,44 @@ SeaNode* Region::GetLastChild() const { return NULL; } -void Region::ToDot(std::string& result) const { - result += "\n// Region: \n" + StringId() + " [label=\"region " + StringId() + "(rpo="; +void Region::ToDot(std::string& result, const art::DexFile& dex_file) const { + result += "\n// Region: \nsubgraph " + StringId() + " { label=\"region " + StringId() + "(rpo="; result += art::StringPrintf("%d", rpo_number_); if (NULL != GetIDominator()) { result += " dom=" + GetIDominator()->StringId(); } - result += ")\"];\n"; + result += ")\";\n"; + + for (std::vector::const_iterator cit = 
phi_instructions_.begin(); + cit != phi_instructions_.end(); cit++) { + result += (*cit)->StringId() +";\n"; + } + + for (std::vector::const_iterator cit = instructions_.begin(); + cit != instructions_.end(); cit++) { + result += (*cit)->StringId() +";\n"; + } + + result += "} // End Region.\n"; // Save phi-nodes. for (std::vector::const_iterator cit = phi_instructions_.begin(); cit != phi_instructions_.end(); cit++) { - (*cit)->ToDot(result); - result += StringId() + " -> " + (*cit)->StringId() + "; // phi-function \n"; + (*cit)->ToDot(result, dex_file); } // Save instruction nodes. for (std::vector::const_iterator cit = instructions_.begin(); cit != instructions_.end(); cit++) { - (*cit)->ToDot(result); - result += StringId() + " -> " + (*cit)->StringId() + "; // region -> instruction \n"; + (*cit)->ToDot(result, dex_file); } for (std::vector::const_iterator cit = successors_.begin(); cit != successors_.end(); cit++) { DCHECK(NULL != *cit) << "Null successor found for SeaNode" << GetLastChild()->StringId() << "."; - result += GetLastChild()->StringId() + " -> " + (*cit)->StringId() + ";\n\n"; - } - // Save reaching definitions. - for (std::map* >::const_iterator cit = - reaching_defs_.begin(); - cit != reaching_defs_.end(); cit++) { - for (std::set::const_iterator - reaching_set_it = (*cit).second->begin(); - reaching_set_it != (*cit).second->end(); - reaching_set_it++) { - result += (*reaching_set_it)->StringId() + - " -> " + StringId() + - " [style=dotted]; // Reaching def.\n"; - } - } - // Save dominance frontier. 
- for (std::set::const_iterator cit = df_.begin(); cit != df_.end(); cit++) { - result += StringId() + - " -> " + (*cit)->StringId() + - " [color=gray]; // Dominance frontier.\n"; + result += GetLastChild()->StringId() + " -> " + (*cit)->GetLastChild()->StringId() + + "[lhead=" + (*cit)->StringId() + ", " + "ltail=" + StringId() + "];\n\n"; } - result += "// End Region.\n"; } void Region::ComputeDownExposedDefs() { @@ -570,7 +561,8 @@ bool Region::UpdateReachingDefs() { pred_it != predecessors_.end(); pred_it++) { // The reaching_defs variable will contain reaching defs __for current predecessor only__ std::map* > reaching_defs; - std::map* >* pred_reaching = (*pred_it)->GetReachingDefs(); + std::map* >* pred_reaching = + (*pred_it)->GetReachingDefs(); const std::map* de_defs = (*pred_it)->GetDownExposedDefs(); // The definitions from the reaching set of the predecessor @@ -588,7 +580,8 @@ bool Region::UpdateReachingDefs() { // Now we combine the reaching map coming from the current predecessor (reaching_defs) // with the accumulated set from all predecessors so far (from new_reaching). - std::map*>::iterator reaching_it = reaching_defs.begin(); + std::map*>::iterator reaching_it = + reaching_defs.begin(); for (; reaching_it != reaching_defs.end(); reaching_it++) { std::map*>::iterator crt_entry = new_reaching.find(reaching_it->first); @@ -608,7 +601,8 @@ bool Region::UpdateReachingDefs() { // TODO: Find formal proof. 
int old_size = 0; if (-1 == reaching_defs_size_) { - std::map*>::iterator reaching_it = reaching_defs_.begin(); + std::map*>::iterator reaching_it = + reaching_defs_.begin(); for (; reaching_it != reaching_defs_.end(); reaching_it++) { old_size += (*reaching_it).second->size(); } @@ -698,22 +692,36 @@ std::vector InstructionNode::Create(const art::Instruction* in return sea_instructions; } -void InstructionNode::ToDot(std::string& result) const { - result += "// Instruction ("+StringId()+"): \n" + StringId() + - " [label=\"" + instruction_->DumpString(NULL) + "\""; - if (de_def_) { - result += "style=bold"; - } - result += "];\n"; +void InstructionNode::ToDotSSAEdges(std::string& result) const { // SSA definitions: - for (std::map::const_iterator def_it = definition_edges_.begin(); + for (std::map::const_iterator def_it = definition_edges_.begin(); def_it != definition_edges_.end(); def_it++) { if (NULL != def_it->second) { - result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\""; - result += art::StringPrintf("%d", def_it->first); - result += "\"] ; // ssa edge\n"; + result += def_it->second->StringId() + " -> " + StringId() + "[color=gray,label=\""; + result += art::StringPrintf("vR = %d", def_it->first); + result += "\"] ; // ssa edge\n"; } } + + // SSA used-by: + if (DotConversion::SaveUseEdges()) { + for (std::vector::const_iterator cit = used_in_.begin(); + cit != used_in_.end(); cit++) { + result += (*cit)->StringId() + " -> " + StringId() + "[color=gray,label=\""; + result += "\"] ; // SSA used-by edge\n"; + } + } +} + +void InstructionNode::ToDot(std::string& result, const art::DexFile& dex_file) const { + result += "// Instruction ("+StringId()+"): \n" + StringId() + + " [label=\"" + instruction_->DumpString(&dex_file) + "\""; + if (de_def_) { + result += "style=bold"; + } + result += "];\n"; + + ToDotSSAEdges(result); } void InstructionNode::MarkAsDEDef() { @@ -756,22 +764,12 @@ std::vector InstructionNode::GetUses() { return 
uses; } -void PhiInstructionNode::ToDot(std::string& result) const { +void PhiInstructionNode::ToDot(std::string& result, const art::DexFile& dex_file) const { result += "// PhiInstruction: \n" + StringId() + " [label=\"" + "PHI("; result += art::StringPrintf("%d", register_no_); result += ")\""; result += "];\n"; - - for (std::vector*>::const_iterator pred_it = definition_edges_.begin(); - pred_it != definition_edges_.end(); pred_it++) { - std::vector* defs_from_pred = *pred_it; - for (std::vector::const_iterator def_it = defs_from_pred->begin(); - def_it != defs_from_pred->end(); def_it++) { - result += (*def_it)->StringId() + " -> " + StringId() +"[color=red,label=\"vR = "; - result += art::StringPrintf("%d", GetRegisterNumber()); - result += "\"] ; // phi-ssa edge\n"; - } - } + ToDotSSAEdges(result); } } // namespace sea_ir diff --git a/compiler/sea_ir/sea.h b/compiler/sea_ir/sea.h index 25ab1fed10..5cb84240ae 100644 --- a/compiler/sea_ir/sea.h +++ b/compiler/sea_ir/sea.h @@ -35,6 +35,17 @@ enum RegionNumbering { VISITING = -2 }; +// Stores options for turning a SEA IR graph to a .dot file. +class DotConversion { + public: + static bool SaveUseEdges() { + return save_use_edges_; + } + + private: + static const bool save_use_edges_ = false; // TODO: Enable per-sea graph configuration. 
+}; + class Region; class InstructionNode; @@ -49,10 +60,11 @@ class SignatureNode: public InstructionNode { explicit SignatureNode(unsigned int parameter_register):InstructionNode(NULL), parameter_register_(parameter_register) { } - void ToDot(std::string& result) const { + void ToDot(std::string& result, const art::DexFile& dex_file) const { result += StringId() +" [label=\"signature:"; result += art::StringPrintf("r%d", GetResultRegister()); result += "\"] // signature node\n"; + ToDotSSAEdges(result); } int GetResultRegister() const { @@ -77,7 +89,7 @@ class PhiInstructionNode: public InstructionNode { explicit PhiInstructionNode(int register_no): InstructionNode(NULL), register_no_(register_no), definition_edges_() {} // Appends to @result the .dot string representation of the instruction. - void ToDot(std::string& result) const; + void ToDot(std::string& result, const art::DexFile& dex_file) const; // Returns the register on which this phi-function is used. int GetRegisterNumber() const { return register_no_; @@ -98,6 +110,7 @@ class PhiInstructionNode: public InstructionNode { definition_edges_[predecessor_id] = new std::vector(); } definition_edges_[predecessor_id]->push_back(definition); + definition->AddSSAUse(this); } // Returns the instruction that defines the phi register from predecessor @@ -125,7 +138,9 @@ class Region : public SeaNode { public: explicit Region(): SeaNode(), successors_(), predecessors_(), reaching_defs_size_(0), - rpo_number_(NOT_VISITED), idom_(NULL), idominated_set_(), df_(), phi_set_() {} + rpo_number_(NOT_VISITED), idom_(NULL), idominated_set_(), df_(), phi_set_() { + string_id_ = "cluster_" + string_id_; + } // Adds @instruction as an instruction node child in the current region. void AddChild(sea_ir::InstructionNode* instruction); // Returns the last instruction node child of the current region. 
@@ -138,7 +153,7 @@ class Region : public SeaNode { // Appends to @result a dot language formatted string representing the node and // (by convention) outgoing edges, so that the composition of theToDot() of all nodes // builds a complete dot graph (without prolog and epilog though). - virtual void ToDot(std::string& result) const; + virtual void ToDot(std::string& result, const art::DexFile& dex_file) const; // Computes Downward Exposed Definitions for the current node. void ComputeDownExposedDefs(); const std::map* GetDownExposedDefs() const; @@ -242,7 +257,7 @@ class Region : public SeaNode { // and acts as starting point for visitors (ex: during code generation). class SeaGraph: IVisitable { public: - static SeaGraph* GetCurrentGraph(); + static SeaGraph* GetCurrentGraph(const art::DexFile&); void CompileMethod(const art::DexFile::CodeItem* code_item, uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file); @@ -264,7 +279,8 @@ class SeaGraph: IVisitable { uint32_t method_idx_; private: - SeaGraph(): class_def_idx_(0), method_idx_(0), regions_(), parameters_() { + explicit SeaGraph(const art::DexFile& df): + class_def_idx_(0), method_idx_(0), regions_(), parameters_(), dex_file_(df) { } // Registers @childReg as a region belonging to the SeaGraph instance. void AddRegion(Region* childReg); @@ -319,6 +335,7 @@ class SeaGraph: IVisitable { static SeaGraph graph_; std::vector regions_; std::vector parameters_; + const art::DexFile& dex_file_; }; } // namespace sea_ir #endif // ART_COMPILER_SEA_IR_SEA_H_ diff --git a/compiler/sea_ir/sea_node.h b/compiler/sea_ir/sea_node.h index 5d28f8aa72..c13e5d6aba 100644 --- a/compiler/sea_ir/sea_node.h +++ b/compiler/sea_ir/sea_node.h @@ -30,7 +30,7 @@ class IVisitable { }; // This abstract class provides the essential services that -// we want each SEA IR element should have. +// we want each SEA IR element to have. // At the moment, these are: // - an id and corresponding string representation. 
// - a .dot graph language representation for .dot output. @@ -42,6 +42,7 @@ class SeaNode: public IVisitable { explicit SeaNode():id_(GetNewId()), string_id_() { string_id_ = art::StringPrintf("%d", id_); } + // Adds CFG predecessors and successors to each block. void AddSuccessor(Region* successor); void AddPredecessor(Region* predecesor); @@ -58,7 +59,7 @@ class SeaNode: public IVisitable { // Appends to @result a dot language formatted string representing the node and // (by convention) outgoing edges, so that the composition of theToDot() of all nodes // builds a complete dot graph, but without prolog ("digraph {") and epilog ("}"). - virtual void ToDot(std::string& result) const = 0; + virtual void ToDot(std::string& result, const art::DexFile& dex_file) const = 0; virtual ~SeaNode() { } diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc index 69568d7a19..def43e2bd2 100644 --- a/compiler/stubs/portable/stubs.cc +++ b/compiler/stubs/portable/stubs.cc @@ -16,11 +16,11 @@ #include "stubs/stubs.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "jni_internal.h" -#include "oat/utils/arm/assembler_arm.h" -#include "oat/utils/mips/assembler_mips.h" -#include "oat/utils/x86/assembler_x86.h" -#include "oat/runtime/oat_support_entrypoints.h" +#include "utils/arm/assembler_arm.h" +#include "utils/mips/assembler_mips.h" +#include "utils/x86/assembler_x86.h" #include "stack_indirect_reference_table.h" #include "sirt_ref.h" @@ -34,7 +34,8 @@ const std::vector* CreatePortableResolutionTrampoline() { RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR); __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, R12, TR, + PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 __ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr __ 
IncreaseFrameSize(12); // 3 words of space for alignment @@ -69,7 +70,7 @@ const std::vector* CreatePortableResolutionTrampoline() { __ StoreToOffset(kStoreWord, A0, SP, 0); __ LoadFromOffset(kLoadWord, T9, S1, - ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); + PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); __ Move(A3, S1); // Pass Thread::Current() in A3 __ Move(A2, SP); // Pass SP for Method** callee_addr __ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*) @@ -112,7 +113,7 @@ const std::vector* CreatePortableResolutionTrampoline() { __ pushl(ECX); // pass receiver __ pushl(EAX); // pass called // Call to resolve method. - __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)), + __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)), X86ManagedRegister::FromCpuRegister(ECX)); __ leave(); diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc index 8fc2a81d24..912f1c0746 100644 --- a/compiler/stubs/quick/stubs.cc +++ b/compiler/stubs/quick/stubs.cc @@ -16,11 +16,11 @@ #include "stubs/stubs.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "jni_internal.h" -#include "oat/runtime/oat_support_entrypoints.h" -#include "oat/utils/arm/assembler_arm.h" -#include "oat/utils/mips/assembler_mips.h" -#include "oat/utils/x86/assembler_x86.h" +#include "utils/arm/assembler_arm.h" +#include "utils/mips/assembler_mips.h" +#include "utils/x86/assembler_x86.h" #include "sirt_ref.h" #include "stack_indirect_reference_table.h" @@ -46,7 +46,7 @@ const std::vector* CreateQuickResolutionTrampoline() { // TODO: enable when GetCalleeSaveMethod is available at stub generation time // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask()); __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, R12, TR, 
QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 __ IncreaseFrameSize(8); // 2 words of space for alignment __ mov(R2, ShifterOperand(SP)); // Pass SP @@ -71,7 +71,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); - __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ bkpt(0); size_t cs = assembler->CodeSize(); @@ -85,7 +85,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); - __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)); + __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)); __ bkpt(0); size_t cs = assembler->CodeSize(); @@ -123,7 +123,7 @@ const std::vector* CreateQuickResolutionTrampoline() { __ StoreToOffset(kStoreWord, A2, SP, 8); __ StoreToOffset(kStoreWord, A1, SP, 4); - __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); __ Move(A3, S1); // Pass Thread::Current() in A3 __ Move(A2, SP); // Pass SP for Method** callee_addr __ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*) @@ -161,7 +161,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kMips))); - __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ Jr(T9); __ 
Break(); @@ -176,7 +176,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kMips))); - __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ Jr(T9); __ Break(); @@ -208,7 +208,7 @@ const std::vector* CreateQuickResolutionTrampoline() { __ pushl(EAX); // pass Method* // Call to resolve method. - __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)), + __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)), X86ManagedRegister::FromCpuRegister(ECX)); __ movl(EDI, EAX); // save code pointer in EDI @@ -236,7 +236,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); - __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)))); + __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)))); size_t cs = assembler->CodeSize(); UniquePtr > entry_stub(new std::vector(cs)); @@ -249,7 +249,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); - __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)))); + __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)))); size_t cs = assembler->CodeSize(); UniquePtr > entry_stub(new std::vector(cs)); diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc new file mode 100644 index 0000000000..fa202c3017 --- /dev/null +++ b/compiler/utils/arm/assembler_arm.cc @@ -0,0 +1,1895 @@ +/* + * Copyright (C) 2011 The Android Open 
Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assembler_arm.h" + +#include "base/logging.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "offsets.h" +#include "thread.h" +#include "utils.h" + +namespace art { +namespace arm { + +// Instruction encoding bits. +enum { + H = 1 << 5, // halfword (or byte) + L = 1 << 20, // load (or store) + S = 1 << 20, // set condition code (or leave unchanged) + W = 1 << 21, // writeback base register (or leave unchanged) + A = 1 << 21, // accumulate in multiply instruction (or not) + B = 1 << 22, // unsigned byte (or word) + N = 1 << 22, // long (or short) + U = 1 << 23, // positive (or negative) offset/index + P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing) + I = 1 << 25, // immediate shifter operand (or not) + + B0 = 1, + B1 = 1 << 1, + B2 = 1 << 2, + B3 = 1 << 3, + B4 = 1 << 4, + B5 = 1 << 5, + B6 = 1 << 6, + B7 = 1 << 7, + B8 = 1 << 8, + B9 = 1 << 9, + B10 = 1 << 10, + B11 = 1 << 11, + B12 = 1 << 12, + B16 = 1 << 16, + B17 = 1 << 17, + B18 = 1 << 18, + B19 = 1 << 19, + B20 = 1 << 20, + B21 = 1 << 21, + B22 = 1 << 22, + B23 = 1 << 23, + B24 = 1 << 24, + B25 = 1 << 25, + B26 = 1 << 26, + B27 = 1 << 27, + + // Instruction bit masks. 
+ RdMask = 15 << 12, // in str instruction + CondMask = 15 << 28, + CoprocessorMask = 15 << 8, + OpCodeMask = 15 << 21, // in data-processing instructions + Imm24Mask = (1 << 24) - 1, + Off12Mask = (1 << 12) - 1, + + // ldrex/strex register field encodings. + kLdExRnShift = 16, + kLdExRtShift = 12, + kStrExRnShift = 16, + kStrExRdShift = 12, + kStrExRtShift = 0, +}; + + +static const char* kRegisterNames[] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", + "fp", "ip", "sp", "lr", "pc" +}; +std::ostream& operator<<(std::ostream& os, const Register& rhs) { + if (rhs >= R0 && rhs <= PC) { + os << kRegisterNames[rhs]; + } else { + os << "Register[" << static_cast(rhs) << "]"; + } + return os; +} + + +std::ostream& operator<<(std::ostream& os, const SRegister& rhs) { + if (rhs >= S0 && rhs < kNumberOfSRegisters) { + os << "s" << static_cast(rhs); + } else { + os << "SRegister[" << static_cast(rhs) << "]"; + } + return os; +} + + +std::ostream& operator<<(std::ostream& os, const DRegister& rhs) { + if (rhs >= D0 && rhs < kNumberOfDRegisters) { + os << "d" << static_cast(rhs); + } else { + os << "DRegister[" << static_cast(rhs) << "]"; + } + return os; +} + + +static const char* kConditionNames[] = { + "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT", + "LE", "AL", +}; +std::ostream& operator<<(std::ostream& os, const Condition& rhs) { + if (rhs >= EQ && rhs <= AL) { + os << kConditionNames[rhs]; + } else { + os << "Condition[" << static_cast(rhs) << "]"; + } + return os; +} + +void ArmAssembler::Emit(int32_t value) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + buffer_.Emit(value); +} + + +void ArmAssembler::EmitType01(Condition cond, + int type, + Opcode opcode, + int set_cc, + Register rn, + Register rd, + ShifterOperand so) { + CHECK_NE(rd, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = static_cast(cond) << kConditionShift | + type << kTypeShift | + static_cast(opcode) << kOpcodeShift | 
+ set_cc << kSShift | + static_cast(rn) << kRnShift | + static_cast(rd) << kRdShift | + so.encoding(); + Emit(encoding); +} + + +void ArmAssembler::EmitType5(Condition cond, int offset, bool link) { + CHECK_NE(cond, kNoCondition); + int32_t encoding = static_cast(cond) << kConditionShift | + 5 << kTypeShift | + (link ? 1 : 0) << kLinkShift; + Emit(ArmAssembler::EncodeBranchOffset(offset, encoding)); +} + + +void ArmAssembler::EmitMemOp(Condition cond, + bool load, + bool byte, + Register rd, + Address ad) { + CHECK_NE(rd, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B26 | + (load ? L : 0) | + (byte ? B : 0) | + (static_cast(rd) << kRdShift) | + ad.encoding(); + Emit(encoding); +} + + +void ArmAssembler::EmitMemOpAddressMode3(Condition cond, + int32_t mode, + Register rd, + Address ad) { + CHECK_NE(rd, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B22 | + mode | + (static_cast(rd) << kRdShift) | + ad.encoding3(); + Emit(encoding); +} + + +void ArmAssembler::EmitMultiMemOp(Condition cond, + BlockAddressMode am, + bool load, + Register base, + RegList regs) { + CHECK_NE(base, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | + am | + (load ? 
L : 0) | + (static_cast(base) << kRnShift) | + regs; + Emit(encoding); +} + + +void ArmAssembler::EmitShiftImmediate(Condition cond, + Shift opcode, + Register rd, + Register rm, + ShifterOperand so) { + CHECK_NE(cond, kNoCondition); + CHECK_EQ(so.type(), 1U); + int32_t encoding = static_cast(cond) << kConditionShift | + static_cast(MOV) << kOpcodeShift | + static_cast(rd) << kRdShift | + so.encoding() << kShiftImmShift | + static_cast(opcode) << kShiftShift | + static_cast(rm); + Emit(encoding); +} + + +void ArmAssembler::EmitShiftRegister(Condition cond, + Shift opcode, + Register rd, + Register rm, + ShifterOperand so) { + CHECK_NE(cond, kNoCondition); + CHECK_EQ(so.type(), 0U); + int32_t encoding = static_cast(cond) << kConditionShift | + static_cast(MOV) << kOpcodeShift | + static_cast(rd) << kRdShift | + so.encoding() << kShiftRegisterShift | + static_cast(opcode) << kShiftShift | + B4 | + static_cast(rm); + Emit(encoding); +} + + +void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) { + if (label->IsBound()) { + EmitType5(cond, label->Position() - buffer_.Size(), link); + } else { + int position = buffer_.Size(); + // Use the offset field of the branch instruction for linking the sites. 
+ EmitType5(cond, label->position_, link); + label->LinkTo(position); + } +} + +void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), AND, 0, rn, rd, so); +} + + +void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), EOR, 0, rn, rd, so); +} + + +void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), SUB, 0, rn, rd, so); +} + +void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), RSB, 0, rn, rd, so); +} + +void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), RSB, 1, rn, rd, so); +} + + +void ArmAssembler::add(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), ADD, 0, rn, rd, so); +} + + +void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), ADD, 1, rn, rd, so); +} + + +void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), SUB, 1, rn, rd, so); +} + + +void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), ADC, 0, rn, rd, so); +} + + +void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), SBC, 0, rn, rd, so); +} + + +void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), RSC, 0, rn, rd, so); +} + + +void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) { + CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker. 
+ EmitType01(cond, so.type(), TST, 1, rn, R0, so); +} + + +void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) { + CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker. + EmitType01(cond, so.type(), TEQ, 1, rn, R0, so); +} + + +void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), CMP, 1, rn, R0, so); +} + + +void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), CMN, 1, rn, R0, so); +} + + +void ArmAssembler::orr(Register rd, Register rn, + ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), ORR, 0, rn, rd, so); +} + + +void ArmAssembler::orrs(Register rd, Register rn, + ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), ORR, 1, rn, rd, so); +} + + +void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), MOV, 0, R0, rd, so); +} + + +void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), MOV, 1, R0, rd, so); +} + + +void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so, + Condition cond) { + EmitType01(cond, so.type(), BIC, 0, rn, rd, so); +} + + +void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), MVN, 0, R0, rd, so); +} + + +void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) { + EmitType01(cond, so.type(), MVN, 1, R0, rd, so); +} + + +void ArmAssembler::clz(Register rd, Register rm, Condition cond) { + CHECK_NE(rd, kNoRegister); + CHECK_NE(rm, kNoRegister); + CHECK_NE(cond, kNoCondition); + CHECK_NE(rd, PC); + CHECK_NE(rm, PC); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B24 | B22 | B21 | (0xf << 16) | + (static_cast(rd) << kRdShift) | + (0xf << 8) | B4 | static_cast(rm); + Emit(encoding); +} + + +void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) { + CHECK_NE(cond, kNoCondition); 
+ int32_t encoding = static_cast(cond) << kConditionShift | + B25 | B24 | ((imm16 >> 12) << 16) | + static_cast(rd) << kRdShift | (imm16 & 0xfff); + Emit(encoding); +} + + +void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) { + CHECK_NE(cond, kNoCondition); + int32_t encoding = static_cast(cond) << kConditionShift | + B25 | B24 | B22 | ((imm16 >> 12) << 16) | + static_cast(rd) << kRdShift | (imm16 & 0xfff); + Emit(encoding); +} + + +void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode, + Register rd, Register rn, + Register rm, Register rs) { + CHECK_NE(rd, kNoRegister); + CHECK_NE(rn, kNoRegister); + CHECK_NE(rm, kNoRegister); + CHECK_NE(rs, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = opcode | + (static_cast(cond) << kConditionShift) | + (static_cast(rn) << kRnShift) | + (static_cast(rd) << kRdShift) | + (static_cast(rs) << kRsShift) | + B7 | B4 | + (static_cast(rm) << kRmShift); + Emit(encoding); +} + + +void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) { + // Assembler registers rd, rn, rm are encoded as rn, rm, rs. + EmitMulOp(cond, 0, R0, rd, rn, rm); +} + + +void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra, + Condition cond) { + // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. + EmitMulOp(cond, B21, ra, rd, rn, rm); +} + + +void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra, + Condition cond) { + // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. + EmitMulOp(cond, B22 | B21, ra, rd, rn, rm); +} + + +void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn, + Register rm, Condition cond) { + // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. 
+ EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm); +} + + +void ArmAssembler::ldr(Register rd, Address ad, Condition cond) { + EmitMemOp(cond, true, false, rd, ad); +} + + +void ArmAssembler::str(Register rd, Address ad, Condition cond) { + EmitMemOp(cond, false, false, rd, ad); +} + + +void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) { + EmitMemOp(cond, true, true, rd, ad); +} + + +void ArmAssembler::strb(Register rd, Address ad, Condition cond) { + EmitMemOp(cond, false, true, rd, ad); +} + + +void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) { + EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad); +} + + +void ArmAssembler::strh(Register rd, Address ad, Condition cond) { + EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad); +} + + +void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) { + EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad); +} + + +void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) { + EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad); +} + + +void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) { + CHECK_EQ(rd % 2, 0); + EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad); +} + + +void ArmAssembler::strd(Register rd, Address ad, Condition cond) { + CHECK_EQ(rd % 2, 0); + EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad); +} + + +void ArmAssembler::ldm(BlockAddressMode am, + Register base, + RegList regs, + Condition cond) { + EmitMultiMemOp(cond, am, true, base, regs); +} + + +void ArmAssembler::stm(BlockAddressMode am, + Register base, + RegList regs, + Condition cond) { + EmitMultiMemOp(cond, am, false, base, regs); +} + + +void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) { + CHECK_NE(rn, kNoRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B24 | + B23 | + L | + (static_cast(rn) << kLdExRnShift) | + (static_cast(rt) << kLdExRtShift) | + B11 | B10 | B9 | B8 | B7 | 
B4 | B3 | B2 | B1 | B0; + Emit(encoding); +} + + +void ArmAssembler::strex(Register rd, + Register rt, + Register rn, + Condition cond) { + CHECK_NE(rn, kNoRegister); + CHECK_NE(rd, kNoRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B24 | + B23 | + (static_cast(rn) << kStrExRnShift) | + (static_cast(rd) << kStrExRdShift) | + B11 | B10 | B9 | B8 | B7 | B4 | + (static_cast(rt) << kStrExRtShift); + Emit(encoding); +} + + +void ArmAssembler::clrex() { + int32_t encoding = (kSpecialCondition << kConditionShift) | + B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf; + Emit(encoding); +} + + +void ArmAssembler::nop(Condition cond) { + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B25 | B24 | B21 | (0xf << 12); + Emit(encoding); +} + + +void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) { + CHECK_NE(sn, kNoSRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rt, SP); + CHECK_NE(rt, PC); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B25 | + ((static_cast(sn) >> 1)*B16) | + (static_cast(rt)*B12) | B11 | B9 | + ((static_cast(sn) & 1)*B7) | B4; + Emit(encoding); +} + + +void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) { + CHECK_NE(sn, kNoSRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rt, SP); + CHECK_NE(rt, PC); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B25 | B20 | + ((static_cast(sn) >> 1)*B16) | + (static_cast(rt)*B12) | B11 | B9 | + ((static_cast(sn) & 1)*B7) | B4; + Emit(encoding); +} + + +void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2, + Condition cond) { + CHECK_NE(sm, kNoSRegister); + CHECK_NE(sm, S31); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rt, SP); + CHECK_NE(rt, PC); + CHECK_NE(rt2, kNoRegister); + CHECK_NE(rt2, SP); + CHECK_NE(rt2, PC); + 
CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B22 | + (static_cast(rt2)*B16) | + (static_cast(rt)*B12) | B11 | B9 | + ((static_cast(sm) & 1)*B5) | B4 | + (static_cast(sm) >> 1); + Emit(encoding); +} + + +void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm, + Condition cond) { + CHECK_NE(sm, kNoSRegister); + CHECK_NE(sm, S31); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rt, SP); + CHECK_NE(rt, PC); + CHECK_NE(rt2, kNoRegister); + CHECK_NE(rt2, SP); + CHECK_NE(rt2, PC); + CHECK_NE(rt, rt2); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B22 | B20 | + (static_cast(rt2)*B16) | + (static_cast(rt)*B12) | B11 | B9 | + ((static_cast(sm) & 1)*B5) | B4 | + (static_cast(sm) >> 1); + Emit(encoding); +} + + +void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2, + Condition cond) { + CHECK_NE(dm, kNoDRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rt, SP); + CHECK_NE(rt, PC); + CHECK_NE(rt2, kNoRegister); + CHECK_NE(rt2, SP); + CHECK_NE(rt2, PC); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B22 | + (static_cast(rt2)*B16) | + (static_cast(rt)*B12) | B11 | B9 | B8 | + ((static_cast(dm) >> 4)*B5) | B4 | + (static_cast(dm) & 0xf); + Emit(encoding); +} + + +void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm, + Condition cond) { + CHECK_NE(dm, kNoDRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rt, SP); + CHECK_NE(rt, PC); + CHECK_NE(rt2, kNoRegister); + CHECK_NE(rt2, SP); + CHECK_NE(rt2, PC); + CHECK_NE(rt, rt2); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B22 | B20 | + (static_cast(rt2)*B16) | + (static_cast(rt)*B12) | B11 | B9 | B8 | + ((static_cast(dm) >> 4)*B5) | B4 | + (static_cast(dm) & 0xf); + Emit(encoding); +} + + +void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) { + 
CHECK_NE(sd, kNoSRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B24 | B20 | + ((static_cast(sd) & 1)*B22) | + ((static_cast(sd) >> 1)*B12) | + B11 | B9 | ad.vencoding(); + Emit(encoding); +} + + +void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) { + CHECK_NE(static_cast(ad.encoding_ & (0xf << kRnShift)), PC); + CHECK_NE(sd, kNoSRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B24 | + ((static_cast(sd) & 1)*B22) | + ((static_cast(sd) >> 1)*B12) | + B11 | B9 | ad.vencoding(); + Emit(encoding); +} + + +void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) { + CHECK_NE(dd, kNoDRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B24 | B20 | + ((static_cast(dd) >> 4)*B22) | + ((static_cast(dd) & 0xf)*B12) | + B11 | B9 | B8 | ad.vencoding(); + Emit(encoding); +} + + +void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) { + CHECK_NE(static_cast(ad.encoding_ & (0xf << kRnShift)), PC); + CHECK_NE(dd, kNoDRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B24 | + ((static_cast(dd) >> 4)*B22) | + ((static_cast(dd) & 0xf)*B12) | + B11 | B9 | B8 | ad.vencoding(); + Emit(encoding); +} + + +void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode, + SRegister sd, SRegister sn, SRegister sm) { + CHECK_NE(sd, kNoSRegister); + CHECK_NE(sn, kNoSRegister); + CHECK_NE(sm, kNoSRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B25 | B11 | B9 | opcode | + ((static_cast(sd) & 1)*B22) | + ((static_cast(sn) >> 1)*B16) | + ((static_cast(sd) >> 1)*B12) | + ((static_cast(sn) & 1)*B7) | + ((static_cast(sm) & 1)*B5) | + (static_cast(sm) >> 1); + Emit(encoding); +} + + +void ArmAssembler::EmitVFPddd(Condition 
cond, int32_t opcode, + DRegister dd, DRegister dn, DRegister dm) { + CHECK_NE(dd, kNoDRegister); + CHECK_NE(dn, kNoDRegister); + CHECK_NE(dm, kNoDRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B25 | B11 | B9 | B8 | opcode | + ((static_cast(dd) >> 4)*B22) | + ((static_cast(dn) & 0xf)*B16) | + ((static_cast(dd) & 0xf)*B12) | + ((static_cast(dn) >> 4)*B7) | + ((static_cast(dm) >> 4)*B5) | + (static_cast(dm) & 0xf); + Emit(encoding); +} + + +void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm); +} + + +void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) { + EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm); +} + + +bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) { + uint32_t imm32 = bit_cast(s_imm); + if (((imm32 & ((1 << 19) - 1)) == 0) && + ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) || + (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) { + uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) | + ((imm32 >> 19) & ((1 << 6) -1)); + EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf), + sd, S0, S0); + return true; + } + return false; +} + + +bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) { + uint64_t imm64 = bit_cast(d_imm); + if (((imm64 & ((1LL << 48) - 1)) == 0) && + ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) || + (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) { + uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) | + ((imm64 >> 48) & ((1 << 6) -1)); + EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf), + dd, D0, D0); + return true; + } + return false; +} + + +void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm, + Condition cond) { + EmitVFPsss(cond, B21 | B20, sd, sn, sm); +} + + +void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm, + Condition cond) { + 
EmitVFPddd(cond, B21 | B20, dd, dn, dm); +} + + +void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm, + Condition cond) { + EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm); +} + + +void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm, + Condition cond) { + EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm); +} + + +void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm, + Condition cond) { + EmitVFPsss(cond, B21, sd, sn, sm); +} + + +void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm, + Condition cond) { + EmitVFPddd(cond, B21, dd, dn, dm); +} + + +void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm, + Condition cond) { + EmitVFPsss(cond, 0, sd, sn, sm); +} + + +void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm, + Condition cond) { + EmitVFPddd(cond, 0, dd, dn, dm); +} + + +void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm, + Condition cond) { + EmitVFPsss(cond, B6, sd, sn, sm); +} + + +void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm, + Condition cond) { + EmitVFPddd(cond, B6, dd, dn, dm); +} + + +void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm, + Condition cond) { + EmitVFPsss(cond, B23, sd, sn, sm); +} + + +void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm, + Condition cond) { + EmitVFPddd(cond, B23, dd, dn, dm); +} + + +void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm); +} + + +void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) { + EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm); +} + + +void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm); +} + + +void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) { + EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm); +} + + +void ArmAssembler::vsqrts(SRegister sd, SRegister sm, 
Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm); +} + +void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) { + EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm); +} + + +void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode, + SRegister sd, DRegister dm) { + CHECK_NE(sd, kNoSRegister); + CHECK_NE(dm, kNoDRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B25 | B11 | B9 | opcode | + ((static_cast(sd) & 1)*B22) | + ((static_cast(sd) >> 1)*B12) | + ((static_cast(dm) >> 4)*B5) | + (static_cast(dm) & 0xf); + Emit(encoding); +} + + +void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode, + DRegister dd, SRegister sm) { + CHECK_NE(dd, kNoDRegister); + CHECK_NE(sm, kNoSRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B25 | B11 | B9 | opcode | + ((static_cast(dd) >> 4)*B22) | + ((static_cast(dd) & 0xf)*B12) | + ((static_cast(sm) & 1)*B5) | + (static_cast(sm) >> 1); + Emit(encoding); +} + + +void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) { + EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm); +} + + +void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) { + EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm); +} + + +void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm); +} + + +void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) { + EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm); +} + + +void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm); +} + + +void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) { + EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | 
B6, dd, sm); +} + + +void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm); +} + + +void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) { + EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm); +} + + +void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm); +} + + +void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) { + EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm); +} + + +void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm); +} + + +void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) { + EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm); +} + + +void ArmAssembler::vcmpsz(SRegister sd, Condition cond) { + EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0); +} + + +void ArmAssembler::vcmpdz(DRegister dd, Condition cond) { + EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0); +} + + +void ArmAssembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 | + (static_cast(PC)*B12) | + B11 | B9 | B4; + Emit(encoding); +} + + +void ArmAssembler::svc(uint32_t imm24) { + CHECK(IsUint(24, imm24)) << imm24; + int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24; + Emit(encoding); +} + + +void ArmAssembler::bkpt(uint16_t imm16) { + int32_t encoding = (AL << kConditionShift) | B24 | B21 | + ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf); + Emit(encoding); +} + + +void ArmAssembler::b(Label* label, Condition cond) { + EmitBranch(cond, label, false); +} + + +void ArmAssembler::bl(Label* label, Condition cond) { + EmitBranch(cond, label, true); +} + + +void 
ArmAssembler::blx(Register rm, Condition cond) { + CHECK_NE(rm, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B24 | B21 | (0xfff << 8) | B5 | B4 | + (static_cast(rm) << kRmShift); + Emit(encoding); +} + +void ArmAssembler::bx(Register rm, Condition cond) { + CHECK_NE(rm, kNoRegister); + CHECK_NE(cond, kNoCondition); + int32_t encoding = (static_cast(cond) << kConditionShift) | + B24 | B21 | (0xfff << 8) | B4 | + (static_cast(rm) << kRmShift); + Emit(encoding); +} + +void ArmAssembler::MarkExceptionHandler(Label* label) { + EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0)); + Label l; + b(&l); + EmitBranch(AL, label, false); + Bind(&l); +} + + +void ArmAssembler::Bind(Label* label) { + CHECK(!label->IsBound()); + int bound_pc = buffer_.Size(); + while (label->IsLinked()) { + int32_t position = label->Position(); + int32_t next = buffer_.Load(position); + int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next); + buffer_.Store(position, encoded); + label->position_ = ArmAssembler::DecodeBranchOffset(next); + } + label->BindTo(bound_pc); +} + + +void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) { + // TODO: Consider using movw ip, <16 bits>. + while (!IsUint(8, data)) { + tst(R0, ShifterOperand(data & 0xFF), VS); + data >>= 8; + } + tst(R0, ShifterOperand(data), MI); +} + + +int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) { + // The offset is off by 8 due to the way the ARM CPUs read PC. + offset -= 8; + CHECK_ALIGNED(offset, 4); + CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset; + + // Properly preserve only the bits supported in the instruction. + offset >>= 2; + offset &= kBranchOffsetMask; + return (inst & ~kBranchOffsetMask) | offset; +} + + +int ArmAssembler::DecodeBranchOffset(int32_t inst) { + // Sign-extend, left-shift by 2, then add 8. 
+ return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8); +} + +void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) { + AddConstant(rd, rd, value, cond); +} + + +void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value, + Condition cond) { + if (value == 0) { + if (rd != rn) { + mov(rd, ShifterOperand(rn), cond); + } + return; + } + // We prefer to select the shorter code sequence rather than selecting add for + // positive values and sub for negatives ones, which would slightly improve + // the readability of generated code for some constants. + ShifterOperand shifter_op; + if (ShifterOperand::CanHold(value, &shifter_op)) { + add(rd, rn, shifter_op, cond); + } else if (ShifterOperand::CanHold(-value, &shifter_op)) { + sub(rd, rn, shifter_op, cond); + } else { + CHECK(rn != IP); + if (ShifterOperand::CanHold(~value, &shifter_op)) { + mvn(IP, shifter_op, cond); + add(rd, rn, ShifterOperand(IP), cond); + } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) { + mvn(IP, shifter_op, cond); + sub(rd, rn, ShifterOperand(IP), cond); + } else { + movw(IP, Low16Bits(value), cond); + uint16_t value_high = High16Bits(value); + if (value_high != 0) { + movt(IP, value_high, cond); + } + add(rd, rn, ShifterOperand(IP), cond); + } + } +} + + +void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value, + Condition cond) { + ShifterOperand shifter_op; + if (ShifterOperand::CanHold(value, &shifter_op)) { + adds(rd, rn, shifter_op, cond); + } else if (ShifterOperand::CanHold(-value, &shifter_op)) { + subs(rd, rn, shifter_op, cond); + } else { + CHECK(rn != IP); + if (ShifterOperand::CanHold(~value, &shifter_op)) { + mvn(IP, shifter_op, cond); + adds(rd, rn, ShifterOperand(IP), cond); + } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) { + mvn(IP, shifter_op, cond); + subs(rd, rn, ShifterOperand(IP), cond); + } else { + movw(IP, Low16Bits(value), cond); + uint16_t value_high = High16Bits(value); + if (value_high 
!= 0) { + movt(IP, value_high, cond); + } + adds(rd, rn, ShifterOperand(IP), cond); + } + } +} + + +void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) { + ShifterOperand shifter_op; + if (ShifterOperand::CanHold(value, &shifter_op)) { + mov(rd, shifter_op, cond); + } else if (ShifterOperand::CanHold(~value, &shifter_op)) { + mvn(rd, shifter_op, cond); + } else { + movw(rd, Low16Bits(value), cond); + uint16_t value_high = High16Bits(value); + if (value_high != 0) { + movt(rd, value_high, cond); + } + } +} + + +bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) { + switch (type) { + case kLoadSignedByte: + case kLoadSignedHalfword: + case kLoadUnsignedHalfword: + case kLoadWordPair: + return IsAbsoluteUint(8, offset); // Addressing mode 3. + case kLoadUnsignedByte: + case kLoadWord: + return IsAbsoluteUint(12, offset); // Addressing mode 2. + case kLoadSWord: + case kLoadDWord: + return IsAbsoluteUint(10, offset); // VFP addressing mode. + default: + LOG(FATAL) << "UNREACHABLE"; + return false; + } +} + + +bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) { + switch (type) { + case kStoreHalfword: + case kStoreWordPair: + return IsAbsoluteUint(8, offset); // Addressing mode 3. + case kStoreByte: + case kStoreWord: + return IsAbsoluteUint(12, offset); // Addressing mode 2. + case kStoreSWord: + case kStoreDWord: + return IsAbsoluteUint(10, offset); // VFP addressing mode. + default: + LOG(FATAL) << "UNREACHABLE"; + return false; + } +} + + +// Implementation note: this method must emit at most one instruction when +// Address::CanHoldLoadOffset. 
+// Loads a value of |type| from [base + offset] into |reg|.  If |offset| does
+// not fit the addressing mode of the corresponding load instruction, the
+// effective address is first materialized into the scratch register IP
+// (so |base| must not be IP in that case) and a zero offset is used instead.
+void ArmAssembler::LoadFromOffset(LoadOperandType type,
+                                  Register reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldLoadOffset(type, offset)) {
+    CHECK(base != IP);  // IP is clobbered as the scratch register below.
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(type, offset));
+  // Dispatch to the load instruction matching the operand type.
+  switch (type) {
+    case kLoadSignedByte:
+      ldrsb(reg, Address(base, offset), cond);
+      break;
+    case kLoadUnsignedByte:
+      ldrb(reg, Address(base, offset), cond);
+      break;
+    case kLoadSignedHalfword:
+      ldrsh(reg, Address(base, offset), cond);
+      break;
+    case kLoadUnsignedHalfword:
+      ldrh(reg, Address(base, offset), cond);
+      break;
+    case kLoadWord:
+      ldr(reg, Address(base, offset), cond);
+      break;
+    case kLoadWordPair:
+      ldrd(reg, Address(base, offset), cond);
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+// Loads a single-precision VFP value from [base + offset] into |reg|,
+// spilling the address into IP first when the VFP addressing mode cannot
+// encode |offset| (|base| must not be IP in that case).
+void ArmAssembler::LoadSFromOffset(SRegister reg,
+                                   Register base,
+                                   int32_t offset,
+                                   Condition cond) {
+  if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
+    CHECK_NE(base, IP);  // IP is clobbered as the scratch register below.
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
+  vldrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+// Loads a double-precision VFP value from [base + offset] into |reg|,
+// spilling the address into IP first when the VFP addressing mode cannot
+// encode |offset| (|base| must not be IP in that case).
+void ArmAssembler::LoadDFromOffset(DRegister reg,
+                                   Register base,
+                                   int32_t offset,
+                                   Condition cond) {
+  if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
+    CHECK_NE(base, IP);  // IP is clobbered as the scratch register below.
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
+  vldrd(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset.
+// Stores |reg| (interpreted per |type|) to [base + offset].  If |offset|
+// does not fit the store's addressing mode, the effective address is first
+// built in IP; neither |reg| nor |base| may be IP in that case.
+void ArmAssembler::StoreToOffset(StoreOperandType type,
+                                 Register reg,
+                                 Register base,
+                                 int32_t offset,
+                                 Condition cond) {
+  if (!Address::CanHoldStoreOffset(type, offset)) {
+    CHECK(reg != IP);   // IP is clobbered as the scratch register below,
+    CHECK(base != IP);  // so it can hold neither the data nor the base.
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(type, offset));
+  // Dispatch to the store instruction matching the operand type.
+  switch (type) {
+    case kStoreByte:
+      strb(reg, Address(base, offset), cond);
+      break;
+    case kStoreHalfword:
+      strh(reg, Address(base, offset), cond);
+      break;
+    case kStoreWord:
+      str(reg, Address(base, offset), cond);
+      break;
+    case kStoreWordPair:
+      strd(reg, Address(base, offset), cond);
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
+// Stores a single-precision VFP value |reg| to [base + offset], spilling the
+// address into IP first when the VFP addressing mode cannot encode |offset|
+// (|base| must not be IP in that case).
+void ArmAssembler::StoreSToOffset(SRegister reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
+    CHECK_NE(base, IP);  // IP is clobbered as the scratch register below.
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
+  vstrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
// Stores double-precision register |reg| to [base + offset]; falls back to an
// IP-based effective address when the offset cannot be VFP-encoded.
void ArmAssembler::StoreDToOffset(DRegister reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
    CHECK_NE(base, IP);  // IP is about to be used as the temporary base.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
  vstrd(reg, Address(base, offset), cond);
}

// Push a single register: pre-indexed store through SP (push {rd}).
void ArmAssembler::Push(Register rd, Condition cond) {
  str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
}

// Pop a single register: post-indexed load through SP (pop {rd}).
void ArmAssembler::Pop(Register rd, Condition cond) {
  ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
}

// stmdb sp!, {regs} — the canonical multi-register push.
void ArmAssembler::PushList(RegList regs, Condition cond) {
  stm(DB_W, SP, regs, cond);
}

// ldmia sp!, {regs} — the canonical multi-register pop.
void ArmAssembler::PopList(RegList regs, Condition cond) {
  ldm(IA_W, SP, regs, cond);
}

// Register move that emits nothing when source and destination coincide.
void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
  if (rd != rm) {
    mov(rd, ShifterOperand(rm), cond);
  }
}

void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsl if no shift is wanted.
  mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
}

void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
}

void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Asr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
}

void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Use Rrx instruction.
  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
}

// Rotate right with extend: encoded as ROR with a zero immediate.
void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
  mov(rd, ShifterOperand(rm, ROR, 0), cond);
}

// Emits the managed-code method prologue: pushes LR plus the callee-save
// registers, grows the frame, stores the Method* (in R0) at SP, then spills
// the incoming argument registers above the frame.
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                              const std::vector& callee_save_regs,
                              const std::vector& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());

  // Push callee saves and link register.
  RegList push_list = 1 << LR;
  size_t pushed_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    push_list |= 1 << reg;
    pushed_values++;
  }
  PushList(push_list);

  // Increase frame to required size.
  CHECK_GT(frame_size, pushed_values * kPointerSize);  // Must be at least space to push Method*
  size_t adjust = frame_size - (pushed_values * kPointerSize);
  IncreaseFrameSize(adjust);

  // Write out Method*.
  StoreToOffset(kStoreWord, R0, SP, 0);

  // Write out entry spills.
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
    StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
  }
}

// Emits the epilogue: shrinks the frame back to the callee-save area, then
// pops the callee saves with PC in the list (the pop is also the return).
void ArmAssembler::RemoveFrame(size_t frame_size,
                               const std::vector& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  // Compute callee saves to pop and PC
  RegList pop_list = 1 << PC;
  size_t pop_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    pop_list |= 1 << reg;
    pop_values++;
  }

  // Decrease frame to start of callee saves
  CHECK_GT(frame_size, pop_values * kPointerSize);
  size_t adjust = frame_size - (pop_values * kPointerSize);
  DecreaseFrameSize(adjust);

  // Pop callee saves and PC
  PopList(pop_list);
}

// Frame grows downward, hence the negated constant.
void ArmAssembler::IncreaseFrameSize(size_t adjust) {
  AddConstant(SP, -adjust);
}

void ArmAssembler::DecreaseFrameSize(size_t adjust) {
  AddConstant(SP, adjust);
}

// Spills |msrc| of the given byte size to the stack slot |dest|, dispatching
// on the register kind (core, pair, S or D float register).
void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  ArmManagedRegister src = msrc.AsArm();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}

// Stores a reference (object pointer) register to a stack slot.
void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

// Stores a raw (non-reference) pointer register to a stack slot.
void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

// Stores |msrc| at |dest| and the word at |in_off| immediately above it,
// using |mscratch| for the second word.
void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                 FrameOffset in_off, ManagedRegister mscratch) {
  ArmManagedRegister src = msrc.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
}

// Copies a reference between two stack slots via |mscratch|.
void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

// Loads a reference field at [base + offs] into |mdest|.
void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
                           MemberOffset offs) {
  ArmManagedRegister dst = mdest.AsArm();
  // NOTE(review): the second conjunct repeats dst.IsCoreRegister(); it
  // presumably was meant to check base — confirm against upstream.
  CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
}

// Loads a reference from the stack slot |src| into |mdest|.
void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
}

// Loads a raw pointer at [base + offs] into |mdest|.
void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                              Offset offs) {
  ArmManagedRegister dst = mdest.AsArm();
  // NOTE(review): same duplicated-condition pattern as LoadRef above.
  CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
}

// Materializes |imm| in |mscratch| and stores it to the stack slot |dest|.
void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                         ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

// Materializes |imm| in |mscratch| and stores it at [TR + dest] (TR is the
// thread register).
void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                                          ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
}

// Shared implementation for the two Load() overloads below: dispatches on the
// destination register kind, mirroring ArmAssembler::Store.
static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
                     Register src_register, int32_t src_offset, size_t size) {
  ArmManagedRegister dst = m_dst.AsArm();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsCoreRegister()) {
    CHECK_EQ(4u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
  } else if (dst.IsRegisterPair()) {
    CHECK_EQ(8u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
  } else if (dst.IsSRegister()) {
    assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
  } else {
    CHECK(dst.IsDRegister()) << dst;
    assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
  }
}

void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
}

void ArmAssembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) {
  return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
}

// Loads a raw pointer from the current Thread object into |m_dst|.
void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) {
  ArmManagedRegister dst = m_dst.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
}

// Thread field -> stack slot, via |mscratch|.
void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                        ThreadOffset thr_offs,
                                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, thr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                SP, fr_offs.Int32Value());
}

// Stack slot -> Thread field, via |mscratch|.
void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
                                      FrameOffset fr_offs,
                                      ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

// Stores the address SP + fr_offs into a Thread field, via |mscratch|.
void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
                                            FrameOffset fr_offs,
                                            ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}

void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
}

void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
}

// Register-to-register move of matching kinds; a no-op when dst == src.
void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
  ArmManagedRegister dst = m_dst.AsArm();
  ArmManagedRegister src = m_src.AsArm();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      CHECK(src.IsCoreRegister()) << src;
      mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
    } else if (dst.IsDRegister()) {
      CHECK(src.IsDRegister()) << src;
      vmovd(dst.AsDRegister(), src.AsDRegister());
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      vmovs(dst.AsSRegister(), src.AsSRegister());
    } else {
      CHECK(dst.IsRegisterPair()) << dst;
      CHECK(src.IsRegisterPair()) << src;
      // Ensure that the first move doesn't clobber the input of the second
      if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
      } else {
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
      }
    }
  }
}

// Stack-to-stack copy of 4 or 8 bytes, one word at a time through |mscratch|.
void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
  }
}

// [src_base + src_offset] -> stack slot |dest| (single word).
void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
}

// Stack slot |src| -> [dest_base + dest_offset] (single word).
void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                        ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

// [src + src_offset] -> [dest + dest_offset] (single word).
void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
                        ManagedRegister src, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  CHECK_EQ(size, 4u);
  Register scratch = mscratch.AsArm().AsCoreRegister();
  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
                        ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}


// Emits a full memory barrier, choosing the strongest primitive the build
// target supports (dmb, cp15 mcr, or the kernel kuser helper).  Requires the
// scratch register to be R12, which is clobbered.
void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
  CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
#if ANDROID_SMP != 0
#if defined(__ARM_HAVE_DMB)
  int32_t encoding = 0xf57ff05f;  // dmb
  Emit(encoding);
#elif defined(__ARM_HAVE_LDREX_STREX)
  LoadImmediate(R12, 0);
  int32_t encoding = 0xee07cfba;  // mcr p15, 0, r12, c7, c10, 5
  Emit(encoding);
#else
  LoadImmediate(R12, 0xffff0fa0);  // kuser_memory_barrier
  blx(R12);
#endif
#endif
}

// Produces in |mout_reg| either NULL (when the slot holds null and nulls are
// allowed) or the address of the SIRT slot itself.
void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
                                   FrameOffset sirt_offset,
                                   ManagedRegister min_reg, bool null_allowed) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
    // the address in the SIRT holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                     SP, sirt_offset.Int32Value());
      in_reg = out_reg;
    }
    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
  }
}

// Same as above, but the result is written to the stack slot |out_off|.
void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
                                   FrameOffset sirt_offset,
                                   ManagedRegister mscratch,
                                   bool null_allowed) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
                   sirt_offset.Int32Value());
    // Null values get a SIRT entry value of 0.  Otherwise, the sirt entry is
    // the address in the SIRT holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
  }
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

// Dereferences a SIRT entry (Object**) into |mout_reg|, leaving 0 for null.
// NOTE(review): the conditional LoadImmediate(..., EQ) executes BEFORE the
// cmp that is meant to set the flags, so it acts on stale flags; the unused
// Label null_arg also looks like a leftover.  Confirm against upstream
// before relying on the null path.
void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
                                         ManagedRegister min_reg) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
  }
  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                 in_reg.AsCoreRegister(), 0, NE);
}

void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

// Indirect call through [base + offset].
void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister base = mbase.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 base.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call
}

// Doubly-indirect call: target is *(*(SP + base) + offset).
void ArmAssembler::Call(FrameOffset base, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 scratch.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call
}

void ArmAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);
}

// Thread::Current() lives in the dedicated TR register on ARM.
void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
}

void ArmAssembler::GetCurrentThread(FrameOffset offset,
                                    ManagedRegister /*scratch*/) {
  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
}

// Loads Thread::exception_ and branches to a queued slow path when non-null.
void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  ArmManagedRegister scratch = mscratch.AsArm();
  ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::ExceptionOffset().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);
}

// Slow-path stub: unwinds any extra frame, then tail-calls the runtime's
// exception-delivery entrypoint (which does not return).
void ArmExceptionSlowPath::Emit(Assembler* sasm) {
  ArmAssembler* sp_asm = down_cast(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument
  // Don't care about preserving R0 as this call won't return
  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
  // Set up call to Thread::Current()->pDeliverException
  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
  __ blx(R12);
  // Call never returns
  __ bkpt(0);
#undef __
}

}  // namespace arm
}  // namespace art
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
new file mode 100644
index 0000000000..757a8a2e99
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm.h
@@ -0,0 +1,659 @@
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_

// NOTE(review): header name lost in extraction here — likely <vector>;
// confirm against upstream.
#include

#include "base/logging.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"

namespace art {
namespace arm {

// Encodes Addressing Mode 1 - Data-processing operands defined in Section 5.1.
+class ShifterOperand { + public: + // Data-processing operands - Uninitialized + ShifterOperand() { + type_ = -1; + } + + // Data-processing operands - Immediate + explicit ShifterOperand(uint32_t immediate) { + CHECK(immediate < (1 << kImmed8Bits)); + type_ = 1; + encoding_ = immediate; + } + + // Data-processing operands - Rotated immediate + ShifterOperand(uint32_t rotate, uint32_t immed8) { + CHECK((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits))); + type_ = 1; + encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift); + } + + // Data-processing operands - Register + explicit ShifterOperand(Register rm) { + type_ = 0; + encoding_ = static_cast(rm); + } + + // Data-processing operands - Logical shift/rotate by immediate + ShifterOperand(Register rm, Shift shift, uint32_t shift_imm) { + CHECK(shift_imm < (1 << kShiftImmBits)); + type_ = 0; + encoding_ = shift_imm << kShiftImmShift | + static_cast(shift) << kShiftShift | + static_cast(rm); + } + + // Data-processing operands - Logical shift/rotate by register + ShifterOperand(Register rm, Shift shift, Register rs) { + type_ = 0; + encoding_ = static_cast(rs) << kShiftRegisterShift | + static_cast(shift) << kShiftShift | (1 << 4) | + static_cast(rm); + } + + static bool CanHold(uint32_t immediate, ShifterOperand* shifter_op) { + // Avoid the more expensive test for frequent small immediate values. + if (immediate < (1 << kImmed8Bits)) { + shifter_op->type_ = 1; + shifter_op->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift); + return true; + } + // Note that immediate must be unsigned for the test to work correctly. 
+ for (int rot = 0; rot < 16; rot++) { + uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot)); + if (imm8 < (1 << kImmed8Bits)) { + shifter_op->type_ = 1; + shifter_op->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift); + return true; + } + } + return false; + } + + private: + bool is_valid() const { return (type_ == 0) || (type_ == 1); } + + uint32_t type() const { + CHECK(is_valid()); + return type_; + } + + uint32_t encoding() const { + CHECK(is_valid()); + return encoding_; + } + + uint32_t type_; // Encodes the type field (bits 27-25) in the instruction. + uint32_t encoding_; + + friend class ArmAssembler; +#ifdef SOURCE_ASSEMBLER_SUPPORT + friend class BinaryAssembler; +#endif +}; + + +enum LoadOperandType { + kLoadSignedByte, + kLoadUnsignedByte, + kLoadSignedHalfword, + kLoadUnsignedHalfword, + kLoadWord, + kLoadWordPair, + kLoadSWord, + kLoadDWord +}; + + +enum StoreOperandType { + kStoreByte, + kStoreHalfword, + kStoreWord, + kStoreWordPair, + kStoreSWord, + kStoreDWord +}; + + +// Load/store multiple addressing mode. 
// LDM/STM addressing modes; the three summands are the P, U and W bits of
// the instruction encoding, positioned at bit 21.
enum BlockAddressMode {
  // bit encoding P U W
  DA           = (0|0|0) << 21,  // decrement after
  IA           = (0|4|0) << 21,  // increment after
  DB           = (8|0|0) << 21,  // decrement before
  IB           = (8|4|0) << 21,  // increment before
  DA_W         = (0|0|1) << 21,  // decrement after with writeback to base
  IA_W         = (0|4|1) << 21,  // increment after with writeback to base
  DB_W         = (8|0|1) << 21,  // decrement before with writeback to base
  IB_W         = (8|4|1) << 21   // increment before with writeback to base
};


// An addressing-mode-2/3 memory operand: base register plus a signed 12-bit
// immediate offset, with optional pre/post indexing and writeback.
class Address {
 public:
  // Memory operand addressing mode
  enum Mode {
    // bit encoding P U W
    Offset       = (8|4|0) << 21,  // offset (w/o writeback to base)
    PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
    PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
    NegOffset    = (8|0|0) << 21,  // negative offset (w/o writeback to base)
    NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
    NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
  };

  // Builds the encoding; a negative offset is stored as its magnitude with
  // the U (add) bit cleared.
  explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
    CHECK(IsAbsoluteUint(12, offset));
    if (offset < 0) {
      encoding_ = (am ^ (1 << kUShift)) | -offset;  // Flip U to adjust sign.
    } else {
      encoding_ = am | offset;
    }
    encoding_ |= static_cast(rn) << kRnShift;
  }

  // Whether |offset| fits the load/store encoding for the given operand type.
  static bool CanHoldLoadOffset(LoadOperandType type, int offset);
  static bool CanHoldStoreOffset(StoreOperandType type, int offset);

 private:
  uint32_t encoding() const { return encoding_; }

  // Encoding for addressing mode 3 (offset split into two nibbles).
  uint32_t encoding3() const {
    const uint32_t offset_mask = (1 << 12) - 1;
    uint32_t offset = encoding_ & offset_mask;
    CHECK_LT(offset, 256u);
    return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
  }

  // Encoding for vfp load/store addressing.
+ uint32_t vencoding() const { + const uint32_t offset_mask = (1 << 12) - 1; + uint32_t offset = encoding_ & offset_mask; + CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020. + CHECK_ALIGNED(offset, 2); // Multiple of 4. + int mode = encoding_ & ((8|4|1) << 21); + CHECK((mode == Offset) || (mode == NegOffset)); + uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2); + if (mode == Offset) { + vencoding |= 1 << 23; + } + return vencoding; + } + + uint32_t encoding_; + + friend class ArmAssembler; +}; + + +class ArmAssembler : public Assembler { + public: + ArmAssembler() {} + virtual ~ArmAssembler() {} + + // Data-processing instructions. + void and_(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void eor(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void sub(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + void subs(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void rsb(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + void rsbs(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void add(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void adds(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void adc(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void sbc(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void rsc(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void tst(Register rn, ShifterOperand so, Condition cond = AL); + + void teq(Register rn, ShifterOperand so, Condition cond = AL); + + void cmp(Register rn, ShifterOperand so, Condition cond = AL); + + void cmn(Register rn, ShifterOperand so, Condition cond = AL); + + void orr(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + void orrs(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void 
mov(Register rd, ShifterOperand so, Condition cond = AL); + void movs(Register rd, ShifterOperand so, Condition cond = AL); + + void bic(Register rd, Register rn, ShifterOperand so, Condition cond = AL); + + void mvn(Register rd, ShifterOperand so, Condition cond = AL); + void mvns(Register rd, ShifterOperand so, Condition cond = AL); + + // Miscellaneous data-processing instructions. + void clz(Register rd, Register rm, Condition cond = AL); + void movw(Register rd, uint16_t imm16, Condition cond = AL); + void movt(Register rd, uint16_t imm16, Condition cond = AL); + + // Multiply instructions. + void mul(Register rd, Register rn, Register rm, Condition cond = AL); + void mla(Register rd, Register rn, Register rm, Register ra, + Condition cond = AL); + void mls(Register rd, Register rn, Register rm, Register ra, + Condition cond = AL); + void umull(Register rd_lo, Register rd_hi, Register rn, Register rm, + Condition cond = AL); + + // Load/store instructions. + void ldr(Register rd, Address ad, Condition cond = AL); + void str(Register rd, Address ad, Condition cond = AL); + + void ldrb(Register rd, Address ad, Condition cond = AL); + void strb(Register rd, Address ad, Condition cond = AL); + + void ldrh(Register rd, Address ad, Condition cond = AL); + void strh(Register rd, Address ad, Condition cond = AL); + + void ldrsb(Register rd, Address ad, Condition cond = AL); + void ldrsh(Register rd, Address ad, Condition cond = AL); + + void ldrd(Register rd, Address ad, Condition cond = AL); + void strd(Register rd, Address ad, Condition cond = AL); + + void ldm(BlockAddressMode am, Register base, + RegList regs, Condition cond = AL); + void stm(BlockAddressMode am, Register base, + RegList regs, Condition cond = AL); + + void ldrex(Register rd, Register rn, Condition cond = AL); + void strex(Register rd, Register rt, Register rn, Condition cond = AL); + + // Miscellaneous instructions. 
+ void clrex(); + void nop(Condition cond = AL); + + // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0. + void bkpt(uint16_t imm16); + void svc(uint32_t imm24); + + // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles). + void vmovsr(SRegister sn, Register rt, Condition cond = AL); + void vmovrs(Register rt, SRegister sn, Condition cond = AL); + void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL); + void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL); + void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL); + void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL); + void vmovs(SRegister sd, SRegister sm, Condition cond = AL); + void vmovd(DRegister dd, DRegister dm, Condition cond = AL); + + // Returns false if the immediate cannot be encoded. + bool vmovs(SRegister sd, float s_imm, Condition cond = AL); + bool vmovd(DRegister dd, double d_imm, Condition cond = AL); + + void vldrs(SRegister sd, Address ad, Condition cond = AL); + void vstrs(SRegister sd, Address ad, Condition cond = AL); + void vldrd(DRegister dd, Address ad, Condition cond = AL); + void vstrd(DRegister dd, Address ad, Condition cond = AL); + + void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); + void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); + void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); + void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); + void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); + void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); + void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); + void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); + void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); + void vmlsd(DRegister dd, DRegister dn, DRegister 
dm, Condition cond = AL); + void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); + void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); + + void vabss(SRegister sd, SRegister sm, Condition cond = AL); + void vabsd(DRegister dd, DRegister dm, Condition cond = AL); + void vnegs(SRegister sd, SRegister sm, Condition cond = AL); + void vnegd(DRegister dd, DRegister dm, Condition cond = AL); + void vsqrts(SRegister sd, SRegister sm, Condition cond = AL); + void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL); + + void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL); + void vcvtds(DRegister dd, SRegister sm, Condition cond = AL); + void vcvtis(SRegister sd, SRegister sm, Condition cond = AL); + void vcvtid(SRegister sd, DRegister dm, Condition cond = AL); + void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL); + void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL); + void vcvtus(SRegister sd, SRegister sm, Condition cond = AL); + void vcvtud(SRegister sd, DRegister dm, Condition cond = AL); + void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL); + void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL); + + void vcmps(SRegister sd, SRegister sm, Condition cond = AL); + void vcmpd(DRegister dd, DRegister dm, Condition cond = AL); + void vcmpsz(SRegister sd, Condition cond = AL); + void vcmpdz(DRegister dd, Condition cond = AL); + void vmstat(Condition cond = AL); // VMRS APSR_nzcv, FPSCR + + // Branch instructions. + void b(Label* label, Condition cond = AL); + void bl(Label* label, Condition cond = AL); + void blx(Register rm, Condition cond = AL); + void bx(Register rm, Condition cond = AL); + + // Macros. + // Add signed constant value to rd. May clobber IP. 
+ void AddConstant(Register rd, int32_t value, Condition cond = AL); + void AddConstant(Register rd, Register rn, int32_t value, + Condition cond = AL); + void AddConstantSetFlags(Register rd, Register rn, int32_t value, + Condition cond = AL); + void AddConstantWithCarry(Register rd, Register rn, int32_t value, + Condition cond = AL); + + // Load and Store. May clobber IP. + void LoadImmediate(Register rd, int32_t value, Condition cond = AL); + void LoadSImmediate(SRegister sd, float value, Condition cond = AL); + void LoadDImmediate(DRegister dd, double value, + Register scratch, Condition cond = AL); + void MarkExceptionHandler(Label* label); + void LoadFromOffset(LoadOperandType type, + Register reg, + Register base, + int32_t offset, + Condition cond = AL); + void StoreToOffset(StoreOperandType type, + Register reg, + Register base, + int32_t offset, + Condition cond = AL); + void LoadSFromOffset(SRegister reg, + Register base, + int32_t offset, + Condition cond = AL); + void StoreSToOffset(SRegister reg, + Register base, + int32_t offset, + Condition cond = AL); + void LoadDFromOffset(DRegister reg, + Register base, + int32_t offset, + Condition cond = AL); + void StoreDToOffset(DRegister reg, + Register base, + int32_t offset, + Condition cond = AL); + + void Push(Register rd, Condition cond = AL); + void Pop(Register rd, Condition cond = AL); + + void PushList(RegList regs, Condition cond = AL); + void PopList(RegList regs, Condition cond = AL); + + void Mov(Register rd, Register rm, Condition cond = AL); + + // Convenience shift instructions. Use mov instruction with shifter operand + // for variants setting the status flags or using a register shift count. 
+ void Lsl(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); + void Lsr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); + void Asr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); + void Ror(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); + void Rrx(Register rd, Register rm, Condition cond = AL); + + // Encode a signed constant in tst instructions, only affecting the flags. + void EncodeUint32InTstInstructions(uint32_t data); + // ... and decode from a pc pointing to the start of encoding instructions. + static uint32_t DecodeUint32FromTstInstructions(uword pc); + static bool IsInstructionForExceptionHandling(uword pc); + + // Emit data (e.g. encoded instruction or immediate) to the + // instruction stream. + void Emit(int32_t value); + void Bind(Label* label); + + // + // Overridden common assembler high-level functionality + // + + // Emit code that will create an activation on the stack + virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& callee_save_regs, + const std::vector& entry_spills); + + // Emit code that will remove an activation from the stack + virtual void RemoveFrame(size_t frame_size, + const std::vector& callee_save_regs); + + virtual void IncreaseFrameSize(size_t adjust); + virtual void DecreaseFrameSize(size_t adjust); + + // Store routines + virtual void Store(FrameOffset offs, ManagedRegister src, size_t size); + virtual void StoreRef(FrameOffset dest, ManagedRegister src); + virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src); + + virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister scratch); + + virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister scratch); + + virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch); + + virtual void StoreStackPointerToThread(ThreadOffset thr_offs); + + virtual 
void StoreSpanning(FrameOffset dest, ManagedRegister src, + FrameOffset in_off, ManagedRegister scratch); + + // Load routines + virtual void Load(ManagedRegister dest, FrameOffset src, size_t size); + + virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size); + + virtual void LoadRef(ManagedRegister dest, FrameOffset src); + + virtual void LoadRef(ManagedRegister dest, ManagedRegister base, + MemberOffset offs); + + virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, + Offset offs); + + virtual void LoadRawPtrFromThread(ManagedRegister dest, + ThreadOffset offs); + + // Copying routines + virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size); + + virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, + ManagedRegister scratch); + + virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, + ManagedRegister scratch); + + virtual void CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister scratch); + + virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size); + + virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister scratch, size_t size); + + virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void MemoryBarrier(ManagedRegister scratch); + + // Sign extension + virtual void SignExtend(ManagedRegister mreg, size_t size); + + // Zero extension + virtual void ZeroExtend(ManagedRegister mreg, size_t size); + + // 
Exploit fast access in managed code to Thread::Current() + virtual void GetCurrentThread(ManagedRegister tr); + virtual void GetCurrentThread(FrameOffset dest_offset, + ManagedRegister scratch); + + // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the SIRT entry to see if the value is + // NULL. + virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, + ManagedRegister in_reg, bool null_allowed); + + // Set up out_off to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. + virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, + ManagedRegister scratch, bool null_allowed); + + // src holds a SIRT entry (Object**) load this into dst + virtual void LoadReferenceFromSirt(ManagedRegister dst, + ManagedRegister src); + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + virtual void VerifyObject(ManagedRegister src, bool could_be_null); + virtual void VerifyObject(FrameOffset src, bool could_be_null); + + // Call to address held at [base+offset] + virtual void Call(ManagedRegister base, Offset offset, + ManagedRegister scratch); + virtual void Call(FrameOffset base, Offset offset, + ManagedRegister scratch); + virtual void Call(ThreadOffset offset, ManagedRegister scratch); + + // Generate code to check if Thread::Current()->exception_ is non-null + // and branch to a ExceptionSlowPath if it is. 
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust); + + private: + void EmitType01(Condition cond, + int type, + Opcode opcode, + int set_cc, + Register rn, + Register rd, + ShifterOperand so); + + void EmitType5(Condition cond, int offset, bool link); + + void EmitMemOp(Condition cond, + bool load, + bool byte, + Register rd, + Address ad); + + void EmitMemOpAddressMode3(Condition cond, + int32_t mode, + Register rd, + Address ad); + + void EmitMultiMemOp(Condition cond, + BlockAddressMode am, + bool load, + Register base, + RegList regs); + + void EmitShiftImmediate(Condition cond, + Shift opcode, + Register rd, + Register rm, + ShifterOperand so); + + void EmitShiftRegister(Condition cond, + Shift opcode, + Register rd, + Register rm, + ShifterOperand so); + + void EmitMulOp(Condition cond, + int32_t opcode, + Register rd, + Register rn, + Register rm, + Register rs); + + void EmitVFPsss(Condition cond, + int32_t opcode, + SRegister sd, + SRegister sn, + SRegister sm); + + void EmitVFPddd(Condition cond, + int32_t opcode, + DRegister dd, + DRegister dn, + DRegister dm); + + void EmitVFPsd(Condition cond, + int32_t opcode, + SRegister sd, + DRegister dm); + + void EmitVFPds(Condition cond, + int32_t opcode, + DRegister dd, + SRegister sm); + + void EmitBranch(Condition cond, Label* label, bool link); + static int32_t EncodeBranchOffset(int offset, int32_t inst); + static int DecodeBranchOffset(int32_t inst); + int32_t EncodeTstOffset(int offset, int32_t inst); + int DecodeTstOffset(int32_t inst); + + // Returns whether or not the given register is used for passing parameters. 
+ static int RegisterCompare(const Register* reg1, const Register* reg2) { + return *reg1 - *reg2; + } +}; + +// Slowpath entered when Thread::Current()->_exception is non-null +class ArmExceptionSlowPath : public SlowPath { + public: + explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust) + : scratch_(scratch), stack_adjust_(stack_adjust) { + } + virtual void Emit(Assembler *sp_asm); + private: + const ArmManagedRegister scratch_; + const size_t stack_adjust_; +}; + +} // namespace arm +} // namespace art + +#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_ diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h new file mode 100644 index 0000000000..cc795b1482 --- /dev/null +++ b/compiler/utils/arm/constants_arm.h @@ -0,0 +1,449 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_ +#define ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_ + +#include + +#include + +#include "arch/arm/registers_arm.h" +#include "base/casts.h" +#include "base/logging.h" +#include "globals.h" + +namespace art { +namespace arm { + +// Defines constants and accessor classes to assemble, disassemble and +// simulate ARM instructions. 
+// +// Section references in the code refer to the "ARM Architecture Reference +// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf) +// +// Constants for specific fields are defined in their respective named enums. +// General constants are in an anonymous enum in class Instr. + + +// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at +// a time, so that compile time optimizations can be applied. +// Warning: VFPv3-D32 is untested. +#define VFPv3_D16 +#if defined(VFPv3_D16) == defined(VFPv3_D32) +#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time." +#endif + + +enum ScaleFactor { + TIMES_1 = 0, + TIMES_2 = 1, + TIMES_4 = 2, + TIMES_8 = 3 +}; + +// Values for double-precision floating point registers. +enum DRegister { + D0 = 0, + D1 = 1, + D2 = 2, + D3 = 3, + D4 = 4, + D5 = 5, + D6 = 6, + D7 = 7, + D8 = 8, + D9 = 9, + D10 = 10, + D11 = 11, + D12 = 12, + D13 = 13, + D14 = 14, + D15 = 15, +#ifdef VFPv3_D16 + kNumberOfDRegisters = 16, +#else + D16 = 16, + D17 = 17, + D18 = 18, + D19 = 19, + D20 = 20, + D21 = 21, + D22 = 22, + D23 = 23, + D24 = 24, + D25 = 25, + D26 = 26, + D27 = 27, + D28 = 28, + D29 = 29, + D30 = 30, + D31 = 31, + kNumberOfDRegisters = 32, +#endif + kNumberOfOverlappingDRegisters = 16, + kNoDRegister = -1, +}; +std::ostream& operator<<(std::ostream& os, const DRegister& rhs); + + +// Values for the condition field as defined in section A3.2. 
+enum Condition { + kNoCondition = -1, + EQ = 0, // equal + NE = 1, // not equal + CS = 2, // carry set/unsigned higher or same + CC = 3, // carry clear/unsigned lower + MI = 4, // minus/negative + PL = 5, // plus/positive or zero + VS = 6, // overflow + VC = 7, // no overflow + HI = 8, // unsigned higher + LS = 9, // unsigned lower or same + GE = 10, // signed greater than or equal + LT = 11, // signed less than + GT = 12, // signed greater than + LE = 13, // signed less than or equal + AL = 14, // always (unconditional) + kSpecialCondition = 15, // special condition (refer to section A3.2.1) + kMaxCondition = 16, +}; +std::ostream& operator<<(std::ostream& os, const Condition& rhs); + + +// Opcodes for Data-processing instructions (instructions with a type 0 and 1) +// as defined in section A3.4 +enum Opcode { + kNoOperand = -1, + AND = 0, // Logical AND + EOR = 1, // Logical Exclusive OR + SUB = 2, // Subtract + RSB = 3, // Reverse Subtract + ADD = 4, // Add + ADC = 5, // Add with Carry + SBC = 6, // Subtract with Carry + RSC = 7, // Reverse Subtract with Carry + TST = 8, // Test + TEQ = 9, // Test Equivalence + CMP = 10, // Compare + CMN = 11, // Compare Negated + ORR = 12, // Logical (inclusive) OR + MOV = 13, // Move + BIC = 14, // Bit Clear + MVN = 15, // Move Not + kMaxOperand = 16 +}; + + +// Shifter types for Data-processing operands as defined in section A5.1.2. +enum Shift { + kNoShift = -1, + LSL = 0, // Logical shift left + LSR = 1, // Logical shift right + ASR = 2, // Arithmetic shift right + ROR = 3, // Rotate right + kMaxShift = 4 +}; + + +// Constants used for the decoding or encoding of the individual fields of +// instructions. Based on the "Figure 3-1 ARM instruction set summary". 
+enum InstructionFields { + kConditionShift = 28, + kConditionBits = 4, + kTypeShift = 25, + kTypeBits = 3, + kLinkShift = 24, + kLinkBits = 1, + kUShift = 23, + kUBits = 1, + kOpcodeShift = 21, + kOpcodeBits = 4, + kSShift = 20, + kSBits = 1, + kRnShift = 16, + kRnBits = 4, + kRdShift = 12, + kRdBits = 4, + kRsShift = 8, + kRsBits = 4, + kRmShift = 0, + kRmBits = 4, + + // Immediate instruction fields encoding. + kRotateShift = 8, + kRotateBits = 4, + kImmed8Shift = 0, + kImmed8Bits = 8, + + // Shift instruction register fields encodings. + kShiftImmShift = 7, + kShiftRegisterShift = 8, + kShiftImmBits = 5, + kShiftShift = 5, + kShiftBits = 2, + + // Load/store instruction offset field encoding. + kOffset12Shift = 0, + kOffset12Bits = 12, + kOffset12Mask = 0x00000fff, + + // Mul instruction register fields encodings. + kMulRdShift = 16, + kMulRdBits = 4, + kMulRnShift = 12, + kMulRnBits = 4, + + kBranchOffsetMask = 0x00ffffff +}; + + +// Size (in bytes) of registers. +const int kRegisterSize = 4; + +// List of registers used in load/store multiple. +typedef uint16_t RegList; + +// The class Instr enables access to individual fields defined in the ARM +// architecture instruction set encoding as described in figure A3-1. +// +// Example: Test whether the instruction at ptr does set the condition code +// bits. +// +// bool InstructionSetsConditionCodes(byte* ptr) { +// Instr* instr = Instr::At(ptr); +// int type = instr->TypeField(); +// return ((type == 0) || (type == 1)) && instr->HasS(); +// } +// +class Instr { + public: + enum { + kInstrSize = 4, + kInstrSizeLog2 = 2, + kPCReadOffset = 8 + }; + + bool IsBreakPoint() { + return IsBkpt(); + } + + // Get the raw instruction bits. + inline int32_t InstructionBits() const { + return *reinterpret_cast(this); + } + + // Set the raw instruction bits to value. + inline void SetInstructionBits(int32_t value) { + *reinterpret_cast(this) = value; + } + + // Read one particular bit out of the instruction bits. 
+ inline int Bit(int nr) const { + return (InstructionBits() >> nr) & 1; + } + + // Read a bit field out of the instruction bits. + inline int Bits(int shift, int count) const { + return (InstructionBits() >> shift) & ((1 << count) - 1); + } + + + // Accessors for the different named fields used in the ARM encoding. + // The naming of these accessor corresponds to figure A3-1. + // Generally applicable fields + inline Condition ConditionField() const { + return static_cast(Bits(kConditionShift, kConditionBits)); + } + inline int TypeField() const { return Bits(kTypeShift, kTypeBits); } + + inline Register RnField() const { return static_cast( + Bits(kRnShift, kRnBits)); } + inline Register RdField() const { return static_cast( + Bits(kRdShift, kRdBits)); } + + // Fields used in Data processing instructions + inline Opcode OpcodeField() const { + return static_cast(Bits(kOpcodeShift, kOpcodeBits)); + } + inline int SField() const { return Bits(kSShift, kSBits); } + // with register + inline Register RmField() const { + return static_cast(Bits(kRmShift, kRmBits)); + } + inline Shift ShiftField() const { return static_cast( + Bits(kShiftShift, kShiftBits)); } + inline int RegShiftField() const { return Bit(4); } + inline Register RsField() const { + return static_cast(Bits(kRsShift, kRsBits)); + } + inline int ShiftAmountField() const { return Bits(kShiftImmShift, + kShiftImmBits); } + // with immediate + inline int RotateField() const { return Bits(kRotateShift, kRotateBits); } + inline int Immed8Field() const { return Bits(kImmed8Shift, kImmed8Bits); } + + // Fields used in Load/Store instructions + inline int PUField() const { return Bits(23, 2); } + inline int BField() const { return Bit(22); } + inline int WField() const { return Bit(21); } + inline int LField() const { return Bit(20); } + // with register uses same fields as Data processing instructions above + // with immediate + inline int Offset12Field() const { return Bits(kOffset12Shift, + kOffset12Bits); } 
+ // multiple + inline int RlistField() const { return Bits(0, 16); } + // extra loads and stores + inline int SignField() const { return Bit(6); } + inline int HField() const { return Bit(5); } + inline int ImmedHField() const { return Bits(8, 4); } + inline int ImmedLField() const { return Bits(0, 4); } + + // Fields used in Branch instructions + inline int LinkField() const { return Bits(kLinkShift, kLinkBits); } + inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); } + + // Fields used in Supervisor Call instructions + inline uint32_t SvcField() const { return Bits(0, 24); } + + // Field used in Breakpoint instruction + inline uint16_t BkptField() const { + return ((Bits(8, 12) << 4) | Bits(0, 4)); + } + + // Field used in 16-bit immediate move instructions + inline uint16_t MovwField() const { + return ((Bits(16, 4) << 12) | Bits(0, 12)); + } + + // Field used in VFP float immediate move instruction + inline float ImmFloatField() const { + uint32_t imm32 = (Bit(19) << 31) | (((1 << 5) - Bit(18)) << 25) | + (Bits(16, 2) << 23) | (Bits(0, 4) << 19); + return bit_cast(imm32); + } + + // Field used in VFP double immediate move instruction + inline double ImmDoubleField() const { + uint64_t imm64 = (Bit(19)*(1LL << 63)) | (((1LL << 8) - Bit(18)) << 54) | + (Bits(16, 2)*(1LL << 52)) | (Bits(0, 4)*(1LL << 48)); + return bit_cast(imm64); + } + + // Test for data processing instructions of type 0 or 1. + // See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition", + // section A5.1 "ARM instruction set encoding". + inline bool IsDataProcessing() const { + CHECK_NE(ConditionField(), kSpecialCondition); + CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1. + return ((Bits(20, 5) & 0x19) != 0x10) && + ((Bit(25) == 1) || // Data processing immediate. + (Bit(4) == 0) || // Data processing register. + (Bit(7) == 0)); // Data processing register-shifted register. 
+ } + + // Tests for special encodings of type 0 instructions (extra loads and stores, + // as well as multiplications, synchronization primitives, and miscellaneous). + // Can only be called for a type 0 or 1 instruction. + inline bool IsMiscellaneous() const { + CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1. + return ((Bit(25) == 0) && ((Bits(20, 5) & 0x19) == 0x10) && (Bit(7) == 0)); + } + inline bool IsMultiplyOrSyncPrimitive() const { + CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1. + return ((Bit(25) == 0) && (Bits(4, 4) == 9)); + } + + // Test for Supervisor Call instruction. + inline bool IsSvc() const { + return ((InstructionBits() & 0xff000000) == 0xef000000); + } + + // Test for Breakpoint instruction. + inline bool IsBkpt() const { + return ((InstructionBits() & 0xfff000f0) == 0xe1200070); + } + + // VFP register fields. + inline SRegister SnField() const { + return static_cast((Bits(kRnShift, kRnBits) << 1) + Bit(7)); + } + inline SRegister SdField() const { + return static_cast((Bits(kRdShift, kRdBits) << 1) + Bit(22)); + } + inline SRegister SmField() const { + return static_cast((Bits(kRmShift, kRmBits) << 1) + Bit(5)); + } + inline DRegister DnField() const { + return static_cast(Bits(kRnShift, kRnBits) + (Bit(7) << 4)); + } + inline DRegister DdField() const { + return static_cast(Bits(kRdShift, kRdBits) + (Bit(22) << 4)); + } + inline DRegister DmField() const { + return static_cast(Bits(kRmShift, kRmBits) + (Bit(5) << 4)); + } + + // Test for VFP data processing or single transfer instructions of type 7. + inline bool IsVFPDataProcessingOrSingleTransfer() const { + CHECK_NE(ConditionField(), kSpecialCondition); + CHECK_EQ(TypeField(), 7); + return ((Bit(24) == 0) && (Bits(9, 3) == 5)); + // Bit(4) == 0: Data Processing + // Bit(4) == 1: 8, 16, or 32-bit Transfer between ARM Core and VFP + } + + // Test for VFP 64-bit transfer instructions of type 6. 
+ inline bool IsVFPDoubleTransfer() const { + CHECK_NE(ConditionField(), kSpecialCondition); + CHECK_EQ(TypeField(), 6); + return ((Bits(21, 4) == 2) && (Bits(9, 3) == 5) && + ((Bits(4, 4) & 0xd) == 1)); + } + + // Test for VFP load and store instructions of type 6. + inline bool IsVFPLoadStore() const { + CHECK_NE(ConditionField(), kSpecialCondition); + CHECK_EQ(TypeField(), 6); + return ((Bits(20, 5) & 0x12) == 0x10) && (Bits(9, 3) == 5); + } + + // Special accessors that test for existence of a value. + inline bool HasS() const { return SField() == 1; } + inline bool HasB() const { return BField() == 1; } + inline bool HasW() const { return WField() == 1; } + inline bool HasL() const { return LField() == 1; } + inline bool HasSign() const { return SignField() == 1; } + inline bool HasH() const { return HField() == 1; } + inline bool HasLink() const { return LinkField() == 1; } + + // Instructions are read out of a code stream. The only way to get a + // reference to an instruction is to convert a pointer. There is no way + // to allocate or create instances of class Instr. + // Use the At(pc) function to create references to Instr. + static Instr* At(uword pc) { return reinterpret_cast(pc); } + Instr* Next() { return this + kInstrSize; } + + private: + // We need to prevent the creation of instances of class Instr. + DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); +}; + +} // namespace arm +} // namespace art + +#endif // ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_ diff --git a/compiler/utils/arm/managed_register_arm.cc b/compiler/utils/arm/managed_register_arm.cc new file mode 100644 index 0000000000..57c23059de --- /dev/null +++ b/compiler/utils/arm/managed_register_arm.cc @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "managed_register_arm.h" + +#include "globals.h" + +namespace art { +namespace arm { + +// We need all registers for caching of locals. +// Register R9 .. R15 are reserved. +static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1; +static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters; +static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters; +static const int kNumberOfAvailableOverlappingDRegisters = + kNumberOfOverlappingDRegisters; +static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs; + + +// Returns true if this managed-register overlaps the other managed-register. 
+bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const { + if (IsNoRegister() || other.IsNoRegister()) return false; + if (Equals(other)) return true; + if (IsRegisterPair()) { + Register low = AsRegisterPairLow(); + Register high = AsRegisterPairHigh(); + return ArmManagedRegister::FromCoreRegister(low).Overlaps(other) || + ArmManagedRegister::FromCoreRegister(high).Overlaps(other); + } + if (IsOverlappingDRegister()) { + if (other.IsDRegister()) return Equals(other); + if (other.IsSRegister()) { + SRegister low = AsOverlappingDRegisterLow(); + SRegister high = AsOverlappingDRegisterHigh(); + SRegister other_sreg = other.AsSRegister(); + return (low == other_sreg) || (high == other_sreg); + } + return false; + } + if (other.IsRegisterPair() || other.IsOverlappingDRegister()) { + return other.Overlaps(*this); + } + return false; +} + + +int ArmManagedRegister::AllocIdLow() const { + CHECK(IsOverlappingDRegister() || IsRegisterPair()); + const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfSRegIds); + int low; + if (r < kNumberOfOverlappingDRegIds) { + CHECK(IsOverlappingDRegister()); + low = (r * 2) + kNumberOfCoreRegIds; // Return a SRegister. + } else { + CHECK(IsRegisterPair()); + low = (r - kNumberOfDRegIds) * 2; // Return a Register. 
+ if (low > 6) { + // we didn't got a pair higher than R6_R7, must be the dalvik special case + low = 1; + } + } + return low; +} + + +int ArmManagedRegister::AllocIdHigh() const { + return AllocIdLow() + 1; +} + + +void ArmManagedRegister::Print(std::ostream& os) const { + if (!IsValidManagedRegister()) { + os << "No Register"; + } else if (IsCoreRegister()) { + os << "Core: " << static_cast(AsCoreRegister()); + } else if (IsRegisterPair()) { + os << "Pair: " << static_cast(AsRegisterPairLow()) << ", " + << static_cast(AsRegisterPairHigh()); + } else if (IsSRegister()) { + os << "SRegister: " << static_cast(AsSRegister()); + } else if (IsDRegister()) { + os << "DRegister: " << static_cast(AsDRegister()); + } else { + os << "??: " << RegId(); + } +} + +std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg) { + reg.Print(os); + return os; +} + +std::ostream& operator<<(std::ostream& os, const RegisterPair& r) { + os << ArmManagedRegister::FromRegisterPair(r); + return os; +} + +} // namespace arm +} // namespace art diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h new file mode 100644 index 0000000000..a496c87150 --- /dev/null +++ b/compiler/utils/arm/managed_register_arm.h @@ -0,0 +1,274 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_ +#define ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_ + +#include "base/logging.h" +#include "constants_arm.h" +#include "utils/managed_register.h" + +namespace art { +namespace arm { + +// Values for register pairs. +enum RegisterPair { + R0_R1 = 0, + R2_R3 = 1, + R4_R5 = 2, + R6_R7 = 3, + R1_R2 = 4, // Dalvik style passing + kNumberOfRegisterPairs = 5, + kNoRegisterPair = -1, +}; + +std::ostream& operator<<(std::ostream& os, const RegisterPair& reg); + +const int kNumberOfCoreRegIds = kNumberOfCoreRegisters; +const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters; + +const int kNumberOfSRegIds = kNumberOfSRegisters; +const int kNumberOfSAllocIds = kNumberOfSRegisters; + +const int kNumberOfDRegIds = kNumberOfDRegisters; +const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters; +const int kNumberOfDAllocIds = kNumberOfDRegIds - kNumberOfOverlappingDRegIds; + +const int kNumberOfPairRegIds = kNumberOfRegisterPairs; + +const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfSRegIds + + kNumberOfDRegIds + kNumberOfPairRegIds; +const int kNumberOfAllocIds = + kNumberOfCoreAllocIds + kNumberOfSAllocIds + kNumberOfDAllocIds; + +// Register ids map: +// [0..R[ core registers (enum Register) +// [R..S[ single precision VFP registers (enum SRegister) +// [S..D[ double precision VFP registers (enum DRegister) +// [D..P[ core register pairs (enum RegisterPair) +// where +// R = kNumberOfCoreRegIds +// S = R + kNumberOfSRegIds +// D = S + kNumberOfDRegIds +// P = D + kNumberOfRegisterPairs + +// Allocation ids map: +// [0..R[ core registers (enum Register) +// [R..S[ single precision VFP registers (enum SRegister) +// [S..N[ non-overlapping double precision VFP registers (16-31 in enum +// DRegister, VFPv3-D32 only) +// where +// R = kNumberOfCoreAllocIds +// S = R + kNumberOfSAllocIds +// N = S + kNumberOfDAllocIds + + +// An instance of class 'ManagedRegister' represents a single ARM 
register or a +// pair of core ARM registers (enum RegisterPair). A single register is either a +// core register (enum Register), a VFP single precision register +// (enum SRegister), or a VFP double precision register (enum DRegister). +// 'ManagedRegister::NoRegister()' returns an invalid ManagedRegister. +// There is a one-to-one mapping between ManagedRegister and register id. +class ArmManagedRegister : public ManagedRegister { + public: + Register AsCoreRegister() const { + CHECK(IsCoreRegister()); + return static_cast(id_); + } + + SRegister AsSRegister() const { + CHECK(IsSRegister()); + return static_cast(id_ - kNumberOfCoreRegIds); + } + + DRegister AsDRegister() const { + CHECK(IsDRegister()); + return static_cast(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds); + } + + SRegister AsOverlappingDRegisterLow() const { + CHECK(IsOverlappingDRegister()); + DRegister d_reg = AsDRegister(); + return static_cast(d_reg * 2); + } + + SRegister AsOverlappingDRegisterHigh() const { + CHECK(IsOverlappingDRegister()); + DRegister d_reg = AsDRegister(); + return static_cast(d_reg * 2 + 1); + } + + RegisterPair AsRegisterPair() const { + CHECK(IsRegisterPair()); + Register reg_low = AsRegisterPairLow(); + if (reg_low == R1) { + return R1_R2; + } else { + return static_cast(reg_low / 2); + } + } + + Register AsRegisterPairLow() const { + CHECK(IsRegisterPair()); + // Appropriate mapping of register ids allows to use AllocIdLow(). + return FromRegId(AllocIdLow()).AsCoreRegister(); + } + + Register AsRegisterPairHigh() const { + CHECK(IsRegisterPair()); + // Appropriate mapping of register ids allows to use AllocIdHigh(). 
+ return FromRegId(AllocIdHigh()).AsCoreRegister(); + } + + bool IsCoreRegister() const { + CHECK(IsValidManagedRegister()); + return (0 <= id_) && (id_ < kNumberOfCoreRegIds); + } + + bool IsSRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - kNumberOfCoreRegIds; + return (0 <= test) && (test < kNumberOfSRegIds); + } + + bool IsDRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds); + return (0 <= test) && (test < kNumberOfDRegIds); + } + + // Returns true if this DRegister overlaps SRegisters. + bool IsOverlappingDRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds); + return (0 <= test) && (test < kNumberOfOverlappingDRegIds); + } + + bool IsRegisterPair() const { + CHECK(IsValidManagedRegister()); + const int test = + id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds + kNumberOfDRegIds); + return (0 <= test) && (test < kNumberOfPairRegIds); + } + + bool IsSameType(ArmManagedRegister test) const { + CHECK(IsValidManagedRegister() && test.IsValidManagedRegister()); + return + (IsCoreRegister() && test.IsCoreRegister()) || + (IsSRegister() && test.IsSRegister()) || + (IsDRegister() && test.IsDRegister()) || + (IsRegisterPair() && test.IsRegisterPair()); + } + + + // Returns true if the two managed-registers ('this' and 'other') overlap. + // Either managed-register may be the NoRegister. If both are the NoRegister + // then false is returned. 
+ bool Overlaps(const ArmManagedRegister& other) const; + + void Print(std::ostream& os) const; + + static ArmManagedRegister FromCoreRegister(Register r) { + CHECK_NE(r, kNoRegister); + return FromRegId(r); + } + + static ArmManagedRegister FromSRegister(SRegister r) { + CHECK_NE(r, kNoSRegister); + return FromRegId(r + kNumberOfCoreRegIds); + } + + static ArmManagedRegister FromDRegister(DRegister r) { + CHECK_NE(r, kNoDRegister); + return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfSRegIds)); + } + + static ArmManagedRegister FromRegisterPair(RegisterPair r) { + CHECK_NE(r, kNoRegisterPair); + return FromRegId(r + (kNumberOfCoreRegIds + + kNumberOfSRegIds + kNumberOfDRegIds)); + } + + // Return a RegisterPair consisting of Register r_low and r_low + 1. + static ArmManagedRegister FromCoreRegisterPair(Register r_low) { + if (r_low != R1) { // not the dalvik special case + CHECK_NE(r_low, kNoRegister); + CHECK_EQ(0, (r_low % 2)); + const int r = r_low / 2; + CHECK_LT(r, kNumberOfPairRegIds); + return FromRegisterPair(static_cast(r)); + } else { + return FromRegisterPair(R1_R2); + } + } + + // Return a DRegister overlapping SRegister r_low and r_low + 1. + static ArmManagedRegister FromSRegisterPair(SRegister r_low) { + CHECK_NE(r_low, kNoSRegister); + CHECK_EQ(0, (r_low % 2)); + const int r = r_low / 2; + CHECK_LT(r, kNumberOfOverlappingDRegIds); + return FromDRegister(static_cast(r)); + } + + private: + bool IsValidManagedRegister() const { + return (0 <= id_) && (id_ < kNumberOfRegIds); + } + + int RegId() const { + CHECK(!IsNoRegister()); + return id_; + } + + int AllocId() const { + CHECK(IsValidManagedRegister() && + !IsOverlappingDRegister() && !IsRegisterPair()); + int r = id_; + if ((kNumberOfDAllocIds > 0) && IsDRegister()) { // VFPv3-D32 only. 
+ r -= kNumberOfOverlappingDRegIds; + } + CHECK_LT(r, kNumberOfAllocIds); + return r; + } + + int AllocIdLow() const; + int AllocIdHigh() const; + + friend class ManagedRegister; + + explicit ArmManagedRegister(int reg_id) : ManagedRegister(reg_id) {} + + static ArmManagedRegister FromRegId(int reg_id) { + ArmManagedRegister reg(reg_id); + CHECK(reg.IsValidManagedRegister()); + return reg; + } +}; + +std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg); + +} // namespace arm + +inline arm::ArmManagedRegister ManagedRegister::AsArm() const { + arm::ArmManagedRegister reg(id_); + CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); + return reg; +} + +} // namespace art + +#endif // ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_ diff --git a/compiler/utils/arm/managed_register_arm_test.cc b/compiler/utils/arm/managed_register_arm_test.cc new file mode 100644 index 0000000000..f5d4cc0d10 --- /dev/null +++ b/compiler/utils/arm/managed_register_arm_test.cc @@ -0,0 +1,767 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "globals.h" +#include "managed_register_arm.h" +#include "gtest/gtest.h" + +namespace art { +namespace arm { + +TEST(ArmManagedRegister, NoRegister) { + ArmManagedRegister reg = ManagedRegister::NoRegister().AsArm(); + EXPECT_TRUE(reg.IsNoRegister()); + EXPECT_TRUE(!reg.Overlaps(reg)); +} + +TEST(ArmManagedRegister, CoreRegister) { + ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(R0, reg.AsCoreRegister()); + + reg = ArmManagedRegister::FromCoreRegister(R1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(R1, reg.AsCoreRegister()); + + reg = ArmManagedRegister::FromCoreRegister(R8); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(R8, reg.AsCoreRegister()); + + reg = ArmManagedRegister::FromCoreRegister(R15); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(R15, reg.AsCoreRegister()); +} + + +TEST(ArmManagedRegister, SRegister) { + ArmManagedRegister reg = ArmManagedRegister::FromSRegister(S0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(S0, reg.AsSRegister()); + + reg = 
ArmManagedRegister::FromSRegister(S1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(S1, reg.AsSRegister()); + + reg = ArmManagedRegister::FromSRegister(S3); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(S3, reg.AsSRegister()); + + reg = ArmManagedRegister::FromSRegister(S15); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(S15, reg.AsSRegister()); + + reg = ArmManagedRegister::FromSRegister(S30); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(S30, reg.AsSRegister()); + + reg = ArmManagedRegister::FromSRegister(S31); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(S31, reg.AsSRegister()); +} + + +TEST(ArmManagedRegister, DRegister) { + ArmManagedRegister reg = ArmManagedRegister::FromDRegister(D0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D0, reg.AsDRegister()); + EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow()); + EXPECT_EQ(S1, 
reg.AsOverlappingDRegisterHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S0))); + + reg = ArmManagedRegister::FromDRegister(D1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D1, reg.AsDRegister()); + EXPECT_EQ(S2, reg.AsOverlappingDRegisterLow()); + EXPECT_EQ(S3, reg.AsOverlappingDRegisterHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S2))); + + reg = ArmManagedRegister::FromDRegister(D6); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D6, reg.AsDRegister()); + EXPECT_EQ(S12, reg.AsOverlappingDRegisterLow()); + EXPECT_EQ(S13, reg.AsOverlappingDRegisterHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S12))); + + reg = ArmManagedRegister::FromDRegister(D14); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D14, reg.AsDRegister()); + EXPECT_EQ(S28, reg.AsOverlappingDRegisterLow()); + EXPECT_EQ(S29, reg.AsOverlappingDRegisterHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S28))); + + reg = ArmManagedRegister::FromDRegister(D15); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D15, reg.AsDRegister()); + EXPECT_EQ(S30, reg.AsOverlappingDRegisterLow()); + EXPECT_EQ(S31, reg.AsOverlappingDRegisterHigh()); + 
EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S30))); + +#ifdef VFPv3_D32 + reg = ArmManagedRegister::FromDRegister(D16); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D16, reg.AsDRegister()); + + reg = ArmManagedRegister::FromDRegister(D18); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D18, reg.AsDRegister()); + + reg = ArmManagedRegister::FromDRegister(D30); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D30, reg.AsDRegister()); + + reg = ArmManagedRegister::FromDRegister(D31); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(D31, reg.AsDRegister()); +#endif // VFPv3_D32 +} + + +TEST(ArmManagedRegister, Pair) { + ArmManagedRegister reg = ArmManagedRegister::FromRegisterPair(R0_R1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(R0_R1, reg.AsRegisterPair()); + EXPECT_EQ(R0, reg.AsRegisterPairLow()); + EXPECT_EQ(R1, reg.AsRegisterPairHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R0))); + + reg = ArmManagedRegister::FromRegisterPair(R1_R2); + EXPECT_TRUE(!reg.IsNoRegister()); + 
EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(R1_R2, reg.AsRegisterPair()); + EXPECT_EQ(R1, reg.AsRegisterPairLow()); + EXPECT_EQ(R2, reg.AsRegisterPairHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R1))); + + reg = ArmManagedRegister::FromRegisterPair(R2_R3); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(R2_R3, reg.AsRegisterPair()); + EXPECT_EQ(R2, reg.AsRegisterPairLow()); + EXPECT_EQ(R3, reg.AsRegisterPairHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R2))); + + reg = ArmManagedRegister::FromRegisterPair(R4_R5); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(R4_R5, reg.AsRegisterPair()); + EXPECT_EQ(R4, reg.AsRegisterPairLow()); + EXPECT_EQ(R5, reg.AsRegisterPairHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R4))); + + reg = ArmManagedRegister::FromRegisterPair(R6_R7); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsOverlappingDRegister()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(R6_R7, reg.AsRegisterPair()); + EXPECT_EQ(R6, reg.AsRegisterPairLow()); + EXPECT_EQ(R7, reg.AsRegisterPairHigh()); + EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R6))); +} + + +TEST(ArmManagedRegister, Equals) { + ManagedRegister no_reg = ManagedRegister::NoRegister(); + EXPECT_TRUE(no_reg.Equals(ArmManagedRegister::NoRegister())); 
+ EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_R0 = ArmManagedRegister::FromCoreRegister(R0); + EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_R1 = ArmManagedRegister::FromCoreRegister(R1); + EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_R8 = ArmManagedRegister::FromCoreRegister(R8); + EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S1))); + 
EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_S0 = ArmManagedRegister::FromSRegister(S0); + EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(reg_S0.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_S1 = ArmManagedRegister::FromSRegister(S1); + EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(reg_S1.Equals(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_S31 = ArmManagedRegister::FromSRegister(S31); + EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(reg_S31.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D1))); + 
EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_D0 = ArmManagedRegister::FromDRegister(D0); + EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(reg_D0.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_D15 = ArmManagedRegister::FromDRegister(D15); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(reg_D15.Equals(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + +#ifdef VFPv3_D32 + ArmManagedRegister reg_D16 = ArmManagedRegister::FromDRegister(D16); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D0))); + 
EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(reg_D16.Equals(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_D30 = ArmManagedRegister::FromDRegister(D30); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(reg_D30.Equals(ArmManagedRegister::FromDRegister(D30))); + EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + + ArmManagedRegister reg_D31 = ArmManagedRegister::FromDRegister(D30); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D30))); + 
EXPECT_TRUE(reg_D31.Equals(ArmManagedRegister::FromDRegister(D31))); + EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); +#endif // VFPv3_D32 + + ArmManagedRegister reg_R0R1 = ArmManagedRegister::FromRegisterPair(R0_R1); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R2_R3))); + + ArmManagedRegister reg_R4R5 = ArmManagedRegister::FromRegisterPair(R4_R5); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R4_R5))); + EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R6_R7))); + + ArmManagedRegister reg_R6R7 = ArmManagedRegister::FromRegisterPair(R6_R7); + 
EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R4_R5))); + EXPECT_TRUE(reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R6_R7))); +} + + +TEST(ArmManagedRegister, Overlaps) { + ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromCoreRegister(R1); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromCoreRegister(R7); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromSRegister(S0); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromSRegister(S1); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromSRegister(S15); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromSRegister(S31); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromDRegister(D0); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromDRegister(D7); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromDRegister(D15); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + 
EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + +#ifdef VFPv3_D32 + reg = ArmManagedRegister::FromDRegister(D16); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromDRegister(D31); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); +#endif // VFPv3_D32 + + reg = ArmManagedRegister::FromRegisterPair(R0_R1); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); + + reg = ArmManagedRegister::FromRegisterPair(R4_R5); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); +#ifdef VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); + 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); +#endif // VFPv3_D32 + EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); + EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); +} + +} // namespace arm +} // namespace art diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc new file mode 100644 index 0000000000..92ce0b8001 --- /dev/null +++ b/compiler/utils/assembler.cc @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assembler.h" + +#include +#include + +#include "arm/assembler_arm.h" +#include "mips/assembler_mips.h" +#include "x86/assembler_x86.h" +#include "globals.h" +#include "memory_region.h" + +namespace art { + +static byte* NewContents(size_t capacity) { + return new byte[capacity]; +} + + +AssemblerBuffer::AssemblerBuffer() { + static const size_t kInitialBufferCapacity = 4 * KB; + contents_ = NewContents(kInitialBufferCapacity); + cursor_ = contents_; + limit_ = ComputeLimit(contents_, kInitialBufferCapacity); + fixup_ = NULL; + slow_path_ = NULL; +#ifndef NDEBUG + has_ensured_capacity_ = false; + fixups_processed_ = false; +#endif + + // Verify internal state. 
+ CHECK_EQ(Capacity(), kInitialBufferCapacity); + CHECK_EQ(Size(), 0U); +} + + +AssemblerBuffer::~AssemblerBuffer() { + delete[] contents_; +} + + +void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) { + AssemblerFixup* fixup = fixup_; + while (fixup != NULL) { + fixup->Process(region, fixup->position()); + fixup = fixup->previous(); + } +} + + +void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) { + // Copy the instructions from the buffer. + MemoryRegion from(reinterpret_cast(contents()), Size()); + instructions.CopyFrom(0, from); + // Process fixups in the instructions. + ProcessFixups(instructions); +#ifndef NDEBUG + fixups_processed_ = true; +#endif +} + + +void AssemblerBuffer::ExtendCapacity() { + size_t old_size = Size(); + size_t old_capacity = Capacity(); + size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB); + + // Allocate the new data area and copy contents of the old one to it. + byte* new_contents = NewContents(new_capacity); + memmove(reinterpret_cast(new_contents), + reinterpret_cast(contents_), + old_size); + + // Compute the relocation delta and switch to the new contents area. + ptrdiff_t delta = new_contents - contents_; + contents_ = new_contents; + + // Update the cursor and recompute the limit. + cursor_ += delta; + limit_ = ComputeLimit(new_contents, new_capacity); + + // Verify internal state. 
+ CHECK_EQ(Capacity(), new_capacity); + CHECK_EQ(Size(), old_size); +} + + +Assembler* Assembler::Create(InstructionSet instruction_set) { + switch (instruction_set) { + case kArm: + case kThumb2: + return new arm::ArmAssembler(); + case kMips: + return new mips::MipsAssembler(); + case kX86: + return new x86::X86Assembler(); + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; + return NULL; + } +} + +} // namespace art diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h new file mode 100644 index 0000000000..9d79002625 --- /dev/null +++ b/compiler/utils/assembler.h @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_ +#define ART_COMPILER_UTILS_ASSEMBLER_H_ + +#include + +#include "base/logging.h" +#include "base/macros.h" +#include "arm/constants_arm.h" +#include "mips/constants_mips.h" +#include "x86/constants_x86.h" +#include "instruction_set.h" +#include "managed_register.h" +#include "memory_region.h" +#include "offsets.h" + +namespace art { + +class Assembler; +class AssemblerBuffer; +class AssemblerFixup; + +namespace arm { + class ArmAssembler; +} +namespace mips { + class MipsAssembler; +} +namespace x86 { + class X86Assembler; +} + +class Label { + public: + Label() : position_(0) {} + + ~Label() { + // Assert if label is being destroyed with unresolved branches pending. 
+ CHECK(!IsLinked()); + } + + // Returns the position for bound and linked labels. Cannot be used + // for unused labels. + int Position() const { + CHECK(!IsUnused()); + return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize; + } + + int LinkPosition() const { + CHECK(IsLinked()); + return position_ - kWordSize; + } + + bool IsBound() const { return position_ < 0; } + bool IsUnused() const { return position_ == 0; } + bool IsLinked() const { return position_ > 0; } + + private: + int position_; + + void Reinitialize() { + position_ = 0; + } + + void BindTo(int position) { + CHECK(!IsBound()); + position_ = -position - kPointerSize; + CHECK(IsBound()); + } + + void LinkTo(int position) { + CHECK(!IsBound()); + position_ = position + kPointerSize; + CHECK(IsLinked()); + } + + friend class arm::ArmAssembler; + friend class mips::MipsAssembler; + friend class x86::X86Assembler; + + DISALLOW_COPY_AND_ASSIGN(Label); +}; + + +// Assembler fixups are positions in generated code that require processing +// after the code has been copied to executable memory. This includes building +// relocation information. 
+class AssemblerFixup { + public: + virtual void Process(const MemoryRegion& region, int position) = 0; + virtual ~AssemblerFixup() {} + + private: + AssemblerFixup* previous_; + int position_; + + AssemblerFixup* previous() const { return previous_; } + void set_previous(AssemblerFixup* previous) { previous_ = previous; } + + int position() const { return position_; } + void set_position(int position) { position_ = position; } + + friend class AssemblerBuffer; +}; + +// Parent of all queued slow paths, emitted during finalization +class SlowPath { + public: + SlowPath() : next_(NULL) {} + virtual ~SlowPath() {} + + Label* Continuation() { return &continuation_; } + Label* Entry() { return &entry_; } + // Generate code for slow path + virtual void Emit(Assembler *sp_asm) = 0; + + protected: + // Entry branched to by fast path + Label entry_; + // Optional continuation that is branched to at the end of the slow path + Label continuation_; + // Next in linked list of slow paths + SlowPath *next_; + + friend class AssemblerBuffer; + DISALLOW_COPY_AND_ASSIGN(SlowPath); +}; + +class AssemblerBuffer { + public: + AssemblerBuffer(); + ~AssemblerBuffer(); + + // Basic support for emitting, loading, and storing. + template void Emit(T value) { + CHECK(HasEnsuredCapacity()); + *reinterpret_cast(cursor_) = value; + cursor_ += sizeof(T); + } + + template T Load(size_t position) { + CHECK_LE(position, Size() - static_cast(sizeof(T))); + return *reinterpret_cast(contents_ + position); + } + + template void Store(size_t position, T value) { + CHECK_LE(position, Size() - static_cast(sizeof(T))); + *reinterpret_cast(contents_ + position) = value; + } + + // Emit a fixup at the current location. 
+ void EmitFixup(AssemblerFixup* fixup) { + fixup->set_previous(fixup_); + fixup->set_position(Size()); + fixup_ = fixup; + } + + void EnqueueSlowPath(SlowPath* slowpath) { + if (slow_path_ == NULL) { + slow_path_ = slowpath; + } else { + SlowPath* cur = slow_path_; + for ( ; cur->next_ != NULL ; cur = cur->next_) {} + cur->next_ = slowpath; + } + } + + void EmitSlowPaths(Assembler* sp_asm) { + SlowPath* cur = slow_path_; + SlowPath* next = NULL; + slow_path_ = NULL; + for ( ; cur != NULL ; cur = next) { + cur->Emit(sp_asm); + next = cur->next_; + delete cur; + } + } + + // Get the size of the emitted code. + size_t Size() const { + CHECK_GE(cursor_, contents_); + return cursor_ - contents_; + } + + byte* contents() const { return contents_; } + + // Copy the assembled instructions into the specified memory block + // and apply all fixups. + void FinalizeInstructions(const MemoryRegion& region); + + // To emit an instruction to the assembler buffer, the EnsureCapacity helper + // must be used to guarantee that the underlying data area is big enough to + // hold the emitted instruction. Usage: + // + // AssemblerBuffer buffer; + // AssemblerBuffer::EnsureCapacity ensured(&buffer); + // ... emit bytes for single instruction ... + +#ifndef NDEBUG + + class EnsureCapacity { + public: + explicit EnsureCapacity(AssemblerBuffer* buffer) { + if (buffer->cursor() >= buffer->limit()) { + buffer->ExtendCapacity(); + } + // In debug mode, we save the assembler buffer along with the gap + // size before we start emitting to the buffer. This allows us to + // check that any single generated instruction doesn't overflow the + // limit implied by the minimum gap size. + buffer_ = buffer; + gap_ = ComputeGap(); + // Make sure that extending the capacity leaves a big enough gap + // for any kind of instruction. + CHECK_GE(gap_, kMinimumGap); + // Mark the buffer as having ensured the capacity. + CHECK(!buffer->HasEnsuredCapacity()); // Cannot nest. 
+ buffer->has_ensured_capacity_ = true; + } + + ~EnsureCapacity() { + // Unmark the buffer, so we cannot emit after this. + buffer_->has_ensured_capacity_ = false; + // Make sure the generated instruction doesn't take up more + // space than the minimum gap. + int delta = gap_ - ComputeGap(); + CHECK_LE(delta, kMinimumGap); + } + + private: + AssemblerBuffer* buffer_; + int gap_; + + int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); } + }; + + bool has_ensured_capacity_; + bool HasEnsuredCapacity() const { return has_ensured_capacity_; } + +#else + + class EnsureCapacity { + public: + explicit EnsureCapacity(AssemblerBuffer* buffer) { + if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity(); + } + }; + + // When building the C++ tests, assertion code is enabled. To allow + // asserting that the user of the assembler buffer has ensured the + // capacity needed for emitting, we add a dummy method in non-debug mode. + bool HasEnsuredCapacity() const { return true; } + +#endif + + // Returns the position in the instruction stream. + int GetPosition() { return cursor_ - contents_; } + + private: + // The limit is set to kMinimumGap bytes before the end of the data area. + // This leaves enough space for the longest possible instruction and allows + // for a single, fast space check per instruction. + static const int kMinimumGap = 32; + + byte* contents_; + byte* cursor_; + byte* limit_; + AssemblerFixup* fixup_; + bool fixups_processed_; + + // Head of linked list of slow paths + SlowPath* slow_path_; + + byte* cursor() const { return cursor_; } + byte* limit() const { return limit_; } + size_t Capacity() const { + CHECK_GE(limit_, contents_); + return (limit_ - contents_) + kMinimumGap; + } + + // Process the fixup chain starting at the given fixup. The offset is + // non-zero for fixups in the body if the preamble is non-empty. + void ProcessFixups(const MemoryRegion& region); + + // Compute the limit based on the data area and the capacity. 
See + // description of kMinimumGap for the reasoning behind the value. + static byte* ComputeLimit(byte* data, size_t capacity) { + return data + capacity - kMinimumGap; + } + + void ExtendCapacity(); + + friend class AssemblerFixup; +}; + +class Assembler { + public: + static Assembler* Create(InstructionSet instruction_set); + + // Emit slow paths queued during assembly + void EmitSlowPaths() { buffer_.EmitSlowPaths(this); } + + // Size of generated code + size_t CodeSize() const { return buffer_.Size(); } + + // Copy instructions out of assembly buffer into the given region of memory + void FinalizeInstructions(const MemoryRegion& region) { + buffer_.FinalizeInstructions(region); + } + + // Emit code that will create an activation on the stack + virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& callee_save_regs, + const std::vector& entry_spills) = 0; + + // Emit code that will remove an activation from the stack + virtual void RemoveFrame(size_t frame_size, + const std::vector& callee_save_regs) = 0; + + virtual void IncreaseFrameSize(size_t adjust) = 0; + virtual void DecreaseFrameSize(size_t adjust) = 0; + + // Store routines + virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0; + virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0; + virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0; + + virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister scratch) = 0; + + virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister scratch) = 0; + + virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) = 0; + + virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0; + + virtual void StoreSpanning(FrameOffset dest, ManagedRegister src, + FrameOffset in_off, ManagedRegister scratch) = 0; + + // Load routines + virtual void Load(ManagedRegister dest, 
FrameOffset src, size_t size) = 0; + + virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size) = 0; + + virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0; + + virtual void LoadRef(ManagedRegister dest, ManagedRegister base, + MemberOffset offs) = 0; + + virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, + Offset offs) = 0; + + virtual void LoadRawPtrFromThread(ManagedRegister dest, + ThreadOffset offs) = 0; + + // Copying routines + virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0; + + virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, + ManagedRegister scratch) = 0; + + virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, + ManagedRegister scratch) = 0; + + virtual void CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister scratch) = 0; + + virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0; + + virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister scratch, size_t size) = 0; + + virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister scratch, size_t size) = 0; + + virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, + ManagedRegister scratch, size_t size) = 0; + + virtual void Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister scratch, size_t size) = 0; + + virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, + ManagedRegister scratch, size_t size) = 0; + + virtual void MemoryBarrier(ManagedRegister scratch) = 0; + + // Sign extension + virtual void SignExtend(ManagedRegister mreg, size_t size) = 0; + + // Zero extension + virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0; + + // Exploit fast access in managed code to Thread::Current() + virtual void 
GetCurrentThread(ManagedRegister tr) = 0; + virtual void GetCurrentThread(FrameOffset dest_offset, + ManagedRegister scratch) = 0; + + // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the SIRT entry to see if the value is + // NULL. + virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, + ManagedRegister in_reg, bool null_allowed) = 0; + + // Set up out_off to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. + virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, + ManagedRegister scratch, bool null_allowed) = 0; + + // src holds a SIRT entry (Object**) load this into dst + virtual void LoadReferenceFromSirt(ManagedRegister dst, + ManagedRegister src) = 0; + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0; + virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0; + + // Call to address held at [base+offset] + virtual void Call(ManagedRegister base, Offset offset, + ManagedRegister scratch) = 0; + virtual void Call(FrameOffset base, Offset offset, + ManagedRegister scratch) = 0; + virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0; + + // Generate code to check if Thread::Current()->exception_ is non-null + // and branch to a ExceptionSlowPath if it is. 
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0; + + virtual ~Assembler() {} + + protected: + Assembler() : buffer_() {} + + AssemblerBuffer buffer_; +}; + +} // namespace art + +#endif // ART_COMPILER_UTILS_ASSEMBLER_H_ diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h new file mode 100644 index 0000000000..4ad1763754 --- /dev/null +++ b/compiler/utils/managed_register.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_MANAGED_REGISTER_H_ +#define ART_COMPILER_UTILS_MANAGED_REGISTER_H_ + +namespace art { + +namespace arm { +class ArmManagedRegister; +} +namespace mips { +class MipsManagedRegister; +} +namespace x86 { +class X86ManagedRegister; +} + +class ManagedRegister { + public: + // ManagedRegister is a value class. There exists no method to change the + // internal state. We therefore allow a copy constructor and an + // assignment-operator. + ManagedRegister(const ManagedRegister& other) : id_(other.id_) { } + + ManagedRegister& operator=(const ManagedRegister& other) { + id_ = other.id_; + return *this; + } + + arm::ArmManagedRegister AsArm() const; + mips::MipsManagedRegister AsMips() const; + x86::X86ManagedRegister AsX86() const; + + // It is valid to invoke Equals on and with a NoRegister. 
+ bool Equals(const ManagedRegister& other) const { + return id_ == other.id_; + } + + bool IsNoRegister() const { + return id_ == kNoRegister; + } + + static ManagedRegister NoRegister() { + return ManagedRegister(); + } + + protected: + static const int kNoRegister = -1; + + ManagedRegister() : id_(kNoRegister) { } + explicit ManagedRegister(int reg_id) : id_(reg_id) { } + + int id_; +}; + +} // namespace art + +#endif // ART_COMPILER_UTILS_MANAGED_REGISTER_H_ diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc new file mode 100644 index 0000000000..931d7ab0f7 --- /dev/null +++ b/compiler/utils/mips/assembler_mips.cc @@ -0,0 +1,999 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assembler_mips.h" + +#include "base/casts.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "memory_region.h" +#include "thread.h" + +namespace art { +namespace mips { +#if 0 +class DirectCallRelocation : public AssemblerFixup { + public: + void Process(const MemoryRegion& region, int position) { + // Direct calls are relative to the following instruction on mips. 
+ int32_t pointer = region.Load(position); + int32_t start = reinterpret_cast(region.start()); + int32_t delta = start + position + sizeof(int32_t); + region.Store(position, pointer - delta); + } +}; +#endif + +std::ostream& operator<<(std::ostream& os, const DRegister& rhs) { + if (rhs >= D0 && rhs < kNumberOfDRegisters) { + os << "d" << static_cast(rhs); + } else { + os << "DRegister[" << static_cast(rhs) << "]"; + } + return os; +} + +void MipsAssembler::Emit(int32_t value) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + buffer_.Emit(value); +} + +void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) { + CHECK_NE(rs, kNoRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rd, kNoRegister); + int32_t encoding = opcode << kOpcodeShift | + static_cast(rs) << kRsShift | + static_cast(rt) << kRtShift | + static_cast(rd) << kRdShift | + shamt << kShamtShift | + funct; + Emit(encoding); +} + +void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) { + CHECK_NE(rs, kNoRegister); + CHECK_NE(rt, kNoRegister); + int32_t encoding = opcode << kOpcodeShift | + static_cast(rs) << kRsShift | + static_cast(rt) << kRtShift | + imm; + Emit(encoding); +} + +void MipsAssembler::EmitJ(int opcode, int address) { + int32_t encoding = opcode << kOpcodeShift | + address; + Emit(encoding); +} + +void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) { + CHECK_NE(ft, kNoFRegister); + CHECK_NE(fs, kNoFRegister); + CHECK_NE(fd, kNoFRegister); + int32_t encoding = opcode << kOpcodeShift | + fmt << kFmtShift | + static_cast(ft) << kFtShift | + static_cast(fs) << kFsShift | + static_cast(fd) << kFdShift | + funct; + Emit(encoding); +} + +void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) { + CHECK_NE(rt, kNoFRegister); + int32_t encoding = opcode << kOpcodeShift | + fmt << kFmtShift | + static_cast(rt) << kRtShift | + imm; + Emit(encoding); +} + +void 
MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) { + int offset; + if (label->IsBound()) { + offset = label->Position() - buffer_.Size(); + } else { + // Use the offset field of the branch instruction for linking the sites. + offset = label->position_; + label->LinkTo(buffer_.Size()); + } + if (equal) { + Beq(rt, rs, (offset >> 2) & kBranchOffsetMask); + } else { + Bne(rt, rs, (offset >> 2) & kBranchOffsetMask); + } +} + +void MipsAssembler::EmitJump(Label* label, bool link) { + int offset; + if (label->IsBound()) { + offset = label->Position() - buffer_.Size(); + } else { + // Use the offset field of the jump instruction for linking the sites. + offset = label->position_; + label->LinkTo(buffer_.Size()); + } + if (link) { + Jal((offset >> 2) & kJumpOffsetMask); + } else { + J((offset >> 2) & kJumpOffsetMask); + } +} + +int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) { + CHECK_ALIGNED(offset, 4); + CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset; + + // Properly preserve only the bits supported in the instruction. + offset >>= 2; + if (is_jump) { + offset &= kJumpOffsetMask; + return (inst & ~kJumpOffsetMask) | offset; + } else { + offset &= kBranchOffsetMask; + return (inst & ~kBranchOffsetMask) | offset; + } +} + +int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) { + // Sign-extend, then left-shift by 2. + if (is_jump) { + return (((inst & kJumpOffsetMask) << 6) >> 4); + } else { + return (((inst & kBranchOffsetMask) << 16) >> 14); + } +} + +void MipsAssembler::Bind(Label* label, bool is_jump) { + CHECK(!label->IsBound()); + int bound_pc = buffer_.Size(); + while (label->IsLinked()) { + int32_t position = label->Position(); + int32_t next = buffer_.Load(position); + int32_t offset = is_jump ? 
bound_pc - position : bound_pc - position - 4; + int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump); + buffer_.Store(position, encoded); + label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump); + } + label->BindTo(bound_pc); +} + +void MipsAssembler::Add(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x20); +} + +void MipsAssembler::Addu(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x21); +} + +void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) { + EmitI(0x8, rs, rt, imm16); +} + +void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) { + EmitI(0x9, rs, rt, imm16); +} + +void MipsAssembler::Sub(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x22); +} + +void MipsAssembler::Subu(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x23); +} + +void MipsAssembler::Mult(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x18); +} + +void MipsAssembler::Multu(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x19); +} + +void MipsAssembler::Div(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x1a); +} + +void MipsAssembler::Divu(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x1b); +} + +void MipsAssembler::And(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x24); +} + +void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) { + EmitI(0xc, rs, rt, imm16); +} + +void MipsAssembler::Or(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x25); +} + +void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) { + EmitI(0xd, rs, rt, imm16); +} + +void MipsAssembler::Xor(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x26); +} + +void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) { + EmitI(0xe, rs, rt, imm16); +} + +void MipsAssembler::Nor(Register rd, Register rs, Register rt) { 
+ EmitR(0, rs, rt, rd, 0, 0x27); +} + +void MipsAssembler::Sll(Register rd, Register rs, int shamt) { + EmitR(0, rs, static_cast(0), rd, shamt, 0x00); +} + +void MipsAssembler::Srl(Register rd, Register rs, int shamt) { + EmitR(0, rs, static_cast(0), rd, shamt, 0x02); +} + +void MipsAssembler::Sra(Register rd, Register rs, int shamt) { + EmitR(0, rs, static_cast(0), rd, shamt, 0x03); +} + +void MipsAssembler::Sllv(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x04); +} + +void MipsAssembler::Srlv(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x06); +} + +void MipsAssembler::Srav(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x07); +} + +void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) { + EmitI(0x20, rs, rt, imm16); +} + +void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) { + EmitI(0x21, rs, rt, imm16); +} + +void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) { + EmitI(0x23, rs, rt, imm16); +} + +void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) { + EmitI(0x24, rs, rt, imm16); +} + +void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) { + EmitI(0x25, rs, rt, imm16); +} + +void MipsAssembler::Lui(Register rt, uint16_t imm16) { + EmitI(0xf, static_cast(0), rt, imm16); +} + +void MipsAssembler::Mfhi(Register rd) { + EmitR(0, static_cast(0), static_cast(0), rd, 0, 0x10); +} + +void MipsAssembler::Mflo(Register rd) { + EmitR(0, static_cast(0), static_cast(0), rd, 0, 0x12); +} + +void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) { + EmitI(0x28, rs, rt, imm16); +} + +void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) { + EmitI(0x29, rs, rt, imm16); +} + +void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) { + EmitI(0x2b, rs, rt, imm16); +} + +void MipsAssembler::Slt(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x2a); +} + +void MipsAssembler::Sltu(Register rd, Register rs, 
Register rt) { + EmitR(0, rs, rt, rd, 0, 0x2b); +} + +void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) { + EmitI(0xa, rs, rt, imm16); +} + +void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) { + EmitI(0xb, rs, rt, imm16); +} + +void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) { + EmitI(0x4, rs, rt, imm16); + Nop(); +} + +void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) { + EmitI(0x5, rs, rt, imm16); + Nop(); +} + +void MipsAssembler::J(uint32_t address) { + EmitJ(0x2, address); + Nop(); +} + +void MipsAssembler::Jal(uint32_t address) { + EmitJ(0x2, address); + Nop(); +} + +void MipsAssembler::Jr(Register rs) { + EmitR(0, rs, static_cast(0), static_cast(0), 0, 0x08); + Nop(); +} + +void MipsAssembler::Jalr(Register rs) { + EmitR(0, rs, static_cast(0), RA, 0, 0x09); + Nop(); +} + +void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x0); +} + +void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x1); +} + +void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x2); +} + +void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x3); +} + +void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x0); +} + +void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x1); +} + +void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x2); +} + +void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x3); +} + +void MipsAssembler::MovS(FRegister fd, FRegister fs) { + 
EmitFR(0x11, 0x10, static_cast(0), fs, fd, 0x6); +} + +void MipsAssembler::MovD(DRegister fd, DRegister fs) { + EmitFR(0x11, 0x11, static_cast(0), static_cast(fs), + static_cast(fd), 0x6); +} + +void MipsAssembler::Mfc1(Register rt, FRegister fs) { + EmitFR(0x11, 0x00, static_cast(rt), fs, static_cast(0), 0x0); +} + +void MipsAssembler::Mtc1(FRegister ft, Register rs) { + EmitFR(0x11, 0x04, ft, static_cast(rs), static_cast(0), 0x0); +} + +void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) { + EmitI(0x31, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) { + EmitI(0x35, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) { + EmitI(0x39, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) { + EmitI(0x3d, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Break() { + EmitR(0, static_cast(0), static_cast(0), + static_cast(0), 0, 0xD); +} + +void MipsAssembler::Nop() { + EmitR(0x0, static_cast(0), static_cast(0), static_cast(0), 0, 0x0); +} + +void MipsAssembler::Move(Register rt, Register rs) { + EmitI(0x8, rs, rt, 0); +} + +void MipsAssembler::Clear(Register rt) { + EmitR(0, static_cast(0), static_cast(0), rt, 0, 0x20); +} + +void MipsAssembler::Not(Register rt, Register rs) { + EmitR(0, static_cast(0), rs, rt, 0, 0x27); +} + +void MipsAssembler::Mul(Register rd, Register rs, Register rt) { + Mult(rs, rt); + Mflo(rd); +} + +void MipsAssembler::Div(Register rd, Register rs, Register rt) { + Div(rs, rt); + Mflo(rd); +} + +void MipsAssembler::Rem(Register rd, Register rs, Register rt) { + Div(rs, rt); + Mfhi(rd); +} + +void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) { + Addi(rt, rs, value); +} + +void MipsAssembler::LoadImmediate(Register rt, int32_t value) { + Addi(rt, ZERO, value); +} + +void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t 
src_offset, + size_t size) { + MipsManagedRegister dst = m_dst.AsMips(); + if (dst.IsNoRegister()) { + CHECK_EQ(0u, size) << dst; + } else if (dst.IsCoreRegister()) { + CHECK_EQ(4u, size) << dst; + LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset); + } else if (dst.IsRegisterPair()) { + CHECK_EQ(8u, size) << dst; + LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset); + LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4); + } else if (dst.IsFRegister()) { + LoadSFromOffset(dst.AsFRegister(), src_register, src_offset); + } else { + CHECK(dst.IsDRegister()) << dst; + LoadDFromOffset(dst.AsDRegister(), src_register, src_offset); + } +} + +void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base, + int32_t offset) { + switch (type) { + case kLoadSignedByte: + Lb(reg, base, offset); + break; + case kLoadUnsignedByte: + Lbu(reg, base, offset); + break; + case kLoadSignedHalfword: + Lh(reg, base, offset); + break; + case kLoadUnsignedHalfword: + Lhu(reg, base, offset); + break; + case kLoadWord: + Lw(reg, base, offset); + break; + case kLoadWordPair: + LOG(FATAL) << "UNREACHABLE"; + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) { + Lwc1(reg, base, offset); +} + +void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) { + Ldc1(reg, base, offset); +} + +void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base, + int32_t offset) { + switch (type) { + case kStoreByte: + Sb(reg, base, offset); + break; + case kStoreHalfword: + Sh(reg, base, offset); + break; + case kStoreWord: + Sw(reg, base, offset); + break; + case kStoreWordPair: + LOG(FATAL) << "UNREACHABLE"; + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void MipsAssembler::StoreFToOffset(FRegister reg, Register base, int32_t offset) { + Swc1(reg, base, 
offset); +} + +void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) { + Sdc1(reg, base, offset); +} + +void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& callee_save_regs, + const std::vector& entry_spills) { + CHECK_ALIGNED(frame_size, kStackAlignment); + + // Increase frame to required size. + IncreaseFrameSize(frame_size); + + // Push callee saves and return address + int stack_offset = frame_size - kPointerSize; + StoreToOffset(kStoreWord, RA, SP, stack_offset); + for (int i = callee_save_regs.size() - 1; i >= 0; --i) { + stack_offset -= kPointerSize; + Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); + StoreToOffset(kStoreWord, reg, SP, stack_offset); + } + + // Write out Method*. + StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0); + + // Write out entry spills. + for (size_t i = 0; i < entry_spills.size(); ++i) { + Register reg = entry_spills.at(i).AsMips().AsCoreRegister(); + StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize)); + } +} + +void MipsAssembler::RemoveFrame(size_t frame_size, + const std::vector& callee_save_regs) { + CHECK_ALIGNED(frame_size, kStackAlignment); + + // Pop callee saves and return address + int stack_offset = frame_size - (callee_save_regs.size() * kPointerSize) - kPointerSize; + for (size_t i = 0; i < callee_save_regs.size(); ++i) { + Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); + LoadFromOffset(kLoadWord, reg, SP, stack_offset); + stack_offset += kPointerSize; + } + LoadFromOffset(kLoadWord, RA, SP, stack_offset); + + // Decrease frame to required size. + DecreaseFrameSize(frame_size); + + // Then jump to the return address. 
+ Jr(RA); +} + +void MipsAssembler::IncreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, SP, -adjust); +} + +void MipsAssembler::DecreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, SP, adjust); +} + +void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) { + MipsManagedRegister src = msrc.AsMips(); + if (src.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (src.IsCoreRegister()) { + CHECK_EQ(4u, size); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); + } else if (src.IsRegisterPair()) { + CHECK_EQ(8u, size); + StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value()); + StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), + SP, dest.Int32Value() + 4); + } else if (src.IsFRegister()) { + StoreFToOffset(src.AsFRegister(), SP, dest.Int32Value()); + } else { + CHECK(src.IsDRegister()); + StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value()); + } +} + +void MipsAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { + MipsManagedRegister src = msrc.AsMips(); + CHECK(src.IsCoreRegister()); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { + MipsManagedRegister src = msrc.AsMips(); + CHECK(src.IsCoreRegister()); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << 
scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value()); +} + +void MipsAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); +} + +void MipsAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) { + StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value()); +} + +void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc, + FrameOffset in_off, ManagedRegister mscratch) { + MipsManagedRegister src = msrc.AsMips(); + MipsManagedRegister scratch = mscratch.AsMips(); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); +} + +void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { + return EmitLoad(mdest, SP, src.Int32Value(), size); +} + +void MipsAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) { + return EmitLoad(mdest, S1, src.Int32Value(), size); +} + +void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value()); +} + +void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, + MemberOffset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), + base.AsMips().AsCoreRegister(), offs.Int32Value()); +} + +void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister 
base, + Offset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()) << dest; + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), + base.AsMips().AsCoreRegister(), offs.Int32Value()); +} + +void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, + ThreadOffset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value()); +} + +void MipsAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips"; +} + +void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips"; +} + +void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) { + MipsManagedRegister dest = mdest.AsMips(); + MipsManagedRegister src = msrc.AsMips(); + if (!dest.Equals(src)) { + if (dest.IsCoreRegister()) { + CHECK(src.IsCoreRegister()) << src; + Move(dest.AsCoreRegister(), src.AsCoreRegister()); + } else if (dest.IsFRegister()) { + CHECK(src.IsFRegister()) << src; + MovS(dest.AsFRegister(), src.AsFRegister()); + } else if (dest.IsDRegister()) { + CHECK(src.IsDRegister()) << src; + MovD(dest.AsDRegister(), src.AsDRegister()); + } else { + CHECK(dest.IsRegisterPair()) << dest; + CHECK(src.IsRegisterPair()) << src; + // Ensure that the first move doesn't clobber the input of the second + if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) { + Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); + Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); + } else { + Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); + Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); + } + } + } +} + +void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + 
CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset thr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + SP, fr_offs.Int32Value()); +} + +void MipsAssembler::CopyRawPtrToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + SP, fr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch, size_t size) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); + } else if (size == 8) { + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); + } +} + +void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister mscratch, size_t size) { + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + LoadFromOffset(kLoadWord, scratch, 
src_base.AsMips().AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value()); +} + +void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister mscratch, size_t size) { + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/, + ManagedRegister /*mscratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + movl(scratch, Address(ESP, src_base)); + movl(scratch, Address(scratch, src_offset)); + movl(Address(ESP, dest), scratch); +#endif +} + +void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister mscratch, size_t size) { + CHECK_EQ(size, 4u); + Register scratch = mscratch.AsMips().AsCoreRegister(); + LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/, + ManagedRegister /*mscratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + CHECK_EQ(dest.Int32Value(), src.Int32Value()); + movl(scratch, Address(ESP, src)); + pushl(Address(scratch, src_offset)); + popl(Address(scratch, dest_offset)); +#endif +} + +void MipsAssembler::MemoryBarrier(ManagedRegister) { + UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED"; +#if 0 +#if ANDROID_SMP != 0 + mfence(); +#endif +#endif 
+} + +void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg, + FrameOffset sirt_offset, + ManagedRegister min_reg, bool null_allowed) { + MipsManagedRegister out_reg = mout_reg.AsMips(); + MipsManagedRegister in_reg = min_reg.AsMips(); + CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg; + CHECK(out_reg.IsCoreRegister()) << out_reg; + if (null_allowed) { + Label null_arg; + // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is + // the address in the SIRT holding the reference. + // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset) + if (in_reg.IsNoRegister()) { + LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), + SP, sirt_offset.Int32Value()); + in_reg = out_reg; + } + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsCoreRegister(), 0); + } + EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); + Bind(&null_arg, false); + } else { + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); + } +} + +void MipsAssembler::CreateSirtEntry(FrameOffset out_off, + FrameOffset sirt_offset, + ManagedRegister mscratch, + bool null_allowed) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + if (null_allowed) { + Label null_arg; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, + sirt_offset.Int32Value()); + // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is + // the address in the SIRT holding the reference. + // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset) + EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true); + AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); + Bind(&null_arg, false); + } else { + AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); + } + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value()); +} + +// Given a SIRT entry, load the associated reference. 
+void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg, + ManagedRegister min_reg) { + MipsManagedRegister out_reg = mout_reg.AsMips(); + MipsManagedRegister in_reg = min_reg.AsMips(); + CHECK(out_reg.IsCoreRegister()) << out_reg; + CHECK(in_reg.IsCoreRegister()) << in_reg; + Label null_arg; + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsCoreRegister(), 0); + } + EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); + LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), + in_reg.AsCoreRegister(), 0); + Bind(&null_arg, false); +} + +void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) { + MipsManagedRegister base = mbase.AsMips(); + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(base.IsCoreRegister()) << base; + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + base.AsCoreRegister(), offset.Int32Value()); + Jalr(scratch.AsCoreRegister()); + // TODO: place reference map on call +} + +void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + // Call *(*(SP + base) + offset) + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + SP, base.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + scratch.AsCoreRegister(), offset.Int32Value()); + Jalr(scratch.AsCoreRegister()); + // TODO: place reference map on call +} + +void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + fs()->call(Address::Absolute(offset)); +#endif +} + +void MipsAssembler::GetCurrentThread(ManagedRegister 
tr) { + Move(tr.AsMips().AsCoreRegister(), S1); +} + +void MipsAssembler::GetCurrentThread(FrameOffset offset, + ManagedRegister /*mscratch*/) { + StoreToOffset(kStoreWord, S1, SP, offset.Int32Value()); +} + +void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) { + MipsManagedRegister scratch = mscratch.AsMips(); + MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust); + buffer_.EnqueueSlowPath(slow); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + S1, Thread::ExceptionOffset().Int32Value()); + EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false); +} + +void MipsExceptionSlowPath::Emit(Assembler* sasm) { + MipsAssembler* sp_asm = down_cast(sasm); +#define __ sp_asm-> + __ Bind(&entry_, false); + if (stack_adjust_ != 0) { // Fix up the frame. + __ DecreaseFrameSize(stack_adjust_); + } + // Pass exception object as argument + // Don't care about preserving A0 as this call won't return + __ Move(A0, scratch_.AsCoreRegister()); + // Set up call to Thread::Current()->pDeliverException + __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException)); + __ Jr(T9); + // Call never returns + __ Break(); +#undef __ +} + +} // namespace mips +} // namespace art diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h new file mode 100644 index 0000000000..0f5f2fe199 --- /dev/null +++ b/compiler/utils/mips/assembler_mips.h @@ -0,0 +1,507 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_ +#define ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_ + +#include + +#include "base/macros.h" +#include "constants_mips.h" +#include "globals.h" +#include "managed_register_mips.h" +#include "utils/assembler.h" +#include "offsets.h" +#include "utils.h" + +namespace art { +namespace mips { +#if 0 +class Operand { + public: + uint8_t mod() const { + return (encoding_at(0) >> 6) & 3; + } + + Register rm() const { + return static_cast(encoding_at(0) & 7); + } + + ScaleFactor scale() const { + return static_cast((encoding_at(1) >> 6) & 3); + } + + Register index() const { + return static_cast((encoding_at(1) >> 3) & 7); + } + + Register base() const { + return static_cast(encoding_at(1) & 7); + } + + int8_t disp8() const { + CHECK_GE(length_, 2); + return static_cast(encoding_[length_ - 1]); + } + + int32_t disp32() const { + CHECK_GE(length_, 5); + int32_t value; + memcpy(&value, &encoding_[length_ - 4], sizeof(value)); + return value; + } + + bool IsRegister(Register reg) const { + return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only. + && ((encoding_[0] & 0x07) == reg); // Register codes match. + } + + protected: + // Operand can be sub classed (e.g: Address). 
+ Operand() : length_(0) { } + + void SetModRM(int mod, Register rm) { + CHECK_EQ(mod & ~3, 0); + encoding_[0] = (mod << 6) | rm; + length_ = 1; + } + + void SetSIB(ScaleFactor scale, Register index, Register base) { + CHECK_EQ(length_, 1); + CHECK_EQ(scale & ~3, 0); + encoding_[1] = (scale << 6) | (index << 3) | base; + length_ = 2; + } + + void SetDisp8(int8_t disp) { + CHECK(length_ == 1 || length_ == 2); + encoding_[length_++] = static_cast(disp); + } + + void SetDisp32(int32_t disp) { + CHECK(length_ == 1 || length_ == 2); + int disp_size = sizeof(disp); + memmove(&encoding_[length_], &disp, disp_size); + length_ += disp_size; + } + + private: + byte length_; + byte encoding_[6]; + byte padding_; + + explicit Operand(Register reg) { SetModRM(3, reg); } + + // Get the operand encoding byte at the given index. + uint8_t encoding_at(int index) const { + CHECK_GE(index, 0); + CHECK_LT(index, length_); + return encoding_[index]; + } + + friend class MipsAssembler; + + DISALLOW_COPY_AND_ASSIGN(Operand); +}; + + +class Address : public Operand { + public: + Address(Register base, int32_t disp) { + Init(base, disp); + } + + Address(Register base, Offset disp) { + Init(base, disp.Int32Value()); + } + + Address(Register base, FrameOffset disp) { + CHECK_EQ(base, ESP); + Init(ESP, disp.Int32Value()); + } + + Address(Register base, MemberOffset disp) { + Init(base, disp.Int32Value()); + } + + void Init(Register base, int32_t disp) { + if (disp == 0 && base != EBP) { + SetModRM(0, base); + if (base == ESP) SetSIB(TIMES_1, ESP, base); + } else if (disp >= -128 && disp <= 127) { + SetModRM(1, base); + if (base == ESP) SetSIB(TIMES_1, ESP, base); + SetDisp8(disp); + } else { + SetModRM(2, base); + if (base == ESP) SetSIB(TIMES_1, ESP, base); + SetDisp32(disp); + } + } + + + Address(Register index, ScaleFactor scale, int32_t disp) { + CHECK_NE(index, ESP); // Illegal addressing mode. 
+ SetModRM(0, ESP); + SetSIB(scale, index, EBP); + SetDisp32(disp); + } + + Address(Register base, Register index, ScaleFactor scale, int32_t disp) { + CHECK_NE(index, ESP); // Illegal addressing mode. + if (disp == 0 && base != EBP) { + SetModRM(0, ESP); + SetSIB(scale, index, base); + } else if (disp >= -128 && disp <= 127) { + SetModRM(1, ESP); + SetSIB(scale, index, base); + SetDisp8(disp); + } else { + SetModRM(2, ESP); + SetSIB(scale, index, base); + SetDisp32(disp); + } + } + + static Address Absolute(uword addr) { + Address result; + result.SetModRM(0, EBP); + result.SetDisp32(addr); + return result; + } + + static Address Absolute(ThreadOffset addr) { + return Absolute(addr.Int32Value()); + } + + private: + Address() {} + + DISALLOW_COPY_AND_ASSIGN(Address); +}; + +#endif + +enum LoadOperandType { + kLoadSignedByte, + kLoadUnsignedByte, + kLoadSignedHalfword, + kLoadUnsignedHalfword, + kLoadWord, + kLoadWordPair, + kLoadSWord, + kLoadDWord +}; + +enum StoreOperandType { + kStoreByte, + kStoreHalfword, + kStoreWord, + kStoreWordPair, + kStoreSWord, + kStoreDWord +}; + +class MipsAssembler : public Assembler { + public: + MipsAssembler() {} + virtual ~MipsAssembler() {} + + // Emit Machine Instructions. 
+ void Add(Register rd, Register rs, Register rt); + void Addu(Register rd, Register rs, Register rt); + void Addi(Register rt, Register rs, uint16_t imm16); + void Addiu(Register rt, Register rs, uint16_t imm16); + void Sub(Register rd, Register rs, Register rt); + void Subu(Register rd, Register rs, Register rt); + void Mult(Register rs, Register rt); + void Multu(Register rs, Register rt); + void Div(Register rs, Register rt); + void Divu(Register rs, Register rt); + + void And(Register rd, Register rs, Register rt); + void Andi(Register rt, Register rs, uint16_t imm16); + void Or(Register rd, Register rs, Register rt); + void Ori(Register rt, Register rs, uint16_t imm16); + void Xor(Register rd, Register rs, Register rt); + void Xori(Register rt, Register rs, uint16_t imm16); + void Nor(Register rd, Register rs, Register rt); + + void Sll(Register rd, Register rs, int shamt); + void Srl(Register rd, Register rs, int shamt); + void Sra(Register rd, Register rs, int shamt); + void Sllv(Register rd, Register rs, Register rt); + void Srlv(Register rd, Register rs, Register rt); + void Srav(Register rd, Register rs, Register rt); + + void Lb(Register rt, Register rs, uint16_t imm16); + void Lh(Register rt, Register rs, uint16_t imm16); + void Lw(Register rt, Register rs, uint16_t imm16); + void Lbu(Register rt, Register rs, uint16_t imm16); + void Lhu(Register rt, Register rs, uint16_t imm16); + void Lui(Register rt, uint16_t imm16); + void Mfhi(Register rd); + void Mflo(Register rd); + + void Sb(Register rt, Register rs, uint16_t imm16); + void Sh(Register rt, Register rs, uint16_t imm16); + void Sw(Register rt, Register rs, uint16_t imm16); + + void Slt(Register rd, Register rs, Register rt); + void Sltu(Register rd, Register rs, Register rt); + void Slti(Register rt, Register rs, uint16_t imm16); + void Sltiu(Register rt, Register rs, uint16_t imm16); + + void Beq(Register rt, Register rs, uint16_t imm16); + void Bne(Register rt, Register rs, uint16_t imm16); + 
void J(uint32_t address); + void Jal(uint32_t address); + void Jr(Register rs); + void Jalr(Register rs); + + void AddS(FRegister fd, FRegister fs, FRegister ft); + void SubS(FRegister fd, FRegister fs, FRegister ft); + void MulS(FRegister fd, FRegister fs, FRegister ft); + void DivS(FRegister fd, FRegister fs, FRegister ft); + void AddD(DRegister fd, DRegister fs, DRegister ft); + void SubD(DRegister fd, DRegister fs, DRegister ft); + void MulD(DRegister fd, DRegister fs, DRegister ft); + void DivD(DRegister fd, DRegister fs, DRegister ft); + void MovS(FRegister fd, FRegister fs); + void MovD(DRegister fd, DRegister fs); + + void Mfc1(Register rt, FRegister fs); + void Mtc1(FRegister ft, Register rs); + void Lwc1(FRegister ft, Register rs, uint16_t imm16); + void Ldc1(DRegister ft, Register rs, uint16_t imm16); + void Swc1(FRegister ft, Register rs, uint16_t imm16); + void Sdc1(DRegister ft, Register rs, uint16_t imm16); + + void Break(); + void Nop(); + void Move(Register rt, Register rs); + void Clear(Register rt); + void Not(Register rt, Register rs); + void Mul(Register rd, Register rs, Register rt); + void Div(Register rd, Register rs, Register rt); + void Rem(Register rd, Register rs, Register rt); + + void AddConstant(Register rt, Register rs, int32_t value); + void LoadImmediate(Register rt, int32_t value); + + void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size); + void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset); + void LoadSFromOffset(FRegister reg, Register base, int32_t offset); + void LoadDFromOffset(DRegister reg, Register base, int32_t offset); + void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset); + void StoreFToOffset(FRegister reg, Register base, int32_t offset); + void StoreDToOffset(DRegister reg, Register base, int32_t offset); + +#if 0 + MipsAssembler* lock(); + + void mfence(); + + MipsAssembler* fs(); + + // + // Macros for 
High-level operations. + // + + void AddImmediate(Register reg, const Immediate& imm); + + void LoadDoubleConstant(XmmRegister dst, double value); + + void DoubleNegate(XmmRegister d); + void FloatNegate(XmmRegister f); + + void DoubleAbs(XmmRegister reg); + + void LockCmpxchgl(const Address& address, Register reg) { + lock()->cmpxchgl(address, reg); + } + + // + // Misc. functionality + // + int PreferredLoopAlignment() { return 16; } + void Align(int alignment, int offset); + + // Debugging and bringup support. + void Stop(const char* message); +#endif + + // Emit data (e.g. encoded instruction or immediate) to the instruction stream. + void Emit(int32_t value); + void EmitBranch(Register rt, Register rs, Label* label, bool equal); + void EmitJump(Label* label, bool link); + void Bind(Label* label, bool is_jump); + + // + // Overridden common assembler high-level functionality + // + + // Emit code that will create an activation on the stack + virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& callee_save_regs, + const std::vector& entry_spills); + + // Emit code that will remove an activation from the stack + virtual void RemoveFrame(size_t frame_size, + const std::vector& callee_save_regs); + + virtual void IncreaseFrameSize(size_t adjust); + virtual void DecreaseFrameSize(size_t adjust); + + // Store routines + virtual void Store(FrameOffset offs, ManagedRegister msrc, size_t size); + virtual void StoreRef(FrameOffset dest, ManagedRegister msrc); + virtual void StoreRawPtr(FrameOffset dest, ManagedRegister msrc); + + virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister mscratch); + + virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister mscratch); + + virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch); + + virtual void StoreStackPointerToThread(ThreadOffset thr_offs); + + virtual void 
StoreSpanning(FrameOffset dest, ManagedRegister msrc, + FrameOffset in_off, ManagedRegister mscratch); + + // Load routines + virtual void Load(ManagedRegister mdest, FrameOffset src, size_t size); + + virtual void Load(ManagedRegister mdest, ThreadOffset src, size_t size); + + virtual void LoadRef(ManagedRegister dest, FrameOffset src); + + virtual void LoadRef(ManagedRegister mdest, ManagedRegister base, + MemberOffset offs); + + virtual void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, + Offset offs); + + virtual void LoadRawPtrFromThread(ManagedRegister mdest, + ThreadOffset offs); + + // Copying routines + virtual void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size); + + virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, + ManagedRegister mscratch); + + virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, + ManagedRegister mscratch); + + virtual void CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch); + + virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size); + + virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister mscratch, size_t size); + + virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister mscratch, size_t size); + + virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, + ManagedRegister mscratch, size_t size); + + virtual void Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister mscratch, size_t size); + + virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, + ManagedRegister mscratch, size_t size); + + virtual void MemoryBarrier(ManagedRegister); + + // Sign extension + virtual void SignExtend(ManagedRegister mreg, size_t size); + + // Zero extension + virtual void ZeroExtend(ManagedRegister mreg, size_t size); + + 
// Exploit fast access in managed code to Thread::Current() + virtual void GetCurrentThread(ManagedRegister tr); + virtual void GetCurrentThread(FrameOffset dest_offset, + ManagedRegister mscratch); + + // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the SIRT entry to see if the value is + // NULL. + virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, + ManagedRegister in_reg, bool null_allowed); + + // Set up out_off to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. + virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, + ManagedRegister mscratch, bool null_allowed); + + // src holds a SIRT entry (Object**) load this into dst + virtual void LoadReferenceFromSirt(ManagedRegister dst, + ManagedRegister src); + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + virtual void VerifyObject(ManagedRegister src, bool could_be_null); + virtual void VerifyObject(FrameOffset src, bool could_be_null); + + // Call to address held at [base+offset] + virtual void Call(ManagedRegister base, Offset offset, + ManagedRegister mscratch); + virtual void Call(FrameOffset base, Offset offset, + ManagedRegister mscratch); + virtual void Call(ThreadOffset offset, ManagedRegister mscratch); + + // Generate code to check if Thread::Current()->exception_ is non-null + // and branch to a ExceptionSlowPath if it is. 
+ virtual void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust); + + private: + void EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct); + void EmitI(int opcode, Register rs, Register rt, uint16_t imm); + void EmitJ(int opcode, int address); + void EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct); + void EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm); + + int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump); + int DecodeBranchOffset(int32_t inst, bool is_jump); + + DISALLOW_COPY_AND_ASSIGN(MipsAssembler); +}; + +// Slowpath entered when Thread::Current()->_exception is non-null +class MipsExceptionSlowPath : public SlowPath { + public: + explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust) + : scratch_(scratch), stack_adjust_(stack_adjust) {} + virtual void Emit(Assembler *sp_asm); + private: + const MipsManagedRegister scratch_; + const size_t stack_adjust_; +}; + +} // namespace mips +} // namespace art + +#endif // ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_ diff --git a/compiler/utils/mips/constants_mips.h b/compiler/utils/mips/constants_mips.h new file mode 100644 index 0000000000..44ed5cc124 --- /dev/null +++ b/compiler/utils/mips/constants_mips.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_ +#define ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_ + +#include + +#include "arch/mips/registers_mips.h" +#include "base/logging.h" +#include "base/macros.h" +#include "globals.h" + +namespace art { +namespace mips { + +// Values for double-precision floating point registers. +enum DRegister { + D0 = 0, + D1 = 1, + D2 = 2, + D3 = 3, + D4 = 4, + D5 = 5, + D6 = 6, + D7 = 7, + D8 = 8, + D9 = 9, + D10 = 10, + D11 = 11, + D12 = 12, + D13 = 13, + D14 = 14, + D15 = 15, + kNumberOfDRegisters = 16, + kNumberOfOverlappingDRegisters = 16, + kNoDRegister = -1, +}; +std::ostream& operator<<(std::ostream& os, const DRegister& rhs); + +// Constants used for the decoding or encoding of the individual fields of instructions. +enum InstructionFields { + kOpcodeShift = 26, + kOpcodeBits = 6, + kRsShift = 21, + kRsBits = 5, + kRtShift = 16, + kRtBits = 5, + kRdShift = 11, + kRdBits = 5, + kShamtShift = 6, + kShamtBits = 5, + kFunctShift = 0, + kFunctBits = 6, + + kFmtShift = 21, + kFmtBits = 5, + kFtShift = 16, + kFtBits = 5, + kFsShift = 11, + kFsBits = 5, + kFdShift = 6, + kFdBits = 5, + + kBranchOffsetMask = 0x0000ffff, + kJumpOffsetMask = 0x03ffffff, +}; + +enum ScaleFactor { + TIMES_1 = 0, + TIMES_2 = 1, + TIMES_4 = 2, + TIMES_8 = 3 +}; + +class Instr { + public: + static const uint32_t kBreakPointInstruction = 0x0000000D; + + bool IsBreakPoint() { + return ((*reinterpret_cast(this)) & 0xFC0000CF) == kBreakPointInstruction; + } + + // Instructions are read out of a code stream. The only way to get a + // reference to an instruction is to convert a pointer. There is no way + // to allocate or create instances of class Instr. + // Use the At(pc) function to create references to Instr. 
+ static Instr* At(uintptr_t pc) { return reinterpret_cast(pc); } + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); +}; + +} // namespace mips +} // namespace art + +#endif // ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_ diff --git a/compiler/utils/mips/managed_register_mips.cc b/compiler/utils/mips/managed_register_mips.cc new file mode 100644 index 0000000000..195dafb0a1 --- /dev/null +++ b/compiler/utils/mips/managed_register_mips.cc @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "managed_register_mips.h" + +#include "globals.h" + +namespace art { +namespace mips { + +// These core registers are never available for allocation. +static const Register kReservedCoreRegistersArray[] = { S0, S1 }; + +// We need all registers for caching. 
+static const int kNumberOfAvailableCoreRegisters = (S7 - T0) + 1; +static const int kNumberOfAvailableFRegisters = kNumberOfFRegisters; +static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters; +static const int kNumberOfAvailableOverlappingDRegisters = + kNumberOfOverlappingDRegisters; +static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs; + +bool MipsManagedRegister::Overlaps(const MipsManagedRegister& other) const { + if (IsNoRegister() || other.IsNoRegister()) return false; + CHECK(IsValidManagedRegister()); + CHECK(other.IsValidManagedRegister()); + if (Equals(other)) return true; + if (IsRegisterPair()) { + Register low = AsRegisterPairLow(); + Register high = AsRegisterPairHigh(); + return MipsManagedRegister::FromCoreRegister(low).Overlaps(other) || + MipsManagedRegister::FromCoreRegister(high).Overlaps(other); + } + if (IsOverlappingDRegister()) { + if (other.IsDRegister()) return Equals(other); + if (other.IsFRegister()) { + FRegister low = AsOverlappingDRegisterLow(); + FRegister high = AsOverlappingDRegisterHigh(); + FRegister other_freg = other.AsFRegister(); + return (low == other_freg) || (high == other_freg); + } + return false; + } + if (other.IsRegisterPair() || other.IsOverlappingDRegister()) { + return other.Overlaps(*this); + } + return false; +} + + +int MipsManagedRegister::AllocIdLow() const { + CHECK(IsOverlappingDRegister() || IsRegisterPair()); + const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfFRegIds); + int low; + if (r < kNumberOfOverlappingDRegIds) { + CHECK(IsOverlappingDRegister()); + low = (r * 2) + kNumberOfCoreRegIds; // Return an FRegister. + } else { + CHECK(IsRegisterPair()); + low = (r - kNumberOfDRegIds) * 2 + 2; // Return a Register. 
+ if (low >= 24) { + // we got a pair higher than S6_S7, must be the dalvik special case + low = 5; + } + } + return low; +} + + +int MipsManagedRegister::AllocIdHigh() const { + return AllocIdLow() + 1; +} + + +void MipsManagedRegister::Print(std::ostream& os) const { + if (!IsValidManagedRegister()) { + os << "No Register"; + } else if (IsCoreRegister()) { + os << "Core: " << static_cast(AsCoreRegister()); + } else if (IsRegisterPair()) { + os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh(); + } else if (IsFRegister()) { + os << "FRegister: " << static_cast(AsFRegister()); + } else if (IsDRegister()) { + os << "DRegister: " << static_cast(AsDRegister()); + } else { + os << "??: " << RegId(); + } +} + +std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg) { + reg.Print(os); + return os; +} + +std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) { + os << MipsManagedRegister::FromRegisterPair(reg); + return os; +} + +} // namespace mips +} // namespace art diff --git a/compiler/utils/mips/managed_register_mips.h b/compiler/utils/mips/managed_register_mips.h new file mode 100644 index 0000000000..dd55cc4e6a --- /dev/null +++ b/compiler/utils/mips/managed_register_mips.h @@ -0,0 +1,228 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_ +#define ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_ + +#include "constants_mips.h" +#include "utils/managed_register.h" + +namespace art { +namespace mips { + +// Values for register pairs. +enum RegisterPair { + V0_V1 = 0, + A0_A1 = 1, + A2_A3 = 2, + T0_T1 = 3, + T2_T3 = 4, + T4_T5 = 5, + T6_T7 = 6, + S0_S1 = 7, + S2_S3 = 8, + S4_S5 = 9, + S6_S7 = 10, + A1_A2 = 11, // Dalvik style passing + kNumberOfRegisterPairs = 12, + kNoRegisterPair = -1, +}; + +std::ostream& operator<<(std::ostream& os, const RegisterPair& reg); + +const int kNumberOfCoreRegIds = kNumberOfCoreRegisters; +const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters; + +const int kNumberOfFRegIds = kNumberOfFRegisters; +const int kNumberOfFAllocIds = kNumberOfFRegisters; + +const int kNumberOfDRegIds = kNumberOfDRegisters; +const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters; +const int kNumberOfDAllocIds = kNumberOfDRegisters; + +const int kNumberOfPairRegIds = kNumberOfRegisterPairs; + +const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfFRegIds + + kNumberOfDRegIds + kNumberOfPairRegIds; +const int kNumberOfAllocIds = + kNumberOfCoreAllocIds + kNumberOfFAllocIds + kNumberOfDAllocIds; + +// Register ids map: +// [0..R[ core registers (enum Register) +// [R..F[ single precision FP registers (enum FRegister) +// [F..D[ double precision FP registers (enum DRegister) +// [D..P[ core register pairs (enum RegisterPair) +// where +// R = kNumberOfCoreRegIds +// F = R + kNumberOfFRegIds +// D = F + kNumberOfDRegIds +// P = D + kNumberOfRegisterPairs + +// Allocation ids map: +// [0..R[ core registers (enum Register) +// [R..F[ single precision FP registers (enum FRegister) +// where +// R = kNumberOfCoreRegIds +// F = R + kNumberOfFRegIds + + +// An instance of class 'ManagedRegister' represents a single core register (enum +// Register), a single precision FP register (enum FRegister), a double precision +// 
FP register (enum DRegister), or a pair of core registers (enum RegisterPair). +// 'ManagedRegister::NoRegister()' provides an invalid register. +// There is a one-to-one mapping between ManagedRegister and register id. +class MipsManagedRegister : public ManagedRegister { + public: + Register AsCoreRegister() const { + CHECK(IsCoreRegister()); + return static_cast(id_); + } + + FRegister AsFRegister() const { + CHECK(IsFRegister()); + return static_cast(id_ - kNumberOfCoreRegIds); + } + + DRegister AsDRegister() const { + CHECK(IsDRegister()); + return static_cast(id_ - kNumberOfCoreRegIds - kNumberOfFRegIds); + } + + FRegister AsOverlappingDRegisterLow() const { + CHECK(IsOverlappingDRegister()); + DRegister d_reg = AsDRegister(); + return static_cast(d_reg * 2); + } + + FRegister AsOverlappingDRegisterHigh() const { + CHECK(IsOverlappingDRegister()); + DRegister d_reg = AsDRegister(); + return static_cast(d_reg * 2 + 1); + } + + Register AsRegisterPairLow() const { + CHECK(IsRegisterPair()); + // Appropriate mapping of register ids allows to use AllocIdLow(). + return FromRegId(AllocIdLow()).AsCoreRegister(); + } + + Register AsRegisterPairHigh() const { + CHECK(IsRegisterPair()); + // Appropriate mapping of register ids allows to use AllocIdHigh(). + return FromRegId(AllocIdHigh()).AsCoreRegister(); + } + + bool IsCoreRegister() const { + CHECK(IsValidManagedRegister()); + return (0 <= id_) && (id_ < kNumberOfCoreRegIds); + } + + bool IsFRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - kNumberOfCoreRegIds; + return (0 <= test) && (test < kNumberOfFRegIds); + } + + bool IsDRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds); + return (0 <= test) && (test < kNumberOfDRegIds); + } + + // Returns true if this DRegister overlaps FRegisters. 
+ bool IsOverlappingDRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds); + return (0 <= test) && (test < kNumberOfOverlappingDRegIds); + } + + bool IsRegisterPair() const { + CHECK(IsValidManagedRegister()); + const int test = + id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds); + return (0 <= test) && (test < kNumberOfPairRegIds); + } + + void Print(std::ostream& os) const; + + // Returns true if the two managed-registers ('this' and 'other') overlap. + // Either managed-register may be the NoRegister. If both are the NoRegister + // then false is returned. + bool Overlaps(const MipsManagedRegister& other) const; + + static MipsManagedRegister FromCoreRegister(Register r) { + CHECK_NE(r, kNoRegister); + return FromRegId(r); + } + + static MipsManagedRegister FromFRegister(FRegister r) { + CHECK_NE(r, kNoFRegister); + return FromRegId(r + kNumberOfCoreRegIds); + } + + static MipsManagedRegister FromDRegister(DRegister r) { + CHECK_NE(r, kNoDRegister); + return FromRegId(r + kNumberOfCoreRegIds + kNumberOfFRegIds); + } + + static MipsManagedRegister FromRegisterPair(RegisterPair r) { + CHECK_NE(r, kNoRegisterPair); + return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds)); + } + + private: + bool IsValidManagedRegister() const { + return (0 <= id_) && (id_ < kNumberOfRegIds); + } + + int RegId() const { + CHECK(!IsNoRegister()); + return id_; + } + + int AllocId() const { + CHECK(IsValidManagedRegister() && !IsOverlappingDRegister() && !IsRegisterPair()); + CHECK_LT(id_, kNumberOfAllocIds); + return id_; + } + + int AllocIdLow() const; + int AllocIdHigh() const; + + friend class ManagedRegister; + + explicit MipsManagedRegister(int reg_id) : ManagedRegister(reg_id) {} + + static MipsManagedRegister FromRegId(int reg_id) { + MipsManagedRegister reg(reg_id); + CHECK(reg.IsValidManagedRegister()); + return reg; + } +}; + +std::ostream& operator<<(std::ostream& 
os, const MipsManagedRegister& reg); + +} // namespace mips + +inline mips::MipsManagedRegister ManagedRegister::AsMips() const { + mips::MipsManagedRegister reg(id_); + CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); + return reg; +} + +} // namespace art + +#endif // ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_ diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc new file mode 100644 index 0000000000..9095180246 --- /dev/null +++ b/compiler/utils/x86/assembler_x86.cc @@ -0,0 +1,1847 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assembler_x86.h" + +#include "base/casts.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "memory_region.h" +#include "thread.h" + +namespace art { +namespace x86 { + +class DirectCallRelocation : public AssemblerFixup { + public: + void Process(const MemoryRegion& region, int position) { + // Direct calls are relative to the following instruction on x86. 
+ int32_t pointer = region.Load(position); + int32_t start = reinterpret_cast(region.start()); + int32_t delta = start + position + sizeof(int32_t); + region.Store(position, pointer - delta); + } +}; + +std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) { + return os << "XMM" << static_cast(reg); +} + +std::ostream& operator<<(std::ostream& os, const X87Register& reg) { + return os << "ST" << static_cast(reg); +} + +void X86Assembler::call(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitRegisterOperand(2, reg); +} + + +void X86Assembler::call(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitOperand(2, address); +} + + +void X86Assembler::call(Label* label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xE8); + static const int kSize = 5; + EmitLabel(label, kSize); +} + + +void X86Assembler::pushl(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x50 + reg); +} + + +void X86Assembler::pushl(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitOperand(6, address); +} + + +void X86Assembler::pushl(const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (imm.is_int8()) { + EmitUint8(0x6A); + EmitUint8(imm.value() & 0xFF); + } else { + EmitUint8(0x68); + EmitImmediate(imm); + } +} + + +void X86Assembler::popl(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x58 + reg); +} + + +void X86Assembler::popl(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x8F); + EmitOperand(0, address); +} + + +void X86Assembler::movl(Register dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xB8 + dst); + EmitImmediate(imm); +} + + +void X86Assembler::movl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity 
ensured(&buffer_); + EmitUint8(0x89); + EmitRegisterOperand(src, dst); +} + + +void X86Assembler::movl(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x8B); + EmitOperand(dst, src); +} + + +void X86Assembler::movl(const Address& dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x89); + EmitOperand(src, dst); +} + + +void X86Assembler::movl(const Address& dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC7); + EmitOperand(0, dst); + EmitImmediate(imm); +} + +void X86Assembler::movl(const Address& dst, Label* lbl) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC7); + EmitOperand(0, dst); + EmitLabel(lbl, dst.length_ + 5); +} + +void X86Assembler::movzxb(Register dst, ByteRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB6); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movzxb(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB6); + EmitOperand(dst, src); +} + + +void X86Assembler::movsxb(Register dst, ByteRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBE); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movsxb(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBE); + EmitOperand(dst, src); +} + + +void X86Assembler::movb(Register /*dst*/, const Address& /*src*/) { + LOG(FATAL) << "Use movzxb or movsxb instead."; +} + + +void X86Assembler::movb(const Address& dst, ByteRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x88); + EmitOperand(src, dst); +} + + +void X86Assembler::movb(const Address& dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC6); + 
EmitOperand(EAX, dst); + CHECK(imm.is_int8()); + EmitUint8(imm.value() & 0xFF); +} + + +void X86Assembler::movzxw(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB7); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movzxw(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB7); + EmitOperand(dst, src); +} + + +void X86Assembler::movsxw(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBF); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movsxw(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBF); + EmitOperand(dst, src); +} + + +void X86Assembler::movw(Register /*dst*/, const Address& /*src*/) { + LOG(FATAL) << "Use movzxw or movsxw instead."; +} + + +void X86Assembler::movw(const Address& dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOperandSizeOverride(); + EmitUint8(0x89); + EmitOperand(src, dst); +} + + +void X86Assembler::leal(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x8D); + EmitOperand(dst, src); +} + + +void X86Assembler::cmovl(Condition condition, Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x40 + condition); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::setb(Condition condition, Register dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x90 + condition); + EmitOperand(0, Operand(dst)); +} + + +void X86Assembler::movss(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst, src); +} + + +void X86Assembler::movss(const Address& dst, 
XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x11);  // MOVSS store form, so src is the ModRM reg operand.
  EmitOperand(src, dst);
}


// Register-to-register MOVSS uses the store encoding (F3 0F 11), so the
// operands are deliberately emitted swapped (src in the reg field).
void X86Assembler::movss(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x11);
  EmitXmmRegisterOperand(src, dst);
}


// MOVD xmm <- r32 (66 0F 6E /r).
void X86Assembler::movd(XmmRegister dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x66);
  EmitUint8(0x0F);
  EmitUint8(0x6E);
  EmitOperand(dst, Operand(src));
}


// MOVD r32 <- xmm (66 0F 7E /r); the xmm source is the reg operand.
void X86Assembler::movd(Register dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x66);
  EmitUint8(0x0F);
  EmitUint8(0x7E);
  EmitOperand(src, Operand(dst));
}


// Scalar single-precision arithmetic: F3 0F <op> /r.
void X86Assembler::addss(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x58);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::addss(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x58);
  EmitOperand(dst, src);
}


void X86Assembler::subss(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x5C);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::subss(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x5C);
  EmitOperand(dst, src);
}


void X86Assembler::mulss(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x59);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::mulss(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x59);
  EmitOperand(dst, src);
}


void X86Assembler::divss(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x5E);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::divss(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x5E);
  EmitOperand(dst, src);
}


// x87 single-precision load/store (D9 /0, D9 /3).
void X86Assembler::flds(const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitOperand(0, src);
}


void X86Assembler::fstps(const Address& dst) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitOperand(3, dst);
}


// Scalar double-precision moves (F2 0F 10 load / F2 0F 11 store).
void X86Assembler::movsd(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x10);
  EmitOperand(dst, src);
}


void X86Assembler::movsd(const Address& dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x11);
  EmitOperand(src, dst);
}


// Like movss above, register-to-register MOVSD uses the store form with
// swapped operands.
void X86Assembler::movsd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x11);
  EmitXmmRegisterOperand(src, dst);
}


// Scalar double-precision arithmetic: F2 0F <op> /r.
void X86Assembler::addsd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x58);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::addsd(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x58);
  EmitOperand(dst, src);
}


void X86Assembler::subsd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x5C);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::subsd(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x5C);
  EmitOperand(dst, src);
}


void X86Assembler::mulsd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x59);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::mulsd(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x59);
  EmitOperand(dst, src);
}


void X86Assembler::divsd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x5E);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::divsd(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x5E);
  EmitOperand(dst, src);
}


// int32 -> float / double conversions (prefix 0F 2A).
void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x2A);
  EmitOperand(dst, Operand(src));
}


void X86Assembler::cvtsi2sd(XmmRegister dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x2A);
  EmitOperand(dst, Operand(src));
}


// float -> int32, rounded per MXCSR (F3 0F 2D).
void X86Assembler::cvtss2si(Register dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x2D);
  EmitXmmRegisterOperand(dst, src);
}


// float -> double (F3 0F 5A).
void X86Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x5A);
  EmitXmmRegisterOperand(dst, src);
}


void
X86Assembler::cvtsd2si(Register dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x2D);  // double -> int32, rounded per MXCSR.
  EmitXmmRegisterOperand(dst, src);
}


// Truncating float/double -> int32 conversions (0F 2C).
void X86Assembler::cvttss2si(Register dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x2C);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::cvttsd2si(Register dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x2C);
  EmitXmmRegisterOperand(dst, src);
}


// double -> float (F2 0F 5A).
void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x5A);
  EmitXmmRegisterOperand(dst, src);
}


// Packed int32 -> packed double (F3 0F E6).
void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0xE6);
  EmitXmmRegisterOperand(dst, src);
}


// Ordered scalar compares, setting EFLAGS (0F 2F / 66 0F 2F).
void X86Assembler::comiss(XmmRegister a, XmmRegister b) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0x2F);
  EmitXmmRegisterOperand(a, b);
}


void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x66);
  EmitUint8(0x0F);
  EmitUint8(0x2F);
  EmitXmmRegisterOperand(a, b);
}


void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF2);
  EmitUint8(0x0F);
  EmitUint8(0x51);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::sqrtss(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF3);
  EmitUint8(0x0F);
  EmitUint8(0x51);
  EmitXmmRegisterOperand(dst, src);
}


// Bitwise xor/and on FP values, used below for negate/abs via masks.
void X86Assembler::xorpd(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x66);
  EmitUint8(0x0F);
  EmitUint8(0x57);
  EmitOperand(dst, src);
}


void X86Assembler::xorpd(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x66);
  EmitUint8(0x0F);
  EmitUint8(0x57);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::xorps(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0x57);
  EmitOperand(dst, src);
}


void X86Assembler::xorps(XmmRegister dst, XmmRegister src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0x57);
  EmitXmmRegisterOperand(dst, src);
}


void X86Assembler::andpd(XmmRegister dst, const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x66);
  EmitUint8(0x0F);
  EmitUint8(0x54);
  EmitOperand(dst, src);
}


// x87 double load/store (DD /0, DD /3).
void X86Assembler::fldl(const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDD);
  EmitOperand(0, src);
}


void X86Assembler::fstpl(const Address& dst) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDD);
  EmitOperand(3, dst);
}


// x87 control-word save/restore (D9 /7, D9 /5).
void X86Assembler::fnstcw(const Address& dst) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitOperand(7, dst);
}


void X86Assembler::fldcw(const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitOperand(5, src);
}


// x87 integer store-with-pop: 64-bit (DF /7) and 32-bit (DB /3).
void X86Assembler::fistpl(const Address& dst) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDF);
  EmitOperand(7, dst);
}


void X86Assembler::fistps(const Address& dst) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDB);
  EmitOperand(3, dst);
}


// x87 64-bit integer load (DF /5).
void X86Assembler::fildl(const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDF);
  EmitOperand(5, src);
}

void X86Assembler::fincstp() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitUint8(0xF7);
}


// FFREE ST(i): DD C0+i.
// NOTE(review): the bound rejects index 7 even though FFREE ST(7)
// (DD C7) is a valid encoding -- confirm the restriction is intentional.
void X86Assembler::ffree(const Immediate& index) {
  CHECK_LT(index.value(), 7);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDD);
  EmitUint8(0xC0 + index.value());
}


void X86Assembler::fsin() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitUint8(0xFE);
}


void X86Assembler::fcos() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitUint8(0xFF);
}


void X86Assembler::fptan() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xD9);
  EmitUint8(0xF2);
}


// XCHG (87 /r).
void X86Assembler::xchgl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x87);
  EmitRegisterOperand(dst, src);
}

void X86Assembler::xchgl(Register reg, const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x87);
  EmitOperand(reg, address);
}


// CMP with immediate uses the /7 opcode extension via EmitComplex.
void X86Assembler::cmpl(Register reg, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(7, Operand(reg), imm);
}


void X86Assembler::cmpl(Register reg0, Register reg1) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x3B);
  EmitOperand(reg0, Operand(reg1));
}


void X86Assembler::cmpl(Register reg, const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x3B);
  EmitOperand(reg, address);
}


void X86Assembler::addl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x03);
  EmitRegisterOperand(dst, src);
}


void X86Assembler::addl(Register reg, const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x03);
  EmitOperand(reg, address);
}


// CMP m32, r32 (39 /r): register is the reg operand.
void X86Assembler::cmpl(const Address& address, Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x39);
  EmitOperand(reg, address);
}


void X86Assembler::cmpl(const Address& address, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(7, address, imm);
}


void X86Assembler::testl(Register reg1, Register reg2) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x85);
  EmitRegisterOperand(reg1, reg2);
}


void X86Assembler::testl(Register reg, const Immediate& immediate) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  // For registers that have a byte variant (EAX, EBX, ECX, and EDX)
  // we only test the byte register to keep the encoding short.
  if (immediate.is_uint8() && reg < 4) {
    // Use zero-extended 8-bit immediate.
    if (reg == EAX) {
      EmitUint8(0xA8);  // TEST AL, imm8.
    } else {
      EmitUint8(0xF6);  // TEST r/m8, imm8.
      EmitUint8(0xC0 + reg);
    }
    EmitUint8(immediate.value() & 0xFF);
  } else if (reg == EAX) {
    // Use short form if the destination is EAX.
    EmitUint8(0xA9);
    EmitImmediate(immediate);
  } else {
    EmitUint8(0xF7);
    EmitOperand(0, Operand(reg));
    EmitImmediate(immediate);
  }
}


void X86Assembler::andl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x23);
  EmitOperand(dst, Operand(src));
}


void X86Assembler::andl(Register dst, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(4, Operand(dst), imm);
}


void X86Assembler::orl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0B);
  EmitOperand(dst, Operand(src));
}


void X86Assembler::orl(Register dst, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(1, Operand(dst), imm);
}


void X86Assembler::xorl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x33);
  EmitOperand(dst, Operand(src));
}


void X86Assembler::addl(Register reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(0, Operand(reg), imm);  // ADD uses opcode extension /0.
}


void X86Assembler::addl(const Address& address, Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x01);
  EmitOperand(reg, address);
}


void X86Assembler::addl(const Address& address, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(0, address, imm);
}


// ADC (add with carry): immediate form /2, register/memory form 13 /r.
void X86Assembler::adcl(Register reg, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(2, Operand(reg), imm);
}


void X86Assembler::adcl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x13);
  EmitOperand(dst, Operand(src));
}


void X86Assembler::adcl(Register dst, const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x13);
  EmitOperand(dst, address);
}


void X86Assembler::subl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x2B);
  EmitOperand(dst, Operand(src));
}


void X86Assembler::subl(Register reg, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(5, Operand(reg), imm);
}


void X86Assembler::subl(Register reg, const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x2B);
  EmitOperand(reg, address);
}


// CDQ: sign-extend EAX into EDX:EAX (used before idivl).
void X86Assembler::cdq() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x99);
}


// IDIV r/m32 (F7 /7); the register is folded into the ModRM byte.
void X86Assembler::idivl(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitUint8(0xF8 | reg);
}


// Two-operand IMUL r32, r/m32 (0F AF /r).
void X86Assembler::imull(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0xAF);
  EmitOperand(dst, Operand(src));
}


// IMUL r32, r/m32, imm32 (69 /r id); reg serves as both source and dest.
void X86Assembler::imull(Register reg, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x69);
  EmitOperand(reg, Operand(reg));
  EmitImmediate(imm);
}


void X86Assembler::imull(Register reg, const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0xAF);
  EmitOperand(reg, address);
}


// One-operand IMUL/MUL into EDX:EAX (F7 /5 signed, F7 /4 unsigned).
void X86Assembler::imull(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitOperand(5, Operand(reg));
}


void X86Assembler::imull(const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitOperand(5, address);
}


void X86Assembler::mull(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitOperand(4, Operand(reg));
}


void X86Assembler::mull(const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitOperand(4, address);
}


// SBB (subtract with borrow): register/memory form 1B /r, immediate /3.
void X86Assembler::sbbl(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x1B);
  EmitOperand(dst, Operand(src));
}


void X86Assembler::sbbl(Register reg, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitComplex(3, Operand(reg), imm);
}


void X86Assembler::sbbl(Register dst, const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x1B);
  EmitOperand(dst, address);
}


// INC/DEC: short one-byte forms (40+r / 48+r) for registers,
// FF /0 and FF /1 for memory operands.
void X86Assembler::incl(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x40 + reg);
}


void X86Assembler::incl(const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xFF);
  EmitOperand(0, address);
}


void X86Assembler::decl(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x48 + reg);
}


void X86Assembler::decl(const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xFF);
  EmitOperand(1, address);
}


void X86Assembler::shll(Register reg,
const Immediate& imm) {
  EmitGenericShift(4, reg, imm);  // SHL = opcode extension /4.
}


void X86Assembler::shll(Register operand, Register shifter) {
  EmitGenericShift(4, operand, shifter);
}


void X86Assembler::shrl(Register reg, const Immediate& imm) {
  EmitGenericShift(5, reg, imm);  // SHR = /5.
}


void X86Assembler::shrl(Register operand, Register shifter) {
  EmitGenericShift(5, operand, shifter);
}


void X86Assembler::sarl(Register reg, const Immediate& imm) {
  EmitGenericShift(7, reg, imm);  // SAR = /7.
}


void X86Assembler::sarl(Register operand, Register shifter) {
  EmitGenericShift(7, operand, shifter);
}


// SHLD r/m32, r32, CL (0F A5): src supplies the bits shifted in, so it
// occupies the ModRM reg field.
void X86Assembler::shld(Register dst, Register src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0xA5);
  EmitRegisterOperand(src, dst);
}


void X86Assembler::negl(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitOperand(3, Operand(reg));  // NEG = /3.
}


void X86Assembler::notl(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitUint8(0xD0 | reg);  // NOT = /2, register form folded into ModRM.
}


// ENTER imm16, 0 (C8 iw ib): frame allocation with nesting level 0.
void X86Assembler::enter(const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC8);
  CHECK(imm.is_uint16());
  EmitUint8(imm.value() & 0xFF);
  EmitUint8((imm.value() >> 8) & 0xFF);
  EmitUint8(0x00);
}


void X86Assembler::leave() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC9);
}


void X86Assembler::ret() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC3);
}


// RET imm16 (C2 iw): return and pop imm16 bytes of arguments.
void X86Assembler::ret(const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC2);
  CHECK(imm.is_uint16());
  EmitUint8(imm.value() & 0xFF);
  EmitUint8((imm.value() >> 8) & 0xFF);
}



void X86Assembler::nop() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x90);
}


void X86Assembler::int3() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xCC);
}


void X86Assembler::hlt() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF4);
}


// Conditional jump: uses the two-byte rel8 form (70+cc) when the bound
// target is in range, otherwise the six-byte rel32 form (0F 80+cc).
// Unbound labels always get the long form so they can be patched later.
void X86Assembler::j(Condition condition, Label* label) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (label->IsBound()) {
    static const int kShortSize = 2;
    static const int kLongSize = 6;
    int offset = label->Position() - buffer_.Size();
    CHECK_LE(offset, 0);  // Bound targets are always backward.
    if (IsInt(8, offset - kShortSize)) {
      EmitUint8(0x70 + condition);
      EmitUint8((offset - kShortSize) & 0xFF);
    } else {
      EmitUint8(0x0F);
      EmitUint8(0x80 + condition);
      EmitInt32(offset - kLongSize);
    }
  } else {
    EmitUint8(0x0F);
    EmitUint8(0x80 + condition);
    EmitLabelLink(label);
  }
}


void X86Assembler::jmp(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xFF);
  EmitRegisterOperand(4, reg);  // JMP r/m32 = /4.
}

void X86Assembler::jmp(const Address& address) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xFF);
  EmitOperand(4, address);
}

// Unconditional jump; same short/long selection as j() above.
void X86Assembler::jmp(Label* label) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (label->IsBound()) {
    static const int kShortSize = 2;
    static const int kLongSize = 5;
    int offset = label->Position() - buffer_.Size();
    CHECK_LE(offset, 0);
    if (IsInt(8, offset - kShortSize)) {
      EmitUint8(0xEB);
      EmitUint8((offset - kShortSize) & 0xFF);
    } else {
      EmitUint8(0xE9);
      EmitInt32(offset - kLongSize);
    }
  } else {
    EmitUint8(0xE9);
    EmitLabelLink(label);
  }
}


// LOCK prefix (F0); returning |this| allows lock()->cmpxchgl(...).
X86Assembler* X86Assembler::lock() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF0);
  return this;
}


void X86Assembler::cmpxchgl(const Address& address, Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0xB1);
  EmitOperand(reg, address);
}

// MFENCE (0F AE F0): full memory barrier.
void X86Assembler::mfence() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0xAE);
  EmitUint8(0xF0);
}

X86Assembler* X86Assembler::fs() {
  //
TODO: fs is a prefix and not an instruction + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x64); + return this; +} + +void X86Assembler::AddImmediate(Register reg, const Immediate& imm) { + int value = imm.value(); + if (value > 0) { + if (value == 1) { + incl(reg); + } else if (value != 0) { + addl(reg, imm); + } + } else if (value < 0) { + value = -value; + if (value == 1) { + decl(reg); + } else if (value != 0) { + subl(reg, Immediate(value)); + } + } +} + + +void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) { + // TODO: Need to have a code constants table. + int64_t constant = bit_cast(value); + pushl(Immediate(High32Bits(constant))); + pushl(Immediate(Low32Bits(constant))); + movsd(dst, Address(ESP, 0)); + addl(ESP, Immediate(2 * kWordSize)); +} + + +void X86Assembler::FloatNegate(XmmRegister f) { + static const struct { + uint32_t a; + uint32_t b; + uint32_t c; + uint32_t d; + } float_negate_constant __attribute__((aligned(16))) = + { 0x80000000, 0x00000000, 0x80000000, 0x00000000 }; + xorps(f, Address::Absolute(reinterpret_cast(&float_negate_constant))); +} + + +void X86Assembler::DoubleNegate(XmmRegister d) { + static const struct { + uint64_t a; + uint64_t b; + } double_negate_constant __attribute__((aligned(16))) = + {0x8000000000000000LL, 0x8000000000000000LL}; + xorpd(d, Address::Absolute(reinterpret_cast(&double_negate_constant))); +} + + +void X86Assembler::DoubleAbs(XmmRegister reg) { + static const struct { + uint64_t a; + uint64_t b; + } double_abs_constant __attribute__((aligned(16))) = + {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL}; + andpd(reg, Address::Absolute(reinterpret_cast(&double_abs_constant))); +} + + +void X86Assembler::Align(int alignment, int offset) { + CHECK(IsPowerOfTwo(alignment)); + // Emit nop instruction until the real position is aligned. 
+ while (((offset + buffer_.GetPosition()) & (alignment-1)) != 0) { + nop(); + } +} + + +void X86Assembler::Bind(Label* label) { + int bound = buffer_.Size(); + CHECK(!label->IsBound()); // Labels can only be bound once. + while (label->IsLinked()) { + int position = label->LinkPosition(); + int next = buffer_.Load(position); + buffer_.Store(position, bound - (position + 4)); + label->position_ = next; + } + label->BindTo(bound); +} + + +void X86Assembler::Stop(const char* message) { + // Emit the message address as immediate operand in the test rax instruction, + // followed by the int3 instruction. + // Execution can be resumed with the 'cont' command in gdb. + testl(EAX, Immediate(reinterpret_cast(message))); + int3(); +} + + +void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) { + CHECK_GE(reg_or_opcode, 0); + CHECK_LT(reg_or_opcode, 8); + const int length = operand.length_; + CHECK_GT(length, 0); + // Emit the ModRM byte updated with the given reg value. + CHECK_EQ(operand.encoding_[0] & 0x38, 0); + EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3)); + // Emit the rest of the encoded operand. + for (int i = 1; i < length; i++) { + EmitUint8(operand.encoding_[i]); + } +} + + +void X86Assembler::EmitImmediate(const Immediate& imm) { + EmitInt32(imm.value()); +} + + +void X86Assembler::EmitComplex(int reg_or_opcode, + const Operand& operand, + const Immediate& immediate) { + CHECK_GE(reg_or_opcode, 0); + CHECK_LT(reg_or_opcode, 8); + if (immediate.is_int8()) { + // Use sign-extended 8-bit immediate. + EmitUint8(0x83); + EmitOperand(reg_or_opcode, operand); + EmitUint8(immediate.value() & 0xFF); + } else if (operand.IsRegister(EAX)) { + // Use short form if the destination is eax. 
+ EmitUint8(0x05 + (reg_or_opcode << 3)); + EmitImmediate(immediate); + } else { + EmitUint8(0x81); + EmitOperand(reg_or_opcode, operand); + EmitImmediate(immediate); + } +} + + +void X86Assembler::EmitLabel(Label* label, int instruction_size) { + if (label->IsBound()) { + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + EmitInt32(offset - instruction_size); + } else { + EmitLabelLink(label); + } +} + + +void X86Assembler::EmitLabelLink(Label* label) { + CHECK(!label->IsBound()); + int position = buffer_.Size(); + EmitInt32(label->position_); + label->LinkTo(position); +} + + +void X86Assembler::EmitGenericShift(int reg_or_opcode, + Register reg, + const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int8()); + if (imm.value() == 1) { + EmitUint8(0xD1); + EmitOperand(reg_or_opcode, Operand(reg)); + } else { + EmitUint8(0xC1); + EmitOperand(reg_or_opcode, Operand(reg)); + EmitUint8(imm.value() & 0xFF); + } +} + + +void X86Assembler::EmitGenericShift(int reg_or_opcode, + Register operand, + Register shifter) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK_EQ(shifter, ECX); + EmitUint8(0xD3); + EmitOperand(reg_or_opcode, Operand(operand)); +} + +void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& spill_regs, + const std::vector& entry_spills) { + CHECK_ALIGNED(frame_size, kStackAlignment); + for (int i = spill_regs.size() - 1; i >= 0; --i) { + pushl(spill_regs.at(i).AsX86().AsCpuRegister()); + } + // return address then method on stack + addl(ESP, Immediate(-frame_size + (spill_regs.size() * kPointerSize) + + kPointerSize /*method*/ + kPointerSize /*return address*/)); + pushl(method_reg.AsX86().AsCpuRegister()); + for (size_t i = 0; i < entry_spills.size(); ++i) { + movl(Address(ESP, frame_size + kPointerSize + (i * kPointerSize)), + entry_spills.at(i).AsX86().AsCpuRegister()); + } +} + +void X86Assembler::RemoveFrame(size_t frame_size, + const 
std::vector& spill_regs) { + CHECK_ALIGNED(frame_size, kStackAlignment); + addl(ESP, Immediate(frame_size - (spill_regs.size() * kPointerSize) - kPointerSize)); + for (size_t i = 0; i < spill_regs.size(); ++i) { + popl(spill_regs.at(i).AsX86().AsCpuRegister()); + } + ret(); +} + +void X86Assembler::IncreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + addl(ESP, Immediate(-adjust)); +} + +void X86Assembler::DecreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + addl(ESP, Immediate(adjust)); +} + +void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) { + X86ManagedRegister src = msrc.AsX86(); + if (src.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (src.IsCpuRegister()) { + CHECK_EQ(4u, size); + movl(Address(ESP, offs), src.AsCpuRegister()); + } else if (src.IsRegisterPair()) { + CHECK_EQ(8u, size); + movl(Address(ESP, offs), src.AsRegisterPairLow()); + movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), + src.AsRegisterPairHigh()); + } else if (src.IsX87Register()) { + if (size == 4) { + fstps(Address(ESP, offs)); + } else { + fstpl(Address(ESP, offs)); + } + } else { + CHECK(src.IsXmmRegister()); + if (size == 4) { + movss(Address(ESP, offs), src.AsXmmRegister()); + } else { + movsd(Address(ESP, offs), src.AsXmmRegister()); + } + } +} + +void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { + X86ManagedRegister src = msrc.AsX86(); + CHECK(src.IsCpuRegister()); + movl(Address(ESP, dest), src.AsCpuRegister()); +} + +void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { + X86ManagedRegister src = msrc.AsX86(); + CHECK(src.IsCpuRegister()); + movl(Address(ESP, dest), src.AsCpuRegister()); +} + +void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister) { + movl(Address(ESP, dest), Immediate(imm)); +} + +void X86Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister) { + 
fs()->movl(Address::Absolute(dest), Immediate(imm));
}

// Stores the address SP+|fr_offs| into the Thread field at |thr_offs|,
// computed through |mscratch|.
void X86Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
                                            FrameOffset fr_offs,
                                            ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
  fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}

void X86Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
  fs()->movl(Address::Absolute(thr_offs), ESP);
}

void X86Assembler::StoreLabelToThread(ThreadOffset thr_offs, Label* lbl) {
  fs()->movl(Address::Absolute(thr_offs), lbl);
}

void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
                                 FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
}

// Loads |size| bytes from stack slot |src| into |mdest|; mirrors Store().
void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    movl(dest.AsCpuRegister(), Address(ESP, src));
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    movl(dest.AsRegisterPairLow(), Address(ESP, src));
    movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      flds(Address(ESP, src));
    } else {
      fldl(Address(ESP, src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      movss(dest.AsXmmRegister(), Address(ESP, src));
    } else {
      movsd(dest.AsXmmRegister(), Address(ESP, src));
    }
  }
}

// Same as above, but reading from a Thread (FS-relative) offset.
void X86Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
    fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset(src.Int32Value()+4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      fs()->flds(Address::Absolute(src));
    } else {
      fs()->fldl(Address::Absolute(src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
    } else {
      fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
    }
  }
}

void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  movl(dest.AsCpuRegister(), Address(ESP, src));
}

void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
                           MemberOffset offs) {
  X86ManagedRegister dest = mdest.AsX86();
  // NOTE(review): both clauses test dest; the second was presumably
  // meant to check base -- confirm.
  CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
  movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
}

void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                              Offset offs) {
  X86ManagedRegister dest = mdest.AsX86();
  // NOTE(review): same duplicated dest check as in LoadRef above.
  CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
  movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
}

void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest,
                                        ThreadOffset offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
}

// Sign-extends the low |size| bytes of |mreg| in place.
void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

// Zero-extends the low |size| bytes of |mreg| in place.
void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
if (size == 1) {
    movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

// Register-to-register move. An x87 -> XMM move is bounced through a
// temporary stack slot since there is no direct transfer instruction.
void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  X86ManagedRegister src = msrc.AsX86();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      movl(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register
      subl(ESP, Immediate(16));
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        fstps(Address(ESP, 0));
        movss(dest.AsXmmRegister(), Address(ESP, 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        fstpl(Address(ESP, 0));
        movsd(dest.AsXmmRegister(), Address(ESP, 0));
      }
      addl(ESP, Immediate(16));
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  movl(scratch.AsCpuRegister(), Address(ESP, src));
  movl(Address(ESP, dest), scratch.AsCpuRegister());
}

void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                        ThreadOffset thr_offs,
                                        ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
  Store(fr_offs, scratch, 4);
}

void X86Assembler::CopyRawPtrToThread(ThreadOffset thr_offs,
                                      FrameOffset fr_offs,
                                      ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 4);
  fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}

// Stack-to-stack copy through |mscratch|; an 8-byte copy with a CPU
// scratch register is done as two 4-byte moves.
void X86Assembler::Copy(FrameOffset dest, FrameOffset src,
                        ManagedRegister mscratch,
                        size_t size) {
  X86ManagedRegister scratch = mscratch.AsX86();
  if (scratch.IsCpuRegister() && size == 8) {
    Load(scratch, src, 4);
    Store(dest, scratch, 4);
    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
  } else {
    Load(scratch, src, size);
    Store(dest, scratch, size);
  }
}

void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
                        ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

// Copies via push/pop, so no scratch register is required.
void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                        ManagedRegister scratch, size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  pushl(Address(ESP, src));
  popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
}

// Double-indirect load: *(*(ESP+src_base) + src_offset) -> stack slot.
void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  movl(scratch, Address(ESP, src_base));
  movl(scratch, Address(scratch, src_offset));
  movl(Address(ESP, dest), scratch);
}

void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset,
                        ManagedRegister src, Offset src_offset,
                        ManagedRegister scratch, size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
  popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
}

// Copies between two offsets off the same base slot (dest must equal src).
void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  movl(scratch, Address(ESP, src));
  pushl(Address(scratch, src_offset));
  popl(Address(scratch, dest_offset));
}

// Full barrier; compiled out on non-SMP builds.
void X86Assembler::MemoryBarrier(ManagedRegister) {
#if ANDROID_SMP != 0
  mfence();
#endif
}

// Writes either null or the address of the SIRT slot to |mout_reg|.
void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
                                   FrameOffset
sirt_offset, + ManagedRegister min_reg, bool null_allowed) { + X86ManagedRegister out_reg = mout_reg.AsX86(); + X86ManagedRegister in_reg = min_reg.AsX86(); + CHECK(in_reg.IsCpuRegister()); + CHECK(out_reg.IsCpuRegister()); + VerifyObject(in_reg, null_allowed); + if (null_allowed) { + Label null_arg; + if (!out_reg.Equals(in_reg)) { + xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); + } + testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister()); + j(kZero, &null_arg); + leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset)); + Bind(&null_arg); + } else { + leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset)); + } +} + +void X86Assembler::CreateSirtEntry(FrameOffset out_off, + FrameOffset sirt_offset, + ManagedRegister mscratch, + bool null_allowed) { + X86ManagedRegister scratch = mscratch.AsX86(); + CHECK(scratch.IsCpuRegister()); + if (null_allowed) { + Label null_arg; + movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset)); + testl(scratch.AsCpuRegister(), scratch.AsCpuRegister()); + j(kZero, &null_arg); + leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset)); + Bind(&null_arg); + } else { + leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset)); + } + Store(out_off, scratch, 4); +} + +// Given a SIRT entry, load the associated reference. 
+void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg, + ManagedRegister min_reg) { + X86ManagedRegister out_reg = mout_reg.AsX86(); + X86ManagedRegister in_reg = min_reg.AsX86(); + CHECK(out_reg.IsCpuRegister()); + CHECK(in_reg.IsCpuRegister()); + Label null_arg; + if (!out_reg.Equals(in_reg)) { + xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); + } + testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister()); + j(kZero, &null_arg); + movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0)); + Bind(&null_arg); +} + +void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) { + X86ManagedRegister base = mbase.AsX86(); + CHECK(base.IsCpuRegister()); + call(Address(base.AsCpuRegister(), offset.Int32Value())); + // TODO: place reference map on call +} + +void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { + Register scratch = mscratch.AsX86().AsCpuRegister(); + movl(scratch, Address(ESP, base)); + call(Address(scratch, offset)); +} + +void X86Assembler::Call(ThreadOffset offset, ManagedRegister /*mscratch*/) { + fs()->call(Address::Absolute(offset)); +} + +void X86Assembler::GetCurrentThread(ManagedRegister tr) { + fs()->movl(tr.AsX86().AsCpuRegister(), + Address::Absolute(Thread::SelfOffset())); +} + +void X86Assembler::GetCurrentThread(FrameOffset offset, + ManagedRegister mscratch) { + X86ManagedRegister scratch = mscratch.AsX86(); + fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset())); + movl(Address(ESP, offset), scratch.AsCpuRegister()); +} + +void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { + X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust); + 
buffer_.EnqueueSlowPath(slow); + fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0)); + j(kNotEqual, slow->Entry()); +} + +void X86ExceptionSlowPath::Emit(Assembler *sasm) { + X86Assembler* sp_asm = down_cast(sasm); +#define __ sp_asm-> + __ Bind(&entry_); + // Note: the return value is dead + if (stack_adjust_ != 0) { // Fix up the frame. + __ DecreaseFrameSize(stack_adjust_); + } + // Pass exception as argument in EAX + __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset())); + __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException))); + // this call should never return + __ int3(); +#undef __ +} + +} // namespace x86 +} // namespace art diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h new file mode 100644 index 0000000000..4ba03d1bd3 --- /dev/null +++ b/compiler/utils/x86/assembler_x86.h @@ -0,0 +1,646 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_ +#define ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_ + +#include +#include "base/macros.h" +#include "constants_x86.h" +#include "globals.h" +#include "managed_register_x86.h" +#include "offsets.h" +#include "utils/assembler.h" +#include "utils.h" + +namespace art { +namespace x86 { + +class Immediate { + public: + explicit Immediate(int32_t value) : value_(value) {} + + int32_t value() const { return value_; } + + bool is_int8() const { return IsInt(8, value_); } + bool is_uint8() const { return IsUint(8, value_); } + bool is_uint16() const { return IsUint(16, value_); } + + private: + const int32_t value_; + + DISALLOW_COPY_AND_ASSIGN(Immediate); +}; + + +class Operand { + public: + uint8_t mod() const { + return (encoding_at(0) >> 6) & 3; + } + + Register rm() const { + return static_cast(encoding_at(0) & 7); + } + + ScaleFactor scale() const { + return static_cast((encoding_at(1) >> 6) & 3); + } + + Register index() const { + return static_cast((encoding_at(1) >> 3) & 7); + } + + Register base() const { + return static_cast(encoding_at(1) & 7); + } + + int8_t disp8() const { + CHECK_GE(length_, 2); + return static_cast(encoding_[length_ - 1]); + } + + int32_t disp32() const { + CHECK_GE(length_, 5); + int32_t value; + memcpy(&value, &encoding_[length_ - 4], sizeof(value)); + return value; + } + + bool IsRegister(Register reg) const { + return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only. + && ((encoding_[0] & 0x07) == reg); // Register codes match. + } + + protected: + // Operand can be sub classed (e.g: Address). 
+ Operand() : length_(0) { } + + void SetModRM(int mod, Register rm) { + CHECK_EQ(mod & ~3, 0); + encoding_[0] = (mod << 6) | rm; + length_ = 1; + } + + void SetSIB(ScaleFactor scale, Register index, Register base) { + CHECK_EQ(length_, 1); + CHECK_EQ(scale & ~3, 0); + encoding_[1] = (scale << 6) | (index << 3) | base; + length_ = 2; + } + + void SetDisp8(int8_t disp) { + CHECK(length_ == 1 || length_ == 2); + encoding_[length_++] = static_cast(disp); + } + + void SetDisp32(int32_t disp) { + CHECK(length_ == 1 || length_ == 2); + int disp_size = sizeof(disp); + memmove(&encoding_[length_], &disp, disp_size); + length_ += disp_size; + } + + private: + byte length_; + byte encoding_[6]; + byte padding_; + + explicit Operand(Register reg) { SetModRM(3, reg); } + + // Get the operand encoding byte at the given index. + uint8_t encoding_at(int index) const { + CHECK_GE(index, 0); + CHECK_LT(index, length_); + return encoding_[index]; + } + + friend class X86Assembler; + + DISALLOW_COPY_AND_ASSIGN(Operand); +}; + + +class Address : public Operand { + public: + Address(Register base, int32_t disp) { + Init(base, disp); + } + + Address(Register base, Offset disp) { + Init(base, disp.Int32Value()); + } + + Address(Register base, FrameOffset disp) { + CHECK_EQ(base, ESP); + Init(ESP, disp.Int32Value()); + } + + Address(Register base, MemberOffset disp) { + Init(base, disp.Int32Value()); + } + + void Init(Register base, int32_t disp) { + if (disp == 0 && base != EBP) { + SetModRM(0, base); + if (base == ESP) SetSIB(TIMES_1, ESP, base); + } else if (disp >= -128 && disp <= 127) { + SetModRM(1, base); + if (base == ESP) SetSIB(TIMES_1, ESP, base); + SetDisp8(disp); + } else { + SetModRM(2, base); + if (base == ESP) SetSIB(TIMES_1, ESP, base); + SetDisp32(disp); + } + } + + + Address(Register index, ScaleFactor scale, int32_t disp) { + CHECK_NE(index, ESP); // Illegal addressing mode. 
+ SetModRM(0, ESP); + SetSIB(scale, index, EBP); + SetDisp32(disp); + } + + Address(Register base, Register index, ScaleFactor scale, int32_t disp) { + CHECK_NE(index, ESP); // Illegal addressing mode. + if (disp == 0 && base != EBP) { + SetModRM(0, ESP); + SetSIB(scale, index, base); + } else if (disp >= -128 && disp <= 127) { + SetModRM(1, ESP); + SetSIB(scale, index, base); + SetDisp8(disp); + } else { + SetModRM(2, ESP); + SetSIB(scale, index, base); + SetDisp32(disp); + } + } + + static Address Absolute(uword addr) { + Address result; + result.SetModRM(0, EBP); + result.SetDisp32(addr); + return result; + } + + static Address Absolute(ThreadOffset addr) { + return Absolute(addr.Int32Value()); + } + + private: + Address() {} + + DISALLOW_COPY_AND_ASSIGN(Address); +}; + + +class X86Assembler : public Assembler { + public: + X86Assembler() {} + virtual ~X86Assembler() {} + + /* + * Emit Machine Instructions. + */ + void call(Register reg); + void call(const Address& address); + void call(Label* label); + + void pushl(Register reg); + void pushl(const Address& address); + void pushl(const Immediate& imm); + + void popl(Register reg); + void popl(const Address& address); + + void movl(Register dst, const Immediate& src); + void movl(Register dst, Register src); + + void movl(Register dst, const Address& src); + void movl(const Address& dst, Register src); + void movl(const Address& dst, const Immediate& imm); + void movl(const Address& dst, Label* lbl); + + void movzxb(Register dst, ByteRegister src); + void movzxb(Register dst, const Address& src); + void movsxb(Register dst, ByteRegister src); + void movsxb(Register dst, const Address& src); + void movb(Register dst, const Address& src); + void movb(const Address& dst, ByteRegister src); + void movb(const Address& dst, const Immediate& imm); + + void movzxw(Register dst, Register src); + void movzxw(Register dst, const Address& src); + void movsxw(Register dst, Register src); + void movsxw(Register dst, const 
Address& src); + void movw(Register dst, const Address& src); + void movw(const Address& dst, Register src); + + void leal(Register dst, const Address& src); + + void cmovl(Condition condition, Register dst, Register src); + + void setb(Condition condition, Register dst); + + void movss(XmmRegister dst, const Address& src); + void movss(const Address& dst, XmmRegister src); + void movss(XmmRegister dst, XmmRegister src); + + void movd(XmmRegister dst, Register src); + void movd(Register dst, XmmRegister src); + + void addss(XmmRegister dst, XmmRegister src); + void addss(XmmRegister dst, const Address& src); + void subss(XmmRegister dst, XmmRegister src); + void subss(XmmRegister dst, const Address& src); + void mulss(XmmRegister dst, XmmRegister src); + void mulss(XmmRegister dst, const Address& src); + void divss(XmmRegister dst, XmmRegister src); + void divss(XmmRegister dst, const Address& src); + + void movsd(XmmRegister dst, const Address& src); + void movsd(const Address& dst, XmmRegister src); + void movsd(XmmRegister dst, XmmRegister src); + + void addsd(XmmRegister dst, XmmRegister src); + void addsd(XmmRegister dst, const Address& src); + void subsd(XmmRegister dst, XmmRegister src); + void subsd(XmmRegister dst, const Address& src); + void mulsd(XmmRegister dst, XmmRegister src); + void mulsd(XmmRegister dst, const Address& src); + void divsd(XmmRegister dst, XmmRegister src); + void divsd(XmmRegister dst, const Address& src); + + void cvtsi2ss(XmmRegister dst, Register src); + void cvtsi2sd(XmmRegister dst, Register src); + + void cvtss2si(Register dst, XmmRegister src); + void cvtss2sd(XmmRegister dst, XmmRegister src); + + void cvtsd2si(Register dst, XmmRegister src); + void cvtsd2ss(XmmRegister dst, XmmRegister src); + + void cvttss2si(Register dst, XmmRegister src); + void cvttsd2si(Register dst, XmmRegister src); + + void cvtdq2pd(XmmRegister dst, XmmRegister src); + + void comiss(XmmRegister a, XmmRegister b); + void comisd(XmmRegister a, 
XmmRegister b); + + void sqrtsd(XmmRegister dst, XmmRegister src); + void sqrtss(XmmRegister dst, XmmRegister src); + + void xorpd(XmmRegister dst, const Address& src); + void xorpd(XmmRegister dst, XmmRegister src); + void xorps(XmmRegister dst, const Address& src); + void xorps(XmmRegister dst, XmmRegister src); + + void andpd(XmmRegister dst, const Address& src); + + void flds(const Address& src); + void fstps(const Address& dst); + + void fldl(const Address& src); + void fstpl(const Address& dst); + + void fnstcw(const Address& dst); + void fldcw(const Address& src); + + void fistpl(const Address& dst); + void fistps(const Address& dst); + void fildl(const Address& src); + + void fincstp(); + void ffree(const Immediate& index); + + void fsin(); + void fcos(); + void fptan(); + + void xchgl(Register dst, Register src); + void xchgl(Register reg, const Address& address); + + void cmpl(Register reg, const Immediate& imm); + void cmpl(Register reg0, Register reg1); + void cmpl(Register reg, const Address& address); + + void cmpl(const Address& address, Register reg); + void cmpl(const Address& address, const Immediate& imm); + + void testl(Register reg1, Register reg2); + void testl(Register reg, const Immediate& imm); + + void andl(Register dst, const Immediate& imm); + void andl(Register dst, Register src); + + void orl(Register dst, const Immediate& imm); + void orl(Register dst, Register src); + + void xorl(Register dst, Register src); + + void addl(Register dst, Register src); + void addl(Register reg, const Immediate& imm); + void addl(Register reg, const Address& address); + + void addl(const Address& address, Register reg); + void addl(const Address& address, const Immediate& imm); + + void adcl(Register dst, Register src); + void adcl(Register reg, const Immediate& imm); + void adcl(Register dst, const Address& address); + + void subl(Register dst, Register src); + void subl(Register reg, const Immediate& imm); + void subl(Register reg, const Address& 
address); + + void cdq(); + + void idivl(Register reg); + + void imull(Register dst, Register src); + void imull(Register reg, const Immediate& imm); + void imull(Register reg, const Address& address); + + void imull(Register reg); + void imull(const Address& address); + + void mull(Register reg); + void mull(const Address& address); + + void sbbl(Register dst, Register src); + void sbbl(Register reg, const Immediate& imm); + void sbbl(Register reg, const Address& address); + + void incl(Register reg); + void incl(const Address& address); + + void decl(Register reg); + void decl(const Address& address); + + void shll(Register reg, const Immediate& imm); + void shll(Register operand, Register shifter); + void shrl(Register reg, const Immediate& imm); + void shrl(Register operand, Register shifter); + void sarl(Register reg, const Immediate& imm); + void sarl(Register operand, Register shifter); + void shld(Register dst, Register src); + + void negl(Register reg); + void notl(Register reg); + + void enter(const Immediate& imm); + void leave(); + + void ret(); + void ret(const Immediate& imm); + + void nop(); + void int3(); + void hlt(); + + void j(Condition condition, Label* label); + + void jmp(Register reg); + void jmp(const Address& address); + void jmp(Label* label); + + X86Assembler* lock(); + void cmpxchgl(const Address& address, Register reg); + + void mfence(); + + X86Assembler* fs(); + + // + // Macros for High-level operations. + // + + void AddImmediate(Register reg, const Immediate& imm); + + void LoadDoubleConstant(XmmRegister dst, double value); + + void DoubleNegate(XmmRegister d); + void FloatNegate(XmmRegister f); + + void DoubleAbs(XmmRegister reg); + + void LockCmpxchgl(const Address& address, Register reg) { + lock()->cmpxchgl(address, reg); + } + + // + // Misc. functionality + // + int PreferredLoopAlignment() { return 16; } + void Align(int alignment, int offset); + void Bind(Label* label); + + // Debugging and bringup support. 
+ void Stop(const char* message); + + // + // Overridden common assembler high-level functionality + // + + // Emit code that will create an activation on the stack + virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& callee_save_regs, + const std::vector& entry_spills); + + // Emit code that will remove an activation from the stack + virtual void RemoveFrame(size_t frame_size, + const std::vector& callee_save_regs); + + virtual void IncreaseFrameSize(size_t adjust); + virtual void DecreaseFrameSize(size_t adjust); + + // Store routines + virtual void Store(FrameOffset offs, ManagedRegister src, size_t size); + virtual void StoreRef(FrameOffset dest, ManagedRegister src); + virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src); + + virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister scratch); + + virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister scratch); + + virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch); + + virtual void StoreStackPointerToThread(ThreadOffset thr_offs); + + void StoreLabelToThread(ThreadOffset thr_offs, Label* lbl); + + virtual void StoreSpanning(FrameOffset dest, ManagedRegister src, + FrameOffset in_off, ManagedRegister scratch); + + // Load routines + virtual void Load(ManagedRegister dest, FrameOffset src, size_t size); + + virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size); + + virtual void LoadRef(ManagedRegister dest, FrameOffset src); + + virtual void LoadRef(ManagedRegister dest, ManagedRegister base, + MemberOffset offs); + + virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, + Offset offs); + + virtual void LoadRawPtrFromThread(ManagedRegister dest, + ThreadOffset offs); + + // Copying routines + virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size); + + virtual void 
CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, + ManagedRegister scratch); + + virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, + ManagedRegister scratch); + + virtual void CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister scratch); + + virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size); + + virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister scratch, size_t size); + + virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, + ManagedRegister scratch, size_t size); + + virtual void MemoryBarrier(ManagedRegister); + + // Sign extension + virtual void SignExtend(ManagedRegister mreg, size_t size); + + // Zero extension + virtual void ZeroExtend(ManagedRegister mreg, size_t size); + + // Exploit fast access in managed code to Thread::Current() + virtual void GetCurrentThread(ManagedRegister tr); + virtual void GetCurrentThread(FrameOffset dest_offset, + ManagedRegister scratch); + + // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the SIRT entry to see if the value is + // NULL. + virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, + ManagedRegister in_reg, bool null_allowed); + + // Set up out_off to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. 
+ virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, + ManagedRegister scratch, bool null_allowed); + + // src holds a SIRT entry (Object**) load this into dst + virtual void LoadReferenceFromSirt(ManagedRegister dst, + ManagedRegister src); + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + virtual void VerifyObject(ManagedRegister src, bool could_be_null); + virtual void VerifyObject(FrameOffset src, bool could_be_null); + + // Call to address held at [base+offset] + virtual void Call(ManagedRegister base, Offset offset, + ManagedRegister scratch); + virtual void Call(FrameOffset base, Offset offset, + ManagedRegister scratch); + virtual void Call(ThreadOffset offset, ManagedRegister scratch); + + // Generate code to check if Thread::Current()->exception_ is non-null + // and branch to a ExceptionSlowPath if it is. + virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust); + + private: + inline void EmitUint8(uint8_t value); + inline void EmitInt32(int32_t value); + inline void EmitRegisterOperand(int rm, int reg); + inline void EmitXmmRegisterOperand(int rm, XmmRegister reg); + inline void EmitFixup(AssemblerFixup* fixup); + inline void EmitOperandSizeOverride(); + + void EmitOperand(int rm, const Operand& operand); + void EmitImmediate(const Immediate& imm); + void EmitComplex(int rm, const Operand& operand, const Immediate& immediate); + void EmitLabel(Label* label, int instruction_size); + void EmitLabelLink(Label* label); + void EmitNearLabelLink(Label* label); + + void EmitGenericShift(int rm, Register reg, const Immediate& imm); + void EmitGenericShift(int rm, Register operand, Register shifter); + + DISALLOW_COPY_AND_ASSIGN(X86Assembler); +}; + +inline void X86Assembler::EmitUint8(uint8_t value) { + buffer_.Emit(value); +} + +inline void X86Assembler::EmitInt32(int32_t value) { + buffer_.Emit(value); +} + +inline void 
X86Assembler::EmitRegisterOperand(int rm, int reg) { + CHECK_GE(rm, 0); + CHECK_LT(rm, 8); + buffer_.Emit(0xC0 + (rm << 3) + reg); +} + +inline void X86Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) { + EmitRegisterOperand(rm, static_cast(reg)); +} + +inline void X86Assembler::EmitFixup(AssemblerFixup* fixup) { + buffer_.EmitFixup(fixup); +} + +inline void X86Assembler::EmitOperandSizeOverride() { + EmitUint8(0x66); +} + +// Slowpath entered when Thread::Current()->_exception is non-null +class X86ExceptionSlowPath : public SlowPath { + public: + explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {} + virtual void Emit(Assembler *sp_asm); + private: + const size_t stack_adjust_; +}; + +} // namespace x86 +} // namespace art + +#endif // ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_ diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc new file mode 100644 index 0000000000..5d8a3b1521 --- /dev/null +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "assembler_x86.h" + +#include "gtest/gtest.h" + +namespace art { + +TEST(AssemblerX86, CreateBuffer) { + AssemblerBuffer buffer; + AssemblerBuffer::EnsureCapacity ensured(&buffer); + buffer.Emit(0x42); + ASSERT_EQ(static_cast(1), buffer.Size()); + buffer.Emit(42); + ASSERT_EQ(static_cast(5), buffer.Size()); +} + +} // namespace art diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h new file mode 100644 index 0000000000..45c3834a98 --- /dev/null +++ b/compiler/utils/x86/constants_x86.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_ +#define ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_ + +#include + +#include "arch/x86/registers_x86.h" +#include "base/logging.h" +#include "base/macros.h" +#include "globals.h" + +namespace art { +namespace x86 { + +enum ByteRegister { + AL = 0, + CL = 1, + DL = 2, + BL = 3, + AH = 4, + CH = 5, + DH = 6, + BH = 7, + kNoByteRegister = -1 // Signals an illegal register. +}; + + +enum XmmRegister { + XMM0 = 0, + XMM1 = 1, + XMM2 = 2, + XMM3 = 3, + XMM4 = 4, + XMM5 = 5, + XMM6 = 6, + XMM7 = 7, + kNumberOfXmmRegisters = 8, + kNoXmmRegister = -1 // Signals an illegal register. 
+}; +std::ostream& operator<<(std::ostream& os, const XmmRegister& reg); + +enum X87Register { + ST0 = 0, + ST1 = 1, + ST2 = 2, + ST3 = 3, + ST4 = 4, + ST5 = 5, + ST6 = 6, + ST7 = 7, + kNumberOfX87Registers = 8, + kNoX87Register = -1 // Signals an illegal register. +}; +std::ostream& operator<<(std::ostream& os, const X87Register& reg); + +enum ScaleFactor { + TIMES_1 = 0, + TIMES_2 = 1, + TIMES_4 = 2, + TIMES_8 = 3 +}; + +enum Condition { + kOverflow = 0, + kNoOverflow = 1, + kBelow = 2, + kAboveEqual = 3, + kEqual = 4, + kNotEqual = 5, + kBelowEqual = 6, + kAbove = 7, + kSign = 8, + kNotSign = 9, + kParityEven = 10, + kParityOdd = 11, + kLess = 12, + kGreaterEqual = 13, + kLessEqual = 14, + kGreater = 15, + + kZero = kEqual, + kNotZero = kNotEqual, + kNegative = kSign, + kPositive = kNotSign +}; + + +class Instr { + public: + static const uint8_t kHltInstruction = 0xF4; + // We prefer not to use the int3 instruction since it conflicts with gdb. + static const uint8_t kBreakPointInstruction = kHltInstruction; + + bool IsBreakPoint() { + return (*reinterpret_cast(this)) == kBreakPointInstruction; + } + + // Instructions are read out of a code stream. The only way to get a + // reference to an instruction is to convert a pointer. There is no way + // to allocate or create instances of class Instr. + // Use the At(pc) function to create references to Instr. 
+ static Instr* At(uintptr_t pc) { return reinterpret_cast(pc); } + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); +}; + +} // namespace x86 +} // namespace art + +#endif // ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_ diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc new file mode 100644 index 0000000000..4697d06136 --- /dev/null +++ b/compiler/utils/x86/managed_register_x86.cc @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "managed_register_x86.h" + +#include "globals.h" + +namespace art { +namespace x86 { + +// These cpu registers are never available for allocation. +static const Register kReservedCpuRegistersArray[] = { ESP }; + + +// We reduce the number of available registers for allocation in debug-code +// mode in order to increase register pressure. + +// We need all registers for caching. +static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters; +static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters; +static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs; + + +// Define register pairs. +// This list must be kept in sync with the RegisterPair enum. 
+#define REGISTER_PAIR_LIST(P) \ + P(EAX, EDX) \ + P(EAX, ECX) \ + P(EAX, EBX) \ + P(EAX, EDI) \ + P(EDX, ECX) \ + P(EDX, EBX) \ + P(EDX, EDI) \ + P(ECX, EBX) \ + P(ECX, EDI) \ + P(EBX, EDI) + + +struct RegisterPairDescriptor { + RegisterPair reg; // Used to verify that the enum is in sync. + Register low; + Register high; +}; + + +static const RegisterPairDescriptor kRegisterPairs[] = { +#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high }, + REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION) +#undef REGISTER_PAIR_ENUMERATION +}; + +std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) { + os << X86ManagedRegister::FromRegisterPair(reg); + return os; +} + +bool X86ManagedRegister::Overlaps(const X86ManagedRegister& other) const { + if (IsNoRegister() || other.IsNoRegister()) return false; + CHECK(IsValidManagedRegister()); + CHECK(other.IsValidManagedRegister()); + if (Equals(other)) return true; + if (IsRegisterPair()) { + Register low = AsRegisterPairLow(); + Register high = AsRegisterPairHigh(); + return X86ManagedRegister::FromCpuRegister(low).Overlaps(other) || + X86ManagedRegister::FromCpuRegister(high).Overlaps(other); + } + if (other.IsRegisterPair()) { + return other.Overlaps(*this); + } + return false; +} + + +int X86ManagedRegister::AllocIdLow() const { + CHECK(IsRegisterPair()); + const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds); + CHECK_EQ(r, kRegisterPairs[r].reg); + return kRegisterPairs[r].low; +} + + +int X86ManagedRegister::AllocIdHigh() const { + CHECK(IsRegisterPair()); + const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds); + CHECK_EQ(r, kRegisterPairs[r].reg); + return kRegisterPairs[r].high; +} + + +void X86ManagedRegister::Print(std::ostream& os) const { + if (!IsValidManagedRegister()) { + os << "No Register"; + } else if (IsXmmRegister()) { + os << "XMM: " << static_cast(AsXmmRegister()); + } else if (IsX87Register()) { + os << "X87: " 
<< static_cast(AsX87Register()); + } else if (IsCpuRegister()) { + os << "CPU: " << static_cast(AsCpuRegister()); + } else if (IsRegisterPair()) { + os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh(); + } else { + os << "??: " << RegId(); + } +} + +std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg) { + reg.Print(os); + return os; +} + +} // namespace x86 +} // namespace art diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h new file mode 100644 index 0000000000..0201a96ad0 --- /dev/null +++ b/compiler/utils/x86/managed_register_x86.h @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_ +#define ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_ + +#include "constants_x86.h" +#include "utils/managed_register.h" + +namespace art { +namespace x86 { + +// Values for register pairs. +// The registers in kReservedCpuRegistersArray in x86.cc are not used in pairs. +// The table kRegisterPairs in x86.cc must be kept in sync with this enum. 
+enum RegisterPair { + EAX_EDX = 0, + EAX_ECX = 1, + EAX_EBX = 2, + EAX_EDI = 3, + EDX_ECX = 4, + EDX_EBX = 5, + EDX_EDI = 6, + ECX_EBX = 7, + ECX_EDI = 8, + EBX_EDI = 9, + kNumberOfRegisterPairs = 10, + kNoRegisterPair = -1, +}; + +std::ostream& operator<<(std::ostream& os, const RegisterPair& reg); + +const int kNumberOfCpuRegIds = kNumberOfCpuRegisters; +const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters; + +const int kNumberOfXmmRegIds = kNumberOfXmmRegisters; +const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters; + +const int kNumberOfX87RegIds = kNumberOfX87Registers; +const int kNumberOfX87AllocIds = kNumberOfX87Registers; + +const int kNumberOfPairRegIds = kNumberOfRegisterPairs; + +const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds + kNumberOfPairRegIds; +const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds + + kNumberOfX87RegIds; + +// Register ids map: +// [0..R[ cpu registers (enum Register) +// [R..X[ xmm registers (enum XmmRegister) +// [X..S[ x87 registers (enum X87Register) +// [S..P[ register pairs (enum RegisterPair) +// where +// R = kNumberOfCpuRegIds +// X = R + kNumberOfXmmRegIds +// S = X + kNumberOfX87RegIds +// P = X + kNumberOfRegisterPairs + +// Allocation ids map: +// [0..R[ cpu registers (enum Register) +// [R..X[ xmm registers (enum XmmRegister) +// [X..S[ x87 registers (enum X87Register) +// where +// R = kNumberOfCpuRegIds +// X = R + kNumberOfXmmRegIds +// S = X + kNumberOfX87RegIds + + +// An instance of class 'ManagedRegister' represents a single cpu register (enum +// Register), an xmm register (enum XmmRegister), or a pair of cpu registers +// (enum RegisterPair). +// 'ManagedRegister::NoRegister()' provides an invalid register. +// There is a one-to-one mapping between ManagedRegister and register id. 
+class X86ManagedRegister : public ManagedRegister { + public: + ByteRegister AsByteRegister() const { + CHECK(IsCpuRegister()); + CHECK_LT(AsCpuRegister(), ESP); // ESP, EBP, ESI and EDI cannot be encoded as byte registers. + return static_cast(id_); + } + + Register AsCpuRegister() const { + CHECK(IsCpuRegister()); + return static_cast(id_); + } + + XmmRegister AsXmmRegister() const { + CHECK(IsXmmRegister()); + return static_cast(id_ - kNumberOfCpuRegIds); + } + + X87Register AsX87Register() const { + CHECK(IsX87Register()); + return static_cast(id_ - + (kNumberOfCpuRegIds + kNumberOfXmmRegIds)); + } + + Register AsRegisterPairLow() const { + CHECK(IsRegisterPair()); + // Appropriate mapping of register ids allows to use AllocIdLow(). + return FromRegId(AllocIdLow()).AsCpuRegister(); + } + + Register AsRegisterPairHigh() const { + CHECK(IsRegisterPair()); + // Appropriate mapping of register ids allows to use AllocIdHigh(). + return FromRegId(AllocIdHigh()).AsCpuRegister(); + } + + bool IsCpuRegister() const { + CHECK(IsValidManagedRegister()); + return (0 <= id_) && (id_ < kNumberOfCpuRegIds); + } + + bool IsXmmRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - kNumberOfCpuRegIds; + return (0 <= test) && (test < kNumberOfXmmRegIds); + } + + bool IsX87Register() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds); + return (0 <= test) && (test < kNumberOfX87RegIds); + } + + bool IsRegisterPair() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds); + return (0 <= test) && (test < kNumberOfPairRegIds); + } + + void Print(std::ostream& os) const; + + // Returns true if the two managed-registers ('this' and 'other') overlap. + // Either managed-register may be the NoRegister. If both are the NoRegister + // then false is returned. 
+ bool Overlaps(const X86ManagedRegister& other) const; + + static X86ManagedRegister FromCpuRegister(Register r) { + CHECK_NE(r, kNoRegister); + return FromRegId(r); + } + + static X86ManagedRegister FromXmmRegister(XmmRegister r) { + CHECK_NE(r, kNoXmmRegister); + return FromRegId(r + kNumberOfCpuRegIds); + } + + static X86ManagedRegister FromX87Register(X87Register r) { + CHECK_NE(r, kNoX87Register); + return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds); + } + + static X86ManagedRegister FromRegisterPair(RegisterPair r) { + CHECK_NE(r, kNoRegisterPair); + return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds)); + } + + private: + bool IsValidManagedRegister() const { + return (0 <= id_) && (id_ < kNumberOfRegIds); + } + + int RegId() const { + CHECK(!IsNoRegister()); + return id_; + } + + int AllocId() const { + CHECK(IsValidManagedRegister() && !IsRegisterPair()); + CHECK_LT(id_, kNumberOfAllocIds); + return id_; + } + + int AllocIdLow() const; + int AllocIdHigh() const; + + friend class ManagedRegister; + + explicit X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {} + + static X86ManagedRegister FromRegId(int reg_id) { + X86ManagedRegister reg(reg_id); + CHECK(reg.IsValidManagedRegister()); + return reg; + } +}; + +std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg); + +} // namespace x86 + +inline x86::X86ManagedRegister ManagedRegister::AsX86() const { + x86::X86ManagedRegister reg(id_); + CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); + return reg; +} + +} // namespace art + +#endif // ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_ diff --git a/compiler/utils/x86/managed_register_x86_test.cc b/compiler/utils/x86/managed_register_x86_test.cc new file mode 100644 index 0000000000..4fbafdadf9 --- /dev/null +++ b/compiler/utils/x86/managed_register_x86_test.cc @@ -0,0 +1,359 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "globals.h" +#include "managed_register_x86.h" +#include "gtest/gtest.h" + +namespace art { +namespace x86 { + +TEST(X86ManagedRegister, NoRegister) { + X86ManagedRegister reg = ManagedRegister::NoRegister().AsX86(); + EXPECT_TRUE(reg.IsNoRegister()); + EXPECT_TRUE(!reg.Overlaps(reg)); +} + +TEST(X86ManagedRegister, CpuRegister) { + X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(EAX, reg.AsCpuRegister()); + + reg = X86ManagedRegister::FromCpuRegister(EBX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(EBX, reg.AsCpuRegister()); + + reg = X86ManagedRegister::FromCpuRegister(ECX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(ECX, reg.AsCpuRegister()); + + reg = X86ManagedRegister::FromCpuRegister(EDI); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(EDI, reg.AsCpuRegister()); +} + 
+TEST(X86ManagedRegister, XmmRegister) { + X86ManagedRegister reg = X86ManagedRegister::FromXmmRegister(XMM0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(XMM0, reg.AsXmmRegister()); + + reg = X86ManagedRegister::FromXmmRegister(XMM1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(XMM1, reg.AsXmmRegister()); + + reg = X86ManagedRegister::FromXmmRegister(XMM7); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(XMM7, reg.AsXmmRegister()); +} + +TEST(X86ManagedRegister, X87Register) { + X86ManagedRegister reg = X86ManagedRegister::FromX87Register(ST0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(ST0, reg.AsX87Register()); + + reg = X86ManagedRegister::FromX87Register(ST1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(ST1, reg.AsX87Register()); + + reg = X86ManagedRegister::FromX87Register(ST7); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(reg.IsX87Register()); + EXPECT_TRUE(!reg.IsRegisterPair()); + EXPECT_EQ(ST7, reg.AsX87Register()); +} + +TEST(X86ManagedRegister, RegisterPair) { + X86ManagedRegister reg = X86ManagedRegister::FromRegisterPair(EAX_EDX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + 
EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EAX, reg.AsRegisterPairLow()); + EXPECT_EQ(EDX, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(EAX_ECX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EAX, reg.AsRegisterPairLow()); + EXPECT_EQ(ECX, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(EAX_EBX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EAX, reg.AsRegisterPairLow()); + EXPECT_EQ(EBX, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(EAX_EDI); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EAX, reg.AsRegisterPairLow()); + EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(EDX_ECX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EDX, reg.AsRegisterPairLow()); + EXPECT_EQ(ECX, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(EDX_EBX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EDX, reg.AsRegisterPairLow()); + EXPECT_EQ(EBX, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(EDX_EDI); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + 
EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EDX, reg.AsRegisterPairLow()); + EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(ECX_EBX); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(ECX, reg.AsRegisterPairLow()); + EXPECT_EQ(EBX, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(ECX_EDI); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(ECX, reg.AsRegisterPairLow()); + EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); + + reg = X86ManagedRegister::FromRegisterPair(EBX_EDI); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCpuRegister()); + EXPECT_TRUE(!reg.IsXmmRegister()); + EXPECT_TRUE(!reg.IsX87Register()); + EXPECT_TRUE(reg.IsRegisterPair()); + EXPECT_EQ(EBX, reg.AsRegisterPairLow()); + EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); +} + +TEST(X86ManagedRegister, Equals) { + X86ManagedRegister reg_eax = X86ManagedRegister::FromCpuRegister(EAX); + EXPECT_TRUE(reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + X86ManagedRegister 
reg_xmm0 = X86ManagedRegister::FromXmmRegister(XMM0); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + X86ManagedRegister reg_st0 = X86ManagedRegister::FromX87Register(ST0); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(reg_st0.Equals(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + X86ManagedRegister reg_pair = X86ManagedRegister::FromRegisterPair(EAX_EDX); + EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); + 
EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); +} + +TEST(X86ManagedRegister, Overlaps) { + X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + reg = X86ManagedRegister::FromCpuRegister(EDX); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + reg = X86ManagedRegister::FromCpuRegister(EDI); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + 
EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + reg = X86ManagedRegister::FromCpuRegister(EBX); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + reg = X86ManagedRegister::FromXmmRegister(XMM0); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + reg = X86ManagedRegister::FromX87Register(ST0); + 
EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + reg = X86ManagedRegister::FromRegisterPair(EAX_EDX); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_ECX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + + reg = X86ManagedRegister::FromRegisterPair(EBX_EDI); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + 
EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX))); + + reg = X86ManagedRegister::FromRegisterPair(EDX_ECX); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); + EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); + EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX))); +} + +} // namespace x86 +} // namespace art diff --git a/compiler/vector_output_stream.cc b/compiler/vector_output_stream.cc new file mode 100644 index 0000000000..e5ff729036 --- /dev/null +++ b/compiler/vector_output_stream.cc @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "vector_output_stream.h" + +#include "base/logging.h" + +namespace art { + +VectorOutputStream::VectorOutputStream(const std::string& location, std::vector& vector) + : OutputStream(location), offset_(vector.size()), vector_(vector) {} + +off_t VectorOutputStream::Seek(off_t offset, Whence whence) { + CHECK(whence == kSeekSet || whence == kSeekCurrent || whence == kSeekEnd) << whence; + off_t new_offset = 0; + switch (whence) { + case kSeekSet: { + new_offset = offset; + break; + } + case kSeekCurrent: { + new_offset = offset_ + offset; + break; + } + case kSeekEnd: { + new_offset = vector_.size() + offset; + break; + } + } + EnsureCapacity(new_offset); + offset_ = new_offset; + return offset_; +} + +} // namespace art diff --git a/compiler/vector_output_stream.h b/compiler/vector_output_stream.h new file mode 100644 index 0000000000..a3f82262af --- /dev/null +++ b/compiler/vector_output_stream.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_VECTOR_OUTPUT_STREAM_H_ +#define ART_COMPILER_VECTOR_OUTPUT_STREAM_H_ + +#include "output_stream.h" + +#include +#include +#include + +namespace art { + +class VectorOutputStream : public OutputStream { + public: + VectorOutputStream(const std::string& location, std::vector& vector); + + virtual ~VectorOutputStream() {} + + bool WriteFully(const void* buffer, int64_t byte_count) { + if (static_cast(offset_) == vector_.size()) { + const uint8_t* start = reinterpret_cast(buffer); + vector_.insert(vector_.end(), &start[0], &start[byte_count]); + offset_ += byte_count; + } else { + off_t new_offset = offset_ + byte_count; + EnsureCapacity(new_offset); + memcpy(&vector_[offset_], buffer, byte_count); + offset_ = new_offset; + } + return true; + } + + off_t Seek(off_t offset, Whence whence); + + private: + void EnsureCapacity(off_t new_offset) { + if (new_offset > static_cast(vector_.size())) { + vector_.resize(new_offset); + } + } + + off_t offset_; + std::vector& vector_; + + DISALLOW_COPY_AND_ASSIGN(VectorOutputStream); +}; + +} // namespace art + +#endif // ART_COMPILER_VECTOR_OUTPUT_STREAM_H_ diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index f79ddb1935..c8c43476dc 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -230,7 +230,7 @@ class Dex2Oat { bool image, UniquePtr& image_classes, bool dump_stats, - TimingLogger& timings) + base::TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // SirtRef and ClassLoader creation needs to come after Runtime::Create jobject class_loader = NULL; @@ -263,11 +263,11 @@ class Dex2Oat { Thread::Current()->TransitionFromRunnableToSuspended(kNative); - timings.AddSplit("dex2oat Setup"); driver->CompileAll(class_loader, dex_files, timings); Thread::Current()->TransitionFromSuspendedToRunnable(); + timings.NewSplit("dex2oat OatWriter"); std::string image_file_location; uint32_t image_file_location_oat_checksum = 0; uint32_t image_file_location_oat_data_begin = 0; @@ -287,13 
+287,11 @@ class Dex2Oat { image_file_location_oat_data_begin, image_file_location, driver.get()); - timings.AddSplit("dex2oat OatWriter"); if (!driver->WriteElf(android_root, is_host, dex_files, oat_writer, oat_file)) { LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath(); return NULL; } - timings.AddSplit("dex2oat ElfWriter"); return driver.release(); } @@ -563,7 +561,7 @@ const unsigned int WatchDog::kWatchDogWarningSeconds; const unsigned int WatchDog::kWatchDogTimeoutSeconds; static int dex2oat(int argc, char** argv) { - TimingLogger timings("compiler", false); + base::TimingLogger timings("compiler", false, false); InitLogging(argv); @@ -928,6 +926,7 @@ static int dex2oat(int argc, char** argv) { } } + timings.StartSplit("dex2oat Setup"); UniquePtr compiler(dex2oat->CreateOatFile(boot_image_option, host_prefix.get(), android_root, @@ -998,13 +997,13 @@ static int dex2oat(int argc, char** argv) { // Elf32_Phdr.p_vaddr values by the desired base address. // if (image) { + timings.NewSplit("dex2oat ImageWriter"); Thread::Current()->TransitionFromRunnableToSuspended(kNative); bool image_creation_success = dex2oat->CreateImageFile(image_filename, image_base, oat_unstripped, oat_location, *compiler.get()); - timings.AddSplit("dex2oat ImageWriter"); Thread::Current()->TransitionFromSuspendedToRunnable(); if (!image_creation_success) { return EXIT_FAILURE; @@ -1014,7 +1013,7 @@ static int dex2oat(int argc, char** argv) { if (is_host) { if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) { - LOG(INFO) << Dumpable(timings); + LOG(INFO) << Dumpable(timings); } return EXIT_SUCCESS; } @@ -1022,6 +1021,7 @@ static int dex2oat(int argc, char** argv) { // If we don't want to strip in place, copy from unstripped location to stripped location. // We need to strip after image creation because FixupElf needs to use .strtab. 
if (oat_unstripped != oat_stripped) { + timings.NewSplit("dex2oat OatFile copy"); oat_file.reset(); UniquePtr in(OS::OpenFile(oat_unstripped.c_str(), false)); UniquePtr out(OS::OpenFile(oat_stripped.c_str(), true)); @@ -1036,23 +1036,25 @@ static int dex2oat(int argc, char** argv) { CHECK(write_ok); } oat_file.reset(out.release()); - timings.AddSplit("dex2oat OatFile copy"); LOG(INFO) << "Oat file copied successfully (stripped): " << oat_stripped; } #if ART_USE_PORTABLE_COMPILER // We currently only generate symbols on Portable + timings.NewSplit("dex2oat ElfStripper"); // Strip unneeded sections for target off_t seek_actual = lseek(oat_file->Fd(), 0, SEEK_SET); CHECK_EQ(0, seek_actual); ElfStripper::Strip(oat_file.get()); - timings.AddSplit("dex2oat ElfStripper"); + // We wrote the oat file successfully, and want to keep it. LOG(INFO) << "Oat file written successfully (stripped): " << oat_location; #endif // ART_USE_PORTABLE_COMPILER + timings.EndSplit(); + if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) { - LOG(INFO) << Dumpable(timings); + LOG(INFO) << Dumpable(timings); } return EXIT_SUCCESS; } diff --git a/runtime/Android.mk b/runtime/Android.mk index 7734aa5d08..51bb3eb2d3 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -111,13 +111,6 @@ LIBART_COMMON_SRC_FILES := \ native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc \ native/sun_misc_Unsafe.cc \ oat.cc \ - oat/utils/arm/assembler_arm.cc \ - oat/utils/arm/managed_register_arm.cc \ - oat/utils/assembler.cc \ - oat/utils/mips/assembler_mips.cc \ - oat/utils/mips/managed_register_mips.cc \ - oat/utils/x86/assembler_x86.cc \ - oat/utils/x86/managed_register_x86.cc \ oat_file.cc \ offsets.cc \ os_linux.cc \ @@ -125,8 +118,6 @@ LIBART_COMMON_SRC_FILES := \ reference_table.cc \ reflection.cc \ runtime.cc \ - runtime_support.cc \ - runtime_support_llvm.cc \ signal_catcher.cc \ stack.cc \ thread.cc \ @@ -136,7 +127,6 @@ LIBART_COMMON_SRC_FILES := \ trace.cc \ utf.cc \ utils.cc \ - 
vector_output_stream.cc \ verifier/dex_gc_map.cc \ verifier/instruction_flags.cc \ verifier/method_verifier.cc \ @@ -147,23 +137,41 @@ LIBART_COMMON_SRC_FILES := \ zip_archive.cc LIBART_COMMON_SRC_FILES += \ - oat/runtime/context.cc \ - oat/runtime/support_alloc.cc \ - oat/runtime/support_cast.cc \ - oat/runtime/support_deoptimize.cc \ - oat/runtime/support_dexcache.cc \ - oat/runtime/support_field.cc \ - oat/runtime/support_fillarray.cc \ - oat/runtime/support_instrumentation.cc \ - oat/runtime/support_invoke.cc \ - oat/runtime/support_jni.cc \ - oat/runtime/support_locks.cc \ - oat/runtime/support_math.cc \ - oat/runtime/support_proxy.cc \ - oat/runtime/support_stubs.cc \ - oat/runtime/support_thread.cc \ - oat/runtime/support_throw.cc \ - oat/runtime/support_interpreter.cc + arch/context.cc \ + arch/arm/registers_arm.cc \ + arch/x86/registers_x86.cc \ + arch/mips/registers_mips.cc \ + entrypoints/entrypoint_utils.cc \ + entrypoints/jni/jni_entrypoints.cc \ + entrypoints/math_entrypoints.cc \ + entrypoints/portable/portable_alloc_entrypoints.cc \ + entrypoints/portable/portable_cast_entrypoints.cc \ + entrypoints/portable/portable_dexcache_entrypoints.cc \ + entrypoints/portable/portable_field_entrypoints.cc \ + entrypoints/portable/portable_fillarray_entrypoints.cc \ + entrypoints/portable/portable_invoke_entrypoints.cc \ + entrypoints/portable/portable_jni_entrypoints.cc \ + entrypoints/portable/portable_lock_entrypoints.cc \ + entrypoints/portable/portable_proxy_entrypoints.cc \ + entrypoints/portable/portable_stub_entrypoints.cc \ + entrypoints/portable/portable_thread_entrypoints.cc \ + entrypoints/portable/portable_throw_entrypoints.cc \ + entrypoints/quick/quick_alloc_entrypoints.cc \ + entrypoints/quick/quick_cast_entrypoints.cc \ + entrypoints/quick/quick_deoptimization_entrypoints.cc \ + entrypoints/quick/quick_dexcache_entrypoints.cc \ + entrypoints/quick/quick_field_entrypoints.cc \ + entrypoints/quick/quick_fillarray_entrypoints.cc \ + 
entrypoints/quick/quick_instrumentation_entrypoints.cc \ + entrypoints/quick/quick_interpreter_entrypoints.cc \ + entrypoints/quick/quick_invoke_entrypoints.cc \ + entrypoints/quick/quick_jni_entrypoints.cc \ + entrypoints/quick/quick_lock_entrypoints.cc \ + entrypoints/quick/quick_math_entrypoints.cc \ + entrypoints/quick/quick_proxy_entrypoints.cc \ + entrypoints/quick/quick_stub_entrypoints.cc \ + entrypoints/quick/quick_thread_entrypoints.cc \ + entrypoints/quick/quick_throw_entrypoints.cc LIBART_TARGET_SRC_FILES := \ $(LIBART_COMMON_SRC_FILES) \ @@ -175,40 +183,36 @@ LIBART_TARGET_SRC_FILES := \ ifeq ($(TARGET_ARCH),arm) LIBART_TARGET_SRC_FILES += \ - oat/runtime/arm/context_arm.cc.arm \ - oat/runtime/arm/oat_support_entrypoints_arm.cc \ - oat/runtime/arm/runtime_support_arm.S + arch/arm/context_arm.cc.arm \ + arch/arm/entrypoints_init_arm.cc \ + arch/arm/jni_entrypoints_arm.S \ + arch/arm/portable_entrypoints_arm.S \ + arch/arm/quick_entrypoints_arm.S \ + arch/arm/thread_arm.cc else # TARGET_ARCH != arm ifeq ($(TARGET_ARCH),x86) LIBART_TARGET_SRC_FILES += \ - oat/runtime/x86/context_x86.cc \ - oat/runtime/x86/oat_support_entrypoints_x86.cc \ - oat/runtime/x86/runtime_support_x86.S + arch/x86/context_x86.cc \ + arch/x86/entrypoints_init_x86.cc \ + arch/x86/jni_entrypoints_x86.S \ + arch/x86/portable_entrypoints_x86.S \ + arch/x86/quick_entrypoints_x86.S \ + arch/x86/thread_x86.cc else # TARGET_ARCH != x86 ifeq ($(TARGET_ARCH),mips) LIBART_TARGET_SRC_FILES += \ - oat/runtime/mips/context_mips.cc \ - oat/runtime/mips/oat_support_entrypoints_mips.cc \ - oat/runtime/mips/runtime_support_mips.S + arch/mips/context_mips.cc \ + arch/mips/entrypoints_init_mips.cc \ + arch/mips/jni_entrypoints_mips.S \ + arch/mips/portable_entrypoints_mips.S \ + arch/mips/quick_entrypoints_mips.S \ + arch/mips/thread_mips.cc else # TARGET_ARCH != mips $(error unsupported TARGET_ARCH=$(TARGET_ARCH)) endif # TARGET_ARCH != mips endif # TARGET_ARCH != x86 endif # TARGET_ARCH != arm -ifeq 
($(TARGET_ARCH),arm) -LIBART_TARGET_SRC_FILES += thread_arm.cc -else # TARGET_ARCH != arm -ifeq ($(TARGET_ARCH),x86) -LIBART_TARGET_SRC_FILES += thread_x86.cc -else # TARGET_ARCH != x86 -ifeq ($(TARGET_ARCH),mips) -LIBART_TARGET_SRC_FILES += thread_mips.cc -else # TARGET_ARCH != mips -$(error unsupported TARGET_ARCH=$(TARGET_ARCH)) -endif # TARGET_ARCH != mips -endif # TARGET_ARCH != x86 -endif # TARGET_ARCH != arm LIBART_HOST_SRC_FILES := \ $(LIBART_COMMON_SRC_FILES) \ @@ -219,15 +223,12 @@ LIBART_HOST_SRC_FILES := \ ifeq ($(HOST_ARCH),x86) LIBART_HOST_SRC_FILES += \ - oat/runtime/x86/context_x86.cc \ - oat/runtime/x86/oat_support_entrypoints_x86.cc \ - oat/runtime/x86/runtime_support_x86.S -else # HOST_ARCH != x86 -$(error unsupported HOST_ARCH=$(HOST_ARCH)) -endif # HOST_ARCH != x86 - -ifeq ($(HOST_ARCH),x86) -LIBART_HOST_SRC_FILES += thread_x86.cc + arch/x86/context_x86.cc \ + arch/x86/entrypoints_init_x86.cc \ + arch/x86/jni_entrypoints_x86.S \ + arch/x86/portable_entrypoints_x86.S \ + arch/x86/quick_entrypoints_x86.S \ + arch/x86/thread_x86.cc else # HOST_ARCH != x86 $(error unsupported HOST_ARCH=$(HOST_ARCH)) endif # HOST_ARCH != x86 diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S new file mode 100644 index 0000000000..ed655e95b1 --- /dev/null +++ b/runtime/arch/arm/asm_support_arm.S @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_ +#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_ + +#include "asm_support_arm.h" + +.macro ENTRY name + .type \name, #function + .global \name + /* Cache alignment for function entry */ + .balign 16 +\name: + .cfi_startproc + .fnstart +.endm + +.macro END name + .fnend + .cfi_endproc + .size \name, .-\name +.endm + +#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_ diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h new file mode 100644 index 0000000000..ed3d476b24 --- /dev/null +++ b/runtime/arch/arm/asm_support_arm.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ +#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ + +#include "asm_support.h" + +// Register holding suspend check count down. +#define rSUSPEND r4 +// Register holding Thread::Current().
+#define rSELF r9 +// Offset of field Thread::suspend_count_ verified in InitCpu +#define THREAD_FLAGS_OFFSET 0 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc new file mode 100644 index 0000000000..6b9538e801 --- /dev/null +++ b/runtime/arch/arm/context_arm.cc @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "context_arm.h" + +#include "mirror/abstract_method.h" +#include "mirror/object-inl.h" +#include "stack.h" +#include "thread.h" + +namespace art { +namespace arm { + +static const uint32_t gZero = 0; + +void ArmContext::Reset() { + for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { + gprs_[i] = NULL; + } + for (size_t i = 0; i < kNumberOfSRegisters; i++) { + fprs_[i] = NULL; + } + gprs_[SP] = &sp_; + gprs_[PC] = &pc_; + // Initialize registers with easy to spot debug values. 
+ sp_ = ArmContext::kBadGprBase + SP; + pc_ = ArmContext::kBadGprBase + PC; +} + +void ArmContext::FillCalleeSaves(const StackVisitor& fr) { + mirror::AbstractMethod* method = fr.GetMethod(); + uint32_t core_spills = method->GetCoreSpillMask(); + uint32_t fp_core_spills = method->GetFpSpillMask(); + size_t spill_count = __builtin_popcount(core_spills); + size_t fp_spill_count = __builtin_popcount(fp_core_spills); + size_t frame_size = method->GetFrameSizeInBytes(); + if (spill_count > 0) { + // Lowest number spill is farthest away, walk registers and fill into context + int j = 1; + for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { + if (((core_spills >> i) & 1) != 0) { + gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size); + j++; + } + } + } + if (fp_spill_count > 0) { + // Lowest number spill is farthest away, walk registers and fill into context + int j = 1; + for (size_t i = 0; i < kNumberOfSRegisters; i++) { + if (((fp_core_spills >> i) & 1) != 0) { + fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size); + j++; + } + } + } +} + +void ArmContext::SetGPR(uint32_t reg, uintptr_t value) { + DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters)); + DCHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since they are never reset. + DCHECK(gprs_[reg] != NULL); + *gprs_[reg] = value; +} + +void ArmContext::SmashCallerSaves() { + // This needs to be 0 because we want a null/zero return value. + gprs_[R0] = const_cast<uint32_t*>(&gZero); + gprs_[R1] = const_cast<uint32_t*>(&gZero); + gprs_[R2] = NULL; + gprs_[R3] = NULL; +} + +extern "C" void art_quick_do_long_jump(uint32_t*, uint32_t*); + +void ArmContext::DoLongJump() { + uintptr_t gprs[16]; + uint32_t fprs[32]; + for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) { + gprs[i] = gprs_[i] != NULL ? *gprs_[i] : ArmContext::kBadGprBase + i; + } + for (size_t i = 0; i < kNumberOfSRegisters; ++i) { + fprs[i] = fprs_[i] != NULL ?
*fprs_[i] : ArmContext::kBadGprBase + i; + } + DCHECK_EQ(reinterpret_cast(Thread::Current()), gprs[TR]); + art_quick_do_long_jump(gprs, fprs); +} + +} // namespace arm +} // namespace art diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h new file mode 100644 index 0000000000..00651ffb80 --- /dev/null +++ b/runtime/arch/arm/context_arm.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_ +#define ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_ + +#include "locks.h" +#include "arch/context.h" +#include "base/logging.h" +#include "registers_arm.h" + +namespace art { +namespace arm { + +class ArmContext : public Context { + public: + ArmContext() { + Reset(); + } + + virtual ~ArmContext() {} + + virtual void Reset(); + + virtual void FillCalleeSaves(const StackVisitor& fr); + + virtual void SetSP(uintptr_t new_sp) { + SetGPR(SP, new_sp); + } + + virtual void SetPC(uintptr_t new_pc) { + SetGPR(PC, new_pc); + } + + virtual uintptr_t GetGPR(uint32_t reg) { + DCHECK_LT(reg, static_cast(kNumberOfCoreRegisters)); + return *gprs_[reg]; + } + + virtual void SetGPR(uint32_t reg, uintptr_t value); + virtual void SmashCallerSaves(); + virtual void DoLongJump(); + + private: + // Pointers to register locations, initialized to NULL or the specific registers below. 
+ uintptr_t* gprs_[kNumberOfCoreRegisters]; + uint32_t* fprs_[kNumberOfSRegisters]; + // Hold values for sp and pc if they are not located within a stack frame. + uintptr_t sp_, pc_; +}; + +} // namespace arm +} // namespace art + +#endif // ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_ diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc new file mode 100644 index 0000000000..b71a158289 --- /dev/null +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" +#include "entrypoints/math_entrypoints.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Exception entrypoints. +extern "C" void* GetAndClearException(Thread*); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern int32_t CmpgDouble(double a, double b); +extern int32_t CmplDouble(double a, double b); +extern int32_t CmpgFloat(float a, float b); +extern int32_t CmplFloat(float a, float b); + +// Math conversions. +extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT +extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT +extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT +extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE + +// Single-precision FP arithmetics. +extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] + +// Double-precision FP arithmetics. +extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] + +// Integer arithmetics. +extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16] + +// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] +extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); +extern "C" int64_t art_quick_mul_long(int64_t, int64_t); +extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t __memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + qpoints->pCmpgDouble = CmpgDouble; + qpoints->pCmpgFloat = CmpgFloat; + qpoints->pCmplDouble = CmplDouble; + qpoints->pCmplFloat = CmplFloat; + qpoints->pFmod = fmod; + qpoints->pSqrt = sqrt; + qpoints->pL2d = __aeabi_l2d; + qpoints->pFmodf = fmodf; + qpoints->pL2f = __aeabi_l2f; + qpoints->pD2iz = __aeabi_d2iz; + qpoints->pF2iz = __aeabi_f2iz; + qpoints->pIdivmod = __aeabi_idivmod; + qpoints->pD2l = art_d2l; + qpoints->pF2l = art_f2l; + qpoints->pLdiv = __aeabi_ldivmod; + qpoints->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3 + qpoints->pLmul = art_quick_mul_long; + qpoints->pShlLong = art_quick_shl_long; + qpoints->pShrLong = art_quick_shr_long; + qpoints->pUshrLong = art_quick_ushr_long; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = __memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + 
qpoints->pDeliverException = art_quick_deliver_exception_from_code; + qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S new file mode 100644 index 0000000000..0a0d06a22a --- /dev/null +++ b/runtime/arch/arm/jni_entrypoints_arm.S @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_arm.S" + + /* + * Jni dlsym lookup stub. 
+ */ + .extern artFindNativeMethod +ENTRY art_jni_dlsym_lookup_stub + push {r0, r1, r2, r3, lr} @ spill regs + .save {r0, r1, r2, r3, lr} + .pad #20 + .cfi_adjust_cfa_offset 20 + sub sp, #12 @ pad stack pointer to align frame + .pad #12 + .cfi_adjust_cfa_offset 12 + mov r0, r9 @ pass Thread::Current + blx artFindNativeMethod @ (Thread*) + mov r12, r0 @ save result in r12 + add sp, #12 @ restore stack pointer + .cfi_adjust_cfa_offset -12 + pop {r0, r1, r2, r3, lr} @ restore regs + .cfi_adjust_cfa_offset -20 + cmp r12, #0 @ is method code null? + bxne r12 @ if non-null, tail call to method's code + bx lr @ otherwise, return to caller to handle exception +END art_jni_dlsym_lookup_stub + + /* + * Entry point of native methods when JNI bug compatibility is enabled. + */ + .extern artWorkAroundAppJniBugs +ENTRY art_quick_work_around_app_jni_bugs + @ save registers that may contain arguments and LR that will be crushed by a call + push {r0-r3, lr} + .save {r0-r3, lr} + .cfi_adjust_cfa_offset 16 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r1, 4 + .cfi_rel_offset r2, 8 + .cfi_rel_offset r3, 12 + sub sp, #12 @ 3 words of space for alignment + mov r0, r9 @ pass Thread::Current + mov r1, sp @ pass SP + bl artWorkAroundAppJniBugs @ (Thread*, SP) + add sp, #12 @ rewind stack + mov r12, r0 @ save target address + pop {r0-r3, lr} @ restore possibly modified argument registers + .cfi_adjust_cfa_offset -16 + bx r12 @ tail call into JNI routine +END art_quick_work_around_app_jni_bugs diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S new file mode 100644 index 0000000000..4cc6654ebb --- /dev/null +++ b/runtime/arch/arm/portable_entrypoints_arm.S @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_arm.S" + + /* + * Portable invocation stub. + * On entry: + * r0 = method pointer + * r1 = argument array or NULL for no argument methods + * r2 = size of argument array in bytes + * r3 = (managed) thread pointer + * [sp] = JValue* result + * [sp + 4] = result type char + */ +ENTRY art_portable_invoke_stub + push {r0, r4, r5, r9, r11, lr} @ spill regs + .save {r0, r4, r5, r9, r11, lr} + .pad #24 + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r4, 4 + .cfi_rel_offset r5, 8 + .cfi_rel_offset r9, 12 + .cfi_rel_offset r11, 16 + .cfi_rel_offset lr, 20 + mov r11, sp @ save the stack pointer + .cfi_def_cfa_register r11 + mov r9, r3 @ move managed thread pointer into r9 + mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval + add r5, r2, #16 @ create space for method pointer in frame + and r5, #0xFFFFFFF0 @ align frame size to 16 bytes + sub sp, r5 @ reserve stack space for argument array + add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy + bl memcpy @ memcpy (dest, src, bytes) + ldr r0, [r11] @ restore method* + ldr r1, [sp, #4] @ copy arg value for r1 + ldr r2, [sp, #8] @ copy arg value for r2 + ldr r3, [sp, #12] @ copy arg value for r3 + mov ip, #0 @ set ip to 0 + str ip, [sp] @ store NULL for method* at bottom of frame + add sp, #16 @ first 4 args are not passed on stack for portable + ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code + blx ip @ call the method + mov sp, r11 @ restore the stack pointer + ldr ip, [sp, #24] @ load the result pointer + strd 
r0, [ip] @ store r0/r1 into result pointer + pop {r0, r4, r5, r9, r11, lr} @ restore spill regs + .cfi_adjust_cfa_offset -24 + bx lr +END art_portable_invoke_stub + + .extern artPortableProxyInvokeHandler +ENTRY art_portable_proxy_invoke_handler + @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + @ TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves + .save {r1-r3, r5-r8, r10-r11, lr} + .cfi_adjust_cfa_offset 40 + .cfi_rel_offset r1, 0 + .cfi_rel_offset r2, 4 + .cfi_rel_offset r3, 8 + .cfi_rel_offset r5, 12 + .cfi_rel_offset r6, 16 + .cfi_rel_offset r7, 20 + .cfi_rel_offset r8, 24 + .cfi_rel_offset r10, 28 + .cfi_rel_offset r11, 32 + .cfi_rel_offset lr, 36 + sub sp, #8 @ 2 words of space, bottom word will hold Method* + .pad #8 + .cfi_adjust_cfa_offset 8 + @ Begin argument set up. + str r0, [sp, #0] @ place proxy method at bottom of frame + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + ldr lr, [sp, #44] @ restore lr + add sp, #48 @ pop frame + .cfi_adjust_cfa_offset -48 + bx lr @ return +END art_portable_proxy_invoke_handler diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S new file mode 100644 index 0000000000..9b8d238ab8 --- /dev/null +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -0,0 +1,1288 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_arm.S" + + /* Deliver the given exception */ + .extern artDeliverExceptionFromCode + /* Deliver an exception pending on a thread */ + .extern artDeliverPendingException + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveAll) + */ +.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + push {r4-r11, lr} @ 9 words of callee saves + .save {r4-r11, lr} + .cfi_adjust_cfa_offset 36 + .cfi_rel_offset r4, 0 + .cfi_rel_offset r5, 4 + .cfi_rel_offset r6, 8 + .cfi_rel_offset r7, 12 + .cfi_rel_offset r8, 16 + .cfi_rel_offset r9, 20 + .cfi_rel_offset r10, 24 + .cfi_rel_offset r11, 28 + .cfi_rel_offset lr, 32 + vpush {s0-s31} + .pad #128 + .cfi_adjust_cfa_offset 128 + sub sp, #12 @ 3 words of space, bottom word will hold Method* + .pad #12 + .cfi_adjust_cfa_offset 12 +.endm + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC. 
+ */ +.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME + push {r5-r8, r10-r11, lr} @ 7 words of callee saves + .save {r5-r8, r10-r11, lr} + .cfi_adjust_cfa_offset 28 + .cfi_rel_offset r5, 0 + .cfi_rel_offset r6, 4 + .cfi_rel_offset r7, 8 + .cfi_rel_offset r8, 12 + .cfi_rel_offset r10, 16 + .cfi_rel_offset r11, 20 + .cfi_rel_offset lr, 24 + sub sp, #4 @ bottom word will hold Method* + .pad #4 + .cfi_adjust_cfa_offset 4 +.endm + +.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + ldr lr, [sp, #28] @ restore lr for return + add sp, #32 @ unwind stack + .cfi_adjust_cfa_offset -32 +.endm + +.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN + ldr lr, [sp, #28] @ restore lr for return + add sp, #32 @ unwind stack + .cfi_adjust_cfa_offset -32 + bx lr @ return +.endm + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC. + */ +.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves + .save {r1-r3, r5-r8, r10-r11, lr} + .cfi_adjust_cfa_offset 40 + .cfi_rel_offset r1, 0 + .cfi_rel_offset r2, 4 + .cfi_rel_offset r3, 8 + .cfi_rel_offset r5, 12 + .cfi_rel_offset r6, 16 + .cfi_rel_offset r7, 20 + .cfi_rel_offset r8, 24 + .cfi_rel_offset r10, 28 + .cfi_rel_offset r11, 32 + .cfi_rel_offset lr, 36 + sub sp, #8 @ 2 words of space, bottom word will hold Method* + .pad #8 + .cfi_adjust_cfa_offset 8 +.endm + +.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + ldr r1, [sp, #8] @ restore non-callee save r1 + ldrd r2, [sp, #12] @ restore non-callee saves r2-r3 + ldr lr, [sp, #44] @ restore lr + add sp, #48 @ rewind sp + .cfi_adjust_cfa_offset -48 +.endm + + /* + * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending + * exception is Thread::Current()->exception_ + */ +.macro DELIVER_PENDING_EXCEPTION + .fnend + .fnstart + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME @ save callee saves for throw + mov r0, r9 @ pass Thread::Current + mov 
r1, sp @ pass SP + b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*, SP) +.endm + +.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name + .extern \cxx_name +ENTRY \c_name + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + mov r0, r9 @ pass Thread::Current + mov r1, sp @ pass SP + b \cxx_name @ \cxx_name(Thread*, SP) +END \c_name +.endm + +.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name + .extern \cxx_name +ENTRY \c_name + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + mov r1, r9 @ pass Thread::Current + mov r2, sp @ pass SP + b \cxx_name @ \cxx_name(Thread*, SP) +END \c_name +.endm + +.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name + .extern \cxx_name +ENTRY \c_name + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + b \cxx_name @ \cxx_name(Thread*, SP) +END \c_name +.endm + + /* + * Called by managed code, saves callee saves and then calls artThrowException + * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. + */ +ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode + + /* + * Called by managed code to create and deliver a NullPointerException. + */ +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode + + /* + * Called by managed code to create and deliver an ArithmeticException. + */ +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode + + /* + * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds + * index, arg2 holds limit. + */ +TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode + + /* + * Called by managed code to create and deliver a StackOverflowError. 
+ */ +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode + + /* + * Called by managed code to create and deliver a NoSuchMethodError. + */ +ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode + + /* + * All generated callsites for interface invokes and invocation slow paths will load arguments + * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain + * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the + * stack and call the appropriate C helper. + * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1. + * + * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting + * of the target Method* in r0 and method->code_ in r1. + * + * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the + * thread and we branch to another stub to deliver it. + * + * On success this wrapper will restore arguments and *jump* to the target, leaving the lr + * pointing back to the original caller. + */ +.macro INVOKE_TRAMPOLINE c_name, cxx_name + .extern \cxx_name +ENTRY \c_name + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME @ save callee saves in case allocation triggers GC + ldr r2, [sp, #48] @ pass caller Method* + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! @ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + bl \cxx_name @ (method_idx, this, caller, Thread*, SP) + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + mov r12, r1 @ save Method*->code_ + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + cmp r0, #0 @ did we find the target? 
+ bxne r12 @ tail call to target if so + DELIVER_PENDING_EXCEPTION +END \c_name +.endm + +INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline +INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck + +INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck + + /* + * Quick invocation stub. + * On entry: + * r0 = method pointer + * r1 = argument array or NULL for no argument methods + * r2 = size of argument array in bytes + * r3 = (managed) thread pointer + * [sp] = JValue* result + * [sp + 4] = result type char + */ +ENTRY art_quick_invoke_stub + push {r0, r4, r5, r9, r11, lr} @ spill regs + .save {r0, r4, r5, r9, r11, lr} + .pad #24 + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r4, 4 + .cfi_rel_offset r5, 8 + .cfi_rel_offset r9, 12 + .cfi_rel_offset r11, 16 + .cfi_rel_offset lr, 20 + mov r11, sp @ save the stack pointer + .cfi_def_cfa_register r11 + mov r9, r3 @ move managed thread pointer into r9 + mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval + add r5, r2, #16 @ create space for method pointer in frame + and r5, #0xFFFFFFF0 @ align frame size to 16 bytes + sub sp, r5 @ reserve stack space for argument array + add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy + bl memcpy @ memcpy (dest, src, bytes) + ldr r0, [r11] @ restore method* + ldr r1, [sp, #4] @ copy arg value for r1 + ldr r2, [sp, #8] @ copy arg value for r2 + ldr r3, [sp, #12] @ copy arg value for r3 + mov ip, #0 @ set ip to 0 + str ip, [sp] @ store 
NULL for method* at bottom of frame + ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code + blx ip @ call the method + mov sp, r11 @ restore the stack pointer + ldr ip, [sp, #24] @ load the result pointer + strd r0, [ip] @ store r0/r1 into result pointer + pop {r0, r4, r5, r9, r11, lr} @ restore spill regs + .cfi_adjust_cfa_offset -24 + bx lr +END art_quick_invoke_stub + + /* + * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_ + */ +ENTRY art_quick_do_long_jump + vldm r1, {s0-s31} @ load all fprs from argument fprs_ + ldr r2, [r0, #60] @ r2 = r15 (PC from gprs_ 60=4*15) + add r0, r0, #12 @ increment r0 to skip gprs_[0..2] 12=4*3 + ldm r0, {r3-r14} @ load remaining gprs from argument gprs_ + mov r0, #0 @ clear result registers r0 and r1 + mov r1, #0 + bx r2 @ do long jump +END art_quick_do_long_jump + + /* + * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on + * failure. + */ + .extern artHandleFillArrayDataFromCode +ENTRY art_quick_handle_fill_data_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artHandleFillArrayDataFromCode @ (Array*, const DexFile::Payload*, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success? + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_handle_fill_data_from_code + + /* + * Entry from managed code that calls artLockObjectFromCode, may block for GC. + */ + .extern artLockObjectFromCode +ENTRY art_quick_lock_object_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case we block + mov r1, r9 @ pass Thread::Current + mov r2, sp @ pass SP + bl artLockObjectFromCode @ (Object* obj, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN +END art_quick_lock_object_from_code + + /* + * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. 
+ */ + .extern artUnlockObjectFromCode +ENTRY art_quick_unlock_object_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC + mov r1, r9 @ pass Thread::Current + mov r2, sp @ pass SP + bl artUnlockObjectFromCode @ (Object* obj, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success? + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_unlock_object_from_code + + /* + * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. + */ + .extern artCheckCastFromCode +ENTRY art_quick_check_cast_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artCheckCastFromCode @ (Class* a, Class* b, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success? + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_check_cast_from_code + + /* + * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on + * failure. + */ + .extern artCanPutArrayElementFromCode +ENTRY art_quick_can_put_array_element_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artCanPutArrayElementFromCode @ (Object* element, Class* array_class, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success? + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_can_put_array_element_from_code + + /* + * Entry from managed code when uninitialized static storage, this stub will run the class + * initializer and deliver the exception on error. On success the static storage base is + * returned. 
+ */ + .extern artInitializeStaticStorageFromCode +ENTRY art_quick_initialize_static_storage_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + @ artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, SP) + bl artInitializeStaticStorageFromCode + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_initialize_static_storage_from_code + + /* + * Entry from managed code when dex cache misses for a type_idx + */ + .extern artInitializeTypeFromCode +ENTRY art_quick_initialize_type_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + @ artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, SP) + bl artInitializeTypeFromCode + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_initialize_type_from_code + + /* + * Entry from managed code when type_idx needs to be checked for access and dex cache may also + * miss. + */ + .extern artInitializeTypeAndVerifyAccessFromCode +ENTRY art_quick_initialize_type_and_verify_access_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + @ artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*, SP) + bl artInitializeTypeAndVerifyAccessFromCode + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_initialize_type_and_verify_access_from_code + + /* + * Called by managed code to resolve a static field and load a 32-bit primitive value. 
+ */ + .extern artGet32StaticFromCode +ENTRY art_quick_get32_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r1, [sp, #32] @ pass referrer + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artGet32StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_get32_static_from_code + + /* + * Called by managed code to resolve a static field and load a 64-bit primitive value. + */ + .extern artGet64StaticFromCode +ENTRY art_quick_get64_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r1, [sp, #32] @ pass referrer + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_get64_static_from_code + + /* + * Called by managed code to resolve a static field and load an object reference. 
+ */ + .extern artGetObjStaticFromCode +ENTRY art_quick_get_obj_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r1, [sp, #32] @ pass referrer + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artGetObjStaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_get_obj_static_from_code + + /* + * Called by managed code to resolve an instance field and load a 32-bit primitive value. + */ + .extern artGet32InstanceFromCode +ENTRY art_quick_get32_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r2, [sp, #32] @ pass referrer + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! @ expand the frame and pass SP + bl artGet32InstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) + add sp, #16 @ strip the extra frame + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_get32_instance_from_code + + /* + * Called by managed code to resolve an instance field and load a 64-bit primitive value. + */ + .extern artGet64InstanceFromCode +ENTRY art_quick_get64_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r2, [sp, #32] @ pass referrer + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! 
@ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_get64_instance_from_code + + /* + * Called by managed code to resolve an instance field and load an object reference. + */ + .extern artGetObjInstanceFromCode +ENTRY art_quick_get_obj_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r2, [sp, #32] @ pass referrer + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! @ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + bl artGetObjInstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_get_obj_instance_from_code + + /* + * Called by managed code to resolve a static field and store a 32-bit primitive value. + */ + .extern artSet32StaticFromCode +ENTRY art_quick_set32_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r2, [sp, #32] @ pass referrer + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! 
@ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + bl artSet32StaticFromCode @ (field_idx, new_val, referrer, Thread*, SP) + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is 0 + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_set32_static_from_code + + /* + * Called by managed code to resolve a static field and store a 64-bit primitive value. + * On entry r0 holds field index, r1:r2 hold new_val + */ + .extern artSet64StaticFromCode +ENTRY art_quick_set64_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r3, r2 @ pass one half of wide argument + mov r2, r1 @ pass other half of wide argument + ldr r1, [sp, #32] @ pass referrer + mov r12, sp @ save SP + sub sp, #8 @ grow frame for alignment with stack args + .pad #8 + .cfi_adjust_cfa_offset 8 + push {r9, r12} @ pass Thread::Current and SP + .save {r9, r12} + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset r9, 0 + bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*, SP) + add sp, #16 @ release out args + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here + cmp r0, #0 @ success if result is 0 + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_set64_static_from_code + + /* + * Called by managed code to resolve a static field and store an object reference. + */ + .extern artSetObjStaticFromCode +ENTRY art_quick_set_obj_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r2, [sp, #32] @ pass referrer + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! 
@ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + bl artSetObjStaticFromCode @ (field_idx, new_val, referrer, Thread*, SP) + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is 0 + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_set_obj_static_from_code + + /* + * Called by managed code to resolve an instance field and store a 32-bit primitive value. + */ + .extern artSet32InstanceFromCode +ENTRY art_quick_set32_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r3, [sp, #32] @ pass referrer + mov r12, sp @ save SP + sub sp, #8 @ grow frame for alignment with stack args + .pad #8 + .cfi_adjust_cfa_offset 8 + push {r9, r12} @ pass Thread::Current and SP + .save {r9, r12} + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset r9, 0 + .cfi_rel_offset r12, 4 + bl artSet32InstanceFromCode @ (field_idx, Object*, new_val, referrer, Thread*, SP) + add sp, #16 @ release out args + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here + cmp r0, #0 @ success if result is 0 + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_set32_instance_from_code + + /* + * Called by managed code to resolve an instance field and store a 64-bit primitive value. 
+ */
+    .extern artSet64InstanceFromCode
+ENTRY art_quick_set64_instance_from_code
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r12, sp                    @ save SP
+    sub    sp, #8                     @ grow frame for alignment with stack args
+    .pad #8
+    .cfi_adjust_cfa_offset 8
+    push   {r9, r12}                  @ pass Thread::Current and SP
+    .save {r9, r12}
+    .cfi_adjust_cfa_offset 8
+    .cfi_rel_offset r9, 0
+    bl     artSet64InstanceFromCode   @ (field_idx, Object*, new_val, Thread*, SP)
+    add    sp, #16                    @ release out args
+    .cfi_adjust_cfa_offset -16
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  @ TODO: we can clearly save an add here
+    cmp    r0, #0                     @ success if result is 0
+    bxeq   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+END art_quick_set64_instance_from_code
+
+    /*
+     * Called by managed code to resolve an instance field and store an object reference.
+     */
+    .extern artSetObjInstanceFromCode
+ENTRY art_quick_set_obj_instance_from_code
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    ldr    r3, [sp, #32]              @ pass referrer
+    mov    r12, sp                    @ save SP
+    sub    sp, #8                     @ grow frame for alignment with stack args
+    .pad #8
+    .cfi_adjust_cfa_offset 8
+    push   {r9, r12}                  @ pass Thread::Current and SP
+    .save {r9, r12}
+    .cfi_adjust_cfa_offset 8
+    .cfi_rel_offset r9, 0
+    bl     artSetObjInstanceFromCode  @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+    add    sp, #16                    @ release out args
+    .cfi_adjust_cfa_offset -16
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  @ TODO: we can clearly save an add here
+    cmp    r0, #0                     @ success if result is 0
+    bxeq   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+END art_quick_set_obj_instance_from_code
+
+    /*
+     * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
+     * exception on error. On success the String is returned. R0 holds the referring method,
+     * R1 holds the string index. The fast path check for hit in strings cache has already been
+     * performed.
+ */ + .extern artResolveStringFromCode +ENTRY art_quick_resolve_string_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + @ artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, SP) + bl artResolveStringFromCode + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_resolve_string_from_code + + /* + * Called by managed code to allocate an object + */ + .extern artAllocObjectFromCode +ENTRY art_quick_alloc_object_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artAllocObjectFromCode @ (uint32_t type_idx, Method* method, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_alloc_object_from_code + + /* + * Called by managed code to allocate an object when the caller doesn't know whether it has + * access to the created type. + */ + .extern artAllocObjectFromCodeWithAccessCheck +ENTRY art_quick_alloc_object_from_code_with_access_check + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl artAllocObjectFromCodeWithAccessCheck @ (uint32_t type_idx, Method* method, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_alloc_object_from_code_with_access_check + + /* + * Called by managed code to allocate an array. + */ + .extern artAllocArrayFromCode +ENTRY art_quick_alloc_array_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! 
@ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + @ artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP) + bl artAllocArrayFromCode + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_alloc_array_from_code + + /* + * Called by managed code to allocate an array when the caller doesn't know whether it has + * access to the created type. + */ + .extern artAllocArrayFromCodeWithAccessCheck +ENTRY art_quick_alloc_array_from_code_with_access_check + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! @ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + @ artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, SP) + bl artAllocArrayFromCodeWithAccessCheck + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_alloc_array_from_code_with_access_check + + /* + * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. + */ + .extern artCheckAndAllocArrayFromCode +ENTRY art_quick_check_and_alloc_array_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! 
@ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + @ artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , SP) + bl artCheckAndAllocArrayFromCode + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_check_and_alloc_array_from_code + + /* + * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. + */ + .extern artCheckAndAllocArrayFromCodeWithAccessCheck +ENTRY art_quick_check_and_alloc_array_from_code_with_access_check + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! @ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + @ artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , SP) + bl artCheckAndAllocArrayFromCodeWithAccessCheck + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + cmp r0, #0 @ success if result is non-null + bxne lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_check_and_alloc_array_from_code_with_access_check + + /* + * Called by managed code when the value in rSUSPEND has been decremented to 0. + */ + .extern artTestSuspendFromCode +ENTRY art_quick_test_suspend + ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET] + mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL + cmp r0, #0 @ check Thread::Current()->suspend_count_ == 0 + bxeq lr @ return if suspend_count_ == 0 + mov r0, rSELF + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves for stack crawl + mov r1, sp + bl artTestSuspendFromCode @ (Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN +END art_quick_test_suspend + + /* + * Called by managed code that is attempting to call a method on a proxy class. 
On entry + * r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The + * frame size of the invoked proxy method agrees with a ref and args callee save frame. + */ + .extern artQuickProxyInvokeHandler +ENTRY art_quick_proxy_invoke_handler + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + str r0, [sp, #0] @ place proxy method at bottom of frame + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + ldr lr, [sp, #44] @ restore lr + add sp, #48 @ pop frame + .cfi_adjust_cfa_offset -48 + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_proxy_invoke_handler + + .extern artInterpreterEntry +ENTRY art_quick_interpreter_entry + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + str r0, [sp, #0] @ place proxy method at bottom of frame + mov r1, r9 @ pass Thread::Current + mov r2, sp @ pass SP + blx artInterpreterEntry @ (Method* method, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + ldr lr, [sp, #44] @ restore lr + add sp, #48 @ pop frame + .cfi_adjust_cfa_offset -48 + cmp r12, #0 @ success if no exception is pending + bxeq lr @ return on success + DELIVER_PENDING_EXCEPTION +END art_quick_interpreter_entry + + /* + * Routine that intercepts method calls and returns. + */ + .extern artInstrumentationMethodEntryFromCode + .extern artInstrumentationMethodExitFromCode +ENTRY art_quick_instrumentation_entry_from_code + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + str r0, [sp, #4] @ preserve r0 + mov r12, sp @ remember sp + str lr, [sp, #-16]! 
@ expand the frame and pass LR + .pad #16 + .cfi_adjust_cfa_offset 16 + .cfi_rel_offset lr, 0 + mov r2, r9 @ pass Thread::Current + mov r3, r12 @ pass SP + blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP, LR) + add sp, #16 @ remove out argument and padding from stack + .cfi_adjust_cfa_offset -16 + mov r12, r0 @ r12 holds reference to code + ldr r0, [sp, #4] @ restore r0 + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + blx r12 @ call method with lr set to art_quick_instrumentation_exit_from_code +END art_quick_instrumentation_entry_from_code + .type art_quick_instrumentation_exit_from_code, #function + .global art_quick_instrumentation_exit_from_code +art_quick_instrumentation_exit_from_code: + .cfi_startproc + .fnstart + mov lr, #0 @ link register is to here, so clobber with 0 for later checks + SETUP_REF_ONLY_CALLEE_SAVE_FRAME + mov r12, sp @ remember bottom of caller's frame + push {r0-r1} @ save return value + .save {r0-r1} + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r1, 4 + sub sp, #8 @ space for return value argument + .pad #8 + .cfi_adjust_cfa_offset 8 + strd r0, [sp] @ r0/r1 -> [sp] for fpr_res + mov r2, r0 @ pass return value as gpr_res + mov r3, r1 + mov r0, r9 @ pass Thread::Current + mov r1, r12 @ pass SP + blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res, fpr_res) + add sp, #8 + .cfi_adjust_cfa_offset -8 + + mov r2, r0 @ link register saved by instrumentation + mov lr, r1 @ r1 is holding link register if we're to bounce to deoptimize + pop {r0, r1} @ restore return value + add sp, #32 @ remove callee save frame + .cfi_adjust_cfa_offset -32 + bx r2 @ return +END art_quick_instrumentation_exit_from_code + + /* + * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization + * will long jump to the upcall with a special exception of -1. + */ + .extern artDeoptimize +ENTRY art_quick_deoptimize + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + mov r0, r9 @ Set up args. 
+ mov r1, sp + blx artDeoptimize @ artDeoptimize(Thread*, SP) +END art_quick_deoptimize + + /* + * Portable abstract method error stub. r0 contains method* on entry. SP unused in portable. + */ + .extern artThrowAbstractMethodErrorFromCode +ENTRY art_portable_abstract_method_error_stub + mov r1, r9 @ pass Thread::Current + b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) +END art_portable_abstract_method_error_stub + + /* + * Quick abstract method error stub. r0 contains method* on entry. + */ +ENTRY art_quick_abstract_method_error_stub + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + mov r1, r9 @ pass Thread::Current + mov r2, sp @ pass SP + b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) +END art_quick_abstract_method_error_stub + + /* + * Signed 64-bit integer multiply. + * + * Consider WXxYZ (r1r0 x r3r2) with a long multiply: + * WX + * x YZ + * -------- + * ZW ZX + * YW YX + * + * The low word of the result holds ZX, the high word holds + * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because + * it doesn't fit in the low 64 bits. + * + * Unlike most ARM math operations, multiply instructions have + * restrictions on using the same register more than once (Rd and Rm + * cannot be the same). + */ + /* mul-long vAA, vBB, vCC */ +ENTRY art_quick_mul_long + push {r9 - r10} + .save {r9 - r10} + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset r9, 0 + .cfi_rel_offset r10, 4 + mul ip, r2, r1 @ ip<- ZxW + umull r9, r10, r2, r0 @ r9/r10 <- ZxX + mla r2, r0, r3, ip @ r2<- YxX + (ZxW) + add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) + mov r0,r9 + mov r1,r10 + pop {r9 - r10} + .cfi_adjust_cfa_offset -8 + bx lr +END art_quick_mul_long + + /* + * Long integer shift. This is different from the generic 32/64-bit + * binary operations because vAA/vBB are 64-bit but vCC (the shift + * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low + * 6 bits. 
+ * On entry: + * r0: low word + * r1: high word + * r2: shift count + */ + /* shl-long vAA, vBB, vCC */ +ENTRY art_quick_shl_long + and r2, r2, #63 @ r2<- r2 & 0x3f + mov r1, r1, asl r2 @ r1<- r1 << r2 + rsb r3, r2, #32 @ r3<- 32 - r2 + orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) + subs ip, r2, #32 @ ip<- r2 - 32 + movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) + mov r0, r0, asl r2 @ r0<- r0 << r2 + bx lr +END art_quick_shl_long + + /* + * Long integer shift. This is different from the generic 32/64-bit + * binary operations because vAA/vBB are 64-bit but vCC (the shift + * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low + * 6 bits. + * On entry: + * r0: low word + * r1: high word + * r2: shift count + */ + /* shr-long vAA, vBB, vCC */ +ENTRY art_quick_shr_long + and r2, r2, #63 @ r0<- r0 & 0x3f + mov r0, r0, lsr r2 @ r0<- r2 >> r2 + rsb r3, r2, #32 @ r3<- 32 - r2 + orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) + subs ip, r2, #32 @ ip<- r2 - 32 + movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) + mov r1, r1, asr r2 @ r1<- r1 >> r2 + bx lr +END art_quick_shr_long + + /* + * Long integer shift. This is different from the generic 32/64-bit + * binary operations because vAA/vBB are 64-bit but vCC (the shift + * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low + * 6 bits. + * On entry: + * r0: low word + * r1: high word + * r2: shift count + */ + /* ushr-long vAA, vBB, vCC */ +ENTRY art_quick_ushr_long + and r2, r2, #63 @ r0<- r0 & 0x3f + mov r0, r0, lsr r2 @ r0<- r2 >> r2 + rsb r3, r2, #32 @ r3<- 32 - r2 + orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) + subs ip, r2, #32 @ ip<- r2 - 32 + movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) + mov r1, r1, lsr r2 @ r1<- r1 >>> r2 + bx lr +END art_quick_ushr_long + + /* + * String's indexOf. 
+ * + * On entry: + * r0: string object (known non-null) + * r1: char to match (known <= 0xFFFF) + * r2: Starting offset in string data + */ +ENTRY art_quick_indexof + push {r4, r10-r11, lr} @ 4 words of callee saves + .save {r4, r10-r11, lr} + .cfi_adjust_cfa_offset 16 + .cfi_rel_offset r4, 0 + .cfi_rel_offset r10, 4 + .cfi_rel_offset r11, 8 + .cfi_rel_offset lr, 12 + ldr r3, [r0, #STRING_COUNT_OFFSET] + ldr r12, [r0, #STRING_OFFSET_OFFSET] + ldr r0, [r0, #STRING_VALUE_OFFSET] + + /* Clamp start to [0..count] */ + cmp r2, #0 + movlt r2, #0 + cmp r2, r3 + movgt r2, r3 + + /* Build a pointer to the start of string data */ + add r0, #STRING_DATA_OFFSET + add r0, r0, r12, lsl #1 + + /* Save a copy in r12 to later compute result */ + mov r12, r0 + + /* Build pointer to start of data to compare and pre-bias */ + add r0, r0, r2, lsl #1 + sub r0, #2 + + /* Compute iteration count */ + sub r2, r3, r2 + + /* + * At this point we have: + * r0: start of data to test + * r1: char to compare + * r2: iteration count + * r12: original start of string data + * r3, r4, r10, r11 available for loading string data + */ + + subs r2, #4 + blt indexof_remainder + +indexof_loop4: + ldrh r3, [r0, #2]! + ldrh r4, [r0, #2]! + ldrh r10, [r0, #2]! + ldrh r11, [r0, #2]! + cmp r3, r1 + beq match_0 + cmp r4, r1 + beq match_1 + cmp r10, r1 + beq match_2 + cmp r11, r1 + beq match_3 + subs r2, #4 + bge indexof_loop4 + +indexof_remainder: + adds r2, #4 + beq indexof_nomatch + +indexof_loop1: + ldrh r3, [r0, #2]! + cmp r3, r1 + beq match_3 + subs r2, #1 + bne indexof_loop1 + +indexof_nomatch: + mov r0, #-1 + pop {r4, r10-r11, pc} + +match_0: + sub r0, #6 + sub r0, r12 + asr r0, r0, #1 + pop {r4, r10-r11, pc} +match_1: + sub r0, #4 + sub r0, r12 + asr r0, r0, #1 + pop {r4, r10-r11, pc} +match_2: + sub r0, #2 + sub r0, r12 + asr r0, r0, #1 + pop {r4, r10-r11, pc} +match_3: + sub r0, r12 + asr r0, r0, #1 + pop {r4, r10-r11, pc} +END art_quick_indexof + + /* + * String's compareTo. 
+ * + * Requires rARG0/rARG1 to have been previously checked for null. Will + * return negative if this's string is < comp, 0 if they are the + * same and positive if >. + * + * On entry: + * r0: this object pointer + * r1: comp object pointer + * + */ + .extern __memcmp16 +ENTRY art_quick_string_compareto + mov r2, r0 @ this to r2, opening up r0 for return value + subs r0, r2, r1 @ Same? + bxeq lr + + push {r4, r7-r12, lr} @ 8 words - keep alignment + .save {r4, r7-r12, lr} + .cfi_adjust_cfa_offset 32 + .cfi_rel_offset r4, 0 + .cfi_rel_offset r7, 4 + .cfi_rel_offset r8, 8 + .cfi_rel_offset r9, 12 + .cfi_rel_offset r10, 16 + .cfi_rel_offset r11, 20 + .cfi_rel_offset r12, 24 + .cfi_rel_offset lr, 28 + + ldr r4, [r2, #STRING_OFFSET_OFFSET] + ldr r9, [r1, #STRING_OFFSET_OFFSET] + ldr r7, [r2, #STRING_COUNT_OFFSET] + ldr r10, [r1, #STRING_COUNT_OFFSET] + ldr r2, [r2, #STRING_VALUE_OFFSET] + ldr r1, [r1, #STRING_VALUE_OFFSET] + + /* + * At this point, we have: + * value: r2/r1 + * offset: r4/r9 + * count: r7/r10 + * We're going to compute + * r11 <- countDiff + * r10 <- minCount + */ + subs r11, r7, r10 + movls r10, r7 + + /* Now, build pointers to the string data */ + add r2, r2, r4, lsl #1 + add r1, r1, r9, lsl #1 + /* + * Note: data pointers point to previous element so we can use pre-index + * mode with base writeback. + */ + add r2, #STRING_DATA_OFFSET-2 @ offset to contents[-1] + add r1, #STRING_DATA_OFFSET-2 @ offset to contents[-1] + + /* + * At this point we have: + * r2: *this string data + * r1: *comp string data + * r10: iteration count for comparison + * r11: value to return if the first part of the string is equal + * r0: reserved for result + * r3, r4, r7, r8, r9, r12 available for loading string data + */ + + subs r10, #2 + blt do_remainder2 + + /* + * Unroll the first two checks so we can quickly catch early mismatch + * on long strings (but preserve incoming alignment) + */ + + ldrh r3, [r2, #2]! + ldrh r4, [r1, #2]! + ldrh r7, [r2, #2]! 
+ ldrh r8, [r1, #2]! + subs r0, r3, r4 + subeqs r0, r7, r8 + bne done + cmp r10, #28 + bgt do_memcmp16 + subs r10, #3 + blt do_remainder + +loopback_triple: + ldrh r3, [r2, #2]! + ldrh r4, [r1, #2]! + ldrh r7, [r2, #2]! + ldrh r8, [r1, #2]! + ldrh r9, [r2, #2]! + ldrh r12,[r1, #2]! + subs r0, r3, r4 + subeqs r0, r7, r8 + subeqs r0, r9, r12 + bne done + subs r10, #3 + bge loopback_triple + +do_remainder: + adds r10, #3 + beq returnDiff + +loopback_single: + ldrh r3, [r2, #2]! + ldrh r4, [r1, #2]! + subs r0, r3, r4 + bne done + subs r10, #1 + bne loopback_single + +returnDiff: + mov r0, r11 + pop {r4, r7-r12, pc} + +do_remainder2: + adds r10, #2 + bne loopback_single + mov r0, r11 + pop {r4, r7-r12, pc} + + /* Long string case */ +do_memcmp16: + mov r7, r11 + add r0, r2, #2 + add r1, r1, #2 + mov r2, r10 + bl __memcmp16 + cmp r0, #0 + moveq r0, r7 +done: + pop {r4, r7-r12, pc} +END art_quick_string_compareto diff --git a/runtime/arch/arm/registers_arm.cc b/runtime/arch/arm/registers_arm.cc new file mode 100644 index 0000000000..4f046479f1 --- /dev/null +++ b/runtime/arch/arm/registers_arm.cc @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "registers_arm.h" + +#include + +namespace art { +namespace arm { + +static const char* kRegisterNames[] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", + "fp", "ip", "sp", "lr", "pc" +}; +std::ostream& operator<<(std::ostream& os, const Register& rhs) { + if (rhs >= R0 && rhs <= PC) { + os << kRegisterNames[rhs]; + } else { + os << "Register[" << static_cast(rhs) << "]"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const SRegister& rhs) { + if (rhs >= S0 && rhs < kNumberOfSRegisters) { + os << "s" << static_cast(rhs); + } else { + os << "SRegister[" << static_cast(rhs) << "]"; + } + return os; +} + +} // namespace arm +} // namespace art diff --git a/runtime/arch/arm/registers_arm.h b/runtime/arch/arm/registers_arm.h new file mode 100644 index 0000000000..932095d0c9 --- /dev/null +++ b/runtime/arch/arm/registers_arm.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_ARM_REGISTERS_ARM_H_ +#define ART_RUNTIME_ARCH_ARM_REGISTERS_ARM_H_ + +#include + +namespace art { +namespace arm { + +// Values for registers. 
+enum Register { + R0 = 0, + R1 = 1, + R2 = 2, + R3 = 3, + R4 = 4, + R5 = 5, + R6 = 6, + R7 = 7, + R8 = 8, + R9 = 9, + R10 = 10, + R11 = 11, + R12 = 12, + R13 = 13, + R14 = 14, + R15 = 15, + TR = 9, // thread register + FP = 11, + IP = 12, + SP = 13, + LR = 14, + PC = 15, + kNumberOfCoreRegisters = 16, + kNoRegister = -1, +}; +std::ostream& operator<<(std::ostream& os, const Register& rhs); + + +// Values for single-precision floating point registers. +enum SRegister { + S0 = 0, + S1 = 1, + S2 = 2, + S3 = 3, + S4 = 4, + S5 = 5, + S6 = 6, + S7 = 7, + S8 = 8, + S9 = 9, + S10 = 10, + S11 = 11, + S12 = 12, + S13 = 13, + S14 = 14, + S15 = 15, + S16 = 16, + S17 = 17, + S18 = 18, + S19 = 19, + S20 = 20, + S21 = 21, + S22 = 22, + S23 = 23, + S24 = 24, + S25 = 25, + S26 = 26, + S27 = 27, + S28 = 28, + S29 = 29, + S30 = 30, + S31 = 31, + kNumberOfSRegisters = 32, + kNoSRegister = -1, +}; +std::ostream& operator<<(std::ostream& os, const SRegister& rhs); + +} // namespace arm +} // namespace art + +#endif // ART_RUNTIME_ARCH_ARM_REGISTERS_ARM_H_ diff --git a/runtime/arch/arm/thread_arm.cc b/runtime/arch/arm/thread_arm.cc new file mode 100644 index 0000000000..ea908be22c --- /dev/null +++ b/runtime/arch/arm/thread_arm.cc @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "thread.h" + +#include "asm_support_arm.h" +#include "base/logging.h" + +namespace art { + +void Thread::InitCpu() { + CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/arch/context.cc b/runtime/arch/context.cc new file mode 100644 index 0000000000..7075e42575 --- /dev/null +++ b/runtime/arch/context.cc @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "context.h" + +#if defined(__arm__) +#include "arm/context_arm.h" +#elif defined(__mips__) +#include "mips/context_mips.h" +#elif defined(__i386__) +#include "x86/context_x86.h" +#endif + +namespace art { + +Context* Context::Create() { +#if defined(__arm__) + return new arm::ArmContext(); +#elif defined(__mips__) + return new mips::MipsContext(); +#elif defined(__i386__) + return new x86::X86Context(); +#else + UNIMPLEMENTED(FATAL); +#endif +} + +} // namespace art diff --git a/runtime/arch/context.h b/runtime/arch/context.h new file mode 100644 index 0000000000..91e0cd69db --- /dev/null +++ b/runtime/arch/context.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_CONTEXT_H_ +#define ART_RUNTIME_ARCH_CONTEXT_H_ + +#include +#include + +namespace art { + +class StackVisitor; + +// Representation of a thread's context on the executing machine, used to implement long jumps in +// the quick stack frame layout. +class Context { + public: + // Creates a context for the running architecture + static Context* Create(); + + virtual ~Context() {} + + // Re-initializes the registers for context re-use. + virtual void Reset() = 0; + + // Read values from callee saves in the given frame. The frame also holds + // the method that holds the layout. + virtual void FillCalleeSaves(const StackVisitor& fr) = 0; + + // Set the stack pointer value + virtual void SetSP(uintptr_t new_sp) = 0; + + // Set the program counter value + virtual void SetPC(uintptr_t new_pc) = 0; + + // Read the given GPR + virtual uintptr_t GetGPR(uint32_t reg) = 0; + + // Set the given GPR. + virtual void SetGPR(uint32_t reg, uintptr_t value) = 0; + + // Smash the caller save registers. If we're throwing, we don't want to return bogus values. 
+ virtual void SmashCallerSaves() = 0; + + // Switch execution of the executing context to this context + virtual void DoLongJump() = 0; + + protected: + enum { + kBadGprBase = 0xebad6070, + kBadFprBase = 0xebad8070, + }; +}; + +} // namespace art + +#endif // ART_RUNTIME_ARCH_CONTEXT_H_ diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S new file mode 100644 index 0000000000..8a34b9dbd0 --- /dev/null +++ b/runtime/arch/mips/asm_support_mips.S @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ +#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ + +#include "asm_support_mips.h" + + /* Cache alignment for function entry */ +.macro ENTRY name + .type \name, %function + .global \name + .balign 16 +\name: + .cfi_startproc +.endm + +.macro END name + .cfi_endproc + .size \name, .-\name +.endm + + /* Generates $gp for function calls */ +.macro GENERATE_GLOBAL_POINTER + .cpload $t9 +.endm + +#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h new file mode 100644 index 0000000000..9a66352ad1 --- /dev/null +++ b/runtime/arch/mips/asm_support_mips.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ +#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ + +#include "asm_support.h" + +// Register holding suspend check count down. +#define rSUSPEND $s0 +// Register holding Thread::Current(). 
+#define rSELF $s1 +// Offset of field Thread::suspend_count_ verified in InitCpu +#define THREAD_FLAGS_OFFSET 0 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc new file mode 100644 index 0000000000..a78e5ee80d --- /dev/null +++ b/runtime/arch/mips/context_mips.cc @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "context_mips.h" + +#include "mirror/abstract_method.h" +#include "mirror/object-inl.h" +#include "stack.h" + +namespace art { +namespace mips { + +static const uint32_t gZero = 0; + +void MipsContext::Reset() { + for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { + gprs_[i] = NULL; + } + for (size_t i = 0; i < kNumberOfFRegisters; i++) { + fprs_[i] = NULL; + } + gprs_[SP] = &sp_; + gprs_[RA] = &ra_; + // Initialize registers with easy to spot debug values. 
+ sp_ = MipsContext::kBadGprBase + SP; + ra_ = MipsContext::kBadGprBase + RA; +} + +void MipsContext::FillCalleeSaves(const StackVisitor& fr) { + mirror::AbstractMethod* method = fr.GetMethod(); + uint32_t core_spills = method->GetCoreSpillMask(); + uint32_t fp_core_spills = method->GetFpSpillMask(); + size_t spill_count = __builtin_popcount(core_spills); + size_t fp_spill_count = __builtin_popcount(fp_core_spills); + size_t frame_size = method->GetFrameSizeInBytes(); + if (spill_count > 0) { + // Lowest number spill is farthest away, walk registers and fill into context. + int j = 1; + for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { + if (((core_spills >> i) & 1) != 0) { + gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size); + j++; + } + } + } + if (fp_spill_count > 0) { + // Lowest number spill is farthest away, walk registers and fill into context. + int j = 1; + for (size_t i = 0; i < kNumberOfFRegisters; i++) { + if (((fp_core_spills >> i) & 1) != 0) { + fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size); + j++; + } + } + } +} + +void MipsContext::SetGPR(uint32_t reg, uintptr_t value) { + CHECK_LT(reg, static_cast(kNumberOfCoreRegisters)); + CHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since they are never reset. + CHECK(gprs_[reg] != NULL); + *gprs_[reg] = value; +} + +void MipsContext::SmashCallerSaves() { + // This needs to be 0 because we want a null/zero return value. + gprs_[V0] = const_cast(&gZero); + gprs_[V1] = const_cast(&gZero); + gprs_[A1] = NULL; + gprs_[A2] = NULL; + gprs_[A3] = NULL; +} + +extern "C" void art_quick_do_long_jump(uint32_t*, uint32_t*); + +void MipsContext::DoLongJump() { + uintptr_t gprs[kNumberOfCoreRegisters]; + uint32_t fprs[kNumberOfFRegisters]; + for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) { + gprs[i] = gprs_[i] != NULL ? *gprs_[i] : MipsContext::kBadGprBase + i; + } + for (size_t i = 0; i < kNumberOfFRegisters; ++i) { + fprs[i] = fprs_[i] != NULL ? 
*fprs_[i] : MipsContext::kBadGprBase + i; + } + art_quick_do_long_jump(gprs, fprs); +} + +} // namespace mips +} // namespace art diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h new file mode 100644 index 0000000000..5595f8631e --- /dev/null +++ b/runtime/arch/mips/context_mips.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_ +#define ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_ + +#include "arch/context.h" +#include "base/logging.h" +#include "registers_mips.h" + +namespace art { +namespace mips { + +class MipsContext : public Context { + public: + MipsContext() { + Reset(); + } + virtual ~MipsContext() {} + + virtual void Reset(); + + virtual void FillCalleeSaves(const StackVisitor& fr); + + virtual void SetSP(uintptr_t new_sp) { + SetGPR(SP, new_sp); + } + + virtual void SetPC(uintptr_t new_pc) { + SetGPR(RA, new_pc); + } + + virtual uintptr_t GetGPR(uint32_t reg) { + CHECK_LT(reg, static_cast(kNumberOfCoreRegisters)); + return *gprs_[reg]; + } + + virtual void SetGPR(uint32_t reg, uintptr_t value); + virtual void SmashCallerSaves(); + virtual void DoLongJump(); + + private: + // Pointers to registers in the stack, initialized to NULL except for the special cases below. 
+ uintptr_t* gprs_[kNumberOfCoreRegisters]; + uint32_t* fprs_[kNumberOfFRegisters]; + // Hold values for sp and ra (return address) if they are not located within a stack frame. + uintptr_t sp_, ra_; +}; +} // namespace mips +} // namespace art + +#endif // ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_ diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc new file mode 100644 index 0000000000..0a62a4096d --- /dev/null +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" +#include "entrypoints/math_entrypoints.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Exception entrypoints. +extern "C" void* GetAndClearException(Thread*); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern int32_t CmpgDouble(double a, double b); +extern int32_t CmplDouble(double a, double b); +extern int32_t CmpgFloat(float a, float b); +extern int32_t CmplFloat(float a, float b); +extern "C" int64_t artLmulFromCode(int64_t a, int64_t b); +extern "C" int64_t artLdivFromCode(int64_t a, int64_t b); +extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b); + +// Math conversions. +extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT +extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT +extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT +extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE +extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG +extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG + +// Single-precision FP arithmetics. +extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] + +// Double-precision FP arithmetics. +extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] + +// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] +extern "C" int64_t __divdi3(int64_t, int64_t); +extern "C" int64_t __moddi3(int64_t, int64_t); +extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t __memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + qpoints->pCmpgDouble = CmpgDouble; + qpoints->pCmpgFloat = CmpgFloat; + qpoints->pCmplDouble = CmplDouble; + qpoints->pCmplFloat = CmplFloat; + qpoints->pFmod = fmod; + qpoints->pL2d = __floatdidf; + qpoints->pFmodf = fmodf; + qpoints->pL2f = __floatdisf; + qpoints->pD2iz = __fixdfsi; + qpoints->pF2iz = __fixsfsi; + qpoints->pIdivmod = NULL; + qpoints->pD2l = art_d2l; + qpoints->pF2l = art_f2l; + qpoints->pLdiv = artLdivFromCode; + qpoints->pLdivmod = artLdivmodFromCode; + qpoints->pLmul = artLmulFromCode; + qpoints->pShlLong = art_quick_shl_long; + qpoints->pShrLong = art_quick_shr_long; + qpoints->pUshrLong = art_quick_ushr_long; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = __memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception_from_code; + 
qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S new file mode 100644 index 0000000000..fca6d777ab --- /dev/null +++ b/runtime/arch/mips/jni_entrypoints_mips.S @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_mips.S" + + .set noreorder + .balign 4 + + /* + * Jni dlsym lookup stub. 
+ */
+    .extern artFindNativeMethod
+ENTRY art_jni_dlsym_lookup_stub
+    GENERATE_GLOBAL_POINTER
+    addiu $sp, $sp, -32          # leave room for $a0, $a1, $a2, $a3, and $ra
+    .cfi_adjust_cfa_offset 32
+    sw    $ra, 16($sp)
+    .cfi_rel_offset 31, 16
+    sw    $a3, 12($sp)
+    .cfi_rel_offset 7, 12
+    sw    $a2, 8($sp)
+    .cfi_rel_offset 6, 8
+    sw    $a1, 4($sp)
+    .cfi_rel_offset 5, 4
+    sw    $a0, 0($sp)
+    .cfi_rel_offset 4, 0
+    jal   artFindNativeMethod    # (Thread*)
+    move  $a0, $s1               # pass Thread::Current()
+    lw    $a0, 0($sp)            # restore registers from stack
+    lw    $a1, 4($sp)
+    lw    $a2, 8($sp)
+    lw    $a3, 12($sp)
+    lw    $ra, 16($sp)
+    beq   $v0, $zero, no_native_code_found
+    addiu $sp, $sp, 32           # restore the stack
+    .cfi_adjust_cfa_offset -32
+    move  $t9, $v0               # put method code result in $t9
+    jr    $t9                    # leaf call to method's code
+    nop
+no_native_code_found:
+    jr    $ra
+    nop
+END art_jni_dlsym_lookup_stub
+
+    /*
+     * Entry point of native methods when JNI bug compatibility is enabled.
+     */
+    .extern artWorkAroundAppJniBugs
+ENTRY art_quick_work_around_app_jni_bugs
+    GENERATE_GLOBAL_POINTER
+    # save registers that may contain arguments and LR that will be crushed by a call
+    addiu $sp, $sp, -32
+    .cfi_adjust_cfa_offset 32
+    sw    $ra, 28($sp)
+    .cfi_rel_offset 31, 28
+    sw    $a3, 24($sp)
+    .cfi_rel_offset 7, 24
+    sw    $a2, 20($sp)
+    .cfi_rel_offset 6, 20
+    sw    $a1, 16($sp)
+    .cfi_rel_offset 5, 16
+    sw    $a0, 12($sp)
+    .cfi_rel_offset 4, 12
+    move  $a0, rSELF             # pass Thread::Current
+    jal   artWorkAroundAppJniBugs # (Thread*, $sp)
+    move  $a1, $sp               # pass $sp
+    move  $t9, $v0               # save target address
+    lw    $a0, 12($sp)
+    lw    $a1, 16($sp)
+    lw    $a2, 20($sp)
+    lw    $a3, 24($sp)
+    lw    $ra, 28($sp)
+    jr    $t9                    # tail call into JNI routine
+    addiu $sp, $sp, 32
+    .cfi_adjust_cfa_offset -32
+END art_quick_work_around_app_jni_bugs
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
new file mode 100644
index 0000000000..e7a9b0fb60
--- /dev/null
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -0,0
+1,73 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_mips.S" + + .set noreorder + .balign 4 + + .extern artPortableProxyInvokeHandler +ENTRY art_portable_proxy_invoke_handler + GENERATE_GLOBAL_POINTER + # Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + # TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + addiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sw $ra, 60($sp) + .cfi_rel_offset 31, 60 + sw $s8, 56($sp) + .cfi_rel_offset 30, 56 + sw $gp, 52($sp) + .cfi_rel_offset 28, 52 + sw $s7, 48($sp) + .cfi_rel_offset 23, 48 + sw $s6, 44($sp) + .cfi_rel_offset 22, 44 + sw $s5, 40($sp) + .cfi_rel_offset 21, 40 + sw $s4, 36($sp) + .cfi_rel_offset 20, 36 + sw $s3, 32($sp) + .cfi_rel_offset 19, 32 + sw $s2, 28($sp) + .cfi_rel_offset 18, 28 + sw $a3, 12($sp) + .cfi_rel_offset 7, 12 + sw $a2, 8($sp) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + # Begin argument set up. + sw $a0, 0($sp) # place proxy method at bottom of frame + move $a2, rSELF # pass Thread::Current + jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) + move $a3, $sp # pass $sp + lw $ra, 60($sp) # restore $ra + jr $ra + addiu $sp, $sp, 64 # pop frame + .cfi_adjust_cfa_offset -64 +END art_portable_proxy_invoke_handler + + /* + * Portable abstract method error stub. 
$a0 contains method* on entry. SP unused in portable. + */ + .extern artThrowAbstractMethodErrorFromCode +ENTRY art_portable_abstract_method_error_stub + GENERATE_GLOBAL_POINTER + la $t9, artThrowAbstractMethodErrorFromCode + jr $t9 # (Method*, Thread*, SP) + move $a1, $s1 # pass Thread::Current +END art_portable_abstract_method_error_stub diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S new file mode 100644 index 0000000000..d32a2b4a15 --- /dev/null +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -0,0 +1,1074 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "asm_support_mips.S" + + .set noreorder + .balign 4 + + /* Deliver the given exception */ + .extern artDeliverExceptionFromCode + /* Deliver an exception pending on a thread */ + .extern artDeliverPendingExceptionFromCode + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveAll) + * callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word padding + 4 open words for args + */ +.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + addiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sw $ra, 60($sp) + .cfi_rel_offset 31, 60 + sw $s8, 56($sp) + .cfi_rel_offset 30, 56 + sw $gp, 52($sp) + .cfi_rel_offset 28, 52 + sw $s7, 48($sp) + .cfi_rel_offset 23, 48 + sw $s6, 44($sp) + .cfi_rel_offset 22, 44 + sw $s5, 40($sp) + .cfi_rel_offset 21, 40 + sw $s4, 36($sp) + .cfi_rel_offset 20, 36 + sw $s3, 32($sp) + .cfi_rel_offset 19, 32 + sw $s2, 28($sp) + .cfi_rel_offset 18, 28 + sw $s1, 24($sp) + .cfi_rel_offset 17, 24 + sw $s0, 20($sp) + .cfi_rel_offset 16, 20 + # 1 word for alignment, 4 open words for args $a0-$a3, bottom will hold Method* +.endm + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC. 
+ * Does not include rSUSPEND or rSELF + * callee-save: $s2-$s8 + $gp + $ra, 9 total + 3 words padding + 4 open words for args + */ +.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME + addiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sw $ra, 60($sp) + .cfi_rel_offset 31, 60 + sw $s8, 56($sp) + .cfi_rel_offset 30, 56 + sw $gp, 52($sp) + .cfi_rel_offset 28, 52 + sw $s7, 48($sp) + .cfi_rel_offset 23, 48 + sw $s6, 44($sp) + .cfi_rel_offset 22, 44 + sw $s5, 40($sp) + .cfi_rel_offset 21, 40 + sw $s4, 36($sp) + .cfi_rel_offset 20, 36 + sw $s3, 32($sp) + .cfi_rel_offset 19, 32 + sw $s2, 28($sp) + .cfi_rel_offset 18, 28 + # 3 words for alignment and extra args, 4 open words for args $a0-$a3, bottom will hold Method* +.endm + +.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + lw $gp, 52($sp) + lw $ra, 60($sp) + addiu $sp, $sp, 64 + .cfi_adjust_cfa_offset -64 +.endm + +.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN + lw $gp, 52($sp) + lw $ra, 60($sp) + jr $ra + addiu $sp, $sp, 64 + .cfi_adjust_cfa_offset -64 +.endm + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC. 
+ * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method* + */ +.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + addiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sw $ra, 60($sp) + .cfi_rel_offset 31, 60 + sw $s8, 56($sp) + .cfi_rel_offset 30, 56 + sw $gp, 52($sp) + .cfi_rel_offset 28, 52 + sw $s7, 48($sp) + .cfi_rel_offset 23, 48 + sw $s6, 44($sp) + .cfi_rel_offset 22, 44 + sw $s5, 40($sp) + .cfi_rel_offset 21, 40 + sw $s4, 36($sp) + .cfi_rel_offset 20, 36 + sw $s3, 32($sp) + .cfi_rel_offset 19, 32 + sw $s2, 28($sp) + .cfi_rel_offset 18, 28 + sw $a3, 12($sp) + .cfi_rel_offset 7, 12 + sw $a2, 8($sp) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + # bottom will hold Method* +.endm + +.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + lw $ra, 60($sp) # restore $ra + lw $gp, 52($sp) # restore $gp + lw $a1, 4($sp) # restore non-callee save $a1 + lw $a2, 8($sp) # restore non-callee save $a2 + lw $a3, 12($sp) # restore non-callee save $a3 + addiu $sp, $sp, 64 # strip frame + .cfi_adjust_cfa_offset -64 +.endm + + /* + * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending + * exception is Thread::Current()->exception_ + */ +.macro DELIVER_PENDING_EXCEPTION + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw + move $a0, rSELF # pass Thread::Current + la $t9, artDeliverPendingExceptionFromCode + jr $t9 # artDeliverPendingExceptionFromCode(Thread*, $sp) + move $a1, $sp # pass $sp +.endm + +.macro RETURN_IF_NO_EXCEPTION + lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_ + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + bnez $t0, 1f # success if no exception is pending + nop + jr $ra + nop +1: + DELIVER_PENDING_EXCEPTION +.endm + +.macro RETURN_IF_ZERO + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + bnez $v0, 1f # success? 
+ nop + jr $ra # return on success + nop +1: + DELIVER_PENDING_EXCEPTION +.endm + +.macro RETURN_IF_NONZERO + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + beqz $v0, 1f # success? + nop + jr $ra # return on success + nop +1: + DELIVER_PENDING_EXCEPTION +.endm + + /* + * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_ + * FIXME: just guessing about the shape of the jmpbuf. Where will pc be? + */ +ENTRY art_quick_do_long_jump + l.s $f0, 0($a1) + l.s $f1, 4($a1) + l.s $f2, 8($a1) + l.s $f3, 12($a1) + l.s $f4, 16($a1) + l.s $f5, 20($a1) + l.s $f6, 24($a1) + l.s $f7, 28($a1) + l.s $f8, 32($a1) + l.s $f9, 36($a1) + l.s $f10, 40($a1) + l.s $f11, 44($a1) + l.s $f12, 48($a1) + l.s $f13, 52($a1) + l.s $f14, 56($a1) + l.s $f15, 60($a1) + l.s $f16, 64($a1) + l.s $f17, 68($a1) + l.s $f18, 72($a1) + l.s $f19, 76($a1) + l.s $f20, 80($a1) + l.s $f21, 84($a1) + l.s $f22, 88($a1) + l.s $f23, 92($a1) + l.s $f24, 96($a1) + l.s $f25, 100($a1) + l.s $f26, 104($a1) + l.s $f27, 108($a1) + l.s $f28, 112($a1) + l.s $f29, 116($a1) + l.s $f30, 120($a1) + l.s $f31, 124($a1) + lw $at, 4($a0) + lw $v0, 8($a0) + lw $v1, 12($a0) + lw $a1, 20($a0) + lw $a2, 24($a0) + lw $a3, 28($a0) + lw $t0, 32($a0) + lw $t1, 36($a0) + lw $t2, 40($a0) + lw $t3, 44($a0) + lw $t4, 48($a0) + lw $t5, 52($a0) + lw $t6, 56($a0) + lw $t7, 60($a0) + lw $s0, 64($a0) + lw $s1, 68($a0) + lw $s2, 72($a0) + lw $s3, 76($a0) + lw $s4, 80($a0) + lw $s5, 84($a0) + lw $s6, 88($a0) + lw $s7, 92($a0) + lw $t8, 96($a0) + lw $t9, 100($a0) + lw $k0, 104($a0) + lw $k1, 108($a0) + lw $gp, 112($a0) + lw $sp, 116($a0) + lw $fp, 120($a0) + lw $ra, 124($a0) + lw $a0, 16($a0) + move $v0, $zero # clear result registers r0 and r1 + jr $ra # do long jump + move $v1, $zero +END art_quick_do_long_jump + + /* + * Called by managed code, saves most registers (forms basis of long jump context) and passes + * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at + * the bottom of the thread. 
On entry r0 holds Throwable* + */ +ENTRY art_quick_deliver_exception_from_code + GENERATE_GLOBAL_POINTER + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + move $a1, rSELF # pass Thread::Current + la $t9, artDeliverExceptionFromCode + jr $t9 # artDeliverExceptionFromCode(Throwable*, Thread*, $sp) + move $a2, $sp # pass $sp +END art_quick_deliver_exception_from_code + + /* + * Called by managed code to create and deliver a NullPointerException + */ + .extern artThrowNullPointerExceptionFromCode +ENTRY art_quick_throw_null_pointer_exception_from_code + GENERATE_GLOBAL_POINTER + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + move $a0, rSELF # pass Thread::Current + la $t9, artThrowNullPointerExceptionFromCode + jr $t9 # artThrowNullPointerExceptionFromCode(Thread*, $sp) + move $a1, $sp # pass $sp +END art_quick_throw_null_pointer_exception_from_code + + /* + * Called by managed code to create and deliver an ArithmeticException + */ + .extern artThrowDivZeroFromCode +ENTRY art_quick_throw_div_zero_from_code + GENERATE_GLOBAL_POINTER + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + move $a0, rSELF # pass Thread::Current + la $t9, artThrowDivZeroFromCode + jr $t9 # artThrowDivZeroFromCode(Thread*, $sp) + move $a1, $sp # pass $sp +END art_quick_throw_div_zero_from_code + + /* + * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException + */ + .extern artThrowArrayBoundsFromCode +ENTRY art_quick_throw_array_bounds_from_code + GENERATE_GLOBAL_POINTER + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + move $a2, rSELF # pass Thread::Current + la $t9, artThrowArrayBoundsFromCode + jr $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*, $sp) + move $a3, $sp # pass $sp +END art_quick_throw_array_bounds_from_code + + /* + * Called by managed code to create and deliver a StackOverflowError. 
+ */
+    .extern artThrowStackOverflowFromCode
+ENTRY art_quick_throw_stack_overflow_from_code
+    GENERATE_GLOBAL_POINTER
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move  $a0, rSELF             # pass Thread::Current
+    la    $t9, artThrowStackOverflowFromCode
+    jr    $t9                    # artThrowStackOverflowFromCode(Thread*, $sp)
+    move  $a1, $sp               # pass $sp
+END art_quick_throw_stack_overflow_from_code
+
+    /*
+     * Called by managed code to create and deliver a NoSuchMethodError.
+     */
+    .extern artThrowNoSuchMethodFromCode
+ENTRY art_quick_throw_no_such_method_from_code
+    GENERATE_GLOBAL_POINTER
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move  $a1, rSELF             # pass Thread::Current
+    la    $t9, artThrowNoSuchMethodFromCode
+    jr    $t9                    # artThrowNoSuchMethodFromCode(method_idx, Thread*, $sp)
+    move  $a2, $sp               # pass $sp
+END art_quick_throw_no_such_method_from_code
+
+    /*
+     * All generated callsites for interface invokes and invocation slow paths will load arguments
+     * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
+     * the method_idx.  This wrapper will save arg1-arg3, load the caller's Method*, align the
+     * stack and call the appropriate C helper.
+     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/$a1.
+     *
+     * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
+     * of the target Method* in $v0 and method->code_ in $v1.
+     *
+     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+     * thread and we branch to another stub to deliver it.
+     *
+     * On success this wrapper will restore arguments and *jump* to the target, leaving the $ra
+ */ +.macro INVOKE_TRAMPOLINE c_name, cxx_name + .extern \cxx_name +ENTRY \c_name + GENERATE_GLOBAL_POINTER + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC + lw $a2, 64($sp) # pass caller Method* + move $t0, $sp # save $sp + addiu $sp, $sp, -32 # make space for extra args + .cfi_adjust_cfa_offset 32 + move $a3, rSELF # pass Thread::Current + .cfi_rel_offset 28, 12 + jal \cxx_name # (method_idx, this, caller, Thread*, $sp) + sw $t0, 16($sp) # pass $sp + addiu $sp, $sp, 32 # release out args + .cfi_adjust_cfa_offset -32 + move $a0, $v0 # save target Method* + move $t9, $v1 # save $v0->code_ + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + beqz $v0, 1f + nop + jr $t9 + nop +1: + DELIVER_PENDING_EXCEPTION +END \c_name +.endm + +INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline +INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck + +INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck + + /* + * Common invocation stub for portable and quick. 
+ * On entry: + * a0 = method pointer + * a1 = argument array or NULL for no argument methods + * a2 = size of argument array in bytes + * a3 = (managed) thread pointer + * [sp + 16] = JValue* result + * [sp + 20] = result type char + */ + .type art_portable_invoke_stub, %function + .global art_portable_invoke_stub +art_portable_invoke_stub: +ENTRY art_quick_invoke_stub + GENERATE_GLOBAL_POINTER + sw $a0, 0($sp) # save out a0 + addiu $sp, $sp, -16 # spill s0, s1, fp, ra + .cfi_adjust_cfa_offset 16 + sw $ra, 12($sp) + .cfi_rel_offset 31, 12 + sw $fp, 8($sp) + .cfi_rel_offset 30, 8 + sw $s1, 4($sp) + .cfi_rel_offset 17, 4 + sw $s0, 0($sp) + .cfi_rel_offset 16, 0 + move $fp, $sp # save sp in fp + .cfi_def_cfa_register 30 + move $s1, $a3 # move managed thread pointer into s1 + addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval + addiu $t0, $a2, 16 # create space for method pointer in frame + srl $t0, $t0, 3 # shift the frame size right 3 + sll $t0, $t0, 3 # shift the frame size left 3 to align to 16 bytes + subu $sp, $sp, $t0 # reserve stack space for argument array + addiu $a0, $sp, 4 # pass stack pointer + method ptr as dest for memcpy + jal memcpy # (dest, src, bytes) + addiu $sp, $sp, -16 # make space for argument slots for memcpy + addiu $sp, $sp, 16 # restore stack after memcpy + lw $a0, 16($fp) # restore method* + lw $a1, 4($sp) # copy arg value for a1 + lw $a2, 8($sp) # copy arg value for a2 + lw $a3, 12($sp) # copy arg value for a3 + lw $t9, METHOD_CODE_OFFSET($a0) # get pointer to the code + jalr $t9 # call the method + sw $zero, 0($sp) # store NULL for method* at bottom of frame + move $sp, $fp # restore the stack + lw $s0, 0($sp) + lw $s1, 4($sp) + lw $fp, 8($sp) + lw $ra, 12($sp) + addiu $sp, $sp, 16 + .cfi_adjust_cfa_offset -16 + lw $t0, 16($sp) # get result pointer + lw $t1, 20($sp) # get result type char + li $t2, 68 # put char 'D' into t2 + beq $t1, $t2, 1f # branch if result type char == 'D' + li $t3, 70 # put char 'F' into 
t3 + beq $t1, $t3, 1f # branch if result type char == 'F' + sw $v0, 0($t0) # store the result + jr $ra + sw $v1, 4($t0) # store the other half of the result +1: + s.s $f0, 0($t0) # store floating point result + jr $ra + s.s $f1, 4($t0) # store other half of floating point result +END art_quick_invoke_stub + .size art_portable_invoke_stub, .-art_portable_invoke_stub + + /* + * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on + * failure. + */ + .extern artHandleFillArrayDataFromCode +ENTRY art_quick_handle_fill_data_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + move $a2, rSELF # pass Thread::Current + jal artHandleFillArrayDataFromCode # (Array*, const DexFile::Payload*, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_ZERO +END art_quick_handle_fill_data_from_code + + /* + * Entry from managed code that calls artLockObjectFromCode, may block for GC. + */ + .extern artLockObjectFromCode +ENTRY art_quick_lock_object_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block + move $a1, rSELF # pass Thread::Current + jal artLockObjectFromCode # (Object* obj, Thread*, $sp) + move $a2, $sp # pass $sp + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN +END art_quick_lock_object_from_code + + /* + * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. + */ + .extern artUnlockObjectFromCode +ENTRY art_quick_unlock_object_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + move $a1, rSELF # pass Thread::Current + jal artUnlockObjectFromCode # (Object* obj, Thread*, $sp) + move $a2, $sp # pass $sp + RETURN_IF_ZERO +END art_quick_unlock_object_from_code + + /* + * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. 
+ */ + .extern artCheckCastFromCode +ENTRY art_quick_check_cast_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + move $a2, rSELF # pass Thread::Current + jal artCheckCastFromCode # (Class* a, Class* b, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_ZERO +END art_quick_check_cast_from_code + + /* + * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on + * failure. + */ + .extern artCanPutArrayElementFromCode +ENTRY art_quick_can_put_array_element_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + move $a2, rSELF # pass Thread::Current + jal artCanPutArrayElementFromCode # (Object* element, Class* array_class, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_ZERO +END art_quick_can_put_array_element_from_code + + /* + * Entry from managed code when uninitialized static storage, this stub will run the class + * initializer and deliver the exception on error. On success the static storage base is + * returned. + */ + .extern artInitializeStaticStorageFromCode +ENTRY art_quick_initialize_static_storage_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a2, rSELF # pass Thread::Current + # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp) + jal artInitializeStaticStorageFromCode + move $a3, $sp # pass $sp + RETURN_IF_NONZERO +END art_quick_initialize_static_storage_from_code + + /* + * Entry from managed code when dex cache misses for a type_idx. 
+ */ + .extern artInitializeTypeFromCode +ENTRY art_quick_initialize_type_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a2, rSELF # pass Thread::Current + # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp) + jal artInitializeTypeFromCode + move $a3, $sp # pass $sp + RETURN_IF_NONZERO +END art_quick_initialize_type_from_code + + /* + * Entry from managed code when type_idx needs to be checked for access and dex cache may also + * miss. + */ + .extern artInitializeTypeAndVerifyAccessFromCode +ENTRY art_quick_initialize_type_and_verify_access_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a2, rSELF # pass Thread::Current + # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp) + jal artInitializeTypeAndVerifyAccessFromCode + move $a3, $sp # pass $sp + RETURN_IF_NONZERO +END art_quick_initialize_type_and_verify_access_from_code + + /* + * Called by managed code to resolve a static field and load a 32-bit primitive value. + */ + .extern artGet32StaticFromCode +ENTRY art_quick_get32_static_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + move $a2, rSELF # pass Thread::Current + jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get32_static_from_code + + /* + * Called by managed code to resolve a static field and load a 64-bit primitive value. 
+ */ + .extern artGet64StaticFromCode +ENTRY art_quick_get64_static_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + move $a2, rSELF # pass Thread::Current + jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get64_static_from_code + + /* + * Called by managed code to resolve a static field and load an object reference. + */ + .extern artGetObjStaticFromCode +ENTRY art_quick_get_obj_static_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + move $a2, rSELF # pass Thread::Current + jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_obj_static_from_code + + /* + * Called by managed code to resolve an instance field and load a 32-bit primitive value. + */ + .extern artGet32InstanceFromCode +ENTRY art_quick_get32_instance_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get32_instance_from_code + + /* + * Called by managed code to resolve an instance field and load a 64-bit primitive value. 
+ */ + .extern artGet64InstanceFromCode +ENTRY art_quick_get64_instance_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get64_instance_from_code + + /* + * Called by managed code to resolve an instance field and load an object reference. + */ + .extern artGetObjInstanceFromCode +ENTRY art_quick_get_obj_instance_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_obj_instance_from_code + + /* + * Called by managed code to resolve a static field and store a 32-bit primitive value. + */ + .extern artSet32StaticFromCode +ENTRY art_quick_set32_static_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set32_static_from_code + + /* + * Called by managed code to resolve a static field and store a 64-bit primitive value. 
+ */ + .extern artSet64StaticFromCode +ENTRY art_quick_set64_static_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + sw rSELF, 16($sp) # pass Thread::Current + jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*, $sp) + sw $sp, 20($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set64_static_from_code + + /* + * Called by managed code to resolve a static field and store an object reference. + */ + .extern artSetObjStaticFromCode +ENTRY art_quick_set_obj_static_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set_obj_static_from_code + + /* + * Called by managed code to resolve an instance field and store a 32-bit primitive value. + */ + .extern artSet32InstanceFromCode +ENTRY art_quick_set32_instance_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a3, 64($sp) # pass referrer's Method* + sw rSELF, 16($sp) # pass Thread::Current + jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) + sw $sp, 20($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set32_instance_from_code + + /* + * Called by managed code to resolve an instance field and store a 64-bit primitive value. 
+ */ + .extern artSet64InstanceFromCode +ENTRY art_quick_set64_instance_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + sw rSELF, 16($sp) # pass Thread::Current + jal artSet64InstanceFromCode # (field_idx, Object*, new_val, Thread*, $sp) + sw $sp, 20($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set64_instance_from_code + + /* + * Called by managed code to resolve an instance field and store an object reference. + */ + .extern artSetObjInstanceFromCode +ENTRY art_quick_set_obj_instance_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a3, 64($sp) # pass referrer's Method* + sw rSELF, 16($sp) # pass Thread::Current + jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) + sw $sp, 20($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set_obj_instance_from_code + + /* + * Entry from managed code to resolve a string, this stub will allocate a String and deliver an + * exception on error. On success the String is returned. R0 holds the referring method, + * R1 holds the string index. The fast path check for hit in strings cache has already been + * performed. + */ + .extern artResolveStringFromCode +ENTRY art_quick_resolve_string_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a2, rSELF # pass Thread::Current + # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp) + jal artResolveStringFromCode + move $a3, $sp # pass $sp + RETURN_IF_NONZERO +END art_quick_resolve_string_from_code + + /* + * Called by managed code to allocate an object. 
+ */ + .extern artAllocObjectFromCode +ENTRY art_quick_alloc_object_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a2, rSELF # pass Thread::Current + jal artAllocObjectFromCode # (uint32_t type_idx, Method* method, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NONZERO +END art_quick_alloc_object_from_code + + /* + * Called by managed code to allocate an object when the caller doesn't know whether it has + * access to the created type. + */ + .extern artAllocObjectFromCodeWithAccessCheck +ENTRY art_quick_alloc_object_from_code_with_access_check + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a2, rSELF # pass Thread::Current + jal artAllocObjectFromCodeWithAccessCheck # (uint32_t type_idx, Method* method, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NONZERO +END art_quick_alloc_object_from_code_with_access_check + + /* + * Called by managed code to allocate an array. + */ + .extern artAllocArrayFromCode +ENTRY art_quick_alloc_array_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a3, rSELF # pass Thread::Current + # artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, $sp) + jal artAllocArrayFromCode + sw $sp, 16($sp) # pass $sp + RETURN_IF_NONZERO +END art_quick_alloc_array_from_code + + /* + * Called by managed code to allocate an array when the caller doesn't know whether it has + * access to the created type. 
+ */ + .extern artAllocArrayFromCodeWithAccessCheck +ENTRY art_quick_alloc_array_from_code_with_access_check + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a3, rSELF # pass Thread::Current + # artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, $sp) + jal artAllocArrayFromCodeWithAccessCheck + sw $sp, 16($sp) # pass $sp + RETURN_IF_NONZERO +END art_quick_alloc_array_from_code_with_access_check + + /* + * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. + */ + .extern artCheckAndAllocArrayFromCode +ENTRY art_quick_check_and_alloc_array_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a3, rSELF # pass Thread::Current + # artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , $sp) + jal artCheckAndAllocArrayFromCode + sw $sp, 16($sp) # pass $sp + RETURN_IF_NONZERO +END art_quick_check_and_alloc_array_from_code + + /* + * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. + */ + .extern artCheckAndAllocArrayFromCodeWithAccessCheck +ENTRY art_quick_check_and_alloc_array_from_code_with_access_check + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + move $a3, rSELF # pass Thread::Current + # artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , $sp) + jal artCheckAndAllocArrayFromCodeWithAccessCheck + sw $sp, 16($sp) # pass $sp + RETURN_IF_NONZERO +END art_quick_check_and_alloc_array_from_code_with_access_check + + /* + * Called by managed code when the value in rSUSPEND has been decremented to 0. 
+ */ + .extern artTestSuspendFromCode +ENTRY art_quick_test_suspend + GENERATE_GLOBAL_POINTER + lh $a0, THREAD_FLAGS_OFFSET(rSELF) + bnez $a0, 1f + addi rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL + jr $ra + nop +1: + move $a0, rSELF + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl + jal artTestSuspendFromCode # (Thread*, $sp) + move $a1, $sp + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN +END art_quick_test_suspend + + /* + * Called by managed code that is attempting to call a method on a proxy class. On entry + * r0 holds the proxy method; r1, r2 and r3 may contain arguments. + */ + .extern artQuickProxyInvokeHandler +ENTRY art_quick_proxy_invoke_handler + GENERATE_GLOBAL_POINTER + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + sw $a0, 0($sp) # place proxy method at bottom of frame + move $a2, rSELF # pass Thread::Current + jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) + move $a3, $sp # pass $sp + lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_ + lw $gp, 52($sp) # restore $gp + lw $ra, 60($sp) # restore $ra + bnez $t0, 1f + addiu $sp, $sp, 64 # pop frame + .cfi_adjust_cfa_offset -64 + jr $ra + nop +1: + DELIVER_PENDING_EXCEPTION +END art_quick_proxy_invoke_handler + + .extern artInterpreterEntry +ENTRY art_quick_interpreter_entry + GENERATE_GLOBAL_POINTER + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + sw $a0, 0($sp) # place proxy method at bottom of frame + move $a1, rSELF # pass Thread::Current + jal artInterpreterEntry # (Method* method, Thread*, SP) + move $a2, $sp # pass $sp + lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_ + lw $gp, 52($sp) # restore $gp + lw $ra, 60($sp) # restore $ra + bnez $t0, 1f + addiu $sp, $sp, 64 # pop frame + .cfi_adjust_cfa_offset -64 + jr $ra + nop +1: + DELIVER_PENDING_EXCEPTION +END art_quick_interpreter_entry + + /* + * Routine that intercepts method calls and returns. 
+ */ + .extern artInstrumentationMethodEntryFromCode + .extern artInstrumentationMethodExitFromCode +ENTRY art_quick_instrumentation_entry_from_code + GENERATE_GLOBAL_POINTER + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + move $t0, $sp # remember bottom of caller's frame + addiu $sp, $sp, -32 # space for args, pad (3 words), arguments (5 words) + .cfi_adjust_cfa_offset 32 + sw $a0, 28($sp) # save arg0 + sw $ra, 16($sp) # pass $ra + move $a3, $t0 # pass $sp + jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, SP, LR) + move $a2, rSELF # pass Thread::Current + move $t9, $v0 # $t9 holds reference to code + lw $a0, 28($sp) # restore arg0 + addiu $sp, $sp, 32 # remove args + .cfi_adjust_cfa_offset -32 + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + jalr $t9 # call method + nop +END art_quick_instrumentation_entry_from_code + /* intentional fallthrough */ + .global art_quick_instrumentation_exit_from_code +art_quick_instrumentation_exit_from_code: + .cfi_startproc + addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp + GENERATE_GLOBAL_POINTER + move $t0, $sp # remember bottom of caller's frame + SETUP_REF_ONLY_CALLEE_SAVE_FRAME + addiu $sp, $sp, -48 # save return values and set up args + .cfi_adjust_cfa_offset 48 + sw $v0, 32($sp) + .cfi_rel_offset 2, 0 + sw $v1, 36($sp) + .cfi_rel_offset 3, 4 + s.s $f0, 40($sp) + s.s $f1, 44($sp) + s.s $f0, 16($sp) # pass fpr result + s.s $f1, 20($sp) + move $a2, $v0 # pass gpr result + move $a3, $v1 + move $a1, $t0 # pass $sp + jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res) + move $a0, rSELF # pass Thread::Current + move $t0, $v0 # set aside returned link register + move $ra, $v1 # set link register for deoptimization + lw $v0, 32($sp) # restore return values + lw $v1, 36($sp) + l.s $f0, 40($sp) + l.s $f1, 44($sp) + jr $t0 # return + addiu $sp, $sp, 112 # 48 bytes of args + 64 bytes of callee save frame + .cfi_adjust_cfa_offset -112 +END art_quick_instrumentation_exit_from_code + + /* + 
* Instrumentation has requested that we deoptimize into the interpreter. The deoptimization + * will long jump to the upcall with a special exception of -1. + */ + .extern artDeoptimize + .extern artEnterInterpreterFromDeoptimize +ENTRY art_quick_deoptimize + GENERATE_GLOBAL_POINTER + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + move $a0, rSELF # pass Thread::current + jal artDeoptimize # artDeoptimize(Thread*, SP) + # Returns caller method's frame size. + move $a1, $sp # pass $sp +END art_quick_deoptimize + + /* + * Quick abstract method error stub. $a0 contains method* on entry. + */ +ENTRY art_quick_abstract_method_error_stub + GENERATE_GLOBAL_POINTER + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + move $a1, $s1 # pass Thread::Current + la $t9, artThrowAbstractMethodErrorFromCode + jr $t9 # (Method*, Thread*, SP) + move $a2, $sp # pass SP +END art_quick_abstract_method_error_stub + + /* + * Long integer shift. This is different from the generic 32/64-bit + * binary operations because vAA/vBB are 64-bit but vCC (the shift + * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low + * 6 bits. + * On entry: + * $a0: low word + * $a1: high word + * $a2: shift count + */ +ENTRY art_quick_shl_long + /* shl-long vAA, vBB, vCC */ + sll $v0, $a0, $a2 # rlo<- alo << (shift&31) + not $v1, $a2 # rhi<- 31-shift (shift is 5b) + srl $a0, 1 + srl $a0, $v1 # alo<- alo >> (32-(shift&31)) + sll $v1, $a1, $a2 # rhi<- ahi << (shift&31) + or $v1, $a0 # rhi<- rhi | alo + andi $a2, 0x20 # shift< shift & 0x20 + movn $v1, $v0, $a2 # rhi<- rlo (if shift&0x20) + jr $ra + movn $v0, $zero, $a2 # rlo<- 0 (if shift&0x20) +END art_quick_shl_long + + /* + * Long integer shift. This is different from the generic 32/64-bit + * binary operations because vAA/vBB are 64-bit but vCC (the shift + * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low + * 6 bits. 
+ * On entry: + * $a0: low word + * $a1: high word + * $a2: shift count + */ + .global art_quick_shr_long +ENTRY art_quick_shr_long + sra $v1, $a1, $a2 # rhi<- ahi >> (shift&31) + srl $v0, $a0, $a2 # rlo<- alo >> (shift&31) + sra $a3, $a1, 31 # $a3<- sign(ah) + not $a0, $a2 # alo<- 31-shift (shift is 5b) + sll $a1, 1 + sll $a1, $a0 # ahi<- ahi << (32-(shift&31)) + or $v0, $a1 # rlo<- rlo | ahi + andi $a2, 0x20 # shift & 0x20 + movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20) + jr $ra + movn $v1, $a3, $a2 # rhi<- sign(ahi) (if shift&0x20) +END art_quick_shr_long + + /* + * Long integer shift. This is different from the generic 32/64-bit + * binary operations because vAA/vBB are 64-bit but vCC (the shift + * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low + * 6 bits. + * On entry: + * r0: low word + * r1: high word + * r2: shift count + */ + /* ushr-long vAA, vBB, vCC */ + .global art_quick_ushr_long +ENTRY art_quick_ushr_long + srl $v1, $a1, $a2 # rhi<- ahi >> (shift&31) + srl $v0, $a0, $a2 # rlo<- alo >> (shift&31) + not $a0, $a2 # alo<- 31-shift (shift is 5b) + sll $a1, 1 + sll $a1, $a0 # ahi<- ahi << (32-(shift&31)) + or $v0, $a1 # rlo<- rlo | ahi + andi $a2, 0x20 # shift & 0x20 + movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20) + jr $ra + movn $v1, $zero, $a2 # rhi<- 0 (if shift&0x20) +END art_quick_ushr_long + +ENTRY art_quick_indexof + jr $ra + nop +END art_quick_indexof + +ENTRY art_quick_string_compareto + jr $ra + nop +END art_quick_string_compareto diff --git a/runtime/arch/mips/registers_mips.cc b/runtime/arch/mips/registers_mips.cc new file mode 100644 index 0000000000..5d31f2f910 --- /dev/null +++ b/runtime/arch/mips/registers_mips.cc @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "registers_mips.h" + +#include <ostream> + +namespace art { +namespace mips { + +static const char* kRegisterNames[] = { + "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", + "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra", +}; +std::ostream& operator<<(std::ostream& os, const Register& rhs) { + if (rhs >= ZERO && rhs <= RA) { + os << kRegisterNames[rhs]; + } else { + os << "Register[" << static_cast<int>(rhs) << "]"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const FRegister& rhs) { + if (rhs >= F0 && rhs < kNumberOfFRegisters) { + os << "f" << static_cast<int>(rhs); + } else { + os << "FRegister[" << static_cast<int>(rhs) << "]"; + } + return os; +} + +} // namespace mips +} // namespace art diff --git a/runtime/arch/mips/registers_mips.h b/runtime/arch/mips/registers_mips.h new file mode 100644 index 0000000000..0f784ed43f --- /dev/null +++ b/runtime/arch/mips/registers_mips.h @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_ +#define ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_ + +#include <iosfwd> + +#include "base/logging.h" +#include "base/macros.h" +#include "globals.h" + +namespace art { +namespace mips { + +enum Register { + ZERO = 0, + AT = 1, // Assembler temporary. + V0 = 2, // Values. + V1 = 3, + A0 = 4, // Arguments. + A1 = 5, + A2 = 6, + A3 = 7, + T0 = 8, // Temporaries. + T1 = 9, + T2 = 10, + T3 = 11, + T4 = 12, + T5 = 13, + T6 = 14, + T7 = 15, + S0 = 16, // Saved values. + S1 = 17, + S2 = 18, + S3 = 19, + S4 = 20, + S5 = 21, + S6 = 22, + S7 = 23, + T8 = 24, // More temporaries. + T9 = 25, + K0 = 26, // Reserved for trap handler. + K1 = 27, + GP = 28, // Global pointer. + SP = 29, // Stack pointer. + FP = 30, // Saved value/frame pointer. + RA = 31, // Return address. + kNumberOfCoreRegisters = 32, + kNoRegister = -1 // Signals an illegal register. +}; +std::ostream& operator<<(std::ostream& os, const Register& rhs); + +// Values for single-precision floating point registers. 
+enum FRegister { + F0 = 0, + F1 = 1, + F2 = 2, + F3 = 3, + F4 = 4, + F5 = 5, + F6 = 6, + F7 = 7, + F8 = 8, + F9 = 9, + F10 = 10, + F11 = 11, + F12 = 12, + F13 = 13, + F14 = 14, + F15 = 15, + F16 = 16, + F17 = 17, + F18 = 18, + F19 = 19, + F20 = 20, + F21 = 21, + F22 = 22, + F23 = 23, + F24 = 24, + F25 = 25, + F26 = 26, + F27 = 27, + F28 = 28, + F29 = 29, + F30 = 30, + F31 = 31, + kNumberOfFRegisters = 32, + kNoFRegister = -1, +}; +std::ostream& operator<<(std::ostream& os, const FRegister& rhs); + +} // namespace mips +} // namespace art + +#endif // ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_ diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc new file mode 100644 index 0000000000..7364de067e --- /dev/null +++ b/runtime/arch/mips/thread_mips.cc @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "thread.h" + +#include "asm_support_mips.h" +#include "base/logging.h" + +namespace art { + +void Thread::InitCpu() { + CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S new file mode 100644 index 0000000000..7e6dce9c6a --- /dev/null +++ b/runtime/arch/x86/asm_support_x86.S @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ + +#include "asm_support_x86.h" + +#if defined(__APPLE__) + // Mac OS' as(1) doesn't let you name macro parameters. + #define MACRO0(macro_name) .macro macro_name + #define MACRO1(macro_name, macro_arg1) .macro macro_name + #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name + #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name + #define END_MACRO .endmacro + + // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names + // are mangled with an extra underscore prefix. The use of $x for arguments + // mean that literals need to be represented with $$x in macros. 
+ #define SYMBOL(name) _ ## name + #define VAR(name,index) SYMBOL($index) + #define REG_VAR(name,index) %$index + #define CALL_MACRO(name,index) $index + #define LITERAL(value) $value + #define MACRO_LITERAL(value) $$value +#else + // Regular gas(1) lets you name macro parameters. + #define MACRO0(macro_name) .macro macro_name + #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 + #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 + #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 + #define END_MACRO .endm + + // Regular gas(1) uses \argument_name for macro arguments. + // We need to turn on alternate macro syntax so we can use & instead or the preprocessor + // will screw us by inserting a space between the \ and the name. Even in this mode there's + // no special meaning to $, so literals are still just $x. The use of altmacro means % is a + // special character meaning care needs to be taken when passing registers as macro arguments. 
+ .altmacro + #define SYMBOL(name) name + #define VAR(name,index) name& + #define REG_VAR(name,index) %name + #define CALL_MACRO(name,index) name& + #define LITERAL(value) $value + #define MACRO_LITERAL(value) $value +#endif + + /* Cache alignment for function entry */ +MACRO0(ALIGN_FUNCTION_ENTRY) + .balign 16 +END_MACRO + +MACRO1(DEFINE_FUNCTION, c_name) + .type VAR(c_name, 0), @function + .globl VAR(c_name, 0) + ALIGN_FUNCTION_ENTRY +VAR(c_name, 0): + .cfi_startproc +END_MACRO + +MACRO1(END_FUNCTION, c_name) + .cfi_endproc + .size \c_name, .-\c_name +END_MACRO + +MACRO1(PUSH, reg) + pushl REG_VAR(reg, 0) + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset REG_VAR(reg, 0), 0 +END_MACRO + +MACRO1(POP, reg) + popl REG_VAR(reg,0) + .cfi_adjust_cfa_offset -4 + .cfi_restore REG_VAR(reg,0) +END_MACRO + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h new file mode 100644 index 0000000000..1092910d78 --- /dev/null +++ b/runtime/arch/x86/asm_support_x86.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ + +#include "asm_support.h" + +// Offset of field Thread::self_ verified in InitCpu +#define THREAD_SELF_OFFSET 40 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc new file mode 100644 index 0000000000..c728ae97ec --- /dev/null +++ b/runtime/arch/x86/context_x86.cc @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "context_x86.h" + +#include "mirror/abstract_method.h" +#include "mirror/object-inl.h" +#include "stack.h" + +namespace art { +namespace x86 { + +static const uint32_t gZero = 0; + +void X86Context::Reset() { + for (int i = 0; i < kNumberOfCpuRegisters; i++) { + gprs_[i] = NULL; + } + gprs_[ESP] = &esp_; + // Initialize registers with easy to spot debug values. 
+ esp_ = X86Context::kBadGprBase + ESP; + eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters; +} + +void X86Context::FillCalleeSaves(const StackVisitor& fr) { + mirror::AbstractMethod* method = fr.GetMethod(); + uint32_t core_spills = method->GetCoreSpillMask(); + size_t spill_count = __builtin_popcount(core_spills); + DCHECK_EQ(method->GetFpSpillMask(), 0u); + size_t frame_size = method->GetFrameSizeInBytes(); + if (spill_count > 0) { + // Lowest number spill is farthest away, walk registers and fill into context. + int j = 2; // Offset j to skip return address spill. + for (int i = 0; i < kNumberOfCpuRegisters; i++) { + if (((core_spills >> i) & 1) != 0) { + gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size); + j++; + } + } + } +} + +void X86Context::SmashCallerSaves() { + // This needs to be 0 because we want a null/zero return value. + gprs_[EAX] = const_cast<uint32_t*>(&gZero); + gprs_[EDX] = const_cast<uint32_t*>(&gZero); + gprs_[ECX] = NULL; + gprs_[EBX] = NULL; +} + +void X86Context::SetGPR(uint32_t reg, uintptr_t value) { + CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters)); + CHECK_NE(gprs_[reg], &gZero); + CHECK(gprs_[reg] != NULL); + *gprs_[reg] = value; +} + +void X86Context::DoLongJump() { +#if defined(__i386__) + // Array of GPR values, filled from the context backward for the long jump pop. We add a slot at + // the top for the stack pointer that doesn't get popped in a pop-all. + volatile uintptr_t gprs[kNumberOfCpuRegisters + 1]; + for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) { + gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != NULL ? *gprs_[i] : X86Context::kBadGprBase + i; + } + // We want to load the stack pointer one slot below so that the ret will pop eip. + uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - kWordSize; + gprs[kNumberOfCpuRegisters] = esp; + *(reinterpret_cast<uintptr_t*>(esp)) = eip_; + __asm__ __volatile__( + "movl %0, %%esp\n\t" // ESP points to gprs. + "popal\n\t" // Load all registers except ESP and EIP with values in gprs. 
+ "popl %%esp\n\t" // Load stack pointer. + "ret\n\t" // From higher in the stack pop eip. + : // output. + : "g"(&gprs[0]) // input. + :); // clobber. +#else + UNIMPLEMENTED(FATAL); +#endif +} + +} // namespace x86 +} // namespace art diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h new file mode 100644 index 0000000000..d7d22101cc --- /dev/null +++ b/runtime/arch/x86/context_x86.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_X86_CONTEXT_X86_H_ +#define ART_RUNTIME_ARCH_X86_CONTEXT_X86_H_ + +#include "arch/context.h" +#include "base/logging.h" +#include "registers_x86.h" + +namespace art { +namespace x86 { + +class X86Context : public Context { + public: + X86Context() { + Reset(); + } + virtual ~X86Context() {} + + virtual void Reset(); + + virtual void FillCalleeSaves(const StackVisitor& fr); + + virtual void SetSP(uintptr_t new_sp) { + SetGPR(ESP, new_sp); + } + + virtual void SetPC(uintptr_t new_pc) { + eip_ = new_pc; + } + + virtual uintptr_t GetGPR(uint32_t reg) { + const uint32_t kNumberOfCpuRegisters = 8; + DCHECK_LT(reg, kNumberOfCpuRegisters); + return *gprs_[reg]; + } + + virtual void SetGPR(uint32_t reg, uintptr_t value); + + virtual void SmashCallerSaves(); + virtual void DoLongJump(); + + private: + // Pointers to register locations, floating point registers are all caller save. 
Values are + // initialized to NULL or the special registers below. + uintptr_t* gprs_[kNumberOfCpuRegisters]; + // Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat + // special in that it cannot be encoded normally as a register operand to an instruction (except + // in 64bit addressing modes). + uintptr_t esp_, eip_; +}; +} // namespace x86 +} // namespace art + +#endif // ART_RUNTIME_ARCH_X86_CONTEXT_X86_H_ diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc new file mode 100644 index 0000000000..d47dfef047 --- /dev/null +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" + +namespace art { + +// Alloc entrypoints. 
+extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. +extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Field entrypoints. 
+extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. +extern "C" double art_quick_fmod_from_code(double, double); +extern "C" float art_quick_fmodf_from_code(float, float); +extern "C" double art_quick_l2d_from_code(int64_t); +extern "C" float art_quick_l2f_from_code(int64_t); +extern "C" int64_t art_quick_d2l_from_code(double); +extern "C" int64_t art_quick_f2l_from_code(float); +extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t); +extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t); +extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t); +extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t); +extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t); + +// Interpreter entrypoints. 
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); +extern "C" void* art_quick_memcpy(void*, const void*, size_t); + +// Invoke entrypoints. +extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. 
+extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = 
art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + // points->pCmpgDouble = NULL; // Not needed on x86. + // points->pCmpgFloat = NULL; // Not needed on x86. + // points->pCmplDouble = NULL; // Not needed on x86. + // points->pCmplFloat = NULL; // Not needed on x86. + qpoints->pFmod = art_quick_fmod_from_code; + qpoints->pL2d = art_quick_l2d_from_code; + qpoints->pFmodf = art_quick_fmodf_from_code; + qpoints->pL2f = art_quick_l2f_from_code; + // points->pD2iz = NULL; // Not needed on x86. + // points->pF2iz = NULL; // Not needed on x86. 
+ qpoints->pIdivmod = art_quick_idivmod_from_code; + qpoints->pD2l = art_quick_d2l_from_code; + qpoints->pF2l = art_quick_f2l_from_code; + qpoints->pLdiv = art_quick_ldiv_from_code; + qpoints->pLdivmod = art_quick_ldivmod_from_code; + qpoints->pLmul = art_quick_lmul_from_code; + qpoints->pShlLong = art_quick_lshl_from_code; + qpoints->pShrLong = art_quick_lshr_from_code; + qpoints->pUshrLong = art_quick_lushr_from_code; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = art_quick_memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = art_quick_memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception_from_code; + qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = 
art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S new file mode 100644 index 0000000000..e9c88fec02 --- /dev/null +++ b/runtime/arch/x86/jni_entrypoints_x86.S @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_x86.S" + + /* + * Portable resolution trampoline. 
+ */ +DEFINE_FUNCTION art_jni_dlsym_lookup_stub + subl LITERAL(8), %esp // align stack + .cfi_adjust_cfa_offset 8 + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + call SYMBOL(artFindNativeMethod) // (Thread*) + addl LITERAL(12), %esp // restore the stack + .cfi_adjust_cfa_offset -12 + cmpl LITERAL(0), %eax // check if returned method code is null + je no_native_code_found // if null, jump to return to handle + jmp *%eax // otherwise, tail call to intended method +no_native_code_found: + ret +END_FUNCTION art_jni_dlsym_lookup_stub diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S new file mode 100644 index 0000000000..a0fca6cee3 --- /dev/null +++ b/runtime/arch/x86/portable_entrypoints_x86.S @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_x86.S" + + /* + * Portable invocation stub. 
+ * On entry: + * [sp] = return address + * [sp + 4] = method pointer + * [sp + 8] = argument array or NULL for no argument methods + * [sp + 12] = size of argument array in bytes + * [sp + 16] = (managed) thread pointer + * [sp + 20] = JValue* result + * [sp + 24] = result type char + */ +DEFINE_FUNCTION art_portable_invoke_stub + PUSH ebp // save ebp + PUSH ebx // save ebx + mov %esp, %ebp // copy value of stack pointer into base pointer + .cfi_def_cfa_register ebp + mov 20(%ebp), %ebx // get arg array size + addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame + andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes + subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp + subl %ebx, %esp // reserve stack space for argument array + lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy + pushl 20(%ebp) // push size of region to memcpy + pushl 16(%ebp) // push arg array as source of memcpy + pushl %eax // push stack pointer as destination of memcpy + call SYMBOL(memcpy) // (void*, const void*, size_t) + addl LITERAL(12), %esp // pop arguments to memcpy + mov 12(%ebp), %eax // move method pointer into eax + mov %eax, (%esp) // push method pointer onto stack + call *METHOD_CODE_OFFSET(%eax) // call the method + mov %ebp, %esp // restore stack pointer + POP ebx // pop ebx + POP ebp // pop ebp + mov 20(%esp), %ecx // get result pointer + cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' + je return_double_portable + cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' + je return_float_portable + mov %eax, (%ecx) // store the result + mov %edx, 4(%ecx) // store the other half of the result + ret +return_double_portable: + fstpl (%ecx) // store the floating point result as double + ret +return_float_portable: + fstps (%ecx) // store the floating point result as float + ret +END_FUNCTION art_portable_invoke_stub + +DEFINE_FUNCTION art_portable_proxy_invoke_handler + // 
Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + // TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + PUSH edi // Save callee saves + PUSH esi + PUSH ebp + PUSH ebx // Save args + PUSH edx + PUSH ecx + PUSH eax // Align stack, eax will be clobbered by Method* + // Begin argument set up. + PUSH esp // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass receiver + PUSH eax // pass proxy method + call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) + movd %eax, %xmm0 // place return value also into floating point return value + movd %edx, %xmm1 + punpckldq %xmm1, %xmm0 + addl LITERAL(44), %esp // pop arguments + .cfi_adjust_cfa_offset -44 + ret +END_FUNCTION art_portable_proxy_invoke_handler + + /* + * Portable abstract method error stub. method* is at %esp + 4 on entry. + */ +DEFINE_FUNCTION art_portable_abstract_method_error_stub + PUSH ebp + movl %esp, %ebp // Remember SP. + .cfi_def_cfa_register ebp + subl LITERAL(12), %esp // Align stack. + PUSH esp // Pass sp (not used). + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). + pushl 8(%ebp) // Pass Method*. + call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) + leave // Restore the stack and %ebp. + .cfi_def_cfa esp, 4 + .cfi_restore ebp + ret // Return to caller to handle pending exception. +END_FUNCTION art_portable_abstract_method_error_stub diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S new file mode 100644 index 0000000000..89ea71a902 --- /dev/null +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -0,0 +1,1041 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_x86.S" + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveAll) + */ +MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME) + PUSH edi // Save callee saves (ebx is saved/restored by the upcall) + PUSH esi + PUSH ebp + subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method* + .cfi_adjust_cfa_offset 16 +END_MACRO + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kRefsOnly) + */ +MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME) + PUSH edi // Save callee saves (ebx is saved/restored by the upcall) + PUSH esi + PUSH ebp + subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method* + .cfi_adjust_cfa_offset 16 +END_MACRO + +MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME) + addl MACRO_LITERAL(28), %esp // Unwind stack up to return address + .cfi_adjust_cfa_offset -28 +END_MACRO + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kRefsAndArgs) + */ +MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME) + PUSH edi // Save callee saves + PUSH esi + PUSH ebp + PUSH ebx // Save args + PUSH edx + PUSH ecx + PUSH eax // Align stack, eax will be clobbered by Method* +END_MACRO + +MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME) + addl MACRO_LITERAL(4), %esp // Remove padding + .cfi_adjust_cfa_offset -4 + POP ecx // Restore args except eax + POP edx + POP ebx + POP ebp // Restore callee saves + POP esi + POP edi +END_MACRO + + /* + * Macro that set calls 
through to artDeliverPendingExceptionFromCode, where the pending + * exception is Thread::Current()->exception_. + */ +MACRO0(DELIVER_PENDING_EXCEPTION) + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save callee saves for throw + mov %esp, %ecx + // Outgoing argument set up + subl MACRO_LITERAL(8), %esp // Alignment padding + .cfi_adjust_cfa_offset 8 + PUSH ecx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP) + int3 // unreached +END_MACRO + +MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name, 0) + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + mov %esp, %ecx + // Outgoing argument set up + subl MACRO_LITERAL(8), %esp // alignment padding + .cfi_adjust_cfa_offset 8 + PUSH ecx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + call VAR(cxx_name, 1) // cxx_name(Thread*, SP) + int3 // unreached + END_FUNCTION VAR(c_name, 0) +END_MACRO + +MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name, 0) + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + mov %esp, %ecx + // Outgoing argument set up + PUSH eax // alignment padding + PUSH ecx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH eax // pass arg1 + call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP) + int3 // unreached + END_FUNCTION VAR(c_name, 0) +END_MACRO + +MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name, 0) + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + mov %esp, %edx + // Outgoing argument set up + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call VAR(cxx_name, 1) 
// cxx_name(arg1, arg2, Thread*, SP) + int3 // unreached + END_FUNCTION VAR(c_name, 0) +END_MACRO + + /* + * Called by managed code to create and deliver a NullPointerException. + */ +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode + + /* + * Called by managed code to create and deliver an ArithmeticException. + */ +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode + + /* + * Called by managed code to create and deliver a StackOverflowError. + */ +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode + + /* + * Called by managed code, saves callee saves and then calls artThrowException + * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. + */ +ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode + + /* + * Called by managed code to create and deliver a NoSuchMethodError. + */ +ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode + + /* + * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds + * index, arg2 holds limit. + */ +TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode + + /* + * All generated callsites for interface invokes and invocation slow paths will load arguments + * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain + * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the + * stack and call the appropriate C helper. + * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1. + * + * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting + * of the target Method* in r0 and method->code_ in r1. + * + * If unsuccessful, the helper will return NULL/NULL. 
There will bea pending exception in the + * thread and we branch to another stub to deliver it. + * + * On success this wrapper will restore arguments and *jump* to the target, leaving the lr + * pointing back to the original caller. + */ +MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name, 0) + // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs) + // return address + PUSH edi + PUSH esi + PUSH ebp + PUSH ebx + PUSH edx + PUSH ecx + PUSH eax // <-- callee save Method* to go here + movl %esp, %edx // remember SP + // Outgoing argument set up + subl MACRO_LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + pushl 32(%edx) // pass caller Method* + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP) + movl %edx, %edi // save code pointer in EDI + addl MACRO_LITERAL(36), %esp // Pop arguments skip eax + .cfi_adjust_cfa_offset -36 + POP ecx // Restore args + POP edx + POP ebx + POP ebp // Restore callee saves. + POP esi + // Swap EDI callee save with code pointer. + xchgl %edi, (%esp) + testl %eax, %eax // Branch forward if exception pending. + jz 1f + // Tail call to intended method. 
+ ret +1: + addl MACRO_LITERAL(4), %esp // Pop code pointer off stack + .cfi_adjust_cfa_offset -4 + DELIVER_PENDING_EXCEPTION + END_FUNCTION VAR(c_name, 0) +END_MACRO + +INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline +INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck + +INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck +INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck + + /* + * Quick invocation stub. + * On entry: + * [sp] = return address + * [sp + 4] = method pointer + * [sp + 8] = argument array or NULL for no argument methods + * [sp + 12] = size of argument array in bytes + * [sp + 16] = (managed) thread pointer + * [sp + 20] = JValue* result + * [sp + 24] = result type char + */ +DEFINE_FUNCTION art_quick_invoke_stub + PUSH ebp // save ebp + PUSH ebx // save ebx + mov %esp, %ebp // copy value of stack pointer into base pointer + .cfi_def_cfa_register ebp + mov 20(%ebp), %ebx // get arg array size + addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame + andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes + subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp + subl %ebx, %esp // reserve stack space for argument array + lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy + pushl 20(%ebp) // push size of region to memcpy + pushl 16(%ebp) // push arg array as source of memcpy + pushl %eax // push stack pointer as destination of memcpy + call SYMBOL(memcpy) // (void*, const void*, size_t) + addl LITERAL(12), %esp // pop 
arguments to memcpy + movl LITERAL(0), (%esp) // store NULL for method* + mov 12(%ebp), %eax // move method pointer into eax + mov 4(%esp), %ecx // copy arg1 into ecx + mov 8(%esp), %edx // copy arg2 into edx + mov 12(%esp), %ebx // copy arg3 into ebx + call *METHOD_CODE_OFFSET(%eax) // call the method + mov %ebp, %esp // restore stack pointer + POP ebx // pop ebx + POP ebp // pop ebp + mov 20(%esp), %ecx // get result pointer + cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' + je return_double_quick + cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' + je return_float_quick + mov %eax, (%ecx) // store the result + mov %edx, 4(%ecx) // store the other half of the result + ret +return_double_quick: +return_float_quick: + movsd %xmm0, (%ecx) // store the floating point result + ret +END_FUNCTION art_quick_invoke_stub + +MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro) + DEFINE_FUNCTION VAR(c_name, 0) + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %edx // remember SP + // Outgoing argument set up + subl MACRO_LITERAL(8), %esp // push padding + .cfi_adjust_cfa_offset 8 + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + call VAR(cxx_name, 1) // cxx_name(Thread*, SP) + addl MACRO_LITERAL(16), %esp // pop arguments + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro, 2) // return or deliver exception + END_FUNCTION VAR(c_name, 0) +END_MACRO + +MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro) + DEFINE_FUNCTION VAR(c_name, 0) + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %edx // remember SP + // Outgoing argument set up + PUSH eax // push padding + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH eax // pass arg1 + call VAR(cxx_name, 1) // cxx_name(arg1, 
Thread*, SP) + addl MACRO_LITERAL(16), %esp // pop arguments + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro, 2) // return or deliver exception + END_FUNCTION VAR(c_name, 0) +END_MACRO + +MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro) + DEFINE_FUNCTION VAR(c_name, 0) + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %edx // remember SP + // Outgoing argument set up + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP) + addl MACRO_LITERAL(16), %esp // pop arguments + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro, 2) // return or deliver exception + END_FUNCTION VAR(c_name, 0) +END_MACRO + +MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro) + DEFINE_FUNCTION VAR(c_name, 0) + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + // Outgoing argument set up + subl MACRO_LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH edx // pass arg3 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP) + addl MACRO_LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro, 2) // return or deliver exception + END_FUNCTION VAR(c_name, 0) +END_MACRO + +MACRO0(RETURN_IF_EAX_NOT_ZERO) + testl %eax, %eax // eax == 0 ? 
+ jz 1f // if eax == 0 goto 1 + ret // return +1: // deliver exception on current thread + DELIVER_PENDING_EXCEPTION +END_MACRO + +MACRO0(RETURN_IF_EAX_ZERO) + testl %eax, %eax // eax == 0 ? + jnz 1f // if eax != 0 goto 1 + ret // return +1: // deliver exception on current thread + DELIVER_PENDING_EXCEPTION +END_MACRO + +MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION) + mov %fs:THREAD_EXCEPTION_OFFSET, %ebx // get exception field + testl %ebx, %ebx // ebx == 0 ? + jnz 1f // if ebx != 0 goto 1 + ret // return +1: // deliver exception on current thread + DELIVER_PENDING_EXCEPTION +END_MACRO + +TWO_ARG_DOWNCALL art_quick_alloc_object_from_code, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_alloc_object_from_code_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_alloc_array_from_code, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_alloc_array_from_code_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO + +TWO_ARG_DOWNCALL art_quick_resolve_string_from_code, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_initialize_static_storage_from_code, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_initialize_type_from_code, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access_from_code, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO + +ONE_ARG_DOWNCALL art_quick_lock_object_from_code, artLockObjectFromCode, ret +ONE_ARG_DOWNCALL art_quick_unlock_object_from_code, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO + +TWO_ARG_DOWNCALL 
art_quick_handle_fill_data_from_code, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO + +DEFINE_FUNCTION art_quick_is_assignable_from_code + PUSH eax // alignment padding + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b, Thread*, SP) + addl LITERAL(12), %esp // pop arguments + .cfi_adjust_cfa_offset -12 + ret +END_FUNCTION art_quick_is_assignable_from_code + +DEFINE_FUNCTION art_quick_memcpy + PUSH edx // pass arg3 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call SYMBOL(memcpy) // (void*, const void*, size_t) + addl LITERAL(12), %esp // pop arguments + .cfi_adjust_cfa_offset -12 + ret +END_FUNCTION art_quick_memcpy + +TWO_ARG_DOWNCALL art_quick_check_cast_from_code, artCheckCastFromCode, RETURN_IF_EAX_ZERO +TWO_ARG_DOWNCALL art_quick_can_put_array_element_from_code, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO + +NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret + +DEFINE_FUNCTION art_quick_fmod_from_code + subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass arg4 b.hi + PUSH edx // pass arg3 b.lo + PUSH ecx // pass arg2 a.hi + PUSH eax // pass arg1 a.lo + call SYMBOL(fmod) // (jdouble a, jdouble b) + fstpl (%esp) // pop return value off fp stack + movsd (%esp), %xmm0 // place into %xmm0 + addl LITERAL(28), %esp // pop arguments + .cfi_adjust_cfa_offset -28 + ret +END_FUNCTION art_quick_fmod_from_code + +DEFINE_FUNCTION art_quick_fmodf_from_code + PUSH eax // alignment padding + PUSH ecx // pass arg2 b + PUSH eax // pass arg1 a + call SYMBOL(fmodf) // (jfloat a, jfloat b) + fstps (%esp) // pop return value off fp stack + movss (%esp), %xmm0 // place into %xmm0 + addl LITERAL(12), %esp // pop arguments + .cfi_adjust_cfa_offset -12 + ret +END_FUNCTION art_quick_fmodf_from_code + +DEFINE_FUNCTION art_quick_l2d_from_code + PUSH ecx // push arg2 a.hi + PUSH eax // push arg1 a.lo + fildll (%esp) // load as integer and push into st0 + fstpl 
(%esp) // pop value off fp stack as double + movsd (%esp), %xmm0 // place into %xmm0 + addl LITERAL(8), %esp // pop arguments + .cfi_adjust_cfa_offset -8 + ret +END_FUNCTION art_quick_l2d_from_code + +DEFINE_FUNCTION art_quick_l2f_from_code + PUSH ecx // push arg2 a.hi + PUSH eax // push arg1 a.lo + fildll (%esp) // load as integer and push into st0 + fstps (%esp) // pop value off fp stack as a single + movss (%esp), %xmm0 // place into %xmm0 + addl LITERAL(8), %esp // pop argument + .cfi_adjust_cfa_offset -8 + ret +END_FUNCTION art_quick_l2f_from_code + +DEFINE_FUNCTION art_quick_d2l_from_code + PUSH eax // alignment padding + PUSH ecx // pass arg2 a.hi + PUSH eax // pass arg1 a.lo + call SYMBOL(art_d2l) // (jdouble a) + addl LITERAL(12), %esp // pop arguments + .cfi_adjust_cfa_offset -12 + ret +END_FUNCTION art_quick_d2l_from_code + +DEFINE_FUNCTION art_quick_f2l_from_code + subl LITERAL(8), %esp // alignment padding + .cfi_adjust_cfa_offset 8 + PUSH eax // pass arg1 a + call SYMBOL(art_f2l) // (jfloat a) + addl LITERAL(12), %esp // pop arguments + .cfi_adjust_cfa_offset -12 + ret +END_FUNCTION art_quick_f2l_from_code + +DEFINE_FUNCTION art_quick_idivmod_from_code + cmpl LITERAL(0x80000000), %eax + je check_arg2 // special case +args_ok: + cdq // edx:eax = sign extend eax + idiv %ecx // (edx,eax) = (edx:eax % ecx, edx:eax / ecx) + ret +check_arg2: + cmpl LITERAL(-1), %ecx + jne args_ok + xorl %edx, %edx + ret // eax already holds min int +END_FUNCTION art_quick_idivmod_from_code + +DEFINE_FUNCTION art_quick_ldiv_from_code + subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass arg4 b.hi + PUSH edx // pass arg3 b.lo + PUSH ecx // pass arg2 a.hi + PUSH eax // pass arg1 a.lo + call SYMBOL(artLdivFromCode) // (jlong a, jlong b) + addl LITERAL(28), %esp // pop arguments + .cfi_adjust_cfa_offset -28 + ret +END_FUNCTION art_quick_ldiv_from_code + +DEFINE_FUNCTION art_quick_ldivmod_from_code + subl LITERAL(12), %esp // alignment 
padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass arg4 b.hi + PUSH edx // pass arg3 b.lo + PUSH ecx // pass arg2 a.hi + PUSH eax // pass arg1 a.lo + call SYMBOL(artLdivmodFromCode) // (jlong a, jlong b) + addl LITERAL(28), %esp // pop arguments + .cfi_adjust_cfa_offset -28 + ret +END_FUNCTION art_quick_ldivmod_from_code + +DEFINE_FUNCTION art_quick_lmul_from_code + imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx) + imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx) + mul %edx // edx:eax = a.lo(eax) * b.lo(edx) + add %ebx, %ecx + add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi) + ret +END_FUNCTION art_quick_lmul_from_code + +DEFINE_FUNCTION art_quick_lshl_from_code + // ecx:eax << edx + xchg %edx, %ecx + shld %cl,%eax,%edx + shl %cl,%eax + test LITERAL(32), %cl + jz 1f + mov %eax, %edx + xor %eax, %eax +1: + ret +END_FUNCTION art_quick_lshl_from_code + +DEFINE_FUNCTION art_quick_lshr_from_code + // ecx:eax >> edx + xchg %edx, %ecx + shrd %cl,%edx,%eax + sar %cl,%edx + test LITERAL(32),%cl + jz 1f + mov %edx, %eax + sar LITERAL(31), %edx +1: + ret +END_FUNCTION art_quick_lshr_from_code + +DEFINE_FUNCTION art_quick_lushr_from_code + // ecx:eax >>> edx + xchg %edx, %ecx + shrd %cl,%edx,%eax + shr %cl,%edx + test LITERAL(32),%cl + jz 1f + mov %edx, %eax + xor %edx, %edx +1: + ret +END_FUNCTION art_quick_lushr_from_code + +DEFINE_FUNCTION art_quick_set32_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + subl LITERAL(8), %esp // alignment padding + .cfi_adjust_cfa_offset 8 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + mov 32(%ebx), %ebx // get referrer + PUSH ebx // pass referrer + PUSH edx // pass new_val + PUSH ecx // pass object + PUSH eax // pass field_idx + call SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + 
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set32_instance_from_code + +DEFINE_FUNCTION art_quick_set64_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + subl LITERAL(8), %esp // alignment padding + .cfi_adjust_cfa_offset 8 + PUSH esp // pass SP-8 + addl LITERAL(8), (%esp) // fix SP on stack by adding 8 + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ebx // pass high half of new_val + PUSH edx // pass low half of new_val + PUSH ecx // pass object + PUSH eax // pass field_idx + call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set64_instance_from_code + +DEFINE_FUNCTION art_quick_set_obj_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + subl LITERAL(8), %esp // alignment padding + .cfi_adjust_cfa_offset 8 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + mov 32(%ebx), %ebx // get referrer + PUSH ebx // pass referrer + PUSH edx // pass new_val + PUSH ecx // pass object + PUSH eax // pass field_idx + call SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set_obj_instance_from_code + +DEFINE_FUNCTION art_quick_get32_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember 
SP + mov 32(%esp), %edx // get referrer + subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH edx // pass referrer + PUSH ecx // pass object + PUSH eax // pass field_idx + call SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_get32_instance_from_code + +DEFINE_FUNCTION art_quick_get64_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + mov 32(%esp), %edx // get referrer + subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH edx // pass referrer + PUSH ecx // pass object + PUSH eax // pass field_idx + call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_get64_instance_from_code + +DEFINE_FUNCTION art_quick_get_obj_instance_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + mov 32(%esp), %edx // get referrer + subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH edx // pass referrer + PUSH ecx // pass object + PUSH eax // pass field_idx + call SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, 
referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_get_obj_instance_from_code + +DEFINE_FUNCTION art_quick_set32_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + mov 32(%esp), %edx // get referrer + subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH edx // pass referrer + PUSH ecx // pass new_val + PUSH eax // pass field_idx + call SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set32_static_from_code + +DEFINE_FUNCTION art_quick_set64_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + subl LITERAL(8), %esp // alignment padding + .cfi_adjust_cfa_offset 8 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + mov 32(%ebx), %ebx // get referrer + PUSH edx // pass high half of new_val + PUSH ecx // pass low half of new_val + PUSH ebx // pass referrer + PUSH eax // pass field_idx + call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + .cfi_adjust_cfa_offset -32 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set64_static_from_code + +DEFINE_FUNCTION art_quick_set_obj_static_from_code + 
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + mov 32(%esp), %edx // get referrer + subl LITERAL(12), %esp // alignment padding + .cfi_adjust_cfa_offset 12 + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH edx // pass referrer + PUSH ecx // pass new_val + PUSH eax // pass field_idx + call SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set_obj_static_from_code + +DEFINE_FUNCTION art_quick_get32_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %edx // remember SP + mov 32(%esp), %ecx // get referrer + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass referrer + PUSH eax // pass field_idx + call SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP) + addl LITERAL(16), %esp // pop arguments + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_get32_static_from_code + +DEFINE_FUNCTION art_quick_get64_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %edx // remember SP + mov 32(%esp), %ecx // get referrer + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass referrer + PUSH eax // pass field_idx + call SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP) + addl LITERAL(16), %esp // pop arguments + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + 
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_get64_static_from_code + +DEFINE_FUNCTION art_quick_get_obj_static_from_code + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %edx // remember SP + mov 32(%esp), %ecx // get referrer + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass referrer + PUSH eax // pass field_idx + call SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP) + addl LITERAL(16), %esp // pop arguments + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_get_obj_static_from_code + +DEFINE_FUNCTION art_quick_proxy_invoke_handler + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* + PUSH esp // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass receiver + PUSH eax // pass proxy method + call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) + movd %eax, %xmm0 // place return value also into floating point return value + movd %edx, %xmm1 + punpckldq %xmm1, %xmm0 + addl LITERAL(44), %esp // pop arguments + .cfi_adjust_cfa_offset -44 + RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_proxy_invoke_handler + +DEFINE_FUNCTION art_quick_interpreter_entry + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame + mov %esp, %edx // remember SP + PUSH eax // alignment padding + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH eax // pass method + call SYMBOL(artInterpreterEntry) // (method, Thread*, SP) + movd %eax, %xmm0 // place return value also into floating point return value + movd %edx, %xmm1 + punpckldq %xmm1, %xmm0 + addl LITERAL(44), %esp // pop 
arguments + .cfi_adjust_cfa_offset -44 + RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception +END_FUNCTION art_quick_interpreter_entry + + /* + * Routine that intercepts method calls and returns. + */ +DEFINE_FUNCTION art_quick_instrumentation_entry_from_code + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + movl %esp, %edx // Save SP. + PUSH eax // Save eax which will be clobbered by the callee-save method. + subl LITERAL(8), %esp // Align stack. + .cfi_adjust_cfa_offset 8 + pushl 40(%esp) // Pass LR. + .cfi_adjust_cfa_offset 4 + PUSH edx // Pass SP. + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). + .cfi_adjust_cfa_offset 4 + PUSH ecx // Pass receiver. + PUSH eax // Pass Method*. + call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR) + addl LITERAL(28), %esp // Pop arguments upto saved Method*. + movl 28(%esp), %edi // Restore edi. + movl %eax, 28(%esp) // Place code* over edi, just under return pc. + movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 32(%esp) + // Place instrumentation exit as return pc. + movl (%esp), %eax // Restore eax. + movl 8(%esp), %ecx // Restore ecx. + movl 12(%esp), %edx // Restore edx. + movl 16(%esp), %ebx // Restore ebx. + movl 20(%esp), %ebp // Restore ebp. + movl 24(%esp), %esi // Restore esi. + addl LITERAL(28), %esp // Wind stack back upto code*. + ret // Call method (and pop). +END_FUNCTION art_quick_instrumentation_entry_from_code + +DEFINE_FUNCTION art_quick_instrumentation_exit_from_code + pushl LITERAL(0) // Push a fake return PC as there will be none on the stack. + SETUP_REF_ONLY_CALLEE_SAVE_FRAME + mov %esp, %ecx // Remember SP + subl LITERAL(8), %esp // Save float return value. + .cfi_adjust_cfa_offset 8 + movd %xmm0, (%esp) + PUSH edx // Save gpr return value. + PUSH eax + subl LITERAL(8), %esp // Align stack + movd %xmm0, (%esp) + subl LITERAL(8), %esp // Pass float return value. 
+ .cfi_adjust_cfa_offset 8 + movd %xmm0, (%esp) + PUSH edx // Pass gpr return value. + PUSH eax + PUSH ecx // Pass SP. + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current. + .cfi_adjust_cfa_offset 4 + call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result) + mov %eax, %ecx // Move returned link register. + addl LITERAL(32), %esp // Pop arguments. + .cfi_adjust_cfa_offset -32 + movl %edx, %ebx // Move returned link register for deopt + // (ebx is pretending to be our LR). + POP eax // Restore gpr return value. + POP edx + movd (%esp), %xmm0 // Restore fpr return value. + addl LITERAL(8), %esp + .cfi_adjust_cfa_offset -8 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + addl LITERAL(4), %esp // Remove fake return pc. + jmp *%ecx // Return. +END_FUNCTION art_quick_instrumentation_exit_from_code + + /* + * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization + * will long jump to the upcall with a special exception of -1. + */ +DEFINE_FUNCTION art_quick_deoptimize + pushl %ebx // Fake that we were called. + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + mov %esp, %ecx // Remember SP. + subl LITERAL(8), %esp // Align stack. + .cfi_adjust_cfa_offset 8 + PUSH ecx // Pass SP. + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). + .cfi_adjust_cfa_offset 4 + call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP) + int3 // Unreachable. +END_FUNCTION art_quick_deoptimize + + /* + * Quick abstract method error stub. %eax contains method* on entry. + */ +DEFINE_FUNCTION art_quick_abstract_method_error_stub + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME + movl %esp, %ecx // Remember SP. + PUSH eax // Align frame. + PUSH ecx // Pass SP for Method*. + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). + .cfi_adjust_cfa_offset 4 + PUSH eax // Pass Method*. + call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) + int3 // Unreachable. 
+END_FUNCTION art_quick_abstract_method_error_stub + + /* + * String's indexOf. + * + * On entry: + * eax: string object (known non-null) + * ecx: char to match (known <= 0xFFFF) + * edx: Starting offset in string data + */ +DEFINE_FUNCTION art_quick_indexof + PUSH edi // push callee save reg + mov STRING_COUNT_OFFSET(%eax), %ebx + mov STRING_VALUE_OFFSET(%eax), %edi + mov STRING_OFFSET_OFFSET(%eax), %eax + testl %edx, %edx // check if start < 0 + jl clamp_min +clamp_done: + cmpl %ebx, %edx // check if start >= count + jge not_found + lea STRING_DATA_OFFSET(%edi, %eax, 2), %edi // build a pointer to the start of string data + mov %edi, %eax // save a copy in eax to later compute result + lea (%edi, %edx, 2), %edi // build pointer to start of data to compare + subl %edx, %ebx // compute iteration count + /* + * At this point we have: + * eax: original start of string data + * ecx: char to compare + * ebx: length to compare + * edi: start of data to test + */ + mov %eax, %edx + mov %ecx, %eax // put char to match in %eax + mov %ebx, %ecx // put length to compare in %ecx + repne scasw // find %ax, starting at [%edi], up to length %ecx + jne not_found + subl %edx, %edi + sar LITERAL(1), %edi + decl %edi // index = ((curr_ptr - orig_ptr) / 2) - 1 + mov %edi, %eax + POP edi // pop callee save reg + ret + .balign 16 +not_found: + mov LITERAL(-1), %eax // return -1 (not found) + POP edi // pop callee save reg + ret +clamp_min: + xor %edx, %edx // clamp start to 0 + jmp clamp_done +END_FUNCTION art_quick_indexof + + /* + * String's compareTo. 
+ * + * On entry: + * eax: this string object (known non-null) + * ecx: comp string object (known non-null) + */ +DEFINE_FUNCTION art_quick_string_compareto + PUSH esi // push callee save reg + PUSH edi // push callee save reg + mov STRING_COUNT_OFFSET(%eax), %edx + mov STRING_COUNT_OFFSET(%ecx), %ebx + mov STRING_VALUE_OFFSET(%eax), %esi + mov STRING_VALUE_OFFSET(%ecx), %edi + mov STRING_OFFSET_OFFSET(%eax), %eax + mov STRING_OFFSET_OFFSET(%ecx), %ecx + /* Build pointers to the start of string data */ + lea STRING_DATA_OFFSET(%esi, %eax, 2), %esi + lea STRING_DATA_OFFSET(%edi, %ecx, 2), %edi + /* Calculate min length and count diff */ + mov %edx, %ecx + mov %edx, %eax + subl %ebx, %eax + cmovg %ebx, %ecx + /* + * At this point we have: + * eax: value to return if first part of strings are equal + * ecx: minimum among the lengths of the two strings + * esi: pointer to this string data + * edi: pointer to comp string data + */ + repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx + jne not_equal + POP edi // pop callee save reg + POP esi // pop callee save reg + ret + .balign 16 +not_equal: + movzwl -2(%esi), %eax // get last compared char from this string + movzwl -2(%edi), %ecx // get last compared char from comp string + subl %ecx, %eax // return the difference + POP edi // pop callee save reg + POP esi // pop callee save reg + ret +END_FUNCTION art_quick_string_compareto + +MACRO1(UNIMPLEMENTED,name) + .globl VAR(name, 0) + ALIGN_FUNCTION_ENTRY +VAR(name, 0): + int3 +END_MACRO + + // TODO: implement these! +UNIMPLEMENTED art_quick_memcmp16 diff --git a/runtime/arch/x86/registers_x86.cc b/runtime/arch/x86/registers_x86.cc new file mode 100644 index 0000000000..4255d6457f --- /dev/null +++ b/runtime/arch/x86/registers_x86.cc @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "registers_x86.h" + +#include + +namespace art { +namespace x86 { + +static const char* kRegisterNames[] = { + "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", +}; +std::ostream& operator<<(std::ostream& os, const Register& rhs) { + if (rhs >= EAX && rhs <= EDI) { + os << kRegisterNames[rhs]; + } else { + os << "Register[" << static_cast(rhs) << "]"; + } + return os; +} + +} // namespace x86 +} // namespace art diff --git a/runtime/arch/x86/registers_x86.h b/runtime/arch/x86/registers_x86.h new file mode 100644 index 0000000000..23027ed7d7 --- /dev/null +++ b/runtime/arch/x86/registers_x86.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_X86_REGISTERS_X86_H_ +#define ART_RUNTIME_ARCH_X86_REGISTERS_X86_H_ + +#include + +#include "base/logging.h" +#include "base/macros.h" +#include "globals.h" + +namespace art { +namespace x86 { + +enum Register { + EAX = 0, + ECX = 1, + EDX = 2, + EBX = 3, + ESP = 4, + EBP = 5, + ESI = 6, + EDI = 7, + kNumberOfCpuRegisters = 8, + kFirstByteUnsafeRegister = 4, + kNoRegister = -1 // Signals an illegal register. +}; +std::ostream& operator<<(std::ostream& os, const Register& rhs); + +} // namespace x86 +} // namespace art + +#endif // ART_RUNTIME_ARCH_X86_REGISTERS_X86_H_ diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc new file mode 100644 index 0000000000..dd3e7dd137 --- /dev/null +++ b/runtime/arch/x86/thread_x86.cc @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "thread.h" + +#include +#include + +#include "asm_support_x86.h" +#include "base/macros.h" +#include "thread.h" +#include "thread_list.h" + +#if defined(__APPLE__) +#include +#include +struct descriptor_table_entry_t { + uint16_t limit0; + uint16_t base0; + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; +} __attribute__((packed)); +#define MODIFY_LDT_CONTENTS_DATA 0 +#else +#include +#endif + +namespace art { + +void Thread::InitCpu() { + static Mutex modify_ldt_lock("modify_ldt lock"); + MutexLock mu(Thread::Current(), modify_ldt_lock); + + const uintptr_t base = reinterpret_cast(this); + const size_t limit = kPageSize; + + const int contents = MODIFY_LDT_CONTENTS_DATA; + const int seg_32bit = 1; + const int read_exec_only = 0; + const int limit_in_pages = 0; + const int seg_not_present = 0; + const int useable = 1; + + int entry_number = -1; + +#if defined(__APPLE__) + descriptor_table_entry_t entry; + memset(&entry, 0, sizeof(entry)); + entry.limit0 = (limit & 0x0ffff); + entry.limit = (limit & 0xf0000) >> 16; + entry.base0 = (base & 0x0000ffff); + entry.base1 = (base & 0x00ff0000) >> 16; + entry.base2 = (base & 0xff000000) >> 24; + entry.type = ((read_exec_only ^ 1) << 1) | (contents << 2); + entry.s = 1; + entry.dpl = 0x3; + entry.p = seg_not_present ^ 1; + entry.avl = useable; + entry.l = 0; + entry.d = seg_32bit; + entry.g = limit_in_pages; + + entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast(&entry), 1); + if (entry_number == -1) { + PLOG(FATAL) << "i386_set_ldt failed"; + } +#else + // Read current LDT entries. + CHECK_EQ((size_t)LDT_ENTRY_SIZE, sizeof(uint64_t)); + std::vector ldt(LDT_ENTRIES); + size_t ldt_size(sizeof(uint64_t) * ldt.size()); + memset(&ldt[0], 0, ldt_size); + // TODO: why doesn't this return LDT_ENTRY_SIZE * LDT_ENTRIES for the main thread? + syscall(__NR_modify_ldt, 0, &ldt[0], ldt_size); + + // Find the first empty slot. 
+ for (entry_number = 0; entry_number < LDT_ENTRIES && ldt[entry_number] != 0; ++entry_number) { + } + if (entry_number >= LDT_ENTRIES) { + LOG(FATAL) << "Failed to find a free LDT slot"; + } + + // Update LDT entry. + user_desc ldt_entry; + memset(&ldt_entry, 0, sizeof(ldt_entry)); + ldt_entry.entry_number = entry_number; + ldt_entry.base_addr = base; + ldt_entry.limit = limit; + ldt_entry.seg_32bit = seg_32bit; + ldt_entry.contents = contents; + ldt_entry.read_exec_only = read_exec_only; + ldt_entry.limit_in_pages = limit_in_pages; + ldt_entry.seg_not_present = seg_not_present; + ldt_entry.useable = useable; + CHECK_EQ(0, syscall(__NR_modify_ldt, 1, &ldt_entry, sizeof(ldt_entry))); + entry_number = ldt_entry.entry_number; +#endif + + // Change %fs to be new LDT entry. + uint16_t table_indicator = 1 << 2; // LDT + uint16_t rpl = 3; // Requested privilege level + uint16_t selector = (entry_number << 3) | table_indicator | rpl; + // TODO: use our assembler to generate code + __asm__ __volatile__("movw %w0, %%fs" + : // output + : "q"(selector) // input + :); // clobber + + // Allow easy indirection back to Thread*. + self_ = this; + + // Sanity check that reads from %fs point to this Thread*. + Thread* self_check; + // TODO: use our assembler to generate code + CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_)); + __asm__ __volatile__("movl %%fs:(%1), %0" + : "=r"(self_check) // output + : "r"(THREAD_SELF_OFFSET) // input + :); // clobber + CHECK_EQ(self_check, this); + + // Sanity check other offsets. + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/asm_support.h b/runtime/asm_support.h index 7b20c7aee0..aca93a5552 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -30,29 +30,4 @@ // Offset of field Method::entry_point_from_compiled_code_ #define METHOD_CODE_OFFSET 40 -#if defined(__arm__) -// Register holding suspend check count down. 
-#define rSUSPEND r4 -// Register holding Thread::Current(). -#define rSELF r9 -// Offset of field Thread::suspend_count_ verified in InitCpu -#define THREAD_FLAGS_OFFSET 0 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#elif defined(__mips__) -// Register holding suspend check count down. -#define rSUSPEND $s0 -// Register holding Thread::Current(). -#define rSELF $s1 -// Offset of field Thread::suspend_count_ verified in InitCpu -#define THREAD_FLAGS_OFFSET 0 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#elif defined(__i386__) -// Offset of field Thread::self_ verified in InitCpu -#define THREAD_SELF_OFFSET 40 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#endif - #endif // ART_RUNTIME_ASM_SUPPORT_H_ diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index dea52a6615..b924798acf 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -53,7 +53,7 @@ namespace art { class ScopedContentionRecorder; class Thread; -const bool kDebugLocking = kIsDebugBuild; +const bool kDebugLocking = true || kIsDebugBuild; // Base class for all Mutex implementations class BaseMutex { diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc index bf6fd17a49..dfb0220d46 100644 --- a/runtime/base/timing_logger.cc +++ b/runtime/base/timing_logger.cc @@ -14,6 +14,11 @@ * limitations under the License. 
*/ + +#define ATRACE_TAG ATRACE_TAG_DALVIK +#include +#include + #include "timing_logger.h" #include "base/logging.h" @@ -26,49 +31,6 @@ namespace art { -void TimingLogger::Reset() { - times_.clear(); - labels_.clear(); - AddSplit(""); -} - -TimingLogger::TimingLogger(const std::string &name, bool precise) - : name_(name), - precise_(precise) { - AddSplit(""); -} - -void TimingLogger::AddSplit(const std::string &label) { - times_.push_back(NanoTime()); - labels_.push_back(label); -} - -uint64_t TimingLogger::GetTotalNs() const { - return times_.back() - times_.front(); -} - -void TimingLogger::Dump(std::ostream &os) const { - uint64_t largest_time = 0; - os << name_ << ": begin\n"; - for (size_t i = 1; i < times_.size(); ++i) { - uint64_t delta_time = times_[i] - times_[i - 1]; - largest_time = std::max(largest_time, delta_time); - } - // Compute which type of unit we will use for printing the timings. - TimeUnit tu = GetAppropriateTimeUnit(largest_time); - uint64_t divisor = GetNsToTimeUnitDivisor(tu); - for (size_t i = 1; i < times_.size(); ++i) { - uint64_t delta_time = times_[i] - times_[i - 1]; - if (!precise_ && divisor >= 1000) { - // Make the fraction 0. 
- delta_time -= delta_time % (divisor / 1000); - } - os << name_ << ": " << std::setw(8) << FormatDuration(delta_time, tu) << " " - << labels_[i] << "\n"; - } - os << name_ << ": end, " << NsToMs(GetTotalNs()) << " ms\n"; -} - CumulativeLogger::CumulativeLogger(const std::string& name) : name_(name), lock_name_("CumulativeLoggerLock" + name), @@ -112,17 +74,8 @@ uint64_t CumulativeLogger::GetTotalTime() const { return total; } -void CumulativeLogger::AddLogger(const TimingLogger &logger) { - MutexLock mu(Thread::Current(), lock_); - DCHECK_EQ(logger.times_.size(), logger.labels_.size()); - for (size_t i = 1; i < logger.times_.size(); ++i) { - const uint64_t delta_time = logger.times_[i] - logger.times_[i - 1]; - const std::string &label = logger.labels_[i]; - AddPair(label, delta_time); - } -} -void CumulativeLogger::AddNewLogger(const base::NewTimingLogger &logger) { +void CumulativeLogger::AddLogger(const base::TimingLogger &logger) { MutexLock mu(Thread::Current(), lock_); const std::vector >& splits = logger.GetSplits(); typedef std::vector >::const_iterator It; @@ -183,51 +136,55 @@ void CumulativeLogger::DumpHistogram(std::ostream &os) { namespace base { -NewTimingLogger::NewTimingLogger(const char* name, bool precise, bool verbose) +TimingLogger::TimingLogger(const char* name, bool precise, bool verbose) : name_(name), precise_(precise), verbose_(verbose), current_split_(NULL), current_split_start_ns_(0) { } -void NewTimingLogger::Reset() { +void TimingLogger::Reset() { current_split_ = NULL; current_split_start_ns_ = 0; splits_.clear(); } -void NewTimingLogger::StartSplit(const char* new_split_label) { +void TimingLogger::StartSplit(const char* new_split_label) { DCHECK(current_split_ == NULL); if (verbose_) { LOG(INFO) << "Begin: " << new_split_label; } current_split_ = new_split_label; + ATRACE_BEGIN(current_split_); current_split_start_ns_ = NanoTime(); } // Ends the current split and starts the one given by the label. 
-void NewTimingLogger::NewSplit(const char* new_split_label) { +void TimingLogger::NewSplit(const char* new_split_label) { DCHECK(current_split_ != NULL); uint64_t current_time = NanoTime(); uint64_t split_time = current_time - current_split_start_ns_; + ATRACE_END(); splits_.push_back(std::pair(split_time, current_split_)); if (verbose_) { LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time) << "\n" << "Begin: " << new_split_label; } current_split_ = new_split_label; + ATRACE_BEGIN(current_split_); current_split_start_ns_ = current_time; } -void NewTimingLogger::EndSplit() { +void TimingLogger::EndSplit() { DCHECK(current_split_ != NULL); uint64_t current_time = NanoTime(); uint64_t split_time = current_time - current_split_start_ns_; + ATRACE_END(); if (verbose_) { LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time); } splits_.push_back(std::pair(split_time, current_split_)); } -uint64_t NewTimingLogger::GetTotalNs() const { +uint64_t TimingLogger::GetTotalNs() const { uint64_t total_ns = 0; typedef std::vector >::const_iterator It; for (It it = splits_.begin(), end = splits_.end(); it != end; ++it) { @@ -237,7 +194,7 @@ uint64_t NewTimingLogger::GetTotalNs() const { return total_ns; } -void NewTimingLogger::Dump(std::ostream &os) const { +void TimingLogger::Dump(std::ostream &os) const { uint64_t longest_split = 0; uint64_t total_ns = 0; typedef std::vector >::const_iterator It; diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h index 0f00a046e5..0998837517 100644 --- a/runtime/base/timing_logger.h +++ b/runtime/base/timing_logger.h @@ -26,27 +26,8 @@ namespace art { -class CumulativeLogger; - -class TimingLogger { - public: - explicit TimingLogger(const std::string& name, bool precise); - void AddSplit(const std::string& label); - void Dump(std::ostream& os) const; - void Reset(); - uint64_t GetTotalNs() const; - - protected: - const std::string name_; - const bool precise_; - std::vector 
times_; - std::vector labels_; - - friend class CumulativeLogger; -}; - namespace base { - class NewTimingLogger; + class TimingLogger; } // namespace base class CumulativeLogger { @@ -62,8 +43,7 @@ class CumulativeLogger { // Allow the name to be modified, particularly when the cumulative logger is a field within a // parent class that is unable to determine the "name" of a sub-class. void SetName(const std::string& name); - void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_); - void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_); + void AddLogger(const base::TimingLogger& logger) LOCKS_EXCLUDED(lock_); private: void AddPair(const std::string &label, uint64_t delta_time) @@ -84,16 +64,15 @@ class CumulativeLogger { namespace base { // A replacement to timing logger that know when a split starts for the purposes of logging. -// TODO: replace uses of TimingLogger with base::NewTimingLogger. -class NewTimingLogger { +class TimingLogger { public: - explicit NewTimingLogger(const char* name, bool precise, bool verbose); + explicit TimingLogger(const char* name, bool precise, bool verbose); // Clears current splits and labels. void Reset(); // Starts a split, a split shouldn't be in progress. - void StartSplit(const char* new_split_label); + void StartSplit(const char* new_split_label); // Ends the current split and starts the one given by the label. void NewSplit(const char* new_split_label); @@ -111,7 +90,7 @@ class NewTimingLogger { protected: // The name of the timing logger. - const std::string name_; + const char* name_; // Do we want to print the exactly recorded split (true) or round down to the time unit being // used (false). 
@@ -130,7 +109,7 @@ class NewTimingLogger { std::vector > splits_; private: - DISALLOW_COPY_AND_ASSIGN(NewTimingLogger); + DISALLOW_COPY_AND_ASSIGN(TimingLogger); }; } // namespace base diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 40033b7743..84f186d4b3 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -58,10 +58,7 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" -#include "runtime_support.h" -#if defined(ART_USE_PORTABLE_COMPILER) -#include "runtime_support_llvm.h" -#endif +#include "entrypoints/entrypoint_utils.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "sirt_ref.h" @@ -1022,15 +1019,19 @@ void ClassLinker::InitFromImageCallback(mirror::Object* obj, void* arg) { return; } - // Set entry points to interpreter for methods in interpreter only mode. if (obj->IsMethod()) { mirror::AbstractMethod* method = obj->AsMethod(); + // Set entry points to interpreter for methods in interpreter only mode. if (Runtime::Current()->GetInstrumentation()->InterpretOnly() && !method->IsNative()) { method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry); if (method != Runtime::Current()->GetResolutionMethod()) { method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint()); } } + // Populate native method pointer with jni lookup stub. + if (method->IsNative()) { + method->UnregisterNative(Thread::Current()); + } } } @@ -1523,6 +1524,13 @@ const OatFile::OatMethod ClassLinker::GetOatMethodFor(const mirror::AbstractMeth // Special case to get oat code without overwriting a trampoline. 
const void* ClassLinker::GetOatCodeFor(const mirror::AbstractMethod* method) { CHECK(!method->IsAbstract()) << PrettyMethod(method); + if (method->IsProxyMethod()) { +#if !defined(ART_USE_PORTABLE_COMPILER) + return reinterpret_cast(art_quick_proxy_invoke_handler); +#else + return reinterpret_cast(art_portable_proxy_invoke_handler); +#endif + } const void* result = GetOatMethodFor(method).GetCode(); if (result == NULL) { // No code? You must mean to go into the interpreter. diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 75886cf7f0..4659fd1982 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -22,6 +22,7 @@ #include "class_linker-inl.h" #include "common_test.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/heap.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" @@ -32,7 +33,6 @@ #include "mirror/object_array-inl.h" #include "mirror/proxy.h" #include "mirror/stack_trace_element.h" -#include "runtime_support.h" #include "sirt_ref.h" using ::art::mirror::AbstractMethod; diff --git a/runtime/common_test.h b/runtime/common_test.h index 842f959cd5..7ee6fe20b2 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -31,6 +31,7 @@ #include "class_linker.h" #include "compiler/driver/compiler_driver.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/heap.h" #include "gtest/gtest.h" #include "instruction_set.h" @@ -39,7 +40,6 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "thread.h" @@ -473,7 +473,8 @@ class CommonTest : public testing::Test { void CompileMethod(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK(method != NULL); - TimingLogger timings("CommonTest::CompileMethod", false); + base::TimingLogger timings("CommonTest::CompileMethod", false, false); + 
timings.StartSplit("CompileOne"); compiler_driver_->CompileOne(method, timings); MakeExecutable(method); } diff --git a/runtime/constants_arm.h b/runtime/constants_arm.h deleted file mode 100644 index bbb9242def..0000000000 --- a/runtime/constants_arm.h +++ /dev/null @@ -1,519 +0,0 @@ -/* - * Copyright (C) 2009 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_CONSTANTS_ARM_H_ -#define ART_RUNTIME_CONSTANTS_ARM_H_ - -#include - -#include - -#include "base/casts.h" -#include "base/logging.h" -#include "globals.h" - -namespace art { -namespace arm { - -// Defines constants and accessor classes to assemble, disassemble and -// simulate ARM instructions. -// -// Section references in the code refer to the "ARM Architecture Reference -// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf) -// -// Constants for specific fields are defined in their respective named enums. -// General constants are in an anonymous enum in class Instr. - - -// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at -// a time, so that compile time optimizations can be applied. -// Warning: VFPv3-D32 is untested. -#define VFPv3_D16 -#if defined(VFPv3_D16) == defined(VFPv3_D32) -#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time." -#endif - - -// Values for registers. 
-enum Register { - R0 = 0, - R1 = 1, - R2 = 2, - R3 = 3, - R4 = 4, - R5 = 5, - R6 = 6, - R7 = 7, - R8 = 8, - R9 = 9, - R10 = 10, - R11 = 11, - R12 = 12, - R13 = 13, - R14 = 14, - R15 = 15, - TR = 9, // thread register - FP = 11, - IP = 12, - SP = 13, - LR = 14, - PC = 15, - kNumberOfCoreRegisters = 16, - kNoRegister = -1, -}; -std::ostream& operator<<(std::ostream& os, const Register& rhs); - - -enum ScaleFactor { - TIMES_1 = 0, - TIMES_2 = 1, - TIMES_4 = 2, - TIMES_8 = 3 -}; - - -// Values for single-precision floating point registers. -enum SRegister { - S0 = 0, - S1 = 1, - S2 = 2, - S3 = 3, - S4 = 4, - S5 = 5, - S6 = 6, - S7 = 7, - S8 = 8, - S9 = 9, - S10 = 10, - S11 = 11, - S12 = 12, - S13 = 13, - S14 = 14, - S15 = 15, - S16 = 16, - S17 = 17, - S18 = 18, - S19 = 19, - S20 = 20, - S21 = 21, - S22 = 22, - S23 = 23, - S24 = 24, - S25 = 25, - S26 = 26, - S27 = 27, - S28 = 28, - S29 = 29, - S30 = 30, - S31 = 31, - kNumberOfSRegisters = 32, - kNoSRegister = -1, -}; -std::ostream& operator<<(std::ostream& os, const SRegister& rhs); - - -// Values for double-precision floating point registers. -enum DRegister { - D0 = 0, - D1 = 1, - D2 = 2, - D3 = 3, - D4 = 4, - D5 = 5, - D6 = 6, - D7 = 7, - D8 = 8, - D9 = 9, - D10 = 10, - D11 = 11, - D12 = 12, - D13 = 13, - D14 = 14, - D15 = 15, -#ifdef VFPv3_D16 - kNumberOfDRegisters = 16, -#else - D16 = 16, - D17 = 17, - D18 = 18, - D19 = 19, - D20 = 20, - D21 = 21, - D22 = 22, - D23 = 23, - D24 = 24, - D25 = 25, - D26 = 26, - D27 = 27, - D28 = 28, - D29 = 29, - D30 = 30, - D31 = 31, - kNumberOfDRegisters = 32, -#endif - kNumberOfOverlappingDRegisters = 16, - kNoDRegister = -1, -}; -std::ostream& operator<<(std::ostream& os, const DRegister& rhs); - - -// Values for the condition field as defined in section A3.2. 
-enum Condition { - kNoCondition = -1, - EQ = 0, // equal - NE = 1, // not equal - CS = 2, // carry set/unsigned higher or same - CC = 3, // carry clear/unsigned lower - MI = 4, // minus/negative - PL = 5, // plus/positive or zero - VS = 6, // overflow - VC = 7, // no overflow - HI = 8, // unsigned higher - LS = 9, // unsigned lower or same - GE = 10, // signed greater than or equal - LT = 11, // signed less than - GT = 12, // signed greater than - LE = 13, // signed less than or equal - AL = 14, // always (unconditional) - kSpecialCondition = 15, // special condition (refer to section A3.2.1) - kMaxCondition = 16, -}; -std::ostream& operator<<(std::ostream& os, const Condition& rhs); - - -// Opcodes for Data-processing instructions (instructions with a type 0 and 1) -// as defined in section A3.4 -enum Opcode { - kNoOperand = -1, - AND = 0, // Logical AND - EOR = 1, // Logical Exclusive OR - SUB = 2, // Subtract - RSB = 3, // Reverse Subtract - ADD = 4, // Add - ADC = 5, // Add with Carry - SBC = 6, // Subtract with Carry - RSC = 7, // Reverse Subtract with Carry - TST = 8, // Test - TEQ = 9, // Test Equivalence - CMP = 10, // Compare - CMN = 11, // Compare Negated - ORR = 12, // Logical (inclusive) OR - MOV = 13, // Move - BIC = 14, // Bit Clear - MVN = 15, // Move Not - kMaxOperand = 16 -}; - - -// Shifter types for Data-processing operands as defined in section A5.1.2. -enum Shift { - kNoShift = -1, - LSL = 0, // Logical shift left - LSR = 1, // Logical shift right - ASR = 2, // Arithmetic shift right - ROR = 3, // Rotate right - kMaxShift = 4 -}; - - -// Constants used for the decoding or encoding of the individual fields of -// instructions. Based on the "Figure 3-1 ARM instruction set summary". 
-enum InstructionFields { - kConditionShift = 28, - kConditionBits = 4, - kTypeShift = 25, - kTypeBits = 3, - kLinkShift = 24, - kLinkBits = 1, - kUShift = 23, - kUBits = 1, - kOpcodeShift = 21, - kOpcodeBits = 4, - kSShift = 20, - kSBits = 1, - kRnShift = 16, - kRnBits = 4, - kRdShift = 12, - kRdBits = 4, - kRsShift = 8, - kRsBits = 4, - kRmShift = 0, - kRmBits = 4, - - // Immediate instruction fields encoding. - kRotateShift = 8, - kRotateBits = 4, - kImmed8Shift = 0, - kImmed8Bits = 8, - - // Shift instruction register fields encodings. - kShiftImmShift = 7, - kShiftRegisterShift = 8, - kShiftImmBits = 5, - kShiftShift = 5, - kShiftBits = 2, - - // Load/store instruction offset field encoding. - kOffset12Shift = 0, - kOffset12Bits = 12, - kOffset12Mask = 0x00000fff, - - // Mul instruction register fields encodings. - kMulRdShift = 16, - kMulRdBits = 4, - kMulRnShift = 12, - kMulRnBits = 4, - - kBranchOffsetMask = 0x00ffffff -}; - - -// Size (in bytes) of registers. -const int kRegisterSize = 4; - -// List of registers used in load/store multiple. -typedef uint16_t RegList; - -// The class Instr enables access to individual fields defined in the ARM -// architecture instruction set encoding as described in figure A3-1. -// -// Example: Test whether the instruction at ptr does set the condition code -// bits. -// -// bool InstructionSetsConditionCodes(byte* ptr) { -// Instr* instr = Instr::At(ptr); -// int type = instr->TypeField(); -// return ((type == 0) || (type == 1)) && instr->HasS(); -// } -// -class Instr { - public: - enum { - kInstrSize = 4, - kInstrSizeLog2 = 2, - kPCReadOffset = 8 - }; - - bool IsBreakPoint() { - return IsBkpt(); - } - - // Get the raw instruction bits. - inline int32_t InstructionBits() const { - return *reinterpret_cast(this); - } - - // Set the raw instruction bits to value. - inline void SetInstructionBits(int32_t value) { - *reinterpret_cast(this) = value; - } - - // Read one particular bit out of the instruction bits. 
- inline int Bit(int nr) const { - return (InstructionBits() >> nr) & 1; - } - - // Read a bit field out of the instruction bits. - inline int Bits(int shift, int count) const { - return (InstructionBits() >> shift) & ((1 << count) - 1); - } - - - // Accessors for the different named fields used in the ARM encoding. - // The naming of these accessor corresponds to figure A3-1. - // Generally applicable fields - inline Condition ConditionField() const { - return static_cast(Bits(kConditionShift, kConditionBits)); - } - inline int TypeField() const { return Bits(kTypeShift, kTypeBits); } - - inline Register RnField() const { return static_cast( - Bits(kRnShift, kRnBits)); } - inline Register RdField() const { return static_cast( - Bits(kRdShift, kRdBits)); } - - // Fields used in Data processing instructions - inline Opcode OpcodeField() const { - return static_cast(Bits(kOpcodeShift, kOpcodeBits)); - } - inline int SField() const { return Bits(kSShift, kSBits); } - // with register - inline Register RmField() const { - return static_cast(Bits(kRmShift, kRmBits)); - } - inline Shift ShiftField() const { return static_cast( - Bits(kShiftShift, kShiftBits)); } - inline int RegShiftField() const { return Bit(4); } - inline Register RsField() const { - return static_cast(Bits(kRsShift, kRsBits)); - } - inline int ShiftAmountField() const { return Bits(kShiftImmShift, - kShiftImmBits); } - // with immediate - inline int RotateField() const { return Bits(kRotateShift, kRotateBits); } - inline int Immed8Field() const { return Bits(kImmed8Shift, kImmed8Bits); } - - // Fields used in Load/Store instructions - inline int PUField() const { return Bits(23, 2); } - inline int BField() const { return Bit(22); } - inline int WField() const { return Bit(21); } - inline int LField() const { return Bit(20); } - // with register uses same fields as Data processing instructions above - // with immediate - inline int Offset12Field() const { return Bits(kOffset12Shift, - kOffset12Bits); } 
- // multiple - inline int RlistField() const { return Bits(0, 16); } - // extra loads and stores - inline int SignField() const { return Bit(6); } - inline int HField() const { return Bit(5); } - inline int ImmedHField() const { return Bits(8, 4); } - inline int ImmedLField() const { return Bits(0, 4); } - - // Fields used in Branch instructions - inline int LinkField() const { return Bits(kLinkShift, kLinkBits); } - inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); } - - // Fields used in Supervisor Call instructions - inline uint32_t SvcField() const { return Bits(0, 24); } - - // Field used in Breakpoint instruction - inline uint16_t BkptField() const { - return ((Bits(8, 12) << 4) | Bits(0, 4)); - } - - // Field used in 16-bit immediate move instructions - inline uint16_t MovwField() const { - return ((Bits(16, 4) << 12) | Bits(0, 12)); - } - - // Field used in VFP float immediate move instruction - inline float ImmFloatField() const { - uint32_t imm32 = (Bit(19) << 31) | (((1 << 5) - Bit(18)) << 25) | - (Bits(16, 2) << 23) | (Bits(0, 4) << 19); - return bit_cast(imm32); - } - - // Field used in VFP double immediate move instruction - inline double ImmDoubleField() const { - uint64_t imm64 = (Bit(19)*(1LL << 63)) | (((1LL << 8) - Bit(18)) << 54) | - (Bits(16, 2)*(1LL << 52)) | (Bits(0, 4)*(1LL << 48)); - return bit_cast(imm64); - } - - // Test for data processing instructions of type 0 or 1. - // See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition", - // section A5.1 "ARM instruction set encoding". - inline bool IsDataProcessing() const { - CHECK_NE(ConditionField(), kSpecialCondition); - CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1. - return ((Bits(20, 5) & 0x19) != 0x10) && - ((Bit(25) == 1) || // Data processing immediate. - (Bit(4) == 0) || // Data processing register. - (Bit(7) == 0)); // Data processing register-shifted register. 
- } - - // Tests for special encodings of type 0 instructions (extra loads and stores, - // as well as multiplications, synchronization primitives, and miscellaneous). - // Can only be called for a type 0 or 1 instruction. - inline bool IsMiscellaneous() const { - CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1. - return ((Bit(25) == 0) && ((Bits(20, 5) & 0x19) == 0x10) && (Bit(7) == 0)); - } - inline bool IsMultiplyOrSyncPrimitive() const { - CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1. - return ((Bit(25) == 0) && (Bits(4, 4) == 9)); - } - - // Test for Supervisor Call instruction. - inline bool IsSvc() const { - return ((InstructionBits() & 0xff000000) == 0xef000000); - } - - // Test for Breakpoint instruction. - inline bool IsBkpt() const { - return ((InstructionBits() & 0xfff000f0) == 0xe1200070); - } - - // VFP register fields. - inline SRegister SnField() const { - return static_cast((Bits(kRnShift, kRnBits) << 1) + Bit(7)); - } - inline SRegister SdField() const { - return static_cast((Bits(kRdShift, kRdBits) << 1) + Bit(22)); - } - inline SRegister SmField() const { - return static_cast((Bits(kRmShift, kRmBits) << 1) + Bit(5)); - } - inline DRegister DnField() const { - return static_cast(Bits(kRnShift, kRnBits) + (Bit(7) << 4)); - } - inline DRegister DdField() const { - return static_cast(Bits(kRdShift, kRdBits) + (Bit(22) << 4)); - } - inline DRegister DmField() const { - return static_cast(Bits(kRmShift, kRmBits) + (Bit(5) << 4)); - } - - // Test for VFP data processing or single transfer instructions of type 7. - inline bool IsVFPDataProcessingOrSingleTransfer() const { - CHECK_NE(ConditionField(), kSpecialCondition); - CHECK_EQ(TypeField(), 7); - return ((Bit(24) == 0) && (Bits(9, 3) == 5)); - // Bit(4) == 0: Data Processing - // Bit(4) == 1: 8, 16, or 32-bit Transfer between ARM Core and VFP - } - - // Test for VFP 64-bit transfer instructions of type 6. 
- inline bool IsVFPDoubleTransfer() const { - CHECK_NE(ConditionField(), kSpecialCondition); - CHECK_EQ(TypeField(), 6); - return ((Bits(21, 4) == 2) && (Bits(9, 3) == 5) && - ((Bits(4, 4) & 0xd) == 1)); - } - - // Test for VFP load and store instructions of type 6. - inline bool IsVFPLoadStore() const { - CHECK_NE(ConditionField(), kSpecialCondition); - CHECK_EQ(TypeField(), 6); - return ((Bits(20, 5) & 0x12) == 0x10) && (Bits(9, 3) == 5); - } - - // Special accessors that test for existence of a value. - inline bool HasS() const { return SField() == 1; } - inline bool HasB() const { return BField() == 1; } - inline bool HasW() const { return WField() == 1; } - inline bool HasL() const { return LField() == 1; } - inline bool HasSign() const { return SignField() == 1; } - inline bool HasH() const { return HField() == 1; } - inline bool HasLink() const { return LinkField() == 1; } - - // Instructions are read out of a code stream. The only way to get a - // reference to an instruction is to convert a pointer. There is no way - // to allocate or create instances of class Instr. - // Use the At(pc) function to create references to Instr. - static Instr* At(uword pc) { return reinterpret_cast(pc); } - Instr* Next() { return this + kInstrSize; } - - private: - // We need to prevent the creation of instances of class Instr. - DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); -}; - -} // namespace arm -} // namespace art - -#endif // ART_RUNTIME_CONSTANTS_ARM_H_ diff --git a/runtime/constants_mips.h b/runtime/constants_mips.h deleted file mode 100644 index fb56493a14..0000000000 --- a/runtime/constants_mips.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_CONSTANTS_MIPS_H_ -#define ART_RUNTIME_CONSTANTS_MIPS_H_ - -#include - -#include "base/logging.h" -#include "base/macros.h" -#include "globals.h" - -namespace art { -namespace mips { - -enum Register { - ZERO = 0, - AT = 1, // Assembler temporary. - V0 = 2, // Values. - V1 = 3, - A0 = 4, // Arguments. - A1 = 5, - A2 = 6, - A3 = 7, - T0 = 8, // Temporaries. - T1 = 9, - T2 = 10, - T3 = 11, - T4 = 12, - T5 = 13, - T6 = 14, - T7 = 15, - S0 = 16, // Saved values. - S1 = 17, - S2 = 18, - S3 = 19, - S4 = 20, - S5 = 21, - S6 = 22, - S7 = 23, - T8 = 24, // More temporaries. - T9 = 25, - K0 = 26, // Reserved for trap handler. - K1 = 27, - GP = 28, // Global pointer. - SP = 29, // Stack pointer. - FP = 30, // Saved value/frame pointer. - RA = 31, // Return address. - kNumberOfCoreRegisters = 32, - kNoRegister = -1 // Signals an illegal register. -}; -std::ostream& operator<<(std::ostream& os, const Register& rhs); - -// Values for single-precision floating point registers. -enum FRegister { - F0 = 0, - F1 = 1, - F2 = 2, - F3 = 3, - F4 = 4, - F5 = 5, - F6 = 6, - F7 = 7, - F8 = 8, - F9 = 9, - F10 = 10, - F11 = 11, - F12 = 12, - F13 = 13, - F14 = 14, - F15 = 15, - F16 = 16, - F17 = 17, - F18 = 18, - F19 = 19, - F20 = 20, - F21 = 21, - F22 = 22, - F23 = 23, - F24 = 24, - F25 = 25, - F26 = 26, - F27 = 27, - F28 = 28, - F29 = 29, - F30 = 30, - F31 = 31, - kNumberOfFRegisters = 32, - kNoFRegister = -1, -}; -std::ostream& operator<<(std::ostream& os, const FRegister& rhs); - -// Values for double-precision floating point registers. 
-enum DRegister { - D0 = 0, - D1 = 1, - D2 = 2, - D3 = 3, - D4 = 4, - D5 = 5, - D6 = 6, - D7 = 7, - D8 = 8, - D9 = 9, - D10 = 10, - D11 = 11, - D12 = 12, - D13 = 13, - D14 = 14, - D15 = 15, - kNumberOfDRegisters = 16, - kNumberOfOverlappingDRegisters = 16, - kNoDRegister = -1, -}; -std::ostream& operator<<(std::ostream& os, const DRegister& rhs); - -// Constants used for the decoding or encoding of the individual fields of instructions. -enum InstructionFields { - kOpcodeShift = 26, - kOpcodeBits = 6, - kRsShift = 21, - kRsBits = 5, - kRtShift = 16, - kRtBits = 5, - kRdShift = 11, - kRdBits = 5, - kShamtShift = 6, - kShamtBits = 5, - kFunctShift = 0, - kFunctBits = 6, - - kFmtShift = 21, - kFmtBits = 5, - kFtShift = 16, - kFtBits = 5, - kFsShift = 11, - kFsBits = 5, - kFdShift = 6, - kFdBits = 5, - - kBranchOffsetMask = 0x0000ffff, - kJumpOffsetMask = 0x03ffffff, -}; - -enum ScaleFactor { - TIMES_1 = 0, - TIMES_2 = 1, - TIMES_4 = 2, - TIMES_8 = 3 -}; - -class Instr { - public: - static const uint32_t kBreakPointInstruction = 0x0000000D; - - bool IsBreakPoint() { - return ((*reinterpret_cast(this)) & 0xFC0000CF) == kBreakPointInstruction; - } - - // Instructions are read out of a code stream. The only way to get a - // reference to an instruction is to convert a pointer. There is no way - // to allocate or create instances of class Instr. - // Use the At(pc) function to create references to Instr. 
- static Instr* At(uintptr_t pc) { return reinterpret_cast(pc); } - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); -}; - -} // namespace mips -} // namespace art - -#endif // ART_RUNTIME_CONSTANTS_MIPS_H_ diff --git a/runtime/constants_x86.h b/runtime/constants_x86.h deleted file mode 100644 index bb18b6b23b..0000000000 --- a/runtime/constants_x86.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_CONSTANTS_X86_H_ -#define ART_RUNTIME_CONSTANTS_X86_H_ - -#include - -#include "base/logging.h" -#include "base/macros.h" -#include "globals.h" - -namespace art { -namespace x86 { - -enum Register { - EAX = 0, - ECX = 1, - EDX = 2, - EBX = 3, - ESP = 4, - EBP = 5, - ESI = 6, - EDI = 7, - kNumberOfCpuRegisters = 8, - kFirstByteUnsafeRegister = 4, - kNoRegister = -1 // Signals an illegal register. -}; -std::ostream& operator<<(std::ostream& os, const Register& rhs); - -enum ByteRegister { - AL = 0, - CL = 1, - DL = 2, - BL = 3, - AH = 4, - CH = 5, - DH = 6, - BH = 7, - kNoByteRegister = -1 // Signals an illegal register. -}; - - -enum XmmRegister { - XMM0 = 0, - XMM1 = 1, - XMM2 = 2, - XMM3 = 3, - XMM4 = 4, - XMM5 = 5, - XMM6 = 6, - XMM7 = 7, - kNumberOfXmmRegisters = 8, - kNoXmmRegister = -1 // Signals an illegal register. 
-}; -std::ostream& operator<<(std::ostream& os, const XmmRegister& reg); - -enum X87Register { - ST0 = 0, - ST1 = 1, - ST2 = 2, - ST3 = 3, - ST4 = 4, - ST5 = 5, - ST6 = 6, - ST7 = 7, - kNumberOfX87Registers = 8, - kNoX87Register = -1 // Signals an illegal register. -}; -std::ostream& operator<<(std::ostream& os, const X87Register& reg); - -enum ScaleFactor { - TIMES_1 = 0, - TIMES_2 = 1, - TIMES_4 = 2, - TIMES_8 = 3 -}; - -enum Condition { - kOverflow = 0, - kNoOverflow = 1, - kBelow = 2, - kAboveEqual = 3, - kEqual = 4, - kNotEqual = 5, - kBelowEqual = 6, - kAbove = 7, - kSign = 8, - kNotSign = 9, - kParityEven = 10, - kParityOdd = 11, - kLess = 12, - kGreaterEqual = 13, - kLessEqual = 14, - kGreater = 15, - - kZero = kEqual, - kNotZero = kNotEqual, - kNegative = kSign, - kPositive = kNotSign -}; - - -class Instr { - public: - static const uint8_t kHltInstruction = 0xF4; - // We prefer not to use the int3 instruction since it conflicts with gdb. - static const uint8_t kBreakPointInstruction = kHltInstruction; - - bool IsBreakPoint() { - return (*reinterpret_cast(this)) == kBreakPointInstruction; - } - - // Instructions are read out of a code stream. The only way to get a - // reference to an instruction is to convert a pointer. There is no way - // to allocate or create instances of class Instr. - // Use the At(pc) function to create references to Instr. 
- static Instr* At(uintptr_t pc) { return reinterpret_cast(pc); } - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); -}; - -} // namespace x86 -} // namespace art - -#endif // ART_RUNTIME_CONSTANTS_X86_H_ diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 7ebd6a3ae0..3591a5097b 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -20,6 +20,7 @@ #include +#include "arch/context.h" #include "class_linker.h" #include "class_linker-inl.h" #include "dex_file-inl.h" @@ -37,7 +38,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/throwable.h" -#include "oat/runtime/context.h" #include "object_utils.h" #include "safe_map.h" #include "scoped_thread_state_change.h" diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h index 6be249c76a..13b0f1c270 100644 --- a/runtime/dex_instruction.h +++ b/runtime/dex_instruction.h @@ -281,9 +281,7 @@ class Instruction { // Returns the opcode field of the instruction. Code Opcode() const { - const uint16_t* insns = reinterpret_cast(this); - int opcode = *insns & 0xFF; - return static_cast(opcode); + return static_cast(Fetch16(0) & 0xFF); } void SetOpcode(Code opcode) { diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc new file mode 100644 index 0000000000..c29784151c --- /dev/null +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -0,0 +1,407 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" + +#include "class_linker-inl.h" +#include "dex_file-inl.h" +#include "gc/accounting/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/proxy.h" +#include "reflection.h" +#include "scoped_thread_state_change.h" +#include "ScopedLocalRef.h" +#include "well_known_classes.h" + +namespace art { + +// Helper function to allocate array for FILLED_NEW_ARRAY. +mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* referrer, + int32_t component_count, Thread* self, + bool access_check) { + if (UNLIKELY(component_count < 0)) { + ThrowNegativeArraySizeException(component_count); + return NULL; // Failure + } + mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx); + if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve + klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer); + if (klass == NULL) { // Error + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + } + if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) { + if (klass->IsPrimitiveLong() || klass->IsPrimitiveDouble()) { + ThrowRuntimeException("Bad filled array request for type %s", + PrettyDescriptor(klass).c_str()); + } else { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;", + "Found type %s; filled-new-array not implemented for anything but \'int\'", + PrettyDescriptor(klass).c_str()); + } + return NULL; // Failure + } else { + if (access_check) { + mirror::Class* referrer_klass = referrer->GetDeclaringClass(); + if 
(UNLIKELY(!referrer_klass->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer_klass, klass); + return NULL; // Failure + } + } + DCHECK(klass->IsArrayClass()) << PrettyClass(klass); + return mirror::Array::Alloc(self, klass, component_count); + } +} + +mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size, + bool access_check) { + bool is_primitive; + bool is_set; + bool is_static; + switch (type) { + case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; + case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; + case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; + case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; + case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; + case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; + case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; + case StaticPrimitiveWrite: // Keep GCC happy by having a default handler, fall-through. + default: is_primitive = true; is_set = true; is_static = true; break; + } + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + mirror::Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); + if (UNLIKELY(resolved_field == NULL)) { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind. + return NULL; // Failure. 
+ } + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + if (access_check) { + if (UNLIKELY(resolved_field->IsStatic() != is_static)) { + ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer); + return NULL; + } + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(fields_class) || + !referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()))) { + // The referring class can't access the resolved field, this may occur as a result of a + // protected field being made public by a sub-class. Resort to the dex file to determine + // the correct class for the access check. + const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); + fields_class = class_linker->ResolveType(dex_file, + dex_file.GetFieldId(field_idx).class_idx_, + referring_class); + if (UNLIKELY(!referring_class->CanAccess(fields_class))) { + ThrowIllegalAccessErrorClass(referring_class, fields_class); + return NULL; // failure + } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()))) { + ThrowIllegalAccessErrorField(referring_class, resolved_field); + return NULL; // failure + } + } + if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) { + ThrowIllegalAccessErrorFinalField(referrer, resolved_field); + return NULL; // failure + } else { + FieldHelper fh(resolved_field); + if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || + fh.FieldSize() != expected_size)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", + "Attempted read of %zd-bit %s on field '%s'", + expected_size * (32 / sizeof(int32_t)), + is_primitive ? 
"primitive" : "non-primitive", + PrettyField(resolved_field, true).c_str()); + return NULL; // failure + } + } + } + if (!is_static) { + // instance fields must be being accessed on an initialized class + return resolved_field; + } else { + // If the class is initialized we're done. + if (fields_class->IsInitialized()) { + return resolved_field; + } else if (Runtime::Current()->GetClassLinker()->EnsureInitialized(fields_class, true, true)) { + // Otherwise let's ensure the class is initialized before resolving the field. + return resolved_field; + } else { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind + return NULL; // failure + } + } +} + +// Slow path method resolution +mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* self, bool access_check, InvokeType type) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + bool is_direct = type == kStatic || type == kDirect; + mirror::AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); + if (UNLIKELY(resolved_method == NULL)) { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind. + return NULL; // Failure. + } else if (UNLIKELY(this_object == NULL && type != kStatic)) { + // Maintain interpreter-like semantics where NullPointerException is thrown + // after potential NoSuchMethodError from class linker. + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(referrer == throw_location.GetMethod()); + ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type); + return NULL; // Failure. 
+ } else { + if (!access_check) { + if (is_direct) { + return resolved_method; + } else if (type == kInterface) { + mirror::AbstractMethod* interface_method = + this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + if (UNLIKELY(interface_method == NULL)) { + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, + referrer); + return NULL; // Failure. + } else { + return interface_method; + } + } else { + mirror::ObjectArray* vtable; + uint16_t vtable_index = resolved_method->GetMethodIndex(); + if (type == kSuper) { + vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable(); + } else { + vtable = this_object->GetClass()->GetVTable(); + } + // TODO: eliminate bounds check? + return vtable->Get(vtable_index); + } + } else { + // Incompatible class change should have been handled in resolve method. + if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) { + ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method, + referrer); + return NULL; // Failure. + } + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(methods_class) || + !referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + // The referring class can't access the resolved method, this may occur as a result of a + // protected method being made public by implementing an interface that re-declares the + // method public. 
Resort to the dex file to determine the correct class for the access check + const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); + methods_class = class_linker->ResolveType(dex_file, + dex_file.GetMethodId(method_idx).class_idx_, + referring_class); + if (UNLIKELY(!referring_class->CanAccess(methods_class))) { + ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, + referrer, resolved_method, type); + return NULL; // Failure. + } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + ThrowIllegalAccessErrorMethod(referring_class, resolved_method); + return NULL; // Failure. + } + } + if (is_direct) { + return resolved_method; + } else if (type == kInterface) { + mirror::AbstractMethod* interface_method = + this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + if (UNLIKELY(interface_method == NULL)) { + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, + referrer); + return NULL; // Failure. + } else { + return interface_method; + } + } else { + mirror::ObjectArray* vtable; + uint16_t vtable_index = resolved_method->GetMethodIndex(); + if (type == kSuper) { + mirror::Class* super_class = referring_class->GetSuperClass(); + if (LIKELY(super_class != NULL)) { + vtable = referring_class->GetSuperClass()->GetVTable(); + } else { + vtable = NULL; + } + } else { + vtable = this_object->GetClass()->GetVTable(); + } + if (LIKELY(vtable != NULL && + vtable_index < static_cast(vtable->GetLength()))) { + return vtable->GetWithoutChecks(vtable_index); + } else { + // Behavior to agree with that of the verifier. + MethodHelper mh(resolved_method); + ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(), + mh.GetSignature()); + return NULL; // Failure. 
+ } + } + } + } +} + +void ThrowStackOverflowError(Thread* self) { + CHECK(!self->IsHandlingStackOverflow()) << "Recursive stack overflow."; + + if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { + // Remove extra entry pushed onto second stack during method tracing. + Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false); + } + + self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. + JNIEnvExt* env = self->GetJniEnv(); + std::string msg("stack size "); + msg += PrettySize(self->GetStackSize()); + // Use low-level JNI routine and pre-baked error class to avoid class linking operations that + // would consume more stack. + int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError, + msg.c_str(), NULL); + if (rc != JNI_OK) { + // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME + // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError + // instead. + LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed."; + CHECK(self->IsExceptionPending()); + } + self->ResetDefaultStackEnd(); // Return to default stack size. +} + +JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, + jobject rcvr_jobj, jobject interface_method_jobj, + std::vector& args) { + DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy)); + + // Build argument array possibly triggering GC. 
+ soa.Self()->AssertThreadSuspensionIsAllowable(); + jobjectArray args_jobj = NULL; + const JValue zero; + if (args.size() > 0) { + args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL); + if (args_jobj == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + return zero; + } + for (size_t i = 0; i < args.size(); ++i) { + if (shorty[i + 1] == 'L') { + jobject val = args.at(i).l; + soa.Env()->SetObjectArrayElement(args_jobj, i, val); + } else { + JValue jv; + jv.SetJ(args.at(i).j); + mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); + if (val == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + return zero; + } + soa.Decode* >(args_jobj)->Set(i, val); + } + } + } + + // Call InvocationHandler.invoke(Object proxy, Method method, Object[] args). + jobject inv_hand = soa.Env()->GetObjectField(rcvr_jobj, + WellKnownClasses::java_lang_reflect_Proxy_h); + jvalue invocation_args[3]; + invocation_args[0].l = rcvr_jobj; + invocation_args[1].l = interface_method_jobj; + invocation_args[2].l = args_jobj; + jobject result = + soa.Env()->CallObjectMethodA(inv_hand, + WellKnownClasses::java_lang_reflect_InvocationHandler_invoke, + invocation_args); + + // Unbox result and handle error conditions. + if (LIKELY(!soa.Self()->IsExceptionPending())) { + if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) { + // Do nothing. + return zero; + } else { + mirror::Object* result_ref = soa.Decode(result); + mirror::Object* rcvr = soa.Decode(rcvr_jobj); + mirror::AbstractMethod* interface_method = + soa.Decode(interface_method_jobj); + mirror::Class* result_type = MethodHelper(interface_method).GetReturnType(); + mirror::AbstractMethod* proxy_method; + if (interface_method->GetDeclaringClass()->IsInterface()) { + proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); + } else { + // Proxy dispatch to a method defined in Object. 
+ DCHECK(interface_method->GetDeclaringClass()->IsObjectClass()); + proxy_method = interface_method; + } + ThrowLocation throw_location(rcvr, proxy_method, -1); + JValue result_unboxed; + if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) { + DCHECK(soa.Self()->IsExceptionPending()); + return zero; + } + return result_unboxed; + } + } else { + // In the case of checked exceptions that aren't declared, the exception must be wrapped by + // a UndeclaredThrowableException. + mirror::Throwable* exception = soa.Self()->GetException(NULL); + if (exception->IsCheckedException()) { + mirror::Object* rcvr = soa.Decode(rcvr_jobj); + mirror::SynthesizedProxyClass* proxy_class = + down_cast(rcvr->GetClass()); + mirror::AbstractMethod* interface_method = + soa.Decode(interface_method_jobj); + mirror::AbstractMethod* proxy_method = + rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); + int throws_index = -1; + size_t num_virt_methods = proxy_class->NumVirtualMethods(); + for (size_t i = 0; i < num_virt_methods; i++) { + if (proxy_class->GetVirtualMethod(i) == proxy_method) { + throws_index = i; + break; + } + } + CHECK_NE(throws_index, -1); + mirror::ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); + mirror::Class* exception_class = exception->GetClass(); + bool declares_exception = false; + for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { + mirror::Class* declared_exception = declared_exceptions->Get(i); + declares_exception = declared_exception->IsAssignableFrom(exception_class); + } + if (!declares_exception) { + ThrowLocation throw_location(rcvr, proxy_method, -1); + soa.Self()->ThrowNewWrappedException(throw_location, + "Ljava/lang/reflect/UndeclaredThrowableException;", + NULL); + } + } + return zero; + } +} + +} // namespace art diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h new file mode 100644 index 
0000000000..3f28b5e41f --- /dev/null +++ b/runtime/entrypoints/entrypoint_utils.h @@ -0,0 +1,412 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ +#define ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ + +#include "class_linker.h" +#include "common_throws.h" +#include "dex_file.h" +#include "indirect_reference_table.h" +#include "invoke_type.h" +#include "jni_internal.h" +#include "mirror/abstract_method.h" +#include "mirror/array.h" +#include "mirror/class-inl.h" +#include "mirror/throwable.h" +#include "object_utils.h" +#include "thread.h" + +extern "C" void art_interpreter_invoke_handler(); +extern "C" void art_jni_dlsym_lookup_stub(); +extern "C" void art_portable_abstract_method_error_stub(); +extern "C" void art_portable_proxy_invoke_handler(); +extern "C" void art_quick_abstract_method_error_stub(); +extern "C" void art_quick_deoptimize(); +extern "C" void art_quick_instrumentation_entry_from_code(void*); +extern "C" void art_quick_instrumentation_exit_from_code(); +extern "C" void art_quick_interpreter_entry(void*); +extern "C" void art_quick_proxy_invoke_handler(); +extern "C" void art_work_around_app_jni_bugs(); + +namespace art { +namespace mirror { +class Class; +class Field; +class Object; +} + +// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it +// cannot be resolved, throw an error. 
If it can, use it to create an instance. +// When verification/compiler hasn't been able to verify access, optionally perform an access +// check. +static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + Thread* self, + bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + Runtime* runtime = Runtime::Current(); + if (UNLIKELY(klass == NULL)) { + klass = runtime->GetClassLinker()->ResolveType(type_idx, method); + if (klass == NULL) { + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + } + if (access_check) { + if (UNLIKELY(!klass->IsInstantiable())) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;", + PrettyDescriptor(klass).c_str()); + return NULL; // Failure + } + mirror::Class* referrer = method->GetDeclaringClass(); + if (UNLIKELY(!referrer->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer, klass); + return NULL; // Failure + } + } + if (!klass->IsInitialized() && + !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) { + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + return klass->AllocObject(self); +} + +// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If +// it cannot be resolved, throw an error. If it can, use it to create an array. +// When verification/compiler hasn't been able to verify access, optionally perform an access +// check. 
+static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (UNLIKELY(component_count < 0)) { + ThrowNegativeArraySizeException(component_count); + return NULL; // Failure + } + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve + klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); + if (klass == NULL) { // Error + DCHECK(Thread::Current()->IsExceptionPending()); + return NULL; // Failure + } + CHECK(klass->IsArrayClass()) << PrettyClass(klass); + } + if (access_check) { + mirror::Class* referrer = method->GetDeclaringClass(); + if (UNLIKELY(!referrer->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer, klass); + return NULL; // Failure + } + } + return mirror::Array::Alloc(self, klass, component_count); +} + +extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Type of find field operation for fast and slow case. +enum FindFieldType { + InstanceObjectRead, + InstanceObjectWrite, + InstancePrimitiveRead, + InstancePrimitiveWrite, + StaticObjectRead, + StaticObjectWrite, + StaticPrimitiveRead, + StaticPrimitiveWrite, +}; + +// Slow field find that can initialize classes and may throw exceptions. +extern mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size, + bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Fast path field resolution that can't initialize classes or throw exceptions. 
+static inline mirror::Field* FindFieldFast(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + FindFieldType type, size_t expected_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* resolved_field = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); + if (UNLIKELY(resolved_field == NULL)) { + return NULL; + } + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + // Check class is initiliazed or initializing. + if (UNLIKELY(!fields_class->IsInitializing())) { + return NULL; + } + // Check for incompatible class change. + bool is_primitive; + bool is_set; + bool is_static; + switch (type) { + case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; + case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; + case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; + case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; + case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; + case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; + case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; + case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break; + default: + LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. + is_primitive = true; + is_set = true; + is_static = true; + break; + } + if (UNLIKELY(resolved_field->IsStatic() != is_static)) { + // Incompatible class change. + return NULL; + } + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(fields_class) || + !referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()) || + (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) { + // Illegal access. 
+ return NULL; + } + FieldHelper fh(resolved_field); + if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || + fh.FieldSize() != expected_size)) { + return NULL; + } + return resolved_field; +} + +// Fast path method resolution that can't throw exceptions. +static inline mirror::AbstractMethod* FindMethodFast(uint32_t method_idx, + mirror::Object* this_object, + const mirror::AbstractMethod* referrer, + bool access_check, InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool is_direct = type == kStatic || type == kDirect; + if (UNLIKELY(this_object == NULL && !is_direct)) { + return NULL; + } + mirror::AbstractMethod* resolved_method = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); + if (UNLIKELY(resolved_method == NULL)) { + return NULL; + } + if (access_check) { + // Check for incompatible class change errors and access. + bool icce = resolved_method->CheckIncompatibleClassChange(type); + if (UNLIKELY(icce)) { + return NULL; + } + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(methods_class) || + !referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + // Potential illegal access, may need to refine the method's class. + return NULL; + } + } + if (type == kInterface) { // Most common form of slow path dispatch. 
+ return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + } else if (is_direct) { + return resolved_method; + } else if (type == kSuper) { + return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()-> + Get(resolved_method->GetMethodIndex()); + } else { + DCHECK(type == kVirtual); + return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex()); + } +} + +extern mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* self, bool access_check, InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, + const mirror::AbstractMethod* referrer, + Thread* self, bool can_run_clinit, + bool verify_access) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + mirror::Class* klass = class_linker->ResolveType(type_idx, referrer); + if (UNLIKELY(klass == NULL)) { + CHECK(self->IsExceptionPending()); + return NULL; // Failure - Indicate to caller to deliver exception + } + // Perform access check if necessary. + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referring_class, klass); + return NULL; // Failure - Indicate to caller to deliver exception + } + // If we're just implementing const-class, we shouldn't call . + if (!can_run_clinit) { + return klass; + } + // If we are the of this class, just return our storage. + // + // Do not set the DexCache InitializedStaticStorage, since that implies has finished + // running. 
+ if (klass == referring_class && MethodHelper(referrer).IsClassInitializer()) { + return klass; + } + if (!class_linker->EnsureInitialized(klass, true, true)) { + CHECK(self->IsExceptionPending()); + return NULL; // Failure - Indicate to caller to deliver exception + } + referrer->GetDexCacheInitializedStaticStorage()->Set(type_idx, klass); + return klass; +} + +extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +static inline mirror::String* ResolveStringFromCode(const mirror::AbstractMethod* referrer, + uint32_t string_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + return class_linker->ResolveString(string_idx, referrer); +} + +static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + UNLOCK_FUNCTION(monitor_lock_) { + // Save any pending exception over monitor exit call. + mirror::Throwable* saved_exception = NULL; + ThrowLocation saved_throw_location; + if (UNLIKELY(self->IsExceptionPending())) { + saved_exception = self->GetException(&saved_throw_location); + self->ClearException(); + } + // Decode locked object and unlock, before popping local references. + self->DecodeJObject(locked)->MonitorExit(self); + if (UNLIKELY(self->IsExceptionPending())) { + LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" + << saved_exception->Dump() + << "\nEncountered second exception during implicit MonitorExit:\n" + << self->GetException(NULL)->Dump(); + } + // Restore pending exception. 
+ if (saved_exception != NULL) { + self->SetException(saved_throw_location, saved_exception); + } +} + +static inline void CheckReferenceResult(mirror::Object* o, Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (o == NULL) { + return; + } + mirror::AbstractMethod* m = self->GetCurrentMethod(NULL); + if (o == kInvalidIndirectRefObject) { + JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); + } + // Make sure that the result is an instance of the type this method was expected to return. + mirror::Class* return_type = MethodHelper(m).GetReturnType(); + + if (!o->InstanceOf(return_type)) { + JniAbortF(NULL, "attempt to return an instance of %s from %s", + PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str()); + } +} + +static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + for (;;) { + if (thread->ReadFlag(kCheckpointRequest)) { + thread->RunCheckpointFunction(); + thread->AtomicClearFlag(kCheckpointRequest); + } else if (thread->ReadFlag(kSuspendRequest)) { + thread->FullSuspendCheck(); + } else { + break; + } + } +} + +JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, + jobject rcvr_jobj, jobject interface_method_jobj, + std::vector& args) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Entry point for deoptimization. +static inline uintptr_t GetDeoptimizationEntryPoint() { + return reinterpret_cast(art_quick_deoptimize); +} + +// Return address of instrumentation stub. +static inline void* GetInstrumentationEntryPoint() { + return reinterpret_cast(art_quick_instrumentation_entry_from_code); +} + +// The return_pc of instrumentation exit stub. +static inline uintptr_t GetInstrumentationExitPc() { + return reinterpret_cast(art_quick_instrumentation_exit_from_code); +} + +// Return address of interpreter stub. 
+static inline void* GetInterpreterEntryPoint() { + return reinterpret_cast(art_quick_interpreter_entry); +} + +static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) { + return class_linker->GetPortableResolutionTrampoline(); +} + +static inline const void* GetQuickResolutionTrampoline(ClassLinker* class_linker) { + return class_linker->GetQuickResolutionTrampoline(); +} + +// Return address of resolution trampoline stub for defined compiler. +static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) { +#if defined(ART_USE_PORTABLE_COMPILER) + return GetPortableResolutionTrampoline(class_linker); +#else + return GetQuickResolutionTrampoline(class_linker); +#endif +} + +static inline void* GetPortableAbstractMethodErrorStub() { + return reinterpret_cast(art_portable_abstract_method_error_stub); +} + +static inline void* GetQuickAbstractMethodErrorStub() { + return reinterpret_cast(art_quick_abstract_method_error_stub); +} + +// Return address of abstract method error stub for defined compiler. +static inline void* GetAbstractMethodErrorStub() { +#if defined(ART_USE_PORTABLE_COMPILER) + return GetPortableAbstractMethodErrorStub(); +#else + return GetQuickAbstractMethodErrorStub(); +#endif +} + +static inline void* GetJniDlsymLookupStub() { + return reinterpret_cast(art_jni_dlsym_lookup_stub); +} + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc new file mode 100644 index 0000000000..98f7b1283c --- /dev/null +++ b/runtime/entrypoints/jni/jni_entrypoints.cc @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/logging.h" +#include "mirror/abstract_method.h" +#include "scoped_thread_state_change.h" +#include "thread.h" + +namespace art { + +// Used by the JNI dlsym stub to find the native method to invoke if none is registered. +extern "C" void* artFindNativeMethod(Thread* self) { + Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. + DCHECK(Thread::Current() == self); + ScopedObjectAccess soa(self); + + mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); + DCHECK(method != NULL); + + // Lookup symbol address for method, on failure we'll return NULL with an + // exception set, otherwise we return the address of the method we found. + void* native_code = soa.Vm()->FindCodeForNativeMethod(method); + if (native_code == NULL) { + DCHECK(self->IsExceptionPending()); + return NULL; + } else { + // Register so that future calls don't come here + method->RegisterNative(self, native_code); + return native_code; + } +} + +} // namespace art diff --git a/runtime/entrypoints/math_entrypoints.cc b/runtime/entrypoints/math_entrypoints.cc new file mode 100644 index 0000000000..31d13c8cd5 --- /dev/null +++ b/runtime/entrypoints/math_entrypoints.cc @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+namespace art {
+
+extern "C" double art_l2d(int64_t l) {
+  return static_cast<double>(l);
+}
+
+extern "C" float art_l2f(int64_t l) {
+  return static_cast<float>(l);
+}
+
+/*
+ * Float/double conversion requires clamping to min and max of integer form. If
+ * target doesn't support this normally, use these.
+ */
+extern "C" int64_t art_d2l(double d) {
+  static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
+  static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
+  if (d >= kMaxLong) {
+    return static_cast<int64_t>(0x7fffffffffffffffULL);
+  } else if (d <= kMinLong) {
+    return static_cast<int64_t>(0x8000000000000000ULL);
+  } else if (d != d) { // NaN case
+    return 0;
+  } else {
+    return static_cast<int64_t>(d);
+  }
+}
+
+extern "C" int64_t art_f2l(float f) {
+  static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
+  static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
+  if (f >= kMaxLong) {
+    return static_cast<int64_t>(0x7fffffffffffffffULL);
+  } else if (f <= kMinLong) {
+    return static_cast<int64_t>(0x8000000000000000ULL);
+  } else if (f != f) { // NaN case
+    return 0;
+  } else {
+    return static_cast<int64_t>(f);
+  }
+}
+
+extern "C" int32_t art_d2i(double d) {
+  static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
+  static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
+  if (d >= kMaxInt) {
+    return static_cast<int32_t>(0x7fffffffUL);
+  } else if (d <= kMinInt) {
+    return static_cast<int32_t>(0x80000000UL);
+  } else if (d != d) { // NaN case
+    return 0;
+  }
+  else {
+    return static_cast<int32_t>(d);
+  }
+}
+
+extern "C" int32_t art_f2i(float f) {
+  static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
+  static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
+  if (f >= kMaxInt) {
+    return static_cast<int32_t>(0x7fffffffUL);
+  } else if (f <= kMinInt) {
+    return static_cast<int32_t>(0x80000000UL);
+  } else if (f != f) { // NaN case
+    return 0;
+  } else {
+    return static_cast<int32_t>(f);
+  }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/math_entrypoints.h b/runtime/entrypoints/math_entrypoints.h
new file mode 100644
index 0000000000..717c7349bd
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+
+#include <stdint.h>
+
+extern "C" double art_l2d(int64_t l);
+extern "C" float art_l2f(int64_t l);
+extern "C" int64_t art_d2l(double d);
+extern "C" int32_t art_d2i(double d);
+extern "C" int64_t art_f2l(float f);
+extern "C" int32_t art_f2i(float f);
+
+#endif // ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/math_entrypoints_test.cc b/runtime/entrypoints/math_entrypoints_test.cc
new file mode 100644
index 0000000000..ca8b931309
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints_test.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+#include "common_test.h"
+
+namespace art {
+
+class MathEntrypointsTest : public CommonTest {};
+
+TEST_F(MathEntrypointsTest, DoubleToLong) {
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
+  EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_d2l(-1.85e19));
+  EXPECT_EQ(0LL, art_d2l(0));
+  EXPECT_EQ(1LL, art_d2l(1.0));
+  EXPECT_EQ(10LL, art_d2l(10.0));
+  EXPECT_EQ(100LL, art_d2l(100.0));
+  EXPECT_EQ(-1LL, art_d2l(-1.0));
+  EXPECT_EQ(-10LL, art_d2l(-10.0));
+  EXPECT_EQ(-100LL, art_d2l(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, FloatToLong) {
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_f2l(1.85e19));
+  EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_f2l(-1.85e19));
+  EXPECT_EQ(0LL, art_f2l(0));
+  EXPECT_EQ(1LL, art_f2l(1.0));
+  EXPECT_EQ(10LL, art_f2l(10.0));
+  EXPECT_EQ(100LL, art_f2l(100.0));
+  EXPECT_EQ(-1LL, art_f2l(-1.0));
+  EXPECT_EQ(-10LL, art_f2l(-10.0));
+  EXPECT_EQ(-100LL, art_f2l(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, DoubleToInt) {
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_d2i(4.3e9));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_d2i(-4.3e9));
+  EXPECT_EQ(0L, art_d2i(0));
+  EXPECT_EQ(1L, art_d2i(1.0));
+  EXPECT_EQ(10L, art_d2i(10.0));
+  EXPECT_EQ(100L, art_d2i(100.0));
+  EXPECT_EQ(-1L, art_d2i(-1.0));
+  EXPECT_EQ(-10L, art_d2i(-10.0));
+  EXPECT_EQ(-100L, art_d2i(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, FloatToInt) {
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_f2i(4.3e9));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_f2i(-4.3e9));
+  EXPECT_EQ(0L, art_f2i(0));
+  EXPECT_EQ(1L, art_f2i(1.0));
+  EXPECT_EQ(10L, art_f2i(10.0));
+  EXPECT_EQ(100L, art_f2i(100.0));
+  EXPECT_EQ(-1L, art_f2i(-1.0));
+  EXPECT_EQ(-10L, art_f2i(-10.0));
+  EXPECT_EQ(-100L, art_f2i(-100.0));
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
new file mode 100644
index 0000000000..286926909c
--- /dev/null
+++
b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Object* art_portable_alloc_object_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocObjectFromCode(type_idx, referrer, thread, false); +} + +extern "C" mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocObjectFromCode(type_idx, referrer, thread, true); +} + +extern "C" mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocArrayFromCode(type_idx, referrer, length, self, false); +} + +extern "C" mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocArrayFromCode(type_idx, referrer, length, self, true); +} + +extern "C" mirror::Object* 
art_portable_check_and_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); +} + +extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_argument_visitor.h b/runtime/entrypoints/portable/portable_argument_visitor.h new file mode 100644 index 0000000000..f268baf790 --- /dev/null +++ b/runtime/entrypoints/portable/portable_argument_visitor.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ +#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ + +#include "object_utils.h" + +namespace art { + +// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. +class PortableArgumentVisitor { + public: +// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. +// Size of Runtime::kRefAndArgs callee save frame. 
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define PORTABLE_STACK_ARG_SKIP 0
+#elif defined(__mips__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define PORTABLE_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define PORTABLE_STACK_ARG_SKIP 4
+#else
+#error "Unsupported architecture"
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define PORTABLE_STACK_ARG_SKIP 0
+#endif
+
+  PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+    caller_mh_(caller_mh),
+    args_in_regs_(ComputeArgsInRegs(caller_mh)),
+    num_params_(caller_mh.NumArgs()),
+    reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+    stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+                + PORTABLE_STACK_ARG_SKIP),
+    cur_args_(reg_args_),
+    cur_arg_index_(0),
+    param_index_(0) {
+  }
+
+  virtual ~PortableArgumentVisitor() {}
+
+  virtual void Visit() = 0;
+
+  bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return caller_mh_.IsParamAReference(param_index_);
+  }
+
+  bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return caller_mh_.IsParamALongOrDouble(param_index_);
+  }
+
+  Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return caller_mh_.GetParamPrimitiveType(param_index_);
+  }
+
+  byte* GetParamAddress() const {
+    return cur_args_ + (cur_arg_index_ * kPointerSize);
+  }
+
+  void VisitArguments()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { +#if (defined(__arm__) || defined(__mips__)) + if (IsParamALongOrDouble() && cur_arg_index_ == 2) { + break; + } +#endif + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + cur_args_ = stack_args_; + cur_arg_index_ = 0; + while (param_index_ < num_params_) { +#if (defined(__arm__) || defined(__mips__)) + if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) { + cur_arg_index_++; + } +#endif + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + } + + private: + static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if (defined(__i386__)) + return 0; +#else + size_t args_in_regs = 0; + size_t num_params = mh.NumArgs(); + for (size_t i = 0; i < num_params; i++) { + args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1); + if (args_in_regs > 3) { + args_in_regs = 3; + break; + } + } + return args_in_regs; +#endif + } + MethodHelper& caller_mh_; + const size_t args_in_regs_; + const size_t num_params_; + byte* const reg_args_; + byte* const stack_args_; + byte* cur_args_; + size_t cur_arg_index_; + size_t param_index_; +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ diff --git a/runtime/entrypoints/portable/portable_cast_entrypoints.cc b/runtime/entrypoints/portable/portable_cast_entrypoints.cc new file mode 100644 index 0000000000..d343c5dc1f --- /dev/null +++ b/runtime/entrypoints/portable/portable_cast_entrypoints.cc @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_throws.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" int32_t art_portable_is_assignable_from_code(const mirror::Class* dest_type, + const mirror::Class* src_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(dest_type != NULL); + DCHECK(src_type != NULL); + return dest_type->IsAssignableFrom(src_type) ? 1 : 0; +} + +extern "C" void art_portable_check_cast_from_code(const mirror::Class* dest_type, + const mirror::Class* src_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); + DCHECK(src_type->IsClass()) << PrettyClass(src_type); + if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { + ThrowClassCastException(dest_type, src_type); + } +} + +extern "C" void art_portable_check_put_array_element_from_code(const mirror::Object* element, + const mirror::Object* array) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (element == NULL) { + return; + } + DCHECK(array != NULL); + mirror::Class* array_class = array->GetClass(); + DCHECK(array_class != NULL); + mirror::Class* component_type = array_class->GetComponentType(); + mirror::Class* element_class = element->GetClass(); + if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { + ThrowArrayStoreException(element_class, array_class); + } +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc new file mode 100644 index 
0000000000..bdab587797 --- /dev/null +++ b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "gc/accounting/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); +} + +extern "C" mirror::Object* art_portable_initialize_type_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); +} + +extern "C" mirror::Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called when caller isn't guaranteed to have access to a type and the dex cache may be + // unpopulated + return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); +} + +extern "C" mirror::Object* art_portable_resolve_string_from_code(mirror::AbstractMethod* referrer, + uint32_t string_idx) + 
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  return ResolveStringFromCode(referrer, string_idx);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h
new file mode 100644
index 0000000000..a229c76dbd
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_entrypoints.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
+
+#include "dex_file-inl.h"
+#include "runtime.h"
+
+namespace art {
+namespace mirror {
+  class AbstractMethod;
+  class Object;
+} // namespace mirror
+class Thread;
+
+#define PORTABLE_ENTRYPOINT_OFFSET(x) \
+  (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \
+  static_cast<uintptr_t>(OFFSETOF_MEMBER(PortableEntryPoints, x)))
+
+// Pointers to functions that are called by code generated by compiler's adhering to the portable
+// compiler ABI.
+struct PACKED(4) PortableEntryPoints { + // Invocation + const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, + mirror::AbstractMethod**, Thread*); +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc new file mode 100644 index 0000000000..aa0f03ce8b --- /dev/null +++ b/runtime/entrypoints/portable/portable_field_entrypoints.cc @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" int32_t art_portable_set32_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + int32_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, + referrer, + StaticPrimitiveWrite, + sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, + referrer, + Thread::Current(), + StaticPrimitiveWrite, + sizeof(uint32_t), + true); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set64_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + int64_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, + referrer, + Thread::Current(), + StaticPrimitiveWrite, + sizeof(uint64_t), + true); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set_obj_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + field->SetObj(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticObjectWrite, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + 
field->SetObj(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_get32_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticPrimitiveRead, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" int64_t art_portable_get64_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticPrimitiveRead, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" mirror::Object* art_portable_get_obj_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" int32_t art_portable_set32_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, uint32_t new_value) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + field->Set32(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveWrite, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + field->Set32(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set64_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, int64_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + field->Set64(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveWrite, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + field->Set64(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, + mirror::Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + field->SetObj(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstanceObjectWrite, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + field->SetObj(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_get32_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + return 
field->Get32(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveRead, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + return field->Get32(obj); + } + return 0; +} + +extern "C" int64_t art_portable_get64_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + return field->Get64(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveRead, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + return field->Get64(obj); + } + return 0; +} + +extern "C" mirror::Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + return field->GetObj(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstanceObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + return field->GetObj(obj); + } + return 0; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc new file mode 100644 index 0000000000..771608b604 --- /dev/null +++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" void art_portable_fill_array_data_from_code(mirror::AbstractMethod* method,
+                                                       uint32_t dex_pc,
+                                                       mirror::Array* array,
+                                                       uint32_t payload_offset)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const DexFile::CodeItem* code_item = MethodHelper(method).GetCodeItem();
+  const Instruction::ArrayDataPayload* payload =
+      reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
+  DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
+  if (UNLIKELY(array == NULL)) {
+    ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+    return; // Error
+  }
+  DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+  if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+    Thread* self = Thread::Current();
+    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+    self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+                             "failed FILL_ARRAY_DATA; length=%d, index=%d",
+                             array->GetLength(), payload->element_count - 1);
+    return; // Error
+  }
+  uint32_t size_in_bytes = payload->element_count * payload->element_width;
+  memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
new file mode 100644
index
0000000000..5911ba3d8b --- /dev/null +++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + bool access_check, + InvokeType type, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* method = FindMethodFast(method_idx, + this_object, + caller_method, + access_check, + type); + if (UNLIKELY(method == NULL)) { + method = FindMethodFromCode(method_idx, this_object, caller_method, + thread, access_check, type); + if (UNLIKELY(method == NULL)) { + CHECK(thread->IsExceptionPending()); + return 0; // failure + } + } + DCHECK(!thread->IsExceptionPending()); + const void* code = method->GetEntryPointFromCompiledCode(); + + // When we return, the caller will branch to this address, so it had better not be 0! 
+ if (UNLIKELY(code == NULL)) { + MethodHelper mh(method); + LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) + << " location: " << mh.GetDexFile().GetLocation(); + } + return method; +} + +extern "C" mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); +} + +extern "C" mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); +} + +extern "C" mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); +} + +extern "C" mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); +} + +extern "C" mirror::Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); +} + +extern "C" mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx, + mirror::Object* this_object, + 
mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_jni_entrypoints.cc b/runtime/entrypoints/portable/portable_jni_entrypoints.cc new file mode 100644 index 0000000000..8df16ae931 --- /dev/null +++ b/runtime/entrypoints/portable/portable_jni_entrypoints.cc @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "thread-inl.h" + +namespace art { + +// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. 
+extern "C" uint32_t art_portable_jni_method_start(Thread* self) + UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) { + JNIEnvExt* env = self->GetJniEnv(); + uint32_t saved_local_ref_cookie = env->local_ref_cookie; + env->local_ref_cookie = env->locals.GetSegmentState(); + self->TransitionFromRunnableToSuspended(kNative); + return saved_local_ref_cookie; +} + +extern "C" uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) { + self->DecodeJObject(to_lock)->MonitorEnter(self); + return art_portable_jni_method_start(self); +} + +static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { + JNIEnvExt* env = self->GetJniEnv(); + env->locals.SetSegmentState(env->local_ref_cookie); + env->local_ref_cookie = saved_local_ref_cookie; +} + +extern "C" void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + PopLocalReferences(saved_local_ref_cookie, self); +} + + +extern "C" void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, + jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); +} + +extern "C" mirror::Object* art_portable_jni_method_end_with_reference(jobject result, + uint32_t saved_local_ref_cookie, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. 
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +extern "C" mirror::Object* art_portable_jni_method_end_with_reference_synchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + mirror::Object* o = self->DecodeJObject(result); + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. + if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_lock_entrypoints.cc b/runtime/entrypoints/portable/portable_lock_entrypoints.cc new file mode 100644 index 0000000000..44d3da9897 --- /dev/null +++ b/runtime/entrypoints/portable/portable_lock_entrypoints.cc @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { + DCHECK(obj != NULL); // Assumed to have been checked before entry. 
+ obj->MonitorEnter(thread); // May block. + DCHECK(thread->HoldsLock(obj)); + // Only possible exception is NPE and is handled before entry. + DCHECK(!thread->IsExceptionPending()); +} + +extern "C" void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread) + UNLOCK_FUNCTION(monitor_lock_) { + DCHECK(obj != NULL); // Assumed to have been checked before entry. + // MonitorExit may throw exception. + obj->MonitorExit(thread); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_proxy_entrypoints.cc b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc new file mode 100644 index 0000000000..3db39cd0bd --- /dev/null +++ b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "portable_argument_visitor.h" +#include "scoped_thread_state_change.h" + +namespace art { + +// Visits arguments on the stack placing them into the args vector, Object* arguments are converted +// to jobjects. 
+class BuildPortableArgumentVisitor : public PortableArgumentVisitor { + public: + BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, + ScopedObjectAccessUnchecked& soa, std::vector& args) : + PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jvalue val; + Primitive::Type type = GetParamPrimitiveType(); + switch (type) { + case Primitive::kPrimNot: { + mirror::Object* obj = *reinterpret_cast(GetParamAddress()); + val.l = soa_.AddLocalReference(obj); + break; + } + case Primitive::kPrimLong: // Fall-through. + case Primitive::kPrimDouble: + val.j = *reinterpret_cast(GetParamAddress()); + break; + case Primitive::kPrimBoolean: // Fall-through. + case Primitive::kPrimByte: // Fall-through. + case Primitive::kPrimChar: // Fall-through. + case Primitive::kPrimShort: // Fall-through. + case Primitive::kPrimInt: // Fall-through. + case Primitive::kPrimFloat: + val.i = *reinterpret_cast(GetParamAddress()); + break; + case Primitive::kPrimVoid: + LOG(FATAL) << "UNREACHABLE"; + val.j = 0; + break; + } + args_.push_back(val); + } + + private: + ScopedObjectAccessUnchecked& soa_; + std::vector& args_; + + DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor); +}; + +// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method +// which is responsible for recording callee save registers. We explicitly place into jobjects the +// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a +// field within the proxy object, which will box the primitive arguments and deal with error cases. +extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method, + mirror::Object* receiver, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 
+ const char* old_cause = + self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); + self->VerifyStack(); + // Start new JNI local reference state. + JNIEnvExt* env = self->GetJniEnv(); + ScopedObjectAccessUnchecked soa(env); + ScopedJniEnvLocalRefState env_state(env); + // Create local ref. copies of proxy method and the receiver. + jobject rcvr_jobj = soa.AddLocalReference(receiver); + + // Placing arguments into args vector and remove the receiver. + MethodHelper proxy_mh(proxy_method); + std::vector args; + BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); + local_ref_visitor.VisitArguments(); + args.erase(args.begin()); + + // Convert proxy method into expected interface method. + mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); + DCHECK(interface_method != NULL); + DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); + jobject interface_method_jobj = soa.AddLocalReference(interface_method); + + // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code + // that performs allocations. + self->EndAssertNoThreadSuspension(old_cause); + JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), + rcvr_jobj, interface_method_jobj, args); + return result.GetJ(); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_stub_entrypoints.cc b/runtime/entrypoints/portable/portable_stub_entrypoints.cc new file mode 100644 index 0000000000..c510c653ba --- /dev/null +++ b/runtime/entrypoints/portable/portable_stub_entrypoints.cc @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex_instruction-inl.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +// Lazily resolve a method for portable. Called by stub code. +extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** called_addr, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t dex_pc; + mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); + + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + InvokeType invoke_type; + bool is_range; + if (called->IsRuntimeMethod()) { + const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); + CHECK_LT(dex_pc, code->insns_size_in_code_units_); + const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); + Instruction::Code instr_code = instr->Opcode(); + switch (instr_code) { + case Instruction::INVOKE_DIRECT: + invoke_type = kDirect; + is_range = false; + break; + case Instruction::INVOKE_DIRECT_RANGE: + invoke_type = kDirect; + is_range = true; + break; + case Instruction::INVOKE_STATIC: + invoke_type = kStatic; + is_range = false; + break; + case Instruction::INVOKE_STATIC_RANGE: + invoke_type = kStatic; + is_range = true; + break; + case Instruction::INVOKE_SUPER: + invoke_type = kSuper; + is_range = false; + break; + case Instruction::INVOKE_SUPER_RANGE: + invoke_type = kSuper; + is_range = true; + break; + case Instruction::INVOKE_VIRTUAL: + invoke_type = kVirtual; + 
is_range = false; + break; + case Instruction::INVOKE_VIRTUAL_RANGE: + invoke_type = kVirtual; + is_range = true; + break; + case Instruction::INVOKE_INTERFACE: + invoke_type = kInterface; + is_range = false; + break; + case Instruction::INVOKE_INTERFACE_RANGE: + invoke_type = kInterface; + is_range = true; + break; + default: + LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); + // Avoid used uninitialized warnings. + invoke_type = kDirect; + is_range = true; + } + uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); + called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); + // Refine called method based on receiver. + if (invoke_type == kVirtual) { + called = receiver->GetClass()->FindVirtualMethodForVirtual(called); + } else if (invoke_type == kInterface) { + called = receiver->GetClass()->FindVirtualMethodForInterface(called); + } + } else { + CHECK(called->IsStatic()) << PrettyMethod(called); + invoke_type = kStatic; + } + const void* code = NULL; + if (LIKELY(!thread->IsExceptionPending())) { + // Incompatible class change should have been handled in resolve method. + CHECK(!called->CheckIncompatibleClassChange(invoke_type)); + // Ensure that the called method's class is initialized. + mirror::Class* called_class = called->GetDeclaringClass(); + linker->EnsureInitialized(called_class, true, true); + if (LIKELY(called_class->IsInitialized())) { + code = called->GetEntryPointFromCompiledCode(); + // TODO: remove this after we solve the link issue. + { // for lazy link. + if (code == NULL) { + code = linker->GetOatCodeFor(called); + } + } + } else if (called_class->IsInitializing()) { + if (invoke_type == kStatic) { + // Class is still initializing, go to oat and grab code (trampoline must be left in place + // until class is initialized to stop races between threads). + code = linker->GetOatCodeFor(called); + } else { + // No trampoline for non-static methods. 
+ code = called->GetEntryPointFromCompiledCode(); + // TODO: remove this after we solve the link issue. + { // for lazy link. + if (code == NULL) { + code = linker->GetOatCodeFor(called); + } + } + } + } else { + DCHECK(called_class->IsErroneous()); + } + } + if (LIKELY(code != NULL)) { + // Expect class to at least be initializing. + DCHECK(called->GetDeclaringClass()->IsInitializing()); + // Don't want infinite recursion. + DCHECK(code != GetResolutionTrampoline(linker)); + // Set up entry into main method + *called_addr = called; + } + return code; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc new file mode 100644 index 0000000000..dac73885a5 --- /dev/null +++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method.h" +#include "mirror/object-inl.h" +#include "verifier/dex_gc_map.h" +#include "stack.h" + +namespace art { + +class ShadowFrameCopyVisitor : public StackVisitor { + public: + explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), + top_frame_(NULL) {} + + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (IsShadowFrame()) { + ShadowFrame* cur_frame = GetCurrentShadowFrame(); + size_t num_regs = cur_frame->NumberOfVRegs(); + mirror::AbstractMethod* method = cur_frame->GetMethod(); + uint32_t dex_pc = cur_frame->GetDexPC(); + ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc); + + const uint8_t* gc_map = method->GetNativeGcMap(); + uint32_t gc_map_length = static_cast((gc_map[0] << 24) | + (gc_map[1] << 16) | + (gc_map[2] << 8) | + (gc_map[3] << 0)); + verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length); + const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc); + for (size_t reg = 0; reg < num_regs; ++reg) { + if (TestBitmap(reg, reg_bitmap)) { + new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg)); + } else { + new_frame->SetVReg(reg, cur_frame->GetVReg(reg)); + } + } + + if (prev_frame_ != NULL) { + prev_frame_->SetLink(new_frame); + } else { + top_frame_ = new_frame; + } + prev_frame_ = new_frame; + } + return true; + } + + ShadowFrame* GetShadowFrameCopy() { + return top_frame_; + } + + private: + static bool TestBitmap(int reg, const uint8_t* reg_vector) { + return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; + } + + ShadowFrame* prev_frame_; + ShadowFrame* top_frame_; +}; + +extern "C" void art_portable_test_suspend_from_code(Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CheckSuspend(self); + if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) { + // Save out the shadow frame to the heap + ShadowFrameCopyVisitor 
visitor(self); + visitor.WalkStack(true); + self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy()); + self->SetDeoptimizationReturnValue(JValue()); + self->SetException(ThrowLocation(), reinterpret_cast(-1)); + } +} + +extern "C" ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread, + ShadowFrame* new_shadow_frame, + mirror::AbstractMethod* method, + uint32_t num_vregs) { + ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame); + new_shadow_frame->SetMethod(method); + new_shadow_frame->SetNumberOfVRegs(num_vregs); + return old_frame; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc new file mode 100644 index 0000000000..64a67ebb4c --- /dev/null +++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "dex_instruction.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowArithmeticExceptionDivideByZero(); +} + +extern "C" void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowArrayIndexOutOfBoundsException(index, length); +} + +extern "C" void art_portable_throw_no_such_method_from_code(int32_t method_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowNoSuchMethodError(method_idx); +} + +extern "C" void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // TODO: remove dex_pc argument from caller. + UNUSED(dex_pc); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionFromDexPC(throw_location); +} + +extern "C" void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowStackOverflowError(Thread::Current()); +} + +extern "C" void art_portable_throw_exception_from_code(mirror::Throwable* exception) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + if (exception == NULL) { + ThrowNullPointerException(NULL, "throw with null exception"); + } else { + self->SetException(throw_location, exception); + } +} + +extern "C" void* art_portable_get_and_clear_exception(Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(self->IsExceptionPending()); + // TODO: make this inline. 
+ mirror::Throwable* exception = self->GetException(NULL); + self->ClearException(); + return exception; +} + +extern "C" int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_method, + uint32_t ti_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self = Thread::Current(); // TODO: make an argument. + ThrowLocation throw_location; + mirror::Throwable* exception = self->GetException(&throw_location); + // Check for special deoptimization exception. + if (UNLIKELY(reinterpret_cast(exception) == -1)) { + return -1; + } + mirror::Class* exception_type = exception->GetClass(); + MethodHelper mh(current_method); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); + DCHECK_LT(ti_offset, code_item->tries_size_); + const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); + + int iter_index = 0; + int result = -1; + uint32_t catch_dex_pc = -1; + // Iterate over the catch handlers associated with dex_pc + for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) { + uint16_t iter_type_idx = it.GetHandlerTypeIndex(); + // Catch all case + if (iter_type_idx == DexFile::kDexNoIndex16) { + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; + } + // Does this catch exception type apply? + mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); + if (UNLIKELY(iter_exception_type == NULL)) { + // TODO: check, the verifier (class linker?) should take care of resolving all exception + // classes early. + LOG(WARNING) << "Unresolved exception class when finding catch block: " + << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); + } else if (iter_exception_type->IsAssignableFrom(exception_type)) { + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; + } + ++iter_index; + } + if (result != -1) { + // Handler found. 
+ Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, + throw_location, + current_method, + catch_dex_pc, + exception); + // If the catch block has no move-exception then clear the exception for it. + const Instruction* first_catch_instr = Instruction::At(&mh.GetCodeItem()->insns_[catch_dex_pc]); + if (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION) { + self->ClearException(); + } + } + return result; +} + +} // namespace art diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h new file mode 100644 index 0000000000..0cb578ddd0 --- /dev/null +++ b/runtime/entrypoints/quick/callee_save_frame.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_ +#define ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_ + +#include "base/mutex.h" +#include "thread-inl.h" + +namespace art { +namespace mirror { +class AbstractMethod; +} // namespace mirror + +// Place a special frame at the TOS that will save the callee saves for the given type. +static void FinishCalleeSaveFrameSetup(Thread* self, mirror::AbstractMethod** sp, + Runtime::CalleeSaveType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Be aware the store below may well stomp on an incoming argument. 
+ Locks::mutator_lock_->AssertSharedHeld(self); + *sp = Runtime::Current()->GetCalleeSaveMethod(type); + self->SetTopOfStack(sp, 0); + self->VerifyStack(); +} + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_ diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc new file mode 100644 index 0000000000..9ed802a2bb --- /dev/null +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Object* artAllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return AllocObjectFromCode(type_idx, method, self, false); +} + +extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, + mirror::AbstractMethod* method, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return AllocObjectFromCode(type_idx, method, self, true); +} + +extern "C" mirror::Array* artAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return AllocArrayFromCode(type_idx, method, component_count, self, false); +} + +extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, + mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return AllocArrayFromCode(type_idx, method, component_count, self, true); +} + +extern "C" mirror::Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, + mirror::AbstractMethod* method, + int32_t component_count, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return CheckAndAllocArrayFromCode(type_idx, method, 
component_count, self, false); +} + +extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, + mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h new file mode 100644 index 0000000000..35fa97269c --- /dev/null +++ b/runtime/entrypoints/quick/quick_argument_visitor.h @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_ +#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_ + +#include "object_utils.h" + +namespace art { + +// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. +class QuickArgumentVisitor { + public: +// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. +// Size of Runtime::kRefAndArgs callee save frame. +// Size of Method* and register parameters in out stack arguments. 
+#if defined(__arm__) +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 +#define QUICK_STACK_ARG_SKIP 16 +#elif defined(__mips__) +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 +#define QUICK_STACK_ARG_SKIP 16 +#elif defined(__i386__) +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 +#define QUICK_STACK_ARG_SKIP 16 +#else +#error "Unsupported architecture" +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 +#define QUICK_STACK_ARG_SKIP 0 +#endif + + QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + caller_mh_(caller_mh), + args_in_regs_(ComputeArgsInRegs(caller_mh)), + num_params_(caller_mh.NumArgs()), + reg_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), + stack_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE + + QUICK_STACK_ARG_SKIP), + cur_args_(reg_args_), + cur_arg_index_(0), + param_index_(0), + is_split_long_or_double_(false) { + } + + virtual ~QuickArgumentVisitor() {} + + virtual void Visit() = 0; + + bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.IsParamAReference(param_index_); + } + + bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.IsParamALongOrDouble(param_index_); + } + + Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.GetParamPrimitiveType(param_index_); + } + + byte* GetParamAddress() const { + return cur_args_ + (cur_arg_index_ * kPointerSize); + } + + bool IsSplitLongOrDouble() const { + return is_split_long_or_double_; + } + + uint64_t ReadSplitLongParam() const 
{ + DCHECK(IsSplitLongOrDouble()); + uint64_t low_half = *reinterpret_cast(GetParamAddress()); + uint64_t high_half = *reinterpret_cast(stack_args_); + return (low_half & 0xffffffffULL) | (high_half << 32); + } + + void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { + is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble(); + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + cur_args_ = stack_args_; + cur_arg_index_ = is_split_long_or_double_ ? 1 : 0; + is_split_long_or_double_ = false; + while (param_index_ < num_params_) { + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + } + + private: + static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t args_in_regs = 0; + size_t num_params = mh.NumArgs(); + for (size_t i = 0; i < num_params; i++) { + args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1); + if (args_in_regs > 3) { + args_in_regs = 3; + break; + } + } + return args_in_regs; + } + MethodHelper& caller_mh_; + const size_t args_in_regs_; + const size_t num_params_; + byte* const reg_args_; + byte* const stack_args_; + byte* cur_args_; + size_t cur_arg_index_; + size_t param_index_; + // Does a 64bit parameter straddle the register and stack arguments? 
+ bool is_split_long_or_double_; +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_ diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc new file mode 100644 index 0000000000..b810bb70a6 --- /dev/null +++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" + +namespace art { + +// Assignable test for code, won't throw. Null and equality tests already performed +extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(klass != NULL); + DCHECK(ref_class != NULL); + return klass->IsAssignableFrom(ref_class) ? 
1 : 0; +} + +// Check whether it is safe to cast one class to the other, throw exception and return -1 on failure +extern "C" int artCheckCastFromCode(mirror::Class* src_type, mirror::Class* dest_type, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(src_type->IsClass()) << PrettyClass(src_type); + DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); + if (LIKELY(dest_type->IsAssignableFrom(src_type))) { + return 0; // Success + } else { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + ThrowClassCastException(dest_type, src_type); + return -1; // Failure + } +} + +// Tests whether 'element' can be assigned into an array of type 'array_class'. +// Returns 0 on success and -1 if an exception is pending. +extern "C" int artCanPutArrayElementFromCode(const mirror::Object* element, + const mirror::Class* array_class, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(array_class != NULL); + // element can't be NULL as we catch this is screened in runtime_support + mirror::Class* element_class = element->GetClass(); + mirror::Class* component_type = array_class->GetComponentType(); + if (LIKELY(component_type->IsAssignableFrom(element_class))) { + return 0; // Success + } else { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + ThrowArrayStoreException(element_class, array_class); + return -1; // Failure + } +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc new file mode 100644 index 0000000000..43fc9d2a2d --- /dev/null +++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "dex_file-inl.h" +#include "interpreter/interpreter.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/object-inl.h" +#include "object_utils.h" +#include "stack.h" +#include "thread.h" +#include "verifier/method_verifier.h" + +namespace art { + +extern "C" void artDeoptimize(Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + self->SetException(ThrowLocation(), reinterpret_cast(-1)); + self->QuickDeliverException(); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc new file mode 100644 index 0000000000..6400161b3e --- /dev/null +++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" +#include "class_linker-inl.h" +#include "dex_file-inl.h" +#include "gc/accounting/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, + const mirror::AbstractMethod* referrer, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called to ensure static storage base is initialized for direct static field reads and writes. + // A class may be accessing another class' fields when it doesn't have access, as access has been + // given by inheritance. + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return ResolveVerifyAndClinit(type_idx, referrer, self, true, false); +} + +extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, + const mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called when method->dex_cache_resolved_types_[] misses. + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return ResolveVerifyAndClinit(type_idx, referrer, self, false, false); +} + +extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, + const mirror::AbstractMethod* referrer, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called when caller isn't guaranteed to have access to a type and the dex cache may be + // unpopulated. 
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return ResolveVerifyAndClinit(type_idx, referrer, self, false, true); +} + +extern "C" mirror::String* artResolveStringFromCode(mirror::AbstractMethod* referrer, + int32_t string_idx, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + return ResolveStringFromCode(referrer, string_idx); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h new file mode 100644 index 0000000000..74b8cfd09b --- /dev/null +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ +#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ + +#include "dex_file-inl.h" +#include "runtime.h" + +#define QUICK_ENTRYPOINT_OFFSET(x) \ + (static_cast(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \ + static_cast(OFFSETOF_MEMBER(QuickEntryPoints, x))) + +namespace art { +namespace mirror { + class AbstractMethod; + class Class; + class Object; +} // namespace mirror +class DvmDex; +class MethodHelper; +class ShadowFrame; +class Thread; + +// Pointers to functions that are called by quick compiler generated code via thread-local storage. 
+struct PACKED(4) QuickEntryPoints { + // Alloc + void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t); + void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t); + void* (*pAllocObjectFromCode)(uint32_t, void*); + void* (*pAllocObjectFromCodeWithAccessCheck)(uint32_t, void*); + void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t); + void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t); + + // Cast + uint32_t (*pInstanceofNonTrivialFromCode)(const mirror::Class*, const mirror::Class*); + void (*pCanPutArrayElementFromCode)(void*, void*); + void (*pCheckCastFromCode)(void*, void*); + + // DexCache + void* (*pInitializeStaticStorage)(uint32_t, void*); + void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*); + void* (*pInitializeTypeFromCode)(uint32_t, void*); + void* (*pResolveStringFromCode)(void*, uint32_t); + + // Field + int (*pSet32Instance)(uint32_t, void*, int32_t); // field_idx, obj, src + int (*pSet32Static)(uint32_t, int32_t); + int (*pSet64Instance)(uint32_t, void*, int64_t); + int (*pSet64Static)(uint32_t, int64_t); + int (*pSetObjInstance)(uint32_t, void*, void*); + int (*pSetObjStatic)(uint32_t, void*); + int32_t (*pGet32Instance)(uint32_t, void*); + int32_t (*pGet32Static)(uint32_t); + int64_t (*pGet64Instance)(uint32_t, void*); + int64_t (*pGet64Static)(uint32_t); + void* (*pGetObjInstance)(uint32_t, void*); + void* (*pGetObjStatic)(uint32_t); + + // FillArray + void (*pHandleFillArrayDataFromCode)(void*, void*); + + // JNI + uint32_t (*pJniMethodStart)(Thread*); + uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self); + void (*pJniMethodEnd)(uint32_t cookie, Thread* self); + void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self); + mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self); + mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie, + jobject locked, Thread* self); 
+ + // Locks + void (*pLockObjectFromCode)(void*); + void (*pUnlockObjectFromCode)(void*); + + // Math + int32_t (*pCmpgDouble)(double, double); + int32_t (*pCmpgFloat)(float, float); + int32_t (*pCmplDouble)(double, double); + int32_t (*pCmplFloat)(float, float); + double (*pFmod)(double, double); + double (*pSqrt)(double); + double (*pL2d)(int64_t); + float (*pFmodf)(float, float); + float (*pL2f)(int64_t); + int32_t (*pD2iz)(double); + int32_t (*pF2iz)(float); + int32_t (*pIdivmod)(int32_t, int32_t); + int64_t (*pD2l)(double); + int64_t (*pF2l)(float); + int64_t (*pLdiv)(int64_t, int64_t); + int64_t (*pLdivmod)(int64_t, int64_t); + int64_t (*pLmul)(int64_t, int64_t); + uint64_t (*pShlLong)(uint64_t, uint32_t); + uint64_t (*pShrLong)(uint64_t, uint32_t); + uint64_t (*pUshrLong)(uint64_t, uint32_t); + + // Interpreter + void (*pInterpreterToInterpreterEntry)(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + void (*pInterpreterToQuickEntry)(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + + // Intrinsics + int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t); + int32_t (*pMemcmp16)(void*, void*, int32_t); + int32_t (*pStringCompareTo)(void*, void*); + void* (*pMemcpy)(void*, const void*, size_t); + + // Invocation + const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, + mirror::AbstractMethod**, Thread*); + void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*); + void (*pInvokeInterfaceTrampoline)(uint32_t, void*); + void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*); + void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*); + void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*); + void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*); + + // Thread + void (*pCheckSuspendFromCode)(Thread*); // Stub that is called when the suspend 
count is non-zero + void (*pTestSuspendFromCode)(); // Stub that is periodically called to test the suspend count + + // Throws + void (*pDeliverException)(void*); + void (*pThrowArrayBoundsFromCode)(int32_t, int32_t); + void (*pThrowDivZeroFromCode)(); + void (*pThrowNoSuchMethodFromCode)(int32_t); + void (*pThrowNullPointerFromCode)(); + void (*pThrowStackOverflowFromCode)(void*); +}; + + +// JNI entrypoints. +extern uint32_t JniMethodStart(Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; +extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; +extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; +extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; + +extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc new file mode 100644 index 0000000000..a4e9dc9b27 --- /dev/null +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" + +#include + +namespace art { + +extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int32_t), true); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int64_t), true); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); 
+ } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, StaticObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + return NULL; // Will throw exception by checking with Thread::Current +} + +extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); + if (LIKELY(field != NULL && obj != NULL)) { + return field->Get32(obj); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int32_t), true); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); + } else { + return field->Get32(obj); + } + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, 
sizeof(int64_t)); + if (LIKELY(field != NULL && obj != NULL)) { + return field->Get64(obj); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int64_t), true); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); + } else { + return field->Get64(obj); + } + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + const mirror::AbstractMethod* referrer, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::Object*)); + if (LIKELY(field != NULL && obj != NULL)) { + return field->GetObj(obj); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); + } else { + return field->GetObj(obj); + } + } + return NULL; // Will throw exception by checking with Thread::Current +} + +extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = 
FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int32_t), true); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; // success + } + return -1; // failure +} + +extern "C" int artSet64StaticFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + uint64_t new_value, Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int64_t), true); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; // success + } + return -1; // failure +} + +extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + if (LIKELY(!FieldHelper(field).IsPrimitiveType())) { + field->SetObj(field->GetDeclaringClass(), new_value); + return 0; // success + } + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, StaticObjectWrite, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + field->SetObj(field->GetDeclaringClass(), new_value); + return 0; // success + } + return -1; // failure +} + +extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** sp) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); + if (LIKELY(field != NULL && obj != NULL)) { + field->Set32(obj, new_value); + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int32_t), true); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); + } else { + field->Set32(obj, new_value); + return 0; // success + } + } + return -1; // failure +} + +extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); + mirror::AbstractMethod* referrer = + sp[callee_save->GetFrameSizeInBytes() / sizeof(mirror::AbstractMethod*)]; + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, + sizeof(int64_t)); + if (LIKELY(field != NULL && obj != NULL)) { + field->Set64(obj, new_value); + return 0; // success + } + *sp = callee_save; + self->SetTopOfStack(sp, 0); + field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int64_t), true); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); + } else { + field->Set64(obj, new_value); + return 0; // success + } + } + return -1; // failure +} + +extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + mirror::Object* new_value, + const mirror::AbstractMethod* referrer, Thread* self, + mirror::AbstractMethod** 
sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL && obj != NULL)) { + field->SetObj(obj, new_value); + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectWrite, + sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); + } else { + field->SetObj(obj, new_value); + return 0; // success + } + } + return -1; // failure +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc new file mode 100644 index 0000000000..b81ad12b7b --- /dev/null +++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "common_throws.h" +#include "dex_instruction.h" +#include "mirror/array.h" +#include "mirror/object-inl.h" + +namespace art { + +/* + * Fill the array with predefined constant values, throwing exceptions if the array is null or + * not of sufficient length. 
+ * + * NOTE: When dealing with a raw dex file, the data to be copied uses + * little-endian ordering. Require that oat2dex do any required swapping + * so this routine can get by with a memcpy(). + * + * Format of the data: + * ushort ident = 0x0300 magic value + * ushort width width of each element in the table + * uint size number of elements in the table + * ubyte data[size*width] table of data values (may contain a single-byte + * padding at the end) + */ +extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array, + const Instruction::ArrayDataPayload* payload, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature)); + if (UNLIKELY(array == NULL)) { + ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA"); + return -1; // Error + } + DCHECK(array->IsArrayInstance() && !array->IsObjectArray()); + if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", + "failed FILL_ARRAY_DATA; length=%d, index=%d", + array->GetLength(), payload->element_count - 1); + return -1; // Error + } + uint32_t size_in_bytes = payload->element_count * payload->element_width; + memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes); + return 0; // Success +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc new file mode 100644 index 0000000000..7ecd296742 --- /dev/null +++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "instrumentation.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "runtime.h" +#include "thread-inl.h" + +namespace art { + +extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::AbstractMethod* method, + mirror::Object* this_object, + Thread* self, + mirror::AbstractMethod** sp, + uintptr_t lr) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); + instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); + const void* result = instrumentation->GetQuickCodeFor(method); + bool interpreter_entry = (result == GetInterpreterEntryPoint()); + instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object, + method, lr, interpreter_entry); + CHECK(result != NULL) << PrettyMethod(method); + return result; +} + +extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::AbstractMethod** sp, + uint64_t gpr_result, uint64_t fpr_result) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) not the hand inlined below. + // We use the hand inline version to ensure the return_pc is assigned before verifying the + // stack. + // Be aware the store below may well stomp on an incoming argument. 
+ Locks::mutator_lock_->AssertSharedHeld(self); + mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); + *sp = callee_save; + uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + + callee_save->GetReturnPcOffsetInBytes()); + CHECK_EQ(*return_pc, 0U); + self->SetTopOfStack(sp, 0); + self->VerifyStack(); + instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); + uint64_t return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(self, return_pc, + gpr_result, + fpr_result); + self->VerifyStack(); + return return_or_deoptimize_pc; +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc b/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc new file mode 100644 index 0000000000..656df8de5b --- /dev/null +++ b/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "quick_argument_visitor.h" +#include "callee_save_frame.h" +#include "dex_file-inl.h" +#include "interpreter/interpreter.h" +#include "invoke_arg_array_builder.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "object_utils.h" + +namespace art { + +// Visits arguments on the stack placing them into the shadow frame. +class BuildShadowFrameVisitor : public QuickArgumentVisitor { + public: + BuildShadowFrameVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, + ShadowFrame& sf, size_t first_arg_reg) : + QuickArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Primitive::Type type = GetParamPrimitiveType(); + switch (type) { + case Primitive::kPrimLong: // Fall-through. + case Primitive::kPrimDouble: + if (IsSplitLongOrDouble()) { + sf_.SetVRegLong(cur_reg_, ReadSplitLongParam()); + } else { + sf_.SetVRegLong(cur_reg_, *reinterpret_cast(GetParamAddress())); + } + ++cur_reg_; + break; + case Primitive::kPrimNot: + sf_.SetVRegReference(cur_reg_, *reinterpret_cast(GetParamAddress())); + break; + case Primitive::kPrimBoolean: // Fall-through. + case Primitive::kPrimByte: // Fall-through. + case Primitive::kPrimChar: // Fall-through. + case Primitive::kPrimShort: // Fall-through. + case Primitive::kPrimInt: // Fall-through. 
+ case Primitive::kPrimFloat: + sf_.SetVReg(cur_reg_, *reinterpret_cast(GetParamAddress())); + break; + case Primitive::kPrimVoid: + LOG(FATAL) << "UNREACHABLE"; + break; + } + ++cur_reg_; + } + + private: + ShadowFrame& sf_; + size_t cur_reg_; + + DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor); +}; + +extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ensure we don't get thread suspension until the object arguments are safely in the shadow + // frame. + const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame"); + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); + + MethodHelper mh(method); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); + uint16_t num_regs = code_item->registers_size_; + void* memory = alloca(ShadowFrame::ComputeSize(num_regs)); + ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL, // No last shadow coming from quick. + method, 0, memory)); + size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; + BuildShadowFrameVisitor shadow_frame_builder(mh, sp, *shadow_frame, first_arg_reg); + shadow_frame_builder.VisitArguments(); + // Push a transition back into managed code onto the linked list in thread. + ManagedStack fragment; + self->PushManagedStackFragment(&fragment); + self->PushShadowFrame(shadow_frame); + self->EndAssertNoThreadSuspension(old_cause); + + if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) { + // Ensure static method's class is initialized. + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), + true, true)) { + DCHECK(Thread::Current()->IsExceptionPending()); + self->PopManagedStackFragment(fragment); + return 0; + } + } + + JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame); + // Pop transition. 
+ self->PopManagedStackFragment(fragment); + return result.GetJ(); +} + +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* method = shadow_frame->GetMethod(); + // Ensure static methods are initialized. + if (method->IsStatic()) { + Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true); + } + uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_; + ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength()); + arg_array.BuildArgArray(shadow_frame, arg_offset); + method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc new file mode 100644 index 0000000000..53b3628e2f --- /dev/null +++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc @@ -0,0 +1,226 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "callee_save_frame.h" +#include "dex_instruction-inl.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" + +namespace art { + +// Determine target of interface dispatch. This object is known non-null. +extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::AbstractMethod* interface_method, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* method; + if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex16)) { + method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method); + if (UNLIKELY(method == NULL)) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object, + caller_method); + return 0; // Failure. + } + } else { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); + DCHECK(interface_method == Runtime::Current()->GetResolutionMethod()); + // Determine method index from calling dex instruction. +#if defined(__arm__) + // On entry the stack pointed by sp is: + // | argN | | + // | ... | | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | LR | + // | ... | callee saves + // | R3 | arg3 + // | R2 | arg2 + // | R1 | arg1 + // | R0 | + // | Method* | <- sp + DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp) + kPointerSize); + uintptr_t caller_pc = regs[10]; +#elif defined(__i386__) + // On entry the stack pointed by sp is: + // | argN | | + // | ... 
| | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | Return | + // | EBP,ESI,EDI | callee saves + // | EBX | arg3 + // | EDX | arg2 + // | ECX | arg1 + // | EAX/Method* | <- sp + DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); + uintptr_t caller_pc = regs[7]; +#elif defined(__mips__) + // On entry the stack pointed by sp is: + // | argN | | + // | ... | | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | RA | + // | ... | callee saves + // | A3 | arg3 + // | A2 | arg2 + // | A1 | arg1 + // | A0/Method* | <- sp + DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); + uintptr_t caller_pc = regs[15]; +#else + UNIMPLEMENTED(FATAL); + uintptr_t caller_pc = 0; +#endif + uint32_t dex_pc = caller_method->ToDexPc(caller_pc); + const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem(); + CHECK_LT(dex_pc, code->insns_size_in_code_units_); + const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); + Instruction::Code instr_code = instr->Opcode(); + CHECK(instr_code == Instruction::INVOKE_INTERFACE || + instr_code == Instruction::INVOKE_INTERFACE_RANGE) + << "Unexpected call into interface trampoline: " << instr->DumpString(NULL); + uint32_t dex_method_idx; + if (instr_code == Instruction::INVOKE_INTERFACE) { + dex_method_idx = instr->VRegB_35c(); + } else { + DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); + dex_method_idx = instr->VRegB_3rc(); + } + method = FindMethodFromCode(dex_method_idx, this_object, caller_method, self, + false, kInterface); + if (UNLIKELY(method == NULL)) { + CHECK(self->IsExceptionPending()); + return 0; // Failure. 
+ } + } + const void* code = method->GetEntryPointFromCompiledCode(); + +#ifndef NDEBUG + // When we return, the caller will branch to this address, so it had better not be 0! + if (UNLIKELY(code == NULL)) { + MethodHelper mh(method); + LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) + << " location: " << mh.GetDexFile().GetLocation(); + } +#endif + + uint32_t method_uint = reinterpret_cast(method); + uint64_t code_uint = reinterpret_cast(code); + uint64_t result = ((code_uint << 32) | method_uint); + return result; +} + + +static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, mirror::AbstractMethod** sp, bool access_check, + InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, + access_check, type); + if (UNLIKELY(method == NULL)) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); + method = FindMethodFromCode(method_idx, this_object, caller_method, self, access_check, type); + if (UNLIKELY(method == NULL)) { + CHECK(self->IsExceptionPending()); + return 0; // failure + } + } + DCHECK(!self->IsExceptionPending()); + const void* code = method->GetEntryPointFromCompiledCode(); + +#ifndef NDEBUG + // When we return, the caller will branch to this address, so it had better not be 0! 
+ if (UNLIKELY(code == NULL)) { + MethodHelper mh(method); + LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) + << " location: " << mh.GetDexFile().GetLocation(); + } +#endif + + uint32_t method_uint = reinterpret_cast(method); + uint64_t code_uint = reinterpret_cast(code); + uint64_t result = ((code_uint << 32) | method_uint); + return result; +} + +// See comments in runtime_support_asm.S +extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface); +} + + +extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect); +} + +extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic); +} + +extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper); +} + +extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + Thread* self, + mirror::AbstractMethod** 
sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc new file mode 100644 index 0000000000..23a28f9cce --- /dev/null +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "object_utils.h" +#include "scoped_thread_state_change.h" +#include "thread.h" + +namespace art { + +// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. 
+extern uint32_t JniMethodStart(Thread* self) { + JNIEnvExt* env = self->GetJniEnv(); + DCHECK(env != NULL); + uint32_t saved_local_ref_cookie = env->local_ref_cookie; + env->local_ref_cookie = env->locals.GetSegmentState(); + self->TransitionFromRunnableToSuspended(kNative); + return saved_local_ref_cookie; +} + +extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) { + self->DecodeJObject(to_lock)->MonitorEnter(self); + return JniMethodStart(self); +} + +static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { + JNIEnvExt* env = self->GetJniEnv(); + env->locals.SetSegmentState(env->local_ref_cookie); + env->local_ref_cookie = saved_local_ref_cookie; + self->PopSirt(); +} + +extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) { + self->TransitionFromSuspendedToRunnable(); + PopLocalReferences(saved_local_ref_cookie, self); +} + + +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, + Thread* self) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); +} + +extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self) { + self->TransitionFromSuspendedToRunnable(); + mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. + if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, Thread* self) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. 
+ mirror::Object* o = self->DecodeJObject(result); + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. + if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { + intptr_t value = *arg_ptr; + mirror::Object** value_as_jni_rep = reinterpret_cast(value); + mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL; + CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep)) + << value_as_work_around_rep; + *arg_ptr = reinterpret_cast(value_as_work_around_rep); +} + +extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(Thread::Current() == self); + // TODO: this code is specific to ARM + // On entry the stack pointed by sp is: + // | arg3 | <- Calling JNI method's frame (and extra bit for out args) + // | LR | + // | R3 | arg2 + // | R2 | arg1 + // | R1 | jclass/jobject + // | R0 | JNIEnv + // | unused | + // | unused | + // | unused | <- sp + mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL); + DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method); + intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack + // Fix up this/jclass argument + WorkAroundJniBugsForJobject(arg_ptr); + arg_ptr++; + // Fix up jobject arguments + MethodHelper mh(jni_method); + int reg_num = 2; // Current register being processed, -1 for stack arguments. 
+ for (uint32_t i = 1; i < mh.GetShortyLength(); i++) { + char shorty_char = mh.GetShorty()[i]; + if (shorty_char == 'L') { + WorkAroundJniBugsForJobject(arg_ptr); + } + if (shorty_char == 'J' || shorty_char == 'D') { + if (reg_num == 2) { + arg_ptr = sp + 8; // skip to out arguments + reg_num = -1; + } else if (reg_num == 3) { + arg_ptr = sp + 10; // skip to out arguments plus 2 slots as long must be aligned + reg_num = -1; + } else { + DCHECK_EQ(reg_num, -1); + if ((reinterpret_cast(arg_ptr) & 7) == 4) { + arg_ptr += 3; // unaligned, pad and move through stack arguments + } else { + arg_ptr += 2; // aligned, move through stack arguments + } + } + } else { + if (reg_num == 2) { + arg_ptr++; // move through register arguments + reg_num++; + } else if (reg_num == 3) { + arg_ptr = sp + 8; // skip to outgoing stack arguments + reg_num = -1; + } else { + DCHECK_EQ(reg_num, -1); + arg_ptr++; // move through stack arguments + } + } + } + // Load expected destination, see Method::RegisterNative + const void* code = reinterpret_cast(jni_method->GetNativeGcMap()); + if (UNLIKELY(code == NULL)) { + code = GetJniDlsymLookupStub(); + jni_method->RegisterNative(self, code); + } + return code; +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc new file mode 100644 index 0000000000..79bb7a69f1 --- /dev/null +++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self, + mirror::AbstractMethod** sp) + UNLOCK_FUNCTION(monitor_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + DCHECK(obj != NULL); // Assumed to have been checked before entry + // MonitorExit may throw exception + return obj->MonitorExit(self) ? 0 /* Success */ : -1 /* Failure */; +} + +extern "C" void artLockObjectFromCode(mirror::Object* obj, Thread* thread, + mirror::AbstractMethod** sp) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { + FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); + DCHECK(obj != NULL); // Assumed to have been checked before entry + obj->MonitorEnter(thread); // May block + DCHECK(thread->HoldsLock(obj)); + // Only possible exception is NPE and is handled before entry + DCHECK(!thread->IsExceptionPending()); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_math_entrypoints.cc b/runtime/entrypoints/quick/quick_math_entrypoints.cc new file mode 100644 index 0000000000..0bfe59dc2f --- /dev/null +++ b/runtime/entrypoints/quick/quick_math_entrypoints.cc @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <stdint.h> + +namespace art { + +int CmplFloat(float a, float b) { + if (a == b) { + return 0; + } else if (a < b) { + return -1; + } else if (a > b) { + return 1; + } + return -1; +} + +int CmpgFloat(float a, float b) { + if (a == b) { + return 0; + } else if (a < b) { + return -1; + } else if (a > b) { + return 1; + } + return 1; +} + +int CmpgDouble(double a, double b) { + if (a == b) { + return 0; + } else if (a < b) { + return -1; + } else if (a > b) { + return 1; + } + return 1; +} + +int CmplDouble(double a, double b) { + if (a == b) { + return 0; + } else if (a < b) { + return -1; + } else if (a > b) { + return 1; + } + return -1; +} + +extern "C" int64_t artLmulFromCode(int64_t a, int64_t b) { + return a * b; +} + +extern "C" int64_t artLdivFromCode(int64_t a, int64_t b) { + return a / b; +} + +extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b) { + return a % b; +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc new file mode 100644 index 0000000000..4e3d749e27 --- /dev/null +++ b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "quick_argument_visitor.h" +#include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/object-inl.h" +#include "object_utils.h" +#include "reflection.h" +#include "scoped_thread_state_change.h" +#include "thread.h" +#include "well_known_classes.h" + +#include "ScopedLocalRef.h" + +namespace art { + +// Visits arguments on the stack placing them into the args vector, Object* arguments are converted +// to jobjects. +class BuildQuickArgumentVisitor : public QuickArgumentVisitor { + public: + BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, + ScopedObjectAccessUnchecked& soa, std::vector& args) : + QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jvalue val; + Primitive::Type type = GetParamPrimitiveType(); + switch (type) { + case Primitive::kPrimNot: { + mirror::Object* obj = *reinterpret_cast(GetParamAddress()); + val.l = soa_.AddLocalReference(obj); + break; + } + case Primitive::kPrimLong: // Fall-through. + case Primitive::kPrimDouble: + if (IsSplitLongOrDouble()) { + val.j = ReadSplitLongParam(); + } else { + val.j = *reinterpret_cast(GetParamAddress()); + } + break; + case Primitive::kPrimBoolean: // Fall-through. + case Primitive::kPrimByte: // Fall-through. + case Primitive::kPrimChar: // Fall-through. + case Primitive::kPrimShort: // Fall-through. + case Primitive::kPrimInt: // Fall-through. + case Primitive::kPrimFloat: + val.i = *reinterpret_cast(GetParamAddress()); + break; + case Primitive::kPrimVoid: + LOG(FATAL) << "UNREACHABLE"; + val.j = 0; + break; + } + args_.push_back(val); + } + + private: + ScopedObjectAccessUnchecked& soa_; + std::vector& args_; + + DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); +}; + +// Handler for invocation on proxy methods. 
On entry a frame will exist for the proxy object method +// which is responsible for recording callee save registers. We explicitly place into jobjects the +// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a +// field within the proxy object, which will box the primitive arguments and deal with error cases. +extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method, + mirror::Object* receiver, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ensure we don't get thread suspension until the object arguments are safely in jobjects. + const char* old_cause = + self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); + // Register the top of the managed stack, making stack crawlable. + DCHECK_EQ(*sp, proxy_method); + self->SetTopOfStack(sp, 0); + DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), + Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + self->VerifyStack(); + // Start new JNI local reference state. + JNIEnvExt* env = self->GetJniEnv(); + ScopedObjectAccessUnchecked soa(env); + ScopedJniEnvLocalRefState env_state(env); + // Create local ref. copies of proxy method and the receiver. + jobject rcvr_jobj = soa.AddLocalReference(receiver); + + // Placing arguments into args vector and remove the receiver. + MethodHelper proxy_mh(proxy_method); + std::vector args; + BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); + local_ref_visitor.VisitArguments(); + args.erase(args.begin()); + + // Convert proxy method into expected interface method. 
+ mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); + DCHECK(interface_method != NULL); + DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); + jobject interface_method_jobj = soa.AddLocalReference(interface_method); + + // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code + // that performs allocations. + self->EndAssertNoThreadSuspension(old_cause); + JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), + rcvr_jobj, interface_method_jobj, args); + return result.GetJ(); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_stub_entrypoints.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc new file mode 100644 index 0000000000..d78bbf3bc8 --- /dev/null +++ b/runtime/entrypoints/quick/quick_stub_entrypoints.cc @@ -0,0 +1,295 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "class_linker-inl.h" +#include "dex_file-inl.h" +#include "dex_instruction-inl.h" +#include "mirror/class-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/object-inl.h" +#include "object_utils.h" +#include "scoped_thread_state_change.h" + +// Architecture specific assembler helper to deliver exception. 
+extern "C" void art_quick_deliver_exception_from_code(void*); + +namespace art { + +// Lazily resolve a method for quick. Called by stub code. +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__arm__) + // On entry the stack pointed by sp is: + // | argN | | + // | ... | | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | LR | + // | ... | callee saves + // | R3 | arg3 + // | R2 | arg2 + // | R1 | arg1 + // | R0 | + // | Method* | <- sp + DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 48); + uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp) + kPointerSize); + uint32_t pc_offset = 10; + uintptr_t caller_pc = regs[pc_offset]; +#elif defined(__i386__) + // On entry the stack pointed by sp is: + // | argN | | + // | ... | | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | Return | + // | EBP,ESI,EDI | callee saves + // | EBX | arg3 + // | EDX | arg2 + // | ECX | arg1 + // | EAX/Method* | <- sp + DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 32); + uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); + uintptr_t caller_pc = regs[7]; +#elif defined(__mips__) + // On entry the stack pointed by sp is: + // | argN | | + // | ... | | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | RA | + // | ... 
| callee saves + // | A3 | arg3 + // | A2 | arg2 + // | A1 | arg1 + // | A0/Method* | <- sp + DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 64); + uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); + uint32_t pc_offset = 15; + uintptr_t caller_pc = regs[pc_offset]; +#else + UNIMPLEMENTED(FATAL); + mirror::AbstractMethod** caller_sp = NULL; + uintptr_t* regs = NULL; + uintptr_t caller_pc = 0; +#endif + FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs); + // Start new JNI local reference state + JNIEnvExt* env = thread->GetJniEnv(); + ScopedObjectAccessUnchecked soa(env); + ScopedJniEnvLocalRefState env_state(env); + + // Compute details about the called method (avoid GCs) + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + mirror::AbstractMethod* caller = *caller_sp; + InvokeType invoke_type; + uint32_t dex_method_idx; +#if !defined(__i386__) + const char* shorty; + uint32_t shorty_len; +#endif + if (called->IsRuntimeMethod()) { + uint32_t dex_pc = caller->ToDexPc(caller_pc); + const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); + CHECK_LT(dex_pc, code->insns_size_in_code_units_); + const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); + Instruction::Code instr_code = instr->Opcode(); + bool is_range; + switch (instr_code) { + case Instruction::INVOKE_DIRECT: + invoke_type = kDirect; + is_range = false; + break; + case Instruction::INVOKE_DIRECT_RANGE: + invoke_type = kDirect; + is_range = true; + break; + case Instruction::INVOKE_STATIC: + invoke_type = kStatic; + is_range = false; + break; + case Instruction::INVOKE_STATIC_RANGE: + invoke_type = kStatic; + is_range = true; + break; + case Instruction::INVOKE_SUPER: + invoke_type = kSuper; + is_range = false; + break; + case Instruction::INVOKE_SUPER_RANGE: + invoke_type = kSuper; + is_range = true; + break; + case 
Instruction::INVOKE_VIRTUAL: + invoke_type = kVirtual; + is_range = false; + break; + case Instruction::INVOKE_VIRTUAL_RANGE: + invoke_type = kVirtual; + is_range = true; + break; + case Instruction::INVOKE_INTERFACE: + invoke_type = kInterface; + is_range = false; + break; + case Instruction::INVOKE_INTERFACE_RANGE: + invoke_type = kInterface; + is_range = true; + break; + default: + LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); + // Avoid used uninitialized warnings. + invoke_type = kDirect; + is_range = false; + } + dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); +#if !defined(__i386__) + shorty = linker->MethodShorty(dex_method_idx, caller, &shorty_len); +#endif + } else { + invoke_type = kStatic; + dex_method_idx = called->GetDexMethodIndex(); +#if !defined(__i386__) + MethodHelper mh(called); + shorty = mh.GetShorty(); + shorty_len = mh.GetShortyLength(); +#endif + } +#if !defined(__i386__) + // Discover shorty (avoid GCs) + size_t args_in_regs = 0; + for (size_t i = 1; i < shorty_len; i++) { + char c = shorty[i]; + args_in_regs = args_in_regs + (c == 'J' || c == 'D' ? 
2 : 1); + if (args_in_regs > 3) { + args_in_regs = 3; + break; + } + } + // Place into local references incoming arguments from the caller's register arguments + size_t cur_arg = 1; // skip method_idx in R0, first arg is in R1 + if (invoke_type != kStatic) { + mirror::Object* obj = reinterpret_cast(regs[cur_arg]); + cur_arg++; + if (args_in_regs < 3) { + // If we thought we had fewer than 3 arguments in registers, account for the receiver + args_in_regs++; + } + soa.AddLocalReference(obj); + } + size_t shorty_index = 1; // skip return value + // Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip + // R0) + while ((cur_arg - 1) < args_in_regs && shorty_index < shorty_len) { + char c = shorty[shorty_index]; + shorty_index++; + if (c == 'L') { + mirror::Object* obj = reinterpret_cast(regs[cur_arg]); + soa.AddLocalReference(obj); + } + cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); + } + // Place into local references incoming arguments from the caller's stack arguments + cur_arg += pc_offset + 1; // skip LR/RA, Method* and spills for R1-R3/A1-A3 and callee saves + while (shorty_index < shorty_len) { + char c = shorty[shorty_index]; + shorty_index++; + if (c == 'L') { + mirror::Object* obj = reinterpret_cast(regs[cur_arg]); + soa.AddLocalReference(obj); + } + cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); + } +#endif + // Resolve method filling in dex cache + if (called->IsRuntimeMethod()) { + called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); + } + const void* code = NULL; + if (LIKELY(!thread->IsExceptionPending())) { + // Incompatible class change should have been handled in resolve method. + CHECK(!called->CheckIncompatibleClassChange(invoke_type)); + // Refine called method based on receiver. 
+ if (invoke_type == kVirtual) { + called = receiver->GetClass()->FindVirtualMethodForVirtual(called); + } else if (invoke_type == kInterface) { + called = receiver->GetClass()->FindVirtualMethodForInterface(called); + } + // Ensure that the called method's class is initialized. + mirror::Class* called_class = called->GetDeclaringClass(); + linker->EnsureInitialized(called_class, true, true); + if (LIKELY(called_class->IsInitialized())) { + code = called->GetEntryPointFromCompiledCode(); + } else if (called_class->IsInitializing()) { + if (invoke_type == kStatic) { + // Class is still initializing, go to oat and grab code (trampoline must be left in place + // until class is initialized to stop races between threads). + code = linker->GetOatCodeFor(called); + } else { + // No trampoline for non-static methods. + code = called->GetEntryPointFromCompiledCode(); + } + } else { + DCHECK(called_class->IsErroneous()); + } + } + if (UNLIKELY(code == NULL)) { + // Something went wrong in ResolveMethod or EnsureInitialized, + // go into deliver exception with the pending exception in r0 + CHECK(thread->IsExceptionPending()); + code = reinterpret_cast(art_quick_deliver_exception_from_code); + regs[0] = reinterpret_cast(thread->GetException(NULL)); + thread->ClearException(); + } else { + // Expect class to at least be initializing. + DCHECK(called->GetDeclaringClass()->IsInitializing()); + // Don't want infinite recursion. + DCHECK(code != GetResolutionTrampoline(linker)); + // Set up entry into main method + regs[0] = reinterpret_cast(called); + } + return code; +} + +// Called by the abstract method error stub. 
+extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if !defined(ART_USE_PORTABLE_COMPILER) + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); +#else + UNUSED(sp); +#endif + ThrowAbstractMethodError(method); + self->QuickDeliverException(); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc new file mode 100644 index 0000000000..b4d6c0ba8d --- /dev/null +++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" +#include "thread.h" +#include "thread_list.h" + +namespace art { + +void CheckSuspendFromCode(Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called when thread->suspend_count_ != 0 on JNI return. JNI method acts as callee-save frame. 
+ thread->VerifyStack(); + CheckSuspend(thread); +} + +extern "C" void artTestSuspendFromCode(Thread* thread, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called when suspend count check value is 0 and thread->suspend_count_ != 0 + FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); + CheckSuspend(thread); +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc new file mode 100644 index 0000000000..3bfa2f2611 --- /dev/null +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/object.h" +#include "object_utils.h" +#include "thread.h" +#include "well_known_classes.h" + +namespace art { + +// Deliver an exception that's pending on thread helping set up a callee save frame on the way. +extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); + thread->QuickDeliverException(); +} + +// Called by generated call to throw an exception. 
+extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + /* + * exception may be NULL, in which case this routine should + * throw NPE. NOTE: this is a convenience for generated code, + * which previously did the null check inline and constructed + * and threw a NPE if NULL. This routine responsible for setting + * exception_ in thread and delivering the exception. + */ + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + if (exception == NULL) { + self->ThrowNewException(throw_location, "Ljava/lang/NullPointerException;", + "throw with null exception"); + } else { + self->SetException(throw_location, exception); + } + self->QuickDeliverException(); +} + +// Called by generated call to throw a NPE exception. +extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionFromDexPC(throw_location); + self->QuickDeliverException(); +} + +// Called by generated call to throw an arithmetic divide by zero exception. +extern "C" void artThrowDivZeroFromCode(Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowArithmeticExceptionDivideByZero(); + self->QuickDeliverException(); +} + +// Called by generated call to throw an array index out of bounds exception. 
+extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowArrayIndexOutOfBoundsException(index, length); + self->QuickDeliverException(); +} + +extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowStackOverflowError(self); + self->QuickDeliverException(); +} + +extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); + ThrowNoSuchMethodError(method_idx); + self->QuickDeliverException(); +} + +} // namespace art diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h index 92d9ea2828..a732566f65 100644 --- a/runtime/gc/accounting/atomic_stack.h +++ b/runtime/gc/accounting/atomic_stack.h @@ -47,7 +47,7 @@ class AtomicStack { DCHECK(begin_ != NULL); front_index_ = 0; back_index_ = 0; - is_sorted_ = true; + debug_is_sorted_ = true; int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED); if (result == -1) { PLOG(WARNING) << "madvise failed"; @@ -58,8 +58,10 @@ class AtomicStack { // Returns false if we overflowed the stack. 
bool AtomicPushBack(const T& value) { + if (kIsDebugBuild) { + debug_is_sorted_ = false; + } int32_t index; - is_sorted_ = false; do { index = back_index_; if (UNLIKELY(static_cast(index) >= capacity_)) { @@ -72,7 +74,9 @@ class AtomicStack { } void PushBack(const T& value) { - is_sorted_ = false; + if (kIsDebugBuild) { + debug_is_sorted_ = false; + } int32_t index = back_index_; DCHECK_LT(static_cast(index), capacity_); back_index_ = index + 1; @@ -122,22 +126,23 @@ class AtomicStack { } void Sort() { - if (!is_sorted_) { - int32_t start_back_index = back_index_.load(); - int32_t start_front_index = front_index_.load(); - is_sorted_ = true; - std::sort(Begin(), End()); - CHECK_EQ(start_back_index, back_index_.load()); - CHECK_EQ(start_front_index, front_index_.load()); + int32_t start_back_index = back_index_.load(); + int32_t start_front_index = front_index_.load(); + std::sort(Begin(), End()); + CHECK_EQ(start_back_index, back_index_.load()); + CHECK_EQ(start_front_index, front_index_.load()); + if (kIsDebugBuild) { + debug_is_sorted_ = true; } } + bool ContainsSorted(const T& value) const { + DCHECK(debug_is_sorted_); + return std::binary_search(Begin(), End(), value); + } + bool Contains(const T& value) const { - if (is_sorted_) { - return std::binary_search(Begin(), End(), value); - } else { - return std::find(Begin(), End(), value) != End(); - } + return std::find(Begin(), End(), value) != End(); } private: @@ -147,7 +152,7 @@ class AtomicStack { front_index_(0), begin_(NULL), capacity_(capacity), - is_sorted_(true) { + debug_is_sorted_(true) { } // Size in number of elements. @@ -156,6 +161,7 @@ class AtomicStack { CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack"; byte* addr = mem_map_->Begin(); CHECK(addr != NULL); + debug_is_sorted_ = true; begin_ = reinterpret_cast(addr); Reset(); } @@ -178,7 +184,8 @@ class AtomicStack { // Maximum number of elements. 
size_t capacity_; - bool is_sorted_; + // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead. + bool debug_is_sorted_; DISALLOW_COPY_AND_ASSIGN(AtomicStack); }; diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index 1684664eff..0f566c954b 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -64,7 +64,7 @@ class GarbageCollector { void RegisterPause(uint64_t nano_length); - base::NewTimingLogger& GetTimings() { + base::TimingLogger& GetTimings() { return timings_; } @@ -101,7 +101,7 @@ class GarbageCollector { const bool verbose_; uint64_t duration_ns_; - base::NewTimingLogger timings_; + base::TimingLogger timings_; // Cumulative statistics. uint64_t total_time_ns_; diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 5736e3817b..89c768a34e 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -1509,7 +1509,7 @@ void MarkSweep::FinishPhase() { // Update the cumulative loggers. cumulative_timings_.Start(); - cumulative_timings_.AddNewLogger(timings_); + cumulative_timings_.AddLogger(timings_); cumulative_timings_.End(); // Clear all of the spaces' mark bitmaps. diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 0c1c6312a8..00f7e5b57f 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -148,7 +148,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max CHECK(large_object_space_ != NULL) << "Failed to create large object space"; AddDiscontinuousSpace(large_object_space_); - alloc_space_ = space::DlMallocSpace::Create("alloc space", + alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? 
"zygote space" : "alloc space", initial_size, growth_limit, capacity, requested_alloc_space_begin); @@ -524,25 +524,24 @@ bool Heap::IsHeapAddress(const mirror::Object* obj) { bool Heap::IsLiveObjectLocked(const mirror::Object* obj) { // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current()); - if (obj == NULL) { - return false; - } - if (UNLIKELY(!IsAligned(obj))) { + if (obj == NULL || UNLIKELY(!IsAligned(obj))) { return false; } - space::ContinuousSpace* cont_space = FindContinuousSpaceFromObject(obj, true); - if (cont_space != NULL) { - if (cont_space->GetLiveBitmap()->Test(obj)) { + space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true); + space::DiscontinuousSpace* d_space = NULL; + if (c_space != NULL) { + if (c_space->GetLiveBitmap()->Test(obj)) { return true; } } else { - space::DiscontinuousSpace* disc_space = FindDiscontinuousSpaceFromObject(obj, true); - if (disc_space != NULL) { - if (disc_space->GetLiveObjects()->Test(obj)) { + d_space = FindDiscontinuousSpaceFromObject(obj, true); + if (d_space != NULL) { + if (d_space->GetLiveObjects()->Test(obj)) { return true; } } } + // This is covering the allocation/live stack swapping that is done without mutators suspended. for (size_t i = 0; i < 5; ++i) { if (allocation_stack_->Contains(const_cast(obj)) || live_stack_->Contains(const_cast(obj))) { @@ -550,6 +549,18 @@ bool Heap::IsLiveObjectLocked(const mirror::Object* obj) { } NanoSleep(MsToNs(10)); } + // We need to check the bitmaps again since there is a race where we mark something as live and + // then clear the stack containing it. 
+ if (c_space != NULL) { + if (c_space->GetLiveBitmap()->Test(obj)) { + return true; + } + } else { + d_space = FindDiscontinuousSpaceFromObject(obj, true); + if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) { + return true; + } + } return false; } @@ -972,7 +983,7 @@ void Heap::PreZygoteFork() { // Turns the current alloc space into a Zygote space and obtain the new alloc space composed // of the remaining available heap memory. space::DlMallocSpace* zygote_space = alloc_space_; - alloc_space_ = zygote_space->CreateZygoteSpace(); + alloc_space_ = zygote_space->CreateZygoteSpace("alloc space"); alloc_space_->SetFootprintLimit(alloc_space_->Capacity()); // Change the GC retention policy of the zygote space to only collect when full. @@ -1131,7 +1142,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus << PrettySize(total_memory) << ", " << "paused " << pause_string.str() << " total " << PrettyDuration((duration / 1000) * 1000); if (VLOG_IS_ON(heap)) { - LOG(INFO) << Dumpable(collector->GetTimings()); + LOG(INFO) << Dumpable(collector->GetTimings()); } } @@ -1149,7 +1160,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus return gc_type; } -void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings, +void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings, collector::GcType gc_type) { if (gc_type == collector::kGcTypeSticky) { // Don't need to do anything for mod union table in this case since we are only scanning dirty @@ -1229,10 +1240,10 @@ class VerifyReferenceVisitor { if (bitmap != NULL && bitmap->Test(obj)) { LOG(ERROR) << "Object " << obj << " found in live bitmap"; } - if (alloc_stack->Contains(const_cast(obj))) { + if (alloc_stack->ContainsSorted(const_cast(obj))) { LOG(ERROR) << "Object " << obj << " found in allocation stack"; } - if (live_stack->Contains(const_cast(obj))) { + if 
(live_stack->ContainsSorted(const_cast(obj))) { LOG(ERROR) << "Object " << obj << " found in live stack"; } // Attempt to see if the card table missed the reference. @@ -1252,10 +1263,10 @@ class VerifyReferenceVisitor { } else { LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref); } - if (alloc_stack->Contains(const_cast(ref))) { + if (alloc_stack->ContainsSorted(const_cast(ref))) { LOG(ERROR) << "Reference " << ref << " found in allocation stack!"; } - if (live_stack->Contains(const_cast(ref))) { + if (live_stack->ContainsSorted(const_cast(ref))) { LOG(ERROR) << "Reference " << ref << " found in live stack!"; } heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: "); @@ -1345,8 +1356,8 @@ class VerifyReferenceCardVisitor { // Card should be either kCardDirty if it got re-dirtied after we aged it, or // kCardDirty - 1 if it didnt get touched since we aged it. accounting::ObjectStack* live_stack = heap_->live_stack_.get(); - if (live_stack->Contains(const_cast(ref))) { - if (live_stack->Contains(const_cast(obj))) { + if (live_stack->ContainsSorted(const_cast(ref))) { + if (live_stack->ContainsSorted(const_cast(obj))) { LOG(ERROR) << "Object " << obj << " found in live stack"; } if (heap_->GetLiveBitmap()->Test(obj)) { @@ -1441,7 +1452,7 @@ void Heap::SwapStacks() { } } -void Heap::ProcessCards(base::NewTimingLogger& timings) { +void Heap::ProcessCards(base::TimingLogger& timings) { // Clear cards and keep track of cards cleared in the mod-union table. 
typedef std::vector::iterator It; for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { @@ -1934,5 +1945,27 @@ void Heap::RegisterNativeFree(int bytes) { } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size)); } +int64_t Heap::GetTotalMemory() const { + int64_t ret = 0; + typedef std::vector::const_iterator It; + for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { + space::ContinuousSpace* space = *it; + if (space->IsImageSpace()) { + // Currently don't include the image space. + } else if (space->IsDlMallocSpace()) { + // Zygote or alloc space + ret += space->AsDlMallocSpace()->GetFootprint(); + } + } + typedef std::vector::const_iterator It2; + for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) { + space::DiscontinuousSpace* space = *it; + if (space->IsLargeObjectSpace()) { + ret += space->AsLargeObjectSpace()->GetBytesAllocated(); + } + } + return ret; +} + } // namespace gc } // namespace art diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 20512b8d0b..7615f981dd 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -330,11 +330,7 @@ class Heap { // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an // application. - int64_t GetTotalMemory() const { - // TODO: we use the footprint limit here which is conservative wrt number of pages really used. - // We could implement a more accurate count across all spaces. - return max_allowed_footprint_; - } + int64_t GetTotalMemory() const; // Implements java.lang.Runtime.freeMemory. int64_t GetFreeMemory() const { @@ -382,7 +378,7 @@ class Heap { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Update and mark mod union table based on gc type. 
- void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings, + void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings, collector::GcType gc_type) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); @@ -475,7 +471,7 @@ class Heap { void SwapStacks(); // Clear cards and update the mod union table. - void ProcessCards(base::NewTimingLogger& timings); + void ProcessCards(base::TimingLogger& timings); // All-known continuous spaces, where objects lie within fixed bounds. std::vector continuous_spaces_; diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc index ee88edaaae..de4917fcee 100644 --- a/runtime/gc/space/dlmalloc_space.cc +++ b/runtime/gc/space/dlmalloc_space.cc @@ -286,7 +286,7 @@ void DlMallocSpace::SetGrowthLimit(size_t growth_limit) { } } -DlMallocSpace* DlMallocSpace::CreateZygoteSpace() { +DlMallocSpace* DlMallocSpace::CreateZygoteSpace(const char* alloc_space_name) { end_ = reinterpret_cast(RoundUp(reinterpret_cast(end_), kPageSize)); DCHECK(IsAligned(begin_)); DCHECK(IsAligned(end_)); @@ -316,20 +316,19 @@ DlMallocSpace* DlMallocSpace::CreateZygoteSpace() { VLOG(heap) << "Size " << GetMemMap()->Size(); VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit); VLOG(heap) << "Capacity " << PrettySize(capacity); - UniquePtr mem_map(MemMap::MapAnonymous(GetName(), End(), capacity, PROT_READ | PROT_WRITE)); + UniquePtr mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity, PROT_READ | PROT_WRITE)); void* mspace = CreateMallocSpace(end_, starting_size, initial_size); // Protect memory beyond the initial size. 
byte* end = mem_map->Begin() + starting_size; if (capacity - initial_size > 0) { - CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str()); + CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name); } DlMallocSpace* alloc_space = - new DlMallocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit); + new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, growth_limit); live_bitmap_->SetHeapLimit(reinterpret_cast(End())); CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast(End())); mark_bitmap_->SetHeapLimit(reinterpret_cast(End())); CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast(End())); - name_ += "-zygote-transformed"; VLOG(heap) << "zygote space creation done"; return alloc_space; } @@ -449,6 +448,11 @@ void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_byte callback(NULL, NULL, 0, arg); // Indicate end of a space. } +size_t DlMallocSpace::GetFootprint() { + MutexLock mu(Thread::Current(), lock_); + return mspace_footprint(mspace_); +} + size_t DlMallocSpace::GetFootprintLimit() { MutexLock mu(Thread::Current(), lock_); return mspace_footprint_limit(mspace_); diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h index 8a4314c716..c15d0babcc 100644 --- a/runtime/gc/space/dlmalloc_space.h +++ b/runtime/gc/space/dlmalloc_space.h @@ -73,6 +73,10 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace { // in use, indicated by num_bytes equaling zero. void Walk(WalkCallback callback, void* arg); + // Returns the number of bytes that the space has currently obtained from the system. This is + // greater or equal to the amount of live data in the space. + size_t GetFootprint(); + // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore. 
size_t GetFootprintLimit(); @@ -113,7 +117,7 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace { void SwapBitmaps(); // Turn ourself into a zygote space and return a new alloc space which has our unused memory. - DlMallocSpace* CreateZygoteSpace(); + DlMallocSpace* CreateZygoteSpace(const char* alloc_space_name); uint64_t GetBytesAllocated() const { return num_bytes_allocated_; diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc index 08ae894e58..3003140e5d 100644 --- a/runtime/gc/space/space_test.cc +++ b/runtime/gc/space/space_test.cc @@ -123,7 +123,7 @@ TEST_F(SpaceTest, ZygoteSpace) { // Make sure that the zygote space isn't directly at the start of the space. space->Alloc(self, 1U * MB); - space = space->CreateZygoteSpace(); + space = space->CreateZygoteSpace("alloc space"); // Make space findable to the heap, will also delete space when runtime is cleaned up AddContinuousSpace(space); diff --git a/runtime/image_test.cc b/runtime/image_test.cc index 75eead4d8f..22bed2e3d2 100644 --- a/runtime/image_test.cc +++ b/runtime/image_test.cc @@ -44,7 +44,8 @@ TEST_F(ImageTest, WriteRead) { { jobject class_loader = NULL; ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - TimingLogger timings("ImageTest::WriteRead", false); + base::TimingLogger timings("ImageTest::WriteRead", false, false); + timings.StartSplit("CompileAll"); compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); ScopedObjectAccess soa(Thread::Current()); diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index e99fbd8d53..c0b85f41fd 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -30,7 +30,7 @@ #include "mirror/object-inl.h" #include "nth_caller_visitor.h" #if !defined(ART_USE_PORTABLE_COMPILER) -#include "oat/runtime/oat_support_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" #endif #include "object_utils.h" #include "os.h" diff --git 
a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 30c7a46204..ef4b95c037 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -24,6 +24,7 @@ #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "dex_instruction.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/accounting/card_table-inl.h" #include "invoke_arg_array_builder.h" #include "nth_caller_visitor.h" @@ -35,7 +36,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "object_utils.h" -#include "runtime_support.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "thread.h" @@ -408,11 +408,11 @@ static void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS { // TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template // specialization. template -static void DoInvoke(Thread* self, ShadowFrame& shadow_frame, +static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS; template -static void DoInvoke(Thread* self, ShadowFrame& shadow_frame, +static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, JValue* result) { uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); uint32_t vregC = (is_range) ? 
inst->VRegC_3rc() : inst->VRegC_35c(); @@ -422,7 +422,11 @@ static void DoInvoke(Thread* self, ShadowFrame& shadow_frame, if (UNLIKELY(method == NULL)) { CHECK(self->IsExceptionPending()); result->SetJ(0); - return; + return false; + } else if (UNLIKELY(method->IsAbstract())) { + ThrowAbstractMethodError(method); + result->SetJ(0); + return false; } MethodHelper mh(method); @@ -432,9 +436,6 @@ static void DoInvoke(Thread* self, ShadowFrame& shadow_frame, if (LIKELY(code_item != NULL)) { num_regs = code_item->registers_size_; num_ins = code_item->ins_size_; - } else if (method->IsAbstract()) { - ThrowAbstractMethodError(method); - return; } else { DCHECK(method->IsNative() || method->IsProxyMethod()); num_regs = num_ins = AbstractMethod::NumArgRegisters(mh.GetShorty()); @@ -486,17 +487,18 @@ static void DoInvoke(Thread* self, ShadowFrame& shadow_frame, } else { UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins); } + return !self->IsExceptionPending(); } // TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template // specialization. template -static void DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, +static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS; template -static void DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, +static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, JValue* result) { uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c(); Object* receiver = shadow_frame.GetVRegReference(vregC); @@ -504,26 +506,28 @@ static void DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, // We lost the reference to the method index so we cannot get a more // precised exception message. 
ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); - return; + return false; } uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); + // TODO: use ObjectArray::GetWithoutChecks ? AbstractMethod* method = receiver->GetClass()->GetVTable()->Get(vtable_idx); if (UNLIKELY(method == NULL)) { CHECK(self->IsExceptionPending()); result->SetJ(0); - return; + return false; + } else if (UNLIKELY(method->IsAbstract())) { + ThrowAbstractMethodError(method); + result->SetJ(0); + return false; } - MethodHelper mh(method); + MethodHelper mh(method); const DexFile::CodeItem* code_item = mh.GetCodeItem(); uint16_t num_regs; uint16_t num_ins; if (code_item != NULL) { num_regs = code_item->registers_size_; num_ins = code_item->ins_size_; - } else if (method->IsAbstract()) { - ThrowAbstractMethodError(method); - return; } else { DCHECK(method->IsNative() || method->IsProxyMethod()); num_regs = num_ins = AbstractMethod::NumArgRegisters(mh.GetShorty()); @@ -576,6 +580,7 @@ static void DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, } else { UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins); } + return !self->IsExceptionPending(); } // We use template functions to optimize compiler inlining process. Otherwise, @@ -587,12 +592,12 @@ static void DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, // TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template // specialization. 
template -static void DoFieldGet(Thread* self, ShadowFrame& shadow_frame, +static bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst) NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE; template -static inline void DoFieldGet(Thread* self, ShadowFrame& shadow_frame, +static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst) { bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead); uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c(); @@ -601,7 +606,7 @@ static inline void DoFieldGet(Thread* self, ShadowFrame& shadow_frame, do_access_check); if (UNLIKELY(f == NULL)) { CHECK(self->IsExceptionPending()); - return; + return false; } Object* obj; if (is_static) { @@ -610,7 +615,7 @@ static inline void DoFieldGet(Thread* self, ShadowFrame& shadow_frame, obj = shadow_frame.GetVRegReference(inst->VRegB_22c()); if (UNLIKELY(obj == NULL)) { ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true); - return; + return false; } } uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c(); @@ -639,24 +644,25 @@ static inline void DoFieldGet(Thread* self, ShadowFrame& shadow_frame, default: LOG(FATAL) << "Unreachable: " << field_type; } + return true; } // TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template // specialization. 
template -static void DoIGetQuick(Thread* self, ShadowFrame& shadow_frame, +static bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst) NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE; template -static inline void DoIGetQuick(Thread* self, ShadowFrame& shadow_frame, +static inline bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst) { Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c()); if (UNLIKELY(obj == NULL)) { // We lost the reference to the field index so we cannot get a more // precised exception message. ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); - return; + return false; } MemberOffset field_offset(inst->VRegC_22c()); const bool is_volatile = false; // iget-x-quick only on non volatile fields. @@ -674,17 +680,18 @@ static inline void DoIGetQuick(Thread* self, ShadowFrame& shadow_frame, default: LOG(FATAL) << "Unreachable: " << field_type; } + return true; } // TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template // specialization. template -static void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, +static bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst) NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE; template -static inline void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, +static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst) { bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite); uint32_t field_idx = is_static ? 
inst->VRegB_21c() : inst->VRegC_22c(); @@ -693,7 +700,7 @@ static inline void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, do_access_check); if (UNLIKELY(f == NULL)) { CHECK(self->IsExceptionPending()); - return; + return false; } Object* obj; if (is_static) { @@ -703,7 +710,7 @@ static inline void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, if (UNLIKELY(obj == NULL)) { ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, false); - return; + return false; } } uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c(); @@ -732,24 +739,25 @@ static inline void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, default: LOG(FATAL) << "Unreachable: " << field_type; } + return true; } // TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template // specialization. template -static void DoIPutQuick(Thread* self, ShadowFrame& shadow_frame, +static bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst) NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE; template -static inline void DoIPutQuick(Thread* self, ShadowFrame& shadow_frame, +static inline bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst) { Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c()); if (UNLIKELY(obj == NULL)) { // We lost the reference to the field index so we cannot get a more // precised exception message. ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow()); - return; + return false; } MemberOffset field_offset(inst->VRegC_22c()); const bool is_volatile = false; // iput-x-quick only on non volatile fields. 
@@ -767,6 +775,7 @@ static inline void DoIPutQuick(Thread* self, ShadowFrame& shadow_frame, default: LOG(FATAL) << "Unreachable: " << field_type; } + return true; } static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx) @@ -783,52 +792,64 @@ static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t str return mh.ResolveString(string_idx); } -static inline void DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg, +static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); - } else if (UNLIKELY(dividend == kMinInt && divisor == -1)) { + return false; + } + if (UNLIKELY(dividend == kMinInt && divisor == -1)) { shadow_frame.SetVReg(result_reg, kMinInt); } else { shadow_frame.SetVReg(result_reg, dividend / divisor); } + return true; } -static inline void DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg, +static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); - } else if (UNLIKELY(dividend == kMinInt && divisor == -1)) { + return false; + } + if (UNLIKELY(dividend == kMinInt && divisor == -1)) { shadow_frame.SetVReg(result_reg, 0); } else { shadow_frame.SetVReg(result_reg, dividend % divisor); } + return true; } -static inline void DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg, +static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); - } else if (UNLIKELY(dividend == kMinLong && divisor == -1)) { + return false; + } + if (UNLIKELY(dividend == kMinLong && divisor == -1)) { 
shadow_frame.SetVRegLong(result_reg, kMinLong); } else { shadow_frame.SetVRegLong(result_reg, dividend / divisor); } + return true; } -static inline void DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg, +static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); - } else if (UNLIKELY(dividend == kMinLong && divisor == -1)) { + return false; + } + if (UNLIKELY(dividend == kMinLong && divisor == -1)) { shadow_frame.SetVRegLong(result_reg, 0); } else { shadow_frame.SetVRegLong(result_reg, dividend % divisor); } + return true; } // TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template @@ -954,7 +975,9 @@ static inline const Instruction* FindNextInstructionFollowingException(Thread* s self->VerifyStack(); ThrowLocation throw_location; mirror::Throwable* exception = self->GetException(&throw_location); - uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc); + bool clear_exception; + uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc, + &clear_exception); if (found_dex_pc == DexFile::kDexNoIndex) { instrumentation->MethodUnwindEvent(self, this_object_ref.get(), shadow_frame.GetMethod(), dex_pc); @@ -963,6 +986,9 @@ static inline const Instruction* FindNextInstructionFollowingException(Thread* s instrumentation->ExceptionCaughtEvent(self, throw_location, shadow_frame.GetMethod(), found_dex_pc, exception); + if (clear_exception) { + self->ClearException(); + } return Instruction::At(insns + found_dex_pc); } } @@ -975,13 +1001,9 @@ static inline const Instruction* FindNextInstructionFollowingException(Thread* s return JValue(); /* Handled in caller. 
*/ \ } -#define POSSIBLY_HANDLE_PENDING_EXCEPTION(next_function) \ - if (UNLIKELY(self->IsExceptionPending())) { \ - inst = FindNextInstructionFollowingException(self, shadow_frame, inst->GetDexPc(insns), insns, \ - this_object_ref, instrumentation); \ - if (inst == NULL) { \ - return JValue(); /* Handled in caller. */ \ - } \ +#define POSSIBLY_HANDLE_PENDING_EXCEPTION(is_exception_pending, next_function) \ + if (UNLIKELY(is_exception_pending)) { \ + HANDLE_PENDING_EXCEPTION(); \ } else { \ inst = inst->next_function(); \ } @@ -1013,28 +1035,29 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte return JValue(); } self->VerifyStack(); - instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - const uint16_t* const insns = code_item->insns_; + instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation(); // As the 'this' object won't change during the execution of current code, we // want to cache it in local variables. Nevertheless, in order to let the // garbage collector access it, we store it into sirt references. SirtRef this_object_ref(self, shadow_frame.GetThisObject(code_item->ins_size_)); - const Instruction* inst = Instruction::At(insns + shadow_frame.GetDexPC()); - if (inst->GetDexPc(insns) == 0) { // We are entering the method as opposed to deoptimizing.. + uint32_t dex_pc = shadow_frame.GetDexPC(); + if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.. 
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) { instrumentation->MethodEnterEvent(self, this_object_ref.get(), shadow_frame.GetMethod(), 0); } } + const uint16_t* const insns = code_item->insns_; + const Instruction* inst = Instruction::At(insns + dex_pc); while (true) { + dex_pc = inst->GetDexPc(insns); + shadow_frame.SetDexPC(dex_pc); if (UNLIKELY(self->TestAllFlags())) { CheckSuspend(self); } - const uint32_t dex_pc = inst->GetDexPc(insns); - shadow_frame.SetDexPC(dex_pc); - if (instrumentation->HasDexPcListeners()) { + if (UNLIKELY(instrumentation->HasDexPcListeners())) { instrumentation->DexPcMovedEvent(self, this_object_ref.get(), shadow_frame.GetMethod(), dex_pc); } @@ -1200,8 +1223,8 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::CONST_4: { PREAMBLE(); - uint32_t dst = inst->VRegA_11n(); - int32_t val = inst->VRegB_11n(); + uint4_t dst = inst->VRegA_11n(); + int4_t val = inst->VRegB_11n(); shadow_frame.SetVReg(dst, val); if (val == 0) { shadow_frame.SetVRegReference(dst, NULL); @@ -1211,8 +1234,8 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::CONST_16: { PREAMBLE(); - uint32_t dst = inst->VRegA_21s(); - int32_t val = inst->VRegB_21s(); + uint8_t dst = inst->VRegA_21s(); + int16_t val = inst->VRegB_21s(); shadow_frame.SetVReg(dst, val); if (val == 0) { shadow_frame.SetVRegReference(dst, NULL); @@ -1222,7 +1245,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::CONST: { PREAMBLE(); - uint32_t dst = inst->VRegA_31i(); + uint8_t dst = inst->VRegA_31i(); int32_t val = inst->VRegB_31i(); shadow_frame.SetVReg(dst, val); if (val == 0) { @@ -1233,7 +1256,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::CONST_HIGH16: { PREAMBLE(); - uint32_t dst = inst->VRegA_21h(); + uint8_t dst = inst->VRegA_21h(); int32_t val = 
static_cast(inst->VRegB_21h() << 16); shadow_frame.SetVReg(dst, val); if (val == 0) { @@ -1304,7 +1327,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte HANDLE_PENDING_EXCEPTION(); } else { DoMonitorEnter(self, obj); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx); + POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx); } break; } @@ -1316,7 +1339,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte HANDLE_PENDING_EXCEPTION(); } else { DoMonitorExit(self, obj); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx); + POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx); } break; } @@ -1391,22 +1414,14 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte PREAMBLE(); bool success = DoFilledNewArray(inst, shadow_frame, self, &result_register); - if (LIKELY(success)) { - inst = inst->Next_3xx(); - } else { - HANDLE_PENDING_EXCEPTION(); - } + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; } case Instruction::FILLED_NEW_ARRAY_RANGE: { PREAMBLE(); bool success = DoFilledNewArray(inst, shadow_frame, self, &result_register); - if (LIKELY(success)) { - inst = inst->Next_3xx(); - } else { - HANDLE_PENDING_EXCEPTION(); - } + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; } case Instruction::FILL_ARRAY_DATA: { @@ -1934,236 +1949,282 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } break; } - case Instruction::IGET_BOOLEAN: + case Instruction::IGET_BOOLEAN: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_BYTE: + } + case Instruction::IGET_BYTE: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, 
inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_CHAR: + } + case Instruction::IGET_CHAR: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_SHORT: + } + case Instruction::IGET_SHORT: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET: + } + case Instruction::IGET: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_WIDE: + } + case Instruction::IGET_WIDE: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_OBJECT: + } + case Instruction::IGET_OBJECT: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_QUICK: + } + case Instruction::IGET_QUICK: { PREAMBLE(); - DoIGetQuick(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIGetQuick(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_WIDE_QUICK: + } + case Instruction::IGET_WIDE_QUICK: { PREAMBLE(); - DoIGetQuick(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = 
DoIGetQuick(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IGET_OBJECT_QUICK: + } + case Instruction::IGET_OBJECT_QUICK: { PREAMBLE(); - DoIGetQuick(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIGetQuick(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SGET_BOOLEAN: + } + case Instruction::SGET_BOOLEAN: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SGET_BYTE: + } + case Instruction::SGET_BYTE: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SGET_CHAR: + } + case Instruction::SGET_CHAR: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SGET_SHORT: + } + case Instruction::SGET_SHORT: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SGET: + } + case Instruction::SGET: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SGET_WIDE: + } + case Instruction::SGET_WIDE: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - 
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SGET_OBJECT: + } + case Instruction::SGET_OBJECT: { PREAMBLE(); - DoFieldGet(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldGet(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_BOOLEAN: + } + case Instruction::IPUT_BOOLEAN: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_BYTE: + } + case Instruction::IPUT_BYTE: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_CHAR: + } + case Instruction::IPUT_CHAR: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_SHORT: + } + case Instruction::IPUT_SHORT: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT: + } + case Instruction::IPUT: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_WIDE: + } + case Instruction::IPUT_WIDE: { PREAMBLE(); - DoFieldPut(self, shadow_frame, 
inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_OBJECT: + } + case Instruction::IPUT_OBJECT: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_QUICK: + } + case Instruction::IPUT_QUICK: { PREAMBLE(); - DoIPutQuick(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIPutQuick(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_WIDE_QUICK: + } + case Instruction::IPUT_WIDE_QUICK: { PREAMBLE(); - DoIPutQuick(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIPutQuick(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::IPUT_OBJECT_QUICK: + } + case Instruction::IPUT_OBJECT_QUICK: { PREAMBLE(); - DoIPutQuick(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIPutQuick(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SPUT_BOOLEAN: + } + case Instruction::SPUT_BOOLEAN: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SPUT_BYTE: + } + case Instruction::SPUT_BYTE: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SPUT_CHAR: + } + case 
Instruction::SPUT_CHAR: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SPUT_SHORT: + } + case Instruction::SPUT_SHORT: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SPUT: + } + case Instruction::SPUT: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SPUT_WIDE: + } + case Instruction::SPUT_WIDE: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::SPUT_OBJECT: + } + case Instruction::SPUT_OBJECT: { PREAMBLE(); - DoFieldPut(self, shadow_frame, inst); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoFieldPut(self, shadow_frame, inst); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::INVOKE_VIRTUAL: + } + case Instruction::INVOKE_VIRTUAL: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_VIRTUAL_RANGE: + } + case Instruction::INVOKE_VIRTUAL_RANGE: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + 
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_SUPER: + } + case Instruction::INVOKE_SUPER: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_SUPER_RANGE: + } + case Instruction::INVOKE_SUPER_RANGE: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_DIRECT: + } + case Instruction::INVOKE_DIRECT: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_DIRECT_RANGE: + } + case Instruction::INVOKE_DIRECT_RANGE: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_INTERFACE: + } + case Instruction::INVOKE_INTERFACE: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_INTERFACE_RANGE: + } + case Instruction::INVOKE_INTERFACE_RANGE: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + 
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_STATIC: + } + case Instruction::INVOKE_STATIC: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_STATIC_RANGE: + } + case Instruction::INVOKE_STATIC_RANGE: { PREAMBLE(); - DoInvoke(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvoke(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_VIRTUAL_QUICK: + } + case Instruction::INVOKE_VIRTUAL_QUICK: { PREAMBLE(); - DoInvokeVirtualQuick(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvokeVirtualQuick(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; - case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: + } + case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: { PREAMBLE(); - DoInvokeVirtualQuick(self, shadow_frame, inst, &result_register); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx); + bool success = DoInvokeVirtualQuick(self, shadow_frame, inst, &result_register); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx); break; + } case Instruction::NEG_INT: PREAMBLE(); shadow_frame.SetVReg(inst->VRegA_12x(), -shadow_frame.GetVReg(inst->VRegB_12x())); @@ -2341,20 +2402,22 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte shadow_frame.GetVReg(inst->VRegC_23x())); inst = inst->Next_2xx(); break; - case Instruction::DIV_INT: + case Instruction::DIV_INT: { PREAMBLE(); - DoIntDivide(shadow_frame, inst->VRegA_23x(), - shadow_frame.GetVReg(inst->VRegB_23x()), - shadow_frame.GetVReg(inst->VRegC_23x())); - 
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(), + shadow_frame.GetVReg(inst->VRegB_23x()), + shadow_frame.GetVReg(inst->VRegC_23x())); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::REM_INT: + } + case Instruction::REM_INT: { PREAMBLE(); - DoIntRemainder(shadow_frame, inst->VRegA_23x(), - shadow_frame.GetVReg(inst->VRegB_23x()), - shadow_frame.GetVReg(inst->VRegC_23x())); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(), + shadow_frame.GetVReg(inst->VRegB_23x()), + shadow_frame.GetVReg(inst->VRegC_23x())); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; + } case Instruction::SHL_INT: PREAMBLE(); shadow_frame.SetVReg(inst->VRegA_23x(), @@ -2423,14 +2486,14 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte DoLongDivide(shadow_frame, inst->VRegA_23x(), shadow_frame.GetVRegLong(inst->VRegB_23x()), shadow_frame.GetVRegLong(inst->VRegC_23x())); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx); break; case Instruction::REM_LONG: PREAMBLE(); DoLongRemainder(shadow_frame, inst->VRegA_23x(), shadow_frame.GetVRegLong(inst->VRegB_23x()), shadow_frame.GetVRegLong(inst->VRegC_23x())); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx); break; case Instruction::AND_LONG: PREAMBLE(); @@ -2546,7 +2609,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte break; case Instruction::ADD_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) + shadow_frame.GetVReg(inst->VRegB_12x())); @@ -2555,7 +2618,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case 
Instruction::SUB_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) - shadow_frame.GetVReg(inst->VRegB_12x())); @@ -2564,7 +2627,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::MUL_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) * shadow_frame.GetVReg(inst->VRegB_12x())); @@ -2573,23 +2636,23 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::DIV_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); - DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA), - shadow_frame.GetVReg(inst->VRegB_12x())); - inst = inst->Next_1xx(); + uint4_t vregA = inst->VRegA_12x(); + bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA), + shadow_frame.GetVReg(inst->VRegB_12x())); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx); break; } case Instruction::REM_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); - DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA), - shadow_frame.GetVReg(inst->VRegB_12x())); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx); + uint4_t vregA = inst->VRegA_12x(); + bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA), + shadow_frame.GetVReg(inst->VRegB_12x())); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx); break; } case Instruction::SHL_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) << (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f)); @@ -2598,7 +2661,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::SHR_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + 
uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) >> (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f)); @@ -2607,7 +2670,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::USHR_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, static_cast(shadow_frame.GetVReg(vregA)) >> (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f)); @@ -2616,7 +2679,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::AND_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) & shadow_frame.GetVReg(inst->VRegB_12x())); @@ -2625,7 +2688,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::OR_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) | shadow_frame.GetVReg(inst->VRegB_12x())); @@ -2634,7 +2697,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::XOR_INT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, shadow_frame.GetVReg(vregA) ^ shadow_frame.GetVReg(inst->VRegB_12x())); @@ -2643,7 +2706,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::ADD_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) + shadow_frame.GetVRegLong(inst->VRegB_12x())); @@ -2652,7 +2715,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::SUB_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = 
inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) - shadow_frame.GetVRegLong(inst->VRegB_12x())); @@ -2661,7 +2724,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::MUL_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) * shadow_frame.GetVRegLong(inst->VRegB_12x())); @@ -2670,23 +2733,23 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::DIV_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA), shadow_frame.GetVRegLong(inst->VRegB_12x())); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx); + POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx); break; } case Instruction::REM_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA), shadow_frame.GetVRegLong(inst->VRegB_12x())); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx); + POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx); break; } case Instruction::AND_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) & shadow_frame.GetVRegLong(inst->VRegB_12x())); @@ -2695,7 +2758,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::OR_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) | shadow_frame.GetVRegLong(inst->VRegB_12x())); @@ -2704,7 +2767,7 @@ static JValue ExecuteImpl(Thread* self, 
MethodHelper& mh, const DexFile::CodeIte } case Instruction::XOR_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) ^ shadow_frame.GetVRegLong(inst->VRegB_12x())); @@ -2713,7 +2776,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::SHL_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) << (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f)); @@ -2722,7 +2785,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::SHR_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, shadow_frame.GetVRegLong(vregA) >> (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f)); @@ -2731,7 +2794,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::USHR_LONG_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegLong(vregA, static_cast(shadow_frame.GetVRegLong(vregA)) >> (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f)); @@ -2740,7 +2803,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::ADD_FLOAT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegFloat(vregA, shadow_frame.GetVRegFloat(vregA) + shadow_frame.GetVRegFloat(inst->VRegB_12x())); @@ -2749,7 +2812,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::SUB_FLOAT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegFloat(vregA, shadow_frame.GetVRegFloat(vregA) - 
shadow_frame.GetVRegFloat(inst->VRegB_12x())); @@ -2758,7 +2821,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::MUL_FLOAT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegFloat(vregA, shadow_frame.GetVRegFloat(vregA) * shadow_frame.GetVRegFloat(inst->VRegB_12x())); @@ -2767,7 +2830,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::DIV_FLOAT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegFloat(vregA, shadow_frame.GetVRegFloat(vregA) / shadow_frame.GetVRegFloat(inst->VRegB_12x())); @@ -2776,7 +2839,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::REM_FLOAT_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegFloat(vregA, fmodf(shadow_frame.GetVRegFloat(vregA), shadow_frame.GetVRegFloat(inst->VRegB_12x()))); @@ -2785,7 +2848,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::ADD_DOUBLE_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegDouble(vregA, shadow_frame.GetVRegDouble(vregA) + shadow_frame.GetVRegDouble(inst->VRegB_12x())); @@ -2794,7 +2857,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::SUB_DOUBLE_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegDouble(vregA, shadow_frame.GetVRegDouble(vregA) - shadow_frame.GetVRegDouble(inst->VRegB_12x())); @@ -2803,7 +2866,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::MUL_DOUBLE_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = 
inst->VRegA_12x(); shadow_frame.SetVRegDouble(vregA, shadow_frame.GetVRegDouble(vregA) * shadow_frame.GetVRegDouble(inst->VRegB_12x())); @@ -2812,7 +2875,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::DIV_DOUBLE_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegDouble(vregA, shadow_frame.GetVRegDouble(vregA) / shadow_frame.GetVRegDouble(inst->VRegB_12x())); @@ -2821,7 +2884,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte } case Instruction::REM_DOUBLE_2ADDR: { PREAMBLE(); - uint32_t vregA = inst->VRegA_12x(); + uint4_t vregA = inst->VRegA_12x(); shadow_frame.SetVRegDouble(vregA, fmod(shadow_frame.GetVRegDouble(vregA), shadow_frame.GetVRegDouble(inst->VRegB_12x()))); @@ -2849,18 +2912,20 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte inst->VRegC_22s()); inst = inst->Next_2xx(); break; - case Instruction::DIV_INT_LIT16: + case Instruction::DIV_INT_LIT16: { PREAMBLE(); - DoIntDivide(shadow_frame, inst->VRegA_22s(), - shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s()); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(), + shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s()); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::REM_INT_LIT16: + } + case Instruction::REM_INT_LIT16: { PREAMBLE(); - DoIntRemainder(shadow_frame, inst->VRegA_22s(), - shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s()); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(), + shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s()); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; + } case Instruction::AND_INT_LIT16: PREAMBLE(); shadow_frame.SetVReg(inst->VRegA_22s(), @@ -2903,18 +2968,20 @@ static JValue 
ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte inst->VRegC_22b()); inst = inst->Next_2xx(); break; - case Instruction::DIV_INT_LIT8: + case Instruction::DIV_INT_LIT8: { PREAMBLE(); - DoIntDivide(shadow_frame, inst->VRegA_22b(), - shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(), + shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; - case Instruction::REM_INT_LIT8: + } + case Instruction::REM_INT_LIT8: { PREAMBLE(); - DoIntRemainder(shadow_frame, inst->VRegA_22b(), - shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()); - POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx); + bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(), + shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; + } case Instruction::AND_INT_LIT8: PREAMBLE(); shadow_frame.SetVReg(inst->VRegA_22b(), diff --git a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h index 2df1367637..d235e3eed8 100644 --- a/runtime/mirror/abstract_method-inl.h +++ b/runtime/mirror/abstract_method-inl.h @@ -20,9 +20,9 @@ #include "abstract_method.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "object_array.h" #include "runtime.h" -#include "runtime_support.h" namespace art { namespace mirror { diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc index 58ef5f7bc8..4d7f99e076 100644 --- a/runtime/mirror/abstract_method.cc +++ b/runtime/mirror/abstract_method.cc @@ -20,6 +20,7 @@ #include "base/stringpiece.h" #include "class-inl.h" #include "dex_file-inl.h" +#include "dex_instruction.h" #include "gc/accounting/card_table-inl.h" #include "interpreter/interpreter.h" #include "jni_internal.h" @@ -225,28 +226,37 @@ uintptr_t 
AbstractMethod::ToNativePc(const uint32_t dex_pc) const { return 0; } -uint32_t AbstractMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc) const { +uint32_t AbstractMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc, + bool* has_no_move_exception) const { MethodHelper mh(this); const DexFile::CodeItem* code_item = mh.GetCodeItem(); - // Iterate over the catch handlers associated with dex_pc + // Default to handler not found. + uint32_t found_dex_pc = DexFile::kDexNoIndex; + // Iterate over the catch handlers associated with dex_pc. for (CatchHandlerIterator it(*code_item, dex_pc); it.HasNext(); it.Next()) { uint16_t iter_type_idx = it.GetHandlerTypeIndex(); // Catch all case if (iter_type_idx == DexFile::kDexNoIndex16) { - return it.GetHandlerAddress(); + found_dex_pc = it.GetHandlerAddress(); + break; } // Does this catch exception type apply? Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); if (iter_exception_type == NULL) { // The verifier should take care of resolving all exception classes early LOG(WARNING) << "Unresolved exception class when finding catch block: " - << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); + << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); } else if (iter_exception_type->IsAssignableFrom(exception_type)) { - return it.GetHandlerAddress(); + found_dex_pc = it.GetHandlerAddress(); + break; } } - // Handler not found - return DexFile::kDexNoIndex; + if (found_dex_pc != DexFile::kDexNoIndex) { + const Instruction* first_catch_instr = + Instruction::At(&mh.GetCodeItem()->insns_[found_dex_pc]); + *has_no_move_exception = (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION); + } + return found_dex_pc; } void AbstractMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h index bbebecebb4..2e6e262451 100644 --- a/runtime/mirror/abstract_method.h +++ 
b/runtime/mirror/abstract_method.h @@ -407,8 +407,10 @@ class MANAGED AbstractMethod : public Object { uintptr_t ToFirstNativeSafepointPc(const uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Find the catch block for the given exception type and dex_pc - uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const + // Find the catch block for the given exception type and dex_pc. When a catch block is found, + // indicates whether the found catch block is responsible for clearing the exception or whether + // a move-exception instruction is present. + uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc, bool* has_no_move_exception) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method); diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h index b195a87fc0..db6132df59 100644 --- a/runtime/mirror/array.h +++ b/runtime/mirror/array.h @@ -72,7 +72,7 @@ class MANAGED Array : public Object { bool IsValidIndex(int32_t index) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (UNLIKELY(index < 0 || index >= GetLength())) { + if (UNLIKELY(static_cast(index) >= static_cast(GetLength()))) { ThrowArrayIndexOutOfBoundsException(index); return false; } diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc index 53a1df95a6..540ff9f68e 100644 --- a/runtime/mirror/object_test.cc +++ b/runtime/mirror/object_test.cc @@ -26,6 +26,7 @@ #include "class_linker-inl.h" #include "common_test.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "field-inl.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" @@ -33,7 +34,6 @@ #include "abstract_method-inl.h" #include "object-inl.h" #include "object_array-inl.h" -#include "runtime_support.h" #include "sirt_ref.h" #include "UniquePtr.h" diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc index 
60624c2704..e3ec3bcbb0 100644 --- a/runtime/native/dalvik_system_VMDebug.cc +++ b/runtime/native/dalvik_system_VMDebug.cc @@ -20,6 +20,9 @@ #include "class_linker.h" #include "common_throws.h" #include "debugger.h" +#include "gc/space/dlmalloc_space.h" +#include "gc/space/large_object_space.h" +#include "gc/space/space-inl.h" #include "hprof/hprof.h" #include "jni_internal.h" #include "mirror/class.h" @@ -234,6 +237,69 @@ static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass return count; } +// We export the VM internal per-heap-space size/alloc/free metrics +// for the zygote space, alloc space (application heap), and the large +// object space for dumpsys meminfo. The other memory region data such +// as PSS, private/shared dirty/shared data are available via +// /proc//smaps. +static void VMDebug_getHeapSpaceStats(JNIEnv* env, jclass, jlongArray data) { + jlong* arr = reinterpret_cast(env->GetPrimitiveArrayCritical(data, 0)); + if (arr == NULL || env->GetArrayLength(data) < 9) { + return; + } + + size_t allocSize = 0; + size_t allocUsed = 0; + size_t zygoteSize = 0; + size_t zygoteUsed = 0; + size_t largeObjectsSize = 0; + size_t largeObjectsUsed = 0; + + gc::Heap* heap = Runtime::Current()->GetHeap(); + const std::vector& continuous_spaces = heap->GetContinuousSpaces(); + const std::vector& discontinuous_spaces = heap->GetDiscontinuousSpaces(); + typedef std::vector::const_iterator It; + for (It it = continuous_spaces.begin(), end = continuous_spaces.end(); it != end; ++it) { + gc::space::ContinuousSpace* space = *it; + if (space->IsImageSpace()) { + // Currently don't include the image space. + } else if (space->IsZygoteSpace()) { + gc::space::DlMallocSpace* dlmalloc_space = space->AsDlMallocSpace(); + zygoteSize += dlmalloc_space->GetFootprint(); + zygoteUsed += dlmalloc_space->GetBytesAllocated(); + } else { + // This is the alloc space. 
+ gc::space::DlMallocSpace* dlmalloc_space = space->AsDlMallocSpace(); + allocSize += dlmalloc_space->GetFootprint(); + allocUsed += dlmalloc_space->GetBytesAllocated(); + } + } + typedef std::vector::const_iterator It2; + for (It2 it = discontinuous_spaces.begin(), end = discontinuous_spaces.end(); it != end; ++it) { + gc::space::DiscontinuousSpace* space = *it; + if (space->IsLargeObjectSpace()) { + largeObjectsSize += space->AsLargeObjectSpace()->GetBytesAllocated(); + largeObjectsUsed += largeObjectsSize; + } + } + + size_t allocFree = allocSize - allocUsed; + size_t zygoteFree = zygoteSize - zygoteUsed; + size_t largeObjectsFree = largeObjectsSize - largeObjectsUsed; + + int j = 0; + arr[j++] = allocSize; + arr[j++] = allocUsed; + arr[j++] = allocFree; + arr[j++] = zygoteSize; + arr[j++] = zygoteUsed; + arr[j++] = zygoteFree; + arr[j++] = largeObjectsSize; + arr[j++] = largeObjectsUsed; + arr[j++] = largeObjectsFree; + env->ReleasePrimitiveArrayCritical(data, arr, 0); +} + static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"), NATIVE_METHOD(VMDebug, crash, "()V"), @@ -241,6 +307,7 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"), NATIVE_METHOD(VMDebug, dumpReferenceTables, "()V"), NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"), + NATIVE_METHOD(VMDebug, getHeapSpaceStats, "([J)V"), NATIVE_METHOD(VMDebug, getInstructionCount, "([I)V"), NATIVE_METHOD(VMDebug, getLoadedClassCount, "()I"), NATIVE_METHOD(VMDebug, getVmFeatureList, "()[Ljava/lang/String;"), diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h deleted file mode 100644 index aaf93f7db7..0000000000 --- a/runtime/oat/runtime/argument_visitor.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright (C) 2013 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_ -#define ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_ - -#include "object_utils.h" - -namespace art { - -// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. -class PortableArgumentVisitor { - public: -// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. -// Size of Runtime::kRefAndArgs callee save frame. -// Size of Method* and register parameters in out stack arguments. -#if defined(__arm__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 -#define PORTABLE_STACK_ARG_SKIP 0 -#elif defined(__mips__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 -#define PORTABLE_STACK_ARG_SKIP 16 -#elif defined(__i386__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 -#define PORTABLE_STACK_ARG_SKIP 4 -#else -#error "Unsupported architecture" -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 -#define PORTABLE_STACK_ARG_SKIP 0 -#endif - - PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : - caller_mh_(caller_mh), - args_in_regs_(ComputeArgsInRegs(caller_mh)), - num_params_(caller_mh.NumArgs()), - reg_args_(reinterpret_cast(sp) + 
PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), - stack_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE - + PORTABLE_STACK_ARG_SKIP), - cur_args_(reg_args_), - cur_arg_index_(0), - param_index_(0) { - } - - virtual ~PortableArgumentVisitor() {} - - virtual void Visit() = 0; - - bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamAReference(param_index_); - } - - bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamALongOrDouble(param_index_); - } - - Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.GetParamPrimitiveType(param_index_); - } - - byte* GetParamAddress() const { - return cur_args_ + (cur_arg_index_ * kPointerSize); - } - - void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { -#if (defined(__arm__) || defined(__mips__)) - if (IsParamALongOrDouble() && cur_arg_index_ == 2) { - break; - } -#endif - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - cur_args_ = stack_args_; - cur_arg_index_ = 0; - while (param_index_ < num_params_) { -#if (defined(__arm__) || defined(__mips__)) - if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) { - cur_arg_index_++; - } -#endif - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - } - - private: - static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if (defined(__i386__)) - return 0; -#else - size_t args_in_regs = 0; - size_t num_params = mh.NumArgs(); - for (size_t i = 0; i < num_params; i++) { - args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 
2 : 1); - if (args_in_regs > 3) { - args_in_regs = 3; - break; - } - } - return args_in_regs; -#endif - } - MethodHelper& caller_mh_; - const size_t args_in_regs_; - const size_t num_params_; - byte* const reg_args_; - byte* const stack_args_; - byte* cur_args_; - size_t cur_arg_index_; - size_t param_index_; -}; - -// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. -class QuickArgumentVisitor { - public: -// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. -// Size of Runtime::kRefAndArgs callee save frame. -// Size of Method* and register parameters in out stack arguments. -#if defined(__arm__) -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 -#define QUICK_STACK_ARG_SKIP 16 -#elif defined(__mips__) -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 -#define QUICK_STACK_ARG_SKIP 16 -#elif defined(__i386__) -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 -#define QUICK_STACK_ARG_SKIP 16 -#else -#error "Unsupported architecture" -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 -#define QUICK_STACK_ARG_SKIP 0 -#endif - - QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : - caller_mh_(caller_mh), - args_in_regs_(ComputeArgsInRegs(caller_mh)), - num_params_(caller_mh.NumArgs()), - reg_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), - stack_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE - + QUICK_STACK_ARG_SKIP), - cur_args_(reg_args_), - cur_arg_index_(0), - param_index_(0), - is_split_long_or_double_(false) { - } - - virtual ~QuickArgumentVisitor() {} - - virtual void 
Visit() = 0; - - bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamAReference(param_index_); - } - - bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamALongOrDouble(param_index_); - } - - Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.GetParamPrimitiveType(param_index_); - } - - byte* GetParamAddress() const { - return cur_args_ + (cur_arg_index_ * kPointerSize); - } - - bool IsSplitLongOrDouble() const { - return is_split_long_or_double_; - } - - uint64_t ReadSplitLongParam() const { - DCHECK(IsSplitLongOrDouble()); - uint64_t low_half = *reinterpret_cast(GetParamAddress()); - uint64_t high_half = *reinterpret_cast(stack_args_); - return (low_half & 0xffffffffULL) | (high_half << 32); - } - - void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { - is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble(); - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - cur_args_ = stack_args_; - cur_arg_index_ = is_split_long_or_double_ ? 1 : 0; - is_split_long_or_double_ = false; - while (param_index_ < num_params_) { - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - } - - private: - static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - size_t args_in_regs = 0; - size_t num_params = mh.NumArgs(); - for (size_t i = 0; i < num_params; i++) { - args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 
2 : 1); - if (args_in_regs > 3) { - args_in_regs = 3; - break; - } - } - return args_in_regs; - } - MethodHelper& caller_mh_; - const size_t args_in_regs_; - const size_t num_params_; - byte* const reg_args_; - byte* const stack_args_; - byte* cur_args_; - size_t cur_arg_index_; - size_t param_index_; - // Does a 64bit parameter straddle the register and stack arguments? - bool is_split_long_or_double_; -}; - -} // namespace art - -#endif // ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_ diff --git a/runtime/oat/runtime/arm/context_arm.cc b/runtime/oat/runtime/arm/context_arm.cc deleted file mode 100644 index 6b9538e801..0000000000 --- a/runtime/oat/runtime/arm/context_arm.cc +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "context_arm.h" - -#include "mirror/abstract_method.h" -#include "mirror/object-inl.h" -#include "stack.h" -#include "thread.h" - -namespace art { -namespace arm { - -static const uint32_t gZero = 0; - -void ArmContext::Reset() { - for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { - gprs_[i] = NULL; - } - for (size_t i = 0; i < kNumberOfSRegisters; i++) { - fprs_[i] = NULL; - } - gprs_[SP] = &sp_; - gprs_[PC] = &pc_; - // Initialize registers with easy to spot debug values. 
- sp_ = ArmContext::kBadGprBase + SP; - pc_ = ArmContext::kBadGprBase + PC; -} - -void ArmContext::FillCalleeSaves(const StackVisitor& fr) { - mirror::AbstractMethod* method = fr.GetMethod(); - uint32_t core_spills = method->GetCoreSpillMask(); - uint32_t fp_core_spills = method->GetFpSpillMask(); - size_t spill_count = __builtin_popcount(core_spills); - size_t fp_spill_count = __builtin_popcount(fp_core_spills); - size_t frame_size = method->GetFrameSizeInBytes(); - if (spill_count > 0) { - // Lowest number spill is farthest away, walk registers and fill into context - int j = 1; - for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { - if (((core_spills >> i) & 1) != 0) { - gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size); - j++; - } - } - } - if (fp_spill_count > 0) { - // Lowest number spill is farthest away, walk registers and fill into context - int j = 1; - for (size_t i = 0; i < kNumberOfSRegisters; i++) { - if (((fp_core_spills >> i) & 1) != 0) { - fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size); - j++; - } - } - } -} - -void ArmContext::SetGPR(uint32_t reg, uintptr_t value) { - DCHECK_LT(reg, static_cast(kNumberOfCoreRegisters)); - DCHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since they are never reset. - DCHECK(gprs_[reg] != NULL); - *gprs_[reg] = value; -} - -void ArmContext::SmashCallerSaves() { - // This needs to be 0 because we want a null/zero return value. - gprs_[R0] = const_cast(&gZero); - gprs_[R1] = const_cast(&gZero); - gprs_[R2] = NULL; - gprs_[R3] = NULL; -} - -extern "C" void art_quick_do_long_jump(uint32_t*, uint32_t*); - -void ArmContext::DoLongJump() { - uintptr_t gprs[16]; - uint32_t fprs[32]; - for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) { - gprs[i] = gprs_[i] != NULL ? *gprs_[i] : ArmContext::kBadGprBase + i; - } - for (size_t i = 0; i < kNumberOfSRegisters; ++i) { - fprs[i] = fprs_[i] != NULL ? 
*fprs_[i] : ArmContext::kBadGprBase + i; - } - DCHECK_EQ(reinterpret_cast(Thread::Current()), gprs[TR]); - art_quick_do_long_jump(gprs, fprs); -} - -} // namespace arm -} // namespace art diff --git a/runtime/oat/runtime/arm/context_arm.h b/runtime/oat/runtime/arm/context_arm.h deleted file mode 100644 index 0be85e3577..0000000000 --- a/runtime/oat/runtime/arm/context_arm.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_RUNTIME_ARM_CONTEXT_ARM_H_ -#define ART_RUNTIME_OAT_RUNTIME_ARM_CONTEXT_ARM_H_ - -#include "locks.h" -#include "constants_arm.h" -#include "oat/runtime/context.h" - -namespace art { -namespace arm { - -class ArmContext : public Context { - public: - ArmContext() { - Reset(); - } - - virtual ~ArmContext() {} - - virtual void Reset(); - - virtual void FillCalleeSaves(const StackVisitor& fr); - - virtual void SetSP(uintptr_t new_sp) { - SetGPR(SP, new_sp); - } - - virtual void SetPC(uintptr_t new_pc) { - SetGPR(PC, new_pc); - } - - virtual uintptr_t GetGPR(uint32_t reg) { - CHECK_LT(reg, static_cast(kNumberOfCoreRegisters)); - return *gprs_[reg]; - } - - virtual void SetGPR(uint32_t reg, uintptr_t value); - virtual void SmashCallerSaves(); - virtual void DoLongJump(); - - private: - // Pointers to register locations, initialized to NULL or the specific registers below. 
- uintptr_t* gprs_[kNumberOfCoreRegisters]; - uint32_t* fprs_[kNumberOfSRegisters]; - // Hold values for sp and pc if they are not located within a stack frame. - uintptr_t sp_, pc_; -}; - -} // namespace arm -} // namespace art - -#endif // ART_RUNTIME_OAT_RUNTIME_ARM_CONTEXT_ARM_H_ diff --git a/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc b/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc deleted file mode 100644 index 2e9453ce9c..0000000000 --- a/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "oat/runtime/oat_support_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. 
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Exception entrypoints. -extern "C" void* GetAndClearException(Thread*); - -// Field entrypoints. -extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. 
-extern int32_t CmpgDouble(double a, double b); -extern int32_t CmplDouble(double a, double b); -extern int32_t CmpgFloat(float a, float b); -extern int32_t CmplFloat(float a, float b); - -// Math conversions. -extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT -extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT -extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT -extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE - -// Single-precision FP arithmetics. -extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] - -// Double-precision FP arithmetics. -extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] - -// Integer arithmetics. -extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16] - -// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] -extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); -extern "C" int64_t art_quick_mul_long(int64_t, int64_t); -extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); - -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t __memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); - -// Invoke entrypoints. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(EntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - - // 
Cast - points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - points->pCmpgDouble = CmpgDouble; 
- points->pCmpgFloat = CmpgFloat; - points->pCmplDouble = CmplDouble; - points->pCmplFloat = CmplFloat; - points->pFmod = fmod; - points->pSqrt = sqrt; - points->pL2d = __aeabi_l2d; - points->pFmodf = fmodf; - points->pL2f = __aeabi_l2f; - points->pD2iz = __aeabi_d2iz; - points->pF2iz = __aeabi_f2iz; - points->pIdivmod = __aeabi_idivmod; - points->pD2l = art_d2l; - points->pF2l = art_f2l; - points->pLdiv = __aeabi_ldivmod; - points->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3 - points->pLmul = art_quick_mul_long; - points->pShlLong = art_quick_shl_long; - points->pShrLong = art_quick_shr_long; - points->pUshrLong = art_quick_ushr_long; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = __memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = art_quick_deliver_exception_from_code; - 
points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/oat/runtime/arm/runtime_support_arm.S b/runtime/oat/runtime/arm/runtime_support_arm.S deleted file mode 100644 index f19e8bada0..0000000000 --- a/runtime/oat/runtime/arm/runtime_support_arm.S +++ /dev/null @@ -1,1413 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "asm_support.h" - - /* Deliver the given exception */ - .extern artDeliverExceptionFromCode - /* Deliver an exception pending on a thread */ - .extern artDeliverPendingException - -.macro ENTRY name - .type \name, #function - .global \name - /* Cache alignment for function entry */ - .balign 16 -\name: - .cfi_startproc - .fnstart -.endm - -.macro END name - .fnend - .cfi_endproc - .size \name, .-\name -.endm - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kSaveAll) - */ -.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - push {r4-r11, lr} @ 9 words of callee saves - .save {r4-r11, lr} - .cfi_adjust_cfa_offset 36 - .cfi_rel_offset r4, 0 - .cfi_rel_offset r5, 4 - .cfi_rel_offset r6, 8 - .cfi_rel_offset r7, 12 - .cfi_rel_offset r8, 16 - .cfi_rel_offset r9, 20 - .cfi_rel_offset r10, 24 - .cfi_rel_offset r11, 28 - .cfi_rel_offset lr, 32 - vpush {s0-s31} - .pad #128 - .cfi_adjust_cfa_offset 128 - sub sp, #12 @ 3 words of space, bottom word will hold Method* - .pad #12 - .cfi_adjust_cfa_offset 12 -.endm - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC. 
- */ -.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME - push {r5-r8, r10-r11, lr} @ 7 words of callee saves - .save {r5-r8, r10-r11, lr} - .cfi_adjust_cfa_offset 28 - .cfi_rel_offset r5, 0 - .cfi_rel_offset r6, 4 - .cfi_rel_offset r7, 8 - .cfi_rel_offset r8, 12 - .cfi_rel_offset r10, 16 - .cfi_rel_offset r11, 20 - .cfi_rel_offset lr, 24 - sub sp, #4 @ bottom word will hold Method* - .pad #4 - .cfi_adjust_cfa_offset 4 -.endm - -.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - ldr lr, [sp, #28] @ restore lr for return - add sp, #32 @ unwind stack - .cfi_adjust_cfa_offset -32 -.endm - -.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN - ldr lr, [sp, #28] @ restore lr for return - add sp, #32 @ unwind stack - .cfi_adjust_cfa_offset -32 - bx lr @ return -.endm - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC. - */ -.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves - .save {r1-r3, r5-r8, r10-r11, lr} - .cfi_adjust_cfa_offset 40 - .cfi_rel_offset r1, 0 - .cfi_rel_offset r2, 4 - .cfi_rel_offset r3, 8 - .cfi_rel_offset r5, 12 - .cfi_rel_offset r6, 16 - .cfi_rel_offset r7, 20 - .cfi_rel_offset r8, 24 - .cfi_rel_offset r10, 28 - .cfi_rel_offset r11, 32 - .cfi_rel_offset lr, 36 - sub sp, #8 @ 2 words of space, bottom word will hold Method* - .pad #8 - .cfi_adjust_cfa_offset 8 -.endm - -.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - ldr r1, [sp, #8] @ restore non-callee save r1 - ldrd r2, [sp, #12] @ restore non-callee saves r2-r3 - ldr lr, [sp, #44] @ restore lr - add sp, #48 @ rewind sp - .cfi_adjust_cfa_offset -48 -.endm - - /* - * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending - * exception is Thread::Current()->exception_ - */ -.macro DELIVER_PENDING_EXCEPTION - .fnend - .fnstart - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME @ save callee saves for throw - mov r0, r9 @ pass Thread::Current - mov 
r1, sp @ pass SP - b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*, SP) -.endm - -.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name - .extern \cxx_name -ENTRY \c_name - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context - mov r0, r9 @ pass Thread::Current - mov r1, sp @ pass SP - b \cxx_name @ \cxx_name(Thread*, SP) -END \c_name -.endm - -.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name - .extern \cxx_name -ENTRY \c_name - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context - mov r1, r9 @ pass Thread::Current - mov r2, sp @ pass SP - b \cxx_name @ \cxx_name(Thread*, SP) -END \c_name -.endm - -.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name - .extern \cxx_name -ENTRY \c_name - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - b \cxx_name @ \cxx_name(Thread*, SP) -END \c_name -.endm - - /* - * Called by managed code, saves callee saves and then calls artThrowException - * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. - */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode - - /* - * Called by managed code to create and deliver a NullPointerException. - */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode - - /* - * Called by managed code to create and deliver an ArithmeticException. - */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode - - /* - * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds - * index, arg2 holds limit. - */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode - - /* - * Called by managed code to create and deliver a StackOverflowError. 
- */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode - - /* - * Called by managed code to create and deliver a NoSuchMethodError. - */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode - - /* - * All generated callsites for interface invokes and invocation slow paths will load arguments - * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain - * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the - * stack and call the appropriate C helper. - * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1. - * - * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting - * of the target Method* in r0 and method->code_ in r1. - * - * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the - * thread and we branch to another stub to deliver it. - * - * On success this wrapper will restore arguments and *jump* to the target, leaving the lr - * pointing back to the original caller. - */ -.macro INVOKE_TRAMPOLINE c_name, cxx_name - .extern \cxx_name -ENTRY \c_name - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME @ save callee saves in case allocation triggers GC - ldr r2, [sp, #48] @ pass caller Method* - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl \cxx_name @ (method_idx, this, caller, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - mov r12, r1 @ save Method*->code_ - RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - cmp r0, #0 @ did we find the target? 
- bxne r12 @ tail call to target if so - DELIVER_PENDING_EXCEPTION -END \c_name -.endm - -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck - -INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck - - /* - * Portable invocation stub. - * On entry: - * r0 = method pointer - * r1 = argument array or NULL for no argument methods - * r2 = size of argument array in bytes - * r3 = (managed) thread pointer - * [sp] = JValue* result - * [sp + 4] = result type char - */ -ENTRY art_portable_invoke_stub - push {r0, r4, r5, r9, r11, lr} @ spill regs - .save {r0, r4, r5, r9, r11, lr} - .pad #24 - .cfi_adjust_cfa_offset 24 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r4, 4 - .cfi_rel_offset r5, 8 - .cfi_rel_offset r9, 12 - .cfi_rel_offset r11, 16 - .cfi_rel_offset lr, 20 - mov r11, sp @ save the stack pointer - .cfi_def_cfa_register r11 - mov r9, r3 @ move managed thread pointer into r9 - mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval - add r5, r2, #16 @ create space for method pointer in frame - and r5, #0xFFFFFFF0 @ align frame size to 16 bytes - sub sp, r5 @ reserve stack space for argument array - add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy - bl memcpy @ memcpy (dest, src, bytes) - ldr r0, [r11] @ restore method* - ldr r1, [sp, #4] @ copy arg value for r1 - ldr r2, [sp, #8] @ copy arg value for r2 - ldr r3, [sp, #12] @ copy arg value for r3 - mov ip, #0 @ set ip to 0 - str ip, [sp] @ 
store NULL for method* at bottom of frame - add sp, #16 @ first 4 args are not passed on stack for portable - ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code - blx ip @ call the method - mov sp, r11 @ restore the stack pointer - ldr ip, [sp, #24] @ load the result pointer - strd r0, [ip] @ store r0/r1 into result pointer - pop {r0, r4, r5, r9, r11, lr} @ restore spill regs - .cfi_adjust_cfa_offset -24 - bx lr -END art_portable_invoke_stub - - /* - * Quick invocation stub. - * On entry: - * r0 = method pointer - * r1 = argument array or NULL for no argument methods - * r2 = size of argument array in bytes - * r3 = (managed) thread pointer - * [sp] = JValue* result - * [sp + 4] = result type char - */ -ENTRY art_quick_invoke_stub - push {r0, r4, r5, r9, r11, lr} @ spill regs - .save {r0, r4, r5, r9, r11, lr} - .pad #24 - .cfi_adjust_cfa_offset 24 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r4, 4 - .cfi_rel_offset r5, 8 - .cfi_rel_offset r9, 12 - .cfi_rel_offset r11, 16 - .cfi_rel_offset lr, 20 - mov r11, sp @ save the stack pointer - .cfi_def_cfa_register r11 - mov r9, r3 @ move managed thread pointer into r9 - mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval - add r5, r2, #16 @ create space for method pointer in frame - and r5, #0xFFFFFFF0 @ align frame size to 16 bytes - sub sp, r5 @ reserve stack space for argument array - add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy - bl memcpy @ memcpy (dest, src, bytes) - ldr r0, [r11] @ restore method* - ldr r1, [sp, #4] @ copy arg value for r1 - ldr r2, [sp, #8] @ copy arg value for r2 - ldr r3, [sp, #12] @ copy arg value for r3 - mov ip, #0 @ set ip to 0 - str ip, [sp] @ store NULL for method* at bottom of frame - ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code - blx ip @ call the method - mov sp, r11 @ restore the stack pointer - ldr ip, [sp, #24] @ load the result pointer - strd r0, [ip] @ store r0/r1 into result pointer - pop {r0, r4, r5, r9, r11, lr} @ 
restore spill regs - .cfi_adjust_cfa_offset -24 - bx lr -END art_quick_invoke_stub - - /* - * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_ - */ -ENTRY art_quick_do_long_jump - vldm r1, {s0-s31} @ load all fprs from argument fprs_ - ldr r2, [r0, #60] @ r2 = r15 (PC from gprs_ 60=4*15) - add r0, r0, #12 @ increment r0 to skip gprs_[0..2] 12=4*3 - ldm r0, {r3-r14} @ load remaining gprs from argument gprs_ - mov r0, #0 @ clear result registers r0 and r1 - mov r1, #0 - bx r2 @ do long jump -END art_quick_do_long_jump - - /* - * Entry point of native methods when JNI bug compatibility is enabled. - */ - .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs - @ save registers that may contain arguments and LR that will be crushed by a call - push {r0-r3, lr} - .save {r0-r3, lr} - .cfi_adjust_cfa_offset 16 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r1, 4 - .cfi_rel_offset r2, 8 - .cfi_rel_offset r3, 12 - sub sp, #12 @ 3 words of space for alignment - mov r0, r9 @ pass Thread::Current - mov r1, sp @ pass SP - bl artWorkAroundAppJniBugs @ (Thread*, SP) - add sp, #12 @ rewind stack - mov r12, r0 @ save target address - pop {r0-r3, lr} @ restore possibly modified argument registers - .cfi_adjust_cfa_offset -16 - bx r12 @ tail call into JNI routine -END art_quick_work_around_app_jni_bugs - - /* - * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on - * failure. - */ - .extern artHandleFillArrayDataFromCode -ENTRY art_quick_handle_fill_data_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artHandleFillArrayDataFromCode @ (Array*, const DexFile::Payload*, Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success? 
- bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_handle_fill_data_from_code - - /* - * Entry from managed code that calls artLockObjectFromCode, may block for GC. - */ - .extern artLockObjectFromCode -ENTRY art_quick_lock_object_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case we block - mov r1, r9 @ pass Thread::Current - mov r2, sp @ pass SP - bl artLockObjectFromCode @ (Object* obj, Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN -END art_quick_lock_object_from_code - - /* - * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. - */ - .extern artUnlockObjectFromCode -ENTRY art_quick_unlock_object_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC - mov r1, r9 @ pass Thread::Current - mov r2, sp @ pass SP - bl artUnlockObjectFromCode @ (Object* obj, Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success? - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_unlock_object_from_code - - /* - * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. - */ - .extern artCheckCastFromCode -ENTRY art_quick_check_cast_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artCheckCastFromCode @ (Class* a, Class* b, Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success? - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_check_cast_from_code - - /* - * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on - * failure. 
- */ - .extern artCanPutArrayElementFromCode -ENTRY art_quick_can_put_array_element_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artCanPutArrayElementFromCode @ (Object* element, Class* array_class, Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success? - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_can_put_array_element_from_code - - /* - * Entry from managed code when uninitialized static storage, this stub will run the class - * initializer and deliver the exception on error. On success the static storage base is - * returned. - */ - .extern artInitializeStaticStorageFromCode -ENTRY art_quick_initialize_static_storage_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - @ artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, SP) - bl artInitializeStaticStorageFromCode - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_initialize_static_storage_from_code - - /* - * Entry from managed code when dex cache misses for a type_idx - */ - .extern artInitializeTypeFromCode -ENTRY art_quick_initialize_type_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - @ artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, SP) - bl artInitializeTypeFromCode - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_initialize_type_from_code - - /* - * Entry from managed code when type_idx needs to be checked for access and dex cache may also - * miss. 
- */ - .extern artInitializeTypeAndVerifyAccessFromCode -ENTRY art_quick_initialize_type_and_verify_access_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - @ artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*, SP) - bl artInitializeTypeAndVerifyAccessFromCode - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_initialize_type_and_verify_access_from_code - - /* - * Called by managed code to resolve a static field and load a 32-bit primitive value. - */ - .extern artGet32StaticFromCode -ENTRY art_quick_get32_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r1, [sp, #32] @ pass referrer - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artGet32StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_get32_static_from_code - - /* - * Called by managed code to resolve a static field and load a 64-bit primitive value. 
- */ - .extern artGet64StaticFromCode -ENTRY art_quick_get64_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r1, [sp, #32] @ pass referrer - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_get64_static_from_code - - /* - * Called by managed code to resolve a static field and load an object reference. - */ - .extern artGetObjStaticFromCode -ENTRY art_quick_get_obj_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r1, [sp, #32] @ pass referrer - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artGetObjStaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_get_obj_static_from_code - - /* - * Called by managed code to resolve an instance field and load a 32-bit primitive value. - */ - .extern artGet32InstanceFromCode -ENTRY art_quick_get32_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! 
@ expand the frame and pass SP - bl artGet32InstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_get32_instance_from_code - - /* - * Called by managed code to resolve an instance field and load a 64-bit primitive value. - */ - .extern artGet64InstanceFromCode -ENTRY art_quick_get64_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_get64_instance_from_code - - /* - * Called by managed code to resolve an instance field and load an object reference. - */ - .extern artGetObjInstanceFromCode -ENTRY art_quick_get_obj_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! 
@ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl artGetObjInstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_get_obj_instance_from_code - - /* - * Called by managed code to resolve a static field and store a 32-bit primitive value. - */ - .extern artSet32StaticFromCode -ENTRY art_quick_set32_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl artSet32StaticFromCode @ (field_idx, new_val, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is 0 - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_set32_static_from_code - - /* - * Called by managed code to resolve a static field and store a 64-bit primitive value. 
- * On entry r0 holds field index, r1:r2 hold new_val - */ - .extern artSet64StaticFromCode -ENTRY art_quick_set64_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r3, r2 @ pass one half of wide argument - mov r2, r1 @ pass other half of wide argument - ldr r1, [sp, #32] @ pass referrer - mov r12, sp @ save SP - sub sp, #8 @ grow frame for alignment with stack args - .pad #8 - .cfi_adjust_cfa_offset 8 - push {r9, r12} @ pass Thread::Current and SP - .save {r9, r12} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r9, 0 - bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*, SP) - add sp, #16 @ release out args - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here - cmp r0, #0 @ success if result is 0 - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_set64_static_from_code - - /* - * Called by managed code to resolve a static field and store an object reference. - */ - .extern artSetObjStaticFromCode -ENTRY art_quick_set_obj_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl artSetObjStaticFromCode @ (field_idx, new_val, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is 0 - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_set_obj_static_from_code - - /* - * Called by managed code to resolve an instance field and store a 32-bit primitive value. 
- */ - .extern artSet32InstanceFromCode -ENTRY art_quick_set32_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r3, [sp, #32] @ pass referrer - mov r12, sp @ save SP - sub sp, #8 @ grow frame for alignment with stack args - .pad #8 - .cfi_adjust_cfa_offset 8 - push {r9, r12} @ pass Thread::Current and SP - .save {r9, r12} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r9, 0 - .cfi_rel_offset r12, 4 - bl artSet32InstanceFromCode @ (field_idx, Object*, new_val, referrer, Thread*, SP) - add sp, #16 @ release out args - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here - cmp r0, #0 @ success if result is 0 - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_set32_instance_from_code - - /* - * Called by managed code to resolve an instance field and store a 64-bit primitive value. - */ - .extern artSet32InstanceFromCode -ENTRY art_quick_set64_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r12, sp @ save SP - sub sp, #8 @ grow frame for alignment with stack args - .pad #8 - .cfi_adjust_cfa_offset 8 - push {r9, r12} @ pass Thread::Current and SP - .save {r9, r12} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r9, 0 - bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Thread*, SP) - add sp, #16 @ release out args - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here - cmp r0, #0 @ success if result is 0 - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_set64_instance_from_code - - /* - * Called by managed code to resolve an instance field and store an object reference. 
- */ - .extern artSetObjInstanceFromCode -ENTRY art_quick_set_obj_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r3, [sp, #32] @ pass referrer - mov r12, sp @ save SP - sub sp, #8 @ grow frame for alignment with stack args - .pad #8 - .cfi_adjust_cfa_offset 8 - push {r9, r12} @ pass Thread::Current and SP - .save {r9, r12} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r9, 0 - bl artSetObjInstanceFromCode @ (field_idx, Object*, new_val, referrer, Thread*, SP) - add sp, #16 @ release out args - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here - cmp r0, #0 @ success if result is 0 - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_set_obj_instance_from_code - - /* - * Entry from managed code to resolve a string, this stub will allocate a String and deliver an - * exception on error. On success the String is returned. R0 holds the referring method, - * R1 holds the string index. The fast path check for hit in strings cache has already been - * performed. 
- */ - .extern artResolveStringFromCode -ENTRY art_quick_resolve_string_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - @ artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, SP) - bl artResolveStringFromCode - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_resolve_string_from_code - - /* - * Called by managed code to allocate an object - */ - .extern artAllocObjectFromCode -ENTRY art_quick_alloc_object_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artAllocObjectFromCode @ (uint32_t type_idx, Method* method, Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_alloc_object_from_code - - /* - * Called by managed code to allocate an object when the caller doesn't know whether it has - * access to the created type. - */ - .extern artAllocObjectFromCodeWithAccessCheck -ENTRY art_quick_alloc_object_from_code_with_access_check - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artAllocObjectFromCodeWithAccessCheck @ (uint32_t type_idx, Method* method, Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_alloc_object_from_code_with_access_check - - /* - * Called by managed code to allocate an array. - */ - .extern artAllocArrayFromCode -ENTRY art_quick_alloc_array_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! 
@ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - @ artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP) - bl artAllocArrayFromCode - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_alloc_array_from_code - - /* - * Called by managed code to allocate an array when the caller doesn't know whether it has - * access to the created type. - */ - .extern artAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_alloc_array_from_code_with_access_check - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - @ artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, SP) - bl artAllocArrayFromCodeWithAccessCheck - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_alloc_array_from_code_with_access_check - - /* - * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. - */ - .extern artCheckAndAllocArrayFromCode -ENTRY art_quick_check_and_alloc_array_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! 
@ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - @ artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , SP) - bl artCheckAndAllocArrayFromCode - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_check_and_alloc_array_from_code - - /* - * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. - */ - .extern artCheckAndAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_check_and_alloc_array_from_code_with_access_check - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - @ artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , SP) - bl artCheckAndAllocArrayFromCodeWithAccessCheck - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cmp r0, #0 @ success if result is non-null - bxne lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_check_and_alloc_array_from_code_with_access_check - - /* - * Called by managed code when the value in rSUSPEND has been decremented to 0. 
- */ - .extern artTestSuspendFromCode -ENTRY art_quick_test_suspend - ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET] - mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL - cmp r0, #0 @ check Thread::Current()->suspend_count_ == 0 - bxeq lr @ return if suspend_count_ == 0 - mov r0, rSELF - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves for stack crawl - mov r1, sp - bl artTestSuspendFromCode @ (Thread*, SP) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN -END art_quick_test_suspend - - .extern artPortableProxyInvokeHandler -ENTRY art_portable_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - str r0, [sp, #0] @ place proxy method at bottom of frame - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - ldr lr, [sp, #44] @ restore lr - add sp, #48 @ pop frame - .cfi_adjust_cfa_offset -48 - bx lr @ return -END art_portable_proxy_invoke_handler - - /* - * Called by managed code that is attempting to call a method on a proxy class. On entry - * r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The - * frame size of the invoked proxy method agrees with a ref and args callee save frame. 
- */ - .extern artQuickProxyInvokeHandler -ENTRY art_quick_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - str r0, [sp, #0] @ place proxy method at bottom of frame - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - ldr lr, [sp, #44] @ restore lr - add sp, #48 @ pop frame - .cfi_adjust_cfa_offset -48 - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_proxy_invoke_handler - - .extern artInterpreterEntry -ENTRY art_quick_interpreter_entry - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - str r0, [sp, #0] @ place proxy method at bottom of frame - mov r1, r9 @ pass Thread::Current - mov r2, sp @ pass SP - blx artInterpreterEntry @ (Method* method, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - ldr lr, [sp, #44] @ restore lr - add sp, #48 @ pop frame - .cfi_adjust_cfa_offset -48 - cmp r12, #0 @ success if no exception is pending - bxeq lr @ return on success - DELIVER_PENDING_EXCEPTION -END art_quick_interpreter_entry - - /* - * Routine that intercepts method calls and returns. - */ - .extern artInstrumentationMethodEntryFromCode - .extern artInstrumentationMethodExitFromCode -ENTRY art_quick_instrumentation_entry_from_code - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - str r0, [sp, #4] @ preserve r0 - mov r12, sp @ remember sp - str lr, [sp, #-16]! 
@ expand the frame and pass LR - .pad #16 - .cfi_adjust_cfa_offset 16 - .cfi_rel_offset lr, 0 - mov r2, r9 @ pass Thread::Current - mov r3, r12 @ pass SP - blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP, LR) - add sp, #16 @ remove out argument and padding from stack - .cfi_adjust_cfa_offset -16 - mov r12, r0 @ r12 holds reference to code - ldr r0, [sp, #4] @ restore r0 - RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - blx r12 @ call method with lr set to art_quick_instrumentation_exit_from_code -END art_quick_instrumentation_entry_from_code - .type art_quick_instrumentation_exit_from_code, #function - .global art_quick_instrumentation_exit_from_code -art_quick_instrumentation_exit_from_code: - .cfi_startproc - .fnstart - mov lr, #0 @ link register is to here, so clobber with 0 for later checks - SETUP_REF_ONLY_CALLEE_SAVE_FRAME - mov r12, sp @ remember bottom of caller's frame - push {r0-r1} @ save return value - .save {r0-r1} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r1, 4 - sub sp, #8 @ space for return value argument - .pad #8 - .cfi_adjust_cfa_offset 8 - strd r0, [sp] @ r0/r1 -> [sp] for fpr_res - mov r2, r0 @ pass return value as gpr_res - mov r3, r1 - mov r0, r9 @ pass Thread::Current - mov r1, r12 @ pass SP - blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res, fpr_res) - add sp, #8 - .cfi_adjust_cfa_offset -8 - - mov r2, r0 @ link register saved by instrumentation - mov lr, r1 @ r1 is holding link register if we're to bounce to deoptimize - pop {r0, r1} @ restore return value - add sp, #32 @ remove callee save frame - .cfi_adjust_cfa_offset -32 - bx r2 @ return -END art_quick_instrumentation_exit_from_code - - /* - * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization - * will long jump to the upcall with a special exception of -1. - */ - .extern artDeoptimize -ENTRY art_quick_deoptimize - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - mov r0, r9 @ Set up args. 
- mov r1, sp - blx artDeoptimize @ artDeoptimize(Thread*, SP) -END art_quick_deoptimize - - /* - * Portable abstract method error stub. r0 contains method* on entry. SP unused in portable. - */ - .extern artThrowAbstractMethodErrorFromCode -ENTRY art_portable_abstract_method_error_stub - mov r1, r9 @ pass Thread::Current - b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) -END art_portable_abstract_method_error_stub - - /* - * Quick abstract method error stub. r0 contains method* on entry. - */ -ENTRY art_quick_abstract_method_error_stub - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - mov r1, r9 @ pass Thread::Current - mov r2, sp @ pass SP - b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) -END art_quick_abstract_method_error_stub - - /* - * Jni dlsym lookup stub. - */ - .extern artFindNativeMethod -ENTRY art_jni_dlsym_lookup_stub - push {r0, r1, r2, r3, lr} @ spill regs - .save {r0, r1, r2, r3, lr} - .pad #20 - .cfi_adjust_cfa_offset 20 - sub sp, #12 @ pad stack pointer to align frame - .pad #12 - .cfi_adjust_cfa_offset 12 - mov r0, r9 @ pass Thread::Current - blx artFindNativeMethod @ (Thread*) - mov r12, r0 @ save result in r12 - add sp, #12 @ restore stack pointer - .cfi_adjust_cfa_offset -12 - pop {r0, r1, r2, r3, lr} @ restore regs - .cfi_adjust_cfa_offset -20 - cmp r12, #0 @ is method code null? - bxne r12 @ if non-null, tail call to method's code - bx lr @ otherwise, return to caller to handle exception -END art_jni_dlsym_lookup_stub - - /* - * Signed 64-bit integer multiply. - * - * Consider WXxYZ (r1r0 x r3r2) with a long multiply: - * WX - * x YZ - * -------- - * ZW ZX - * YW YX - * - * The low word of the result holds ZX, the high word holds - * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because - * it doesn't fit in the low 64 bits. - * - * Unlike most ARM math operations, multiply instructions have - * restrictions on using the same register more than once (Rd and Rm - * cannot be the same). 
- */ - /* mul-long vAA, vBB, vCC */ -ENTRY art_quick_mul_long - push {r9 - r10} - .save {r9 - r10} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r9, 0 - .cfi_rel_offset r10, 4 - mul ip, r2, r1 @ ip<- ZxW - umull r9, r10, r2, r0 @ r9/r10 <- ZxX - mla r2, r0, r3, ip @ r2<- YxX + (ZxW) - add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) - mov r0,r9 - mov r1,r10 - pop {r9 - r10} - .cfi_adjust_cfa_offset -8 - bx lr -END art_quick_mul_long - - /* - * Long integer shift. This is different from the generic 32/64-bit - * binary operations because vAA/vBB are 64-bit but vCC (the shift - * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low - * 6 bits. - * On entry: - * r0: low word - * r1: high word - * r2: shift count - */ - /* shl-long vAA, vBB, vCC */ -ENTRY art_quick_shl_long - and r2, r2, #63 @ r2<- r2 & 0x3f - mov r1, r1, asl r2 @ r1<- r1 << r2 - rsb r3, r2, #32 @ r3<- 32 - r2 - orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) - subs ip, r2, #32 @ ip<- r2 - 32 - movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) - mov r0, r0, asl r2 @ r0<- r0 << r2 - bx lr -END art_quick_shl_long - - /* - * Long integer shift. This is different from the generic 32/64-bit - * binary operations because vAA/vBB are 64-bit but vCC (the shift - * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low - * 6 bits. - * On entry: - * r0: low word - * r1: high word - * r2: shift count - */ - /* shr-long vAA, vBB, vCC */ -ENTRY art_quick_shr_long - and r2, r2, #63 @ r0<- r0 & 0x3f - mov r0, r0, lsr r2 @ r0<- r2 >> r2 - rsb r3, r2, #32 @ r3<- 32 - r2 - orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) - subs ip, r2, #32 @ ip<- r2 - 32 - movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) - mov r1, r1, asr r2 @ r1<- r1 >> r2 - bx lr -END art_quick_shr_long - - /* - * Long integer shift. This is different from the generic 32/64-bit - * binary operations because vAA/vBB are 64-bit but vCC (the shift - * distance) is 32-bit. 
Also, Dalvik requires us to ignore all but the low - * 6 bits. - * On entry: - * r0: low word - * r1: high word - * r2: shift count - */ - /* ushr-long vAA, vBB, vCC */ -ENTRY art_quick_ushr_long - and r2, r2, #63 @ r0<- r0 & 0x3f - mov r0, r0, lsr r2 @ r0<- r2 >> r2 - rsb r3, r2, #32 @ r3<- 32 - r2 - orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) - subs ip, r2, #32 @ ip<- r2 - 32 - movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) - mov r1, r1, lsr r2 @ r1<- r1 >>> r2 - bx lr -END art_quick_ushr_long - - /* - * String's indexOf. - * - * On entry: - * r0: string object (known non-null) - * r1: char to match (known <= 0xFFFF) - * r2: Starting offset in string data - */ -ENTRY art_quick_indexof - push {r4, r10-r11, lr} @ 4 words of callee saves - .save {r4, r10-r11, lr} - .cfi_adjust_cfa_offset 16 - .cfi_rel_offset r4, 0 - .cfi_rel_offset r10, 4 - .cfi_rel_offset r11, 8 - .cfi_rel_offset lr, 12 - ldr r3, [r0, #STRING_COUNT_OFFSET] - ldr r12, [r0, #STRING_OFFSET_OFFSET] - ldr r0, [r0, #STRING_VALUE_OFFSET] - - /* Clamp start to [0..count] */ - cmp r2, #0 - movlt r2, #0 - cmp r2, r3 - movgt r2, r3 - - /* Build a pointer to the start of string data */ - add r0, #STRING_DATA_OFFSET - add r0, r0, r12, lsl #1 - - /* Save a copy in r12 to later compute result */ - mov r12, r0 - - /* Build pointer to start of data to compare and pre-bias */ - add r0, r0, r2, lsl #1 - sub r0, #2 - - /* Compute iteration count */ - sub r2, r3, r2 - - /* - * At this point we have: - * r0: start of data to test - * r1: char to compare - * r2: iteration count - * r12: original start of string data - * r3, r4, r10, r11 available for loading string data - */ - - subs r2, #4 - blt indexof_remainder - -indexof_loop4: - ldrh r3, [r0, #2]! - ldrh r4, [r0, #2]! - ldrh r10, [r0, #2]! - ldrh r11, [r0, #2]! 
- cmp r3, r1 - beq match_0 - cmp r4, r1 - beq match_1 - cmp r10, r1 - beq match_2 - cmp r11, r1 - beq match_3 - subs r2, #4 - bge indexof_loop4 - -indexof_remainder: - adds r2, #4 - beq indexof_nomatch - -indexof_loop1: - ldrh r3, [r0, #2]! - cmp r3, r1 - beq match_3 - subs r2, #1 - bne indexof_loop1 - -indexof_nomatch: - mov r0, #-1 - pop {r4, r10-r11, pc} - -match_0: - sub r0, #6 - sub r0, r12 - asr r0, r0, #1 - pop {r4, r10-r11, pc} -match_1: - sub r0, #4 - sub r0, r12 - asr r0, r0, #1 - pop {r4, r10-r11, pc} -match_2: - sub r0, #2 - sub r0, r12 - asr r0, r0, #1 - pop {r4, r10-r11, pc} -match_3: - sub r0, r12 - asr r0, r0, #1 - pop {r4, r10-r11, pc} -END art_quick_indexof - - /* - * String's compareTo. - * - * Requires rARG0/rARG1 to have been previously checked for null. Will - * return negative if this's string is < comp, 0 if they are the - * same and positive if >. - * - * On entry: - * r0: this object pointer - * r1: comp object pointer - * - */ - .extern __memcmp16 -ENTRY art_quick_string_compareto - mov r2, r0 @ this to r2, opening up r0 for return value - subs r0, r2, r1 @ Same? 
- bxeq lr - - push {r4, r7-r12, lr} @ 8 words - keep alignment - .save {r4, r7-r12, lr} - .cfi_adjust_cfa_offset 32 - .cfi_rel_offset r4, 0 - .cfi_rel_offset r7, 4 - .cfi_rel_offset r8, 8 - .cfi_rel_offset r9, 12 - .cfi_rel_offset r10, 16 - .cfi_rel_offset r11, 20 - .cfi_rel_offset r12, 24 - .cfi_rel_offset lr, 28 - - ldr r4, [r2, #STRING_OFFSET_OFFSET] - ldr r9, [r1, #STRING_OFFSET_OFFSET] - ldr r7, [r2, #STRING_COUNT_OFFSET] - ldr r10, [r1, #STRING_COUNT_OFFSET] - ldr r2, [r2, #STRING_VALUE_OFFSET] - ldr r1, [r1, #STRING_VALUE_OFFSET] - - /* - * At this point, we have: - * value: r2/r1 - * offset: r4/r9 - * count: r7/r10 - * We're going to compute - * r11 <- countDiff - * r10 <- minCount - */ - subs r11, r7, r10 - movls r10, r7 - - /* Now, build pointers to the string data */ - add r2, r2, r4, lsl #1 - add r1, r1, r9, lsl #1 - /* - * Note: data pointers point to previous element so we can use pre-index - * mode with base writeback. - */ - add r2, #STRING_DATA_OFFSET-2 @ offset to contents[-1] - add r1, #STRING_DATA_OFFSET-2 @ offset to contents[-1] - - /* - * At this point we have: - * r2: *this string data - * r1: *comp string data - * r10: iteration count for comparison - * r11: value to return if the first part of the string is equal - * r0: reserved for result - * r3, r4, r7, r8, r9, r12 available for loading string data - */ - - subs r10, #2 - blt do_remainder2 - - /* - * Unroll the first two checks so we can quickly catch early mismatch - * on long strings (but preserve incoming alignment) - */ - - ldrh r3, [r2, #2]! - ldrh r4, [r1, #2]! - ldrh r7, [r2, #2]! - ldrh r8, [r1, #2]! - subs r0, r3, r4 - subeqs r0, r7, r8 - bne done - cmp r10, #28 - bgt do_memcmp16 - subs r10, #3 - blt do_remainder - -loopback_triple: - ldrh r3, [r2, #2]! - ldrh r4, [r1, #2]! - ldrh r7, [r2, #2]! - ldrh r8, [r1, #2]! - ldrh r9, [r2, #2]! - ldrh r12,[r1, #2]! 
- subs r0, r3, r4 - subeqs r0, r7, r8 - subeqs r0, r9, r12 - bne done - subs r10, #3 - bge loopback_triple - -do_remainder: - adds r10, #3 - beq returnDiff - -loopback_single: - ldrh r3, [r2, #2]! - ldrh r4, [r1, #2]! - subs r0, r3, r4 - bne done - subs r10, #1 - bne loopback_single - -returnDiff: - mov r0, r11 - pop {r4, r7-r12, pc} - -do_remainder2: - adds r10, #2 - bne loopback_single - mov r0, r11 - pop {r4, r7-r12, pc} - - /* Long string case */ -do_memcmp16: - mov r7, r11 - add r0, r2, #2 - add r1, r1, #2 - mov r2, r10 - bl __memcmp16 - cmp r0, #0 - moveq r0, r7 -done: - pop {r4, r7-r12, pc} -END art_quick_string_compareto diff --git a/runtime/oat/runtime/callee_save_frame.h b/runtime/oat/runtime/callee_save_frame.h deleted file mode 100644 index 59f46acbac..0000000000 --- a/runtime/oat/runtime/callee_save_frame.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_ -#define ART_RUNTIME_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_ - -#include "base/mutex.h" -#include "thread-inl.h" - -namespace art { -namespace mirror { -class AbstractMethod; -} // namespace mirror - -// Place a special frame at the TOS that will save the callee saves for the given type. 
-static void FinishCalleeSaveFrameSetup(Thread* self, mirror::AbstractMethod** sp, - Runtime::CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Be aware the store below may well stomp on an incoming argument. - Locks::mutator_lock_->AssertSharedHeld(self); - *sp = Runtime::Current()->GetCalleeSaveMethod(type); - self->SetTopOfStack(sp, 0); - self->VerifyStack(); -} - -} // namespace art - -#endif // ART_RUNTIME_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_ diff --git a/runtime/oat/runtime/context.cc b/runtime/oat/runtime/context.cc deleted file mode 100644 index 7075e42575..0000000000 --- a/runtime/oat/runtime/context.cc +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "context.h" - -#if defined(__arm__) -#include "arm/context_arm.h" -#elif defined(__mips__) -#include "mips/context_mips.h" -#elif defined(__i386__) -#include "x86/context_x86.h" -#endif - -namespace art { - -Context* Context::Create() { -#if defined(__arm__) - return new arm::ArmContext(); -#elif defined(__mips__) - return new mips::MipsContext(); -#elif defined(__i386__) - return new x86::X86Context(); -#else - UNIMPLEMENTED(FATAL); -#endif -} - -} // namespace art diff --git a/runtime/oat/runtime/context.h b/runtime/oat/runtime/context.h deleted file mode 100644 index ac43e9a7e9..0000000000 --- a/runtime/oat/runtime/context.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_RUNTIME_CONTEXT_H_ -#define ART_RUNTIME_OAT_RUNTIME_CONTEXT_H_ - -#include -#include - -namespace art { - -class StackVisitor; - -// Representation of a thread's context on the executing machine, used to implement long jumps in -// the quick stack frame layout. -class Context { - public: - // Creates a context for the running architecture - static Context* Create(); - - virtual ~Context() {} - - // Re-initializes the registers for context re-use. - virtual void Reset() = 0; - - // Read values from callee saves in the given frame. The frame also holds - // the method that holds the layout. 
- virtual void FillCalleeSaves(const StackVisitor& fr) = 0; - - // Set the stack pointer value - virtual void SetSP(uintptr_t new_sp) = 0; - - // Set the program counter value - virtual void SetPC(uintptr_t new_pc) = 0; - - // Read the given GPR - virtual uintptr_t GetGPR(uint32_t reg) = 0; - - // Set the given GPR. - virtual void SetGPR(uint32_t reg, uintptr_t value) = 0; - - // Smash the caller save registers. If we're throwing, we don't want to return bogus values. - virtual void SmashCallerSaves() = 0; - - // Switch execution of the executing context to this context - virtual void DoLongJump() = 0; - - protected: - enum { - kBadGprBase = 0xebad6070, - kBadFprBase = 0xebad8070, - }; -}; - -} // namespace art - -#endif // ART_RUNTIME_OAT_RUNTIME_CONTEXT_H_ diff --git a/runtime/oat/runtime/mips/context_mips.cc b/runtime/oat/runtime/mips/context_mips.cc deleted file mode 100644 index a78e5ee80d..0000000000 --- a/runtime/oat/runtime/mips/context_mips.cc +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "context_mips.h" - -#include "mirror/abstract_method.h" -#include "mirror/object-inl.h" -#include "stack.h" - -namespace art { -namespace mips { - -static const uint32_t gZero = 0; - -void MipsContext::Reset() { - for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { - gprs_[i] = NULL; - } - for (size_t i = 0; i < kNumberOfFRegisters; i++) { - fprs_[i] = NULL; - } - gprs_[SP] = &sp_; - gprs_[RA] = &ra_; - // Initialize registers with easy to spot debug values. - sp_ = MipsContext::kBadGprBase + SP; - ra_ = MipsContext::kBadGprBase + RA; -} - -void MipsContext::FillCalleeSaves(const StackVisitor& fr) { - mirror::AbstractMethod* method = fr.GetMethod(); - uint32_t core_spills = method->GetCoreSpillMask(); - uint32_t fp_core_spills = method->GetFpSpillMask(); - size_t spill_count = __builtin_popcount(core_spills); - size_t fp_spill_count = __builtin_popcount(fp_core_spills); - size_t frame_size = method->GetFrameSizeInBytes(); - if (spill_count > 0) { - // Lowest number spill is farthest away, walk registers and fill into context. - int j = 1; - for (size_t i = 0; i < kNumberOfCoreRegisters; i++) { - if (((core_spills >> i) & 1) != 0) { - gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size); - j++; - } - } - } - if (fp_spill_count > 0) { - // Lowest number spill is farthest away, walk registers and fill into context. - int j = 1; - for (size_t i = 0; i < kNumberOfFRegisters; i++) { - if (((fp_core_spills >> i) & 1) != 0) { - fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size); - j++; - } - } - } -} - -void MipsContext::SetGPR(uint32_t reg, uintptr_t value) { - CHECK_LT(reg, static_cast(kNumberOfCoreRegisters)); - CHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since they are never reset. - CHECK(gprs_[reg] != NULL); - *gprs_[reg] = value; -} - -void MipsContext::SmashCallerSaves() { - // This needs to be 0 because we want a null/zero return value. 
- gprs_[V0] = const_cast(&gZero); - gprs_[V1] = const_cast(&gZero); - gprs_[A1] = NULL; - gprs_[A2] = NULL; - gprs_[A3] = NULL; -} - -extern "C" void art_quick_do_long_jump(uint32_t*, uint32_t*); - -void MipsContext::DoLongJump() { - uintptr_t gprs[kNumberOfCoreRegisters]; - uint32_t fprs[kNumberOfFRegisters]; - for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) { - gprs[i] = gprs_[i] != NULL ? *gprs_[i] : MipsContext::kBadGprBase + i; - } - for (size_t i = 0; i < kNumberOfFRegisters; ++i) { - fprs[i] = fprs_[i] != NULL ? *fprs_[i] : MipsContext::kBadGprBase + i; - } - art_quick_do_long_jump(gprs, fprs); -} - -} // namespace mips -} // namespace art diff --git a/runtime/oat/runtime/mips/context_mips.h b/runtime/oat/runtime/mips/context_mips.h deleted file mode 100644 index f27124c79b..0000000000 --- a/runtime/oat/runtime/mips/context_mips.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_ -#define ART_RUNTIME_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_ - -#include "constants_mips.h" -#include "oat/runtime/context.h" - -namespace art { -namespace mips { - -class MipsContext : public Context { - public: - MipsContext() { - Reset(); - } - virtual ~MipsContext() {} - - virtual void Reset(); - - virtual void FillCalleeSaves(const StackVisitor& fr); - - virtual void SetSP(uintptr_t new_sp) { - SetGPR(SP, new_sp); - } - - virtual void SetPC(uintptr_t new_pc) { - SetGPR(RA, new_pc); - } - - virtual uintptr_t GetGPR(uint32_t reg) { - CHECK_LT(reg, static_cast(kNumberOfCoreRegisters)); - return *gprs_[reg]; - } - - virtual void SetGPR(uint32_t reg, uintptr_t value); - virtual void SmashCallerSaves(); - virtual void DoLongJump(); - - private: - // Pointers to registers in the stack, initialized to NULL except for the special cases below. - uintptr_t* gprs_[kNumberOfCoreRegisters]; - uint32_t* fprs_[kNumberOfFRegisters]; - // Hold values for sp and ra (return address) if they are not located within a stack frame. - uintptr_t sp_, ra_; -}; -} // namespace mips -} // namespace art - -#endif // ART_RUNTIME_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_ diff --git a/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc b/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc deleted file mode 100644 index 8e066118cd..0000000000 --- a/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runtime_support.h" -#include "oat/runtime/oat_support_entrypoints.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. -extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Exception entrypoints. -extern "C" void* GetAndClearException(Thread*); - -// Field entrypoints. 
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. -extern int32_t CmpgDouble(double a, double b); -extern int32_t CmplDouble(double a, double b); -extern int32_t CmpgFloat(float a, float b); -extern int32_t CmplFloat(float a, float b); -extern "C" int64_t artLmulFromCode(int64_t a, int64_t b); -extern "C" int64_t artLdivFromCode(int64_t a, int64_t b); -extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b); - -// Math conversions. -extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT -extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT -extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT -extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE -extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG -extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG - -// Single-precision FP arithmetics. 
-extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] - -// Double-precision FP arithmetics. -extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] - -// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] -extern "C" int64_t __divdi3(int64_t, int64_t); -extern "C" int64_t __moddi3(int64_t, int64_t); -extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); - -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t __memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); - -// Invoke entrypoints. -extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. 
-extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(EntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - - // Cast - points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = 
art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - points->pCmpgDouble = CmpgDouble; - points->pCmpgFloat = CmpgFloat; - points->pCmplDouble = CmplDouble; - points->pCmplFloat = CmplFloat; - points->pFmod = fmod; - points->pL2d = __floatdidf; - points->pFmodf = fmodf; - points->pL2f = __floatdisf; - points->pD2iz = __fixdfsi; - points->pF2iz = __fixsfsi; - points->pIdivmod = NULL; - points->pD2l = art_d2l; - points->pF2l = art_f2l; - points->pLdiv = artLdivFromCode; - points->pLdivmod = artLdivmodFromCode; - points->pLmul = artLmulFromCode; - points->pShlLong = art_quick_shl_long; - points->pShrLong = art_quick_shr_long; - points->pUshrLong = art_quick_ushr_long; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = __memcmp16; - 
points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/oat/runtime/mips/runtime_support_mips.S b/runtime/oat/runtime/mips/runtime_support_mips.S deleted file mode 100644 index 45d583e097..0000000000 --- a/runtime/oat/runtime/mips/runtime_support_mips.S +++ /dev/null @@ -1,1187 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "asm_support.h" - - .set noreorder - .balign 4 - - /* Deliver the given exception */ - .extern artDeliverExceptionFromCode - /* Deliver an exception pending on a thread */ - .extern artDeliverPendingExceptionFromCode - - /* Cache alignment for function entry */ -.macro ENTRY name - .type \name, %function - .global \name - .balign 16 -\name: - .cfi_startproc -.endm - -.macro END name - .cfi_endproc - .size \name, .-\name -.endm - - /* Generates $gp for function calls */ -.macro GENERATE_GLOBAL_POINTER - .cpload $t9 -.endm - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kSaveAll) - * callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word padding + 4 open words for args - */ -.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - addiu $sp, $sp, -64 - .cfi_adjust_cfa_offset 64 - sw $ra, 60($sp) - .cfi_rel_offset 31, 60 - sw $s8, 56($sp) - .cfi_rel_offset 30, 56 - sw $gp, 52($sp) - .cfi_rel_offset 28, 52 - sw $s7, 48($sp) - .cfi_rel_offset 23, 48 - sw $s6, 44($sp) - .cfi_rel_offset 22, 44 - sw $s5, 40($sp) - .cfi_rel_offset 21, 40 - sw $s4, 36($sp) - .cfi_rel_offset 20, 36 - sw $s3, 32($sp) - .cfi_rel_offset 19, 32 - sw $s2, 28($sp) - .cfi_rel_offset 18, 28 - sw $s1, 24($sp) - .cfi_rel_offset 17, 24 - sw $s0, 20($sp) - .cfi_rel_offset 16, 20 - # 1 word for alignment, 4 open words for args $a0-$a3, bottom will hold Method* -.endm - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC. 
- * Does not include rSUSPEND or rSELF - * callee-save: $s2-$s8 + $gp + $ra, 9 total + 3 words padding + 4 open words for args - */ -.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME - addiu $sp, $sp, -64 - .cfi_adjust_cfa_offset 64 - sw $ra, 60($sp) - .cfi_rel_offset 31, 60 - sw $s8, 56($sp) - .cfi_rel_offset 30, 56 - sw $gp, 52($sp) - .cfi_rel_offset 28, 52 - sw $s7, 48($sp) - .cfi_rel_offset 23, 48 - sw $s6, 44($sp) - .cfi_rel_offset 22, 44 - sw $s5, 40($sp) - .cfi_rel_offset 21, 40 - sw $s4, 36($sp) - .cfi_rel_offset 20, 36 - sw $s3, 32($sp) - .cfi_rel_offset 19, 32 - sw $s2, 28($sp) - .cfi_rel_offset 18, 28 - # 3 words for alignment and extra args, 4 open words for args $a0-$a3, bottom will hold Method* -.endm - -.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - lw $gp, 52($sp) - lw $ra, 60($sp) - addiu $sp, $sp, 64 - .cfi_adjust_cfa_offset -64 -.endm - -.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN - lw $gp, 52($sp) - lw $ra, 60($sp) - jr $ra - addiu $sp, $sp, 64 - .cfi_adjust_cfa_offset -64 -.endm - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC. 
- * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method* - */ -.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - addiu $sp, $sp, -64 - .cfi_adjust_cfa_offset 64 - sw $ra, 60($sp) - .cfi_rel_offset 31, 60 - sw $s8, 56($sp) - .cfi_rel_offset 30, 56 - sw $gp, 52($sp) - .cfi_rel_offset 28, 52 - sw $s7, 48($sp) - .cfi_rel_offset 23, 48 - sw $s6, 44($sp) - .cfi_rel_offset 22, 44 - sw $s5, 40($sp) - .cfi_rel_offset 21, 40 - sw $s4, 36($sp) - .cfi_rel_offset 20, 36 - sw $s3, 32($sp) - .cfi_rel_offset 19, 32 - sw $s2, 28($sp) - .cfi_rel_offset 18, 28 - sw $a3, 12($sp) - .cfi_rel_offset 7, 12 - sw $a2, 8($sp) - .cfi_rel_offset 6, 8 - sw $a1, 4($sp) - .cfi_rel_offset 5, 4 - # bottom will hold Method* -.endm - -.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - lw $ra, 60($sp) # restore $ra - lw $gp, 52($sp) # restore $gp - lw $a1, 4($sp) # restore non-callee save $a1 - lw $a2, 8($sp) # restore non-callee save $a2 - lw $a3, 12($sp) # restore non-callee save $a3 - addiu $sp, $sp, 64 # strip frame - .cfi_adjust_cfa_offset -64 -.endm - - /* - * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending - * exception is Thread::Current()->exception_ - */ -.macro DELIVER_PENDING_EXCEPTION - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw - move $a0, rSELF # pass Thread::Current - la $t9, artDeliverPendingExceptionFromCode - jr $t9 # artDeliverPendingExceptionFromCode(Thread*, $sp) - move $a1, $sp # pass $sp -.endm - -.macro RETURN_IF_NO_EXCEPTION - lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - bnez $t0, 1f # success if no exception is pending - nop - jr $ra - nop -1: - DELIVER_PENDING_EXCEPTION -.endm - -.macro RETURN_IF_ZERO - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - bnez $v0, 1f # success? 
- nop - jr $ra # return on success - nop -1: - DELIVER_PENDING_EXCEPTION -.endm - -.macro RETURN_IF_NONZERO - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - beqz $v0, 1f # success? - nop - jr $ra # return on success - nop -1: - DELIVER_PENDING_EXCEPTION -.endm - - /* - * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_ - * FIXME: just guessing about the shape of the jmpbuf. Where will pc be? - */ -ENTRY art_quick_do_long_jump - l.s $f0, 0($a1) - l.s $f1, 4($a1) - l.s $f2, 8($a1) - l.s $f3, 12($a1) - l.s $f4, 16($a1) - l.s $f5, 20($a1) - l.s $f6, 24($a1) - l.s $f7, 28($a1) - l.s $f8, 32($a1) - l.s $f9, 36($a1) - l.s $f10, 40($a1) - l.s $f11, 44($a1) - l.s $f12, 48($a1) - l.s $f13, 52($a1) - l.s $f14, 56($a1) - l.s $f15, 60($a1) - l.s $f16, 64($a1) - l.s $f17, 68($a1) - l.s $f18, 72($a1) - l.s $f19, 76($a1) - l.s $f20, 80($a1) - l.s $f21, 84($a1) - l.s $f22, 88($a1) - l.s $f23, 92($a1) - l.s $f24, 96($a1) - l.s $f25, 100($a1) - l.s $f26, 104($a1) - l.s $f27, 108($a1) - l.s $f28, 112($a1) - l.s $f29, 116($a1) - l.s $f30, 120($a1) - l.s $f31, 124($a1) - lw $at, 4($a0) - lw $v0, 8($a0) - lw $v1, 12($a0) - lw $a1, 20($a0) - lw $a2, 24($a0) - lw $a3, 28($a0) - lw $t0, 32($a0) - lw $t1, 36($a0) - lw $t2, 40($a0) - lw $t3, 44($a0) - lw $t4, 48($a0) - lw $t5, 52($a0) - lw $t6, 56($a0) - lw $t7, 60($a0) - lw $s0, 64($a0) - lw $s1, 68($a0) - lw $s2, 72($a0) - lw $s3, 76($a0) - lw $s4, 80($a0) - lw $s5, 84($a0) - lw $s6, 88($a0) - lw $s7, 92($a0) - lw $t8, 96($a0) - lw $t9, 100($a0) - lw $k0, 104($a0) - lw $k1, 108($a0) - lw $gp, 112($a0) - lw $sp, 116($a0) - lw $fp, 120($a0) - lw $ra, 124($a0) - lw $a0, 16($a0) - move $v0, $zero # clear result registers r0 and r1 - jr $ra # do long jump - move $v1, $zero -END art_quick_do_long_jump - - /* - * Called by managed code, saves most registers (forms basis of long jump context) and passes - * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at - * the bottom of the thread. 
On entry r0 holds Throwable* - */ -ENTRY art_quick_deliver_exception_from_code - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a1, rSELF # pass Thread::Current - la $t9, artDeliverExceptionFromCode - jr $t9 # artDeliverExceptionFromCode(Throwable*, Thread*, $sp) - move $a2, $sp # pass $sp -END art_quick_deliver_exception_from_code - - /* - * Called by managed code to create and deliver a NullPointerException - */ - .extern artThrowNullPointerExceptionFromCode -ENTRY art_quick_throw_null_pointer_exception_from_code - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a0, rSELF # pass Thread::Current - la $t9, artThrowNullPointerExceptionFromCode - jr $t9 # artThrowNullPointerExceptionFromCode(Thread*, $sp) - move $a1, $sp # pass $sp -END art_quick_throw_null_pointer_exception_from_code - - /* - * Called by managed code to create and deliver an ArithmeticException - */ - .extern artThrowDivZeroFromCode -ENTRY art_quick_throw_div_zero_from_code - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a0, rSELF # pass Thread::Current - la $t9, artThrowDivZeroFromCode - jr $t9 # artThrowDivZeroFromCode(Thread*, $sp) - move $a1, $sp # pass $sp -END art_quick_throw_div_zero_from_code - - /* - * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException - */ - .extern artThrowArrayBoundsFromCode -ENTRY art_quick_throw_array_bounds_from_code - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a2, rSELF # pass Thread::Current - la $t9, artThrowArrayBoundsFromCode - jr $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*, $sp) - move $a3, $sp # pass $sp -END art_quick_throw_array_bounds_from_code - - /* - * Called by managed code to create and deliver a StackOverflowError. 
- */ - .extern artThrowStackOverflowFromCode -ENTRY art_quick_throw_stack_overflow_from_code - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a0, rSELF # pass Thread::Current - la $t9, artThrowStackOverflowFromCode - jr $t9 # artThrowStackOverflowFromCode(Thread*, $sp) - move $a1, $sp # pass $sp -END art_quick_throw_stack_overflow_from_code - - /* - * Called by managed code to create and deliver a NoSuchMethodError. - */ - .extern artThrowNoSuchMethodFromCode -ENTRY art_quick_throw_no_such_method_from_code - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a1, rSELF # pass Thread::Current - la $t9, artThrowNoSuchMethodFromCode - jr $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*, $sp) - move $a2, $sp # pass $sp -END art_quick_throw_no_such_method_from_code - - /* - * All generated callsites for interface invokes and invocation slow paths will load arguments - * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain - * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the - * stack and call the appropriate C helper. - * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1. - * - * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting - * of the target Method* in $v0 and method->code_ in $v1. - * - * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the - * thread and we branch to another stub to deliver it. - * - * On success this wrapper will restore arguments and *jump* to the target, leaving the lr - * pointing back to the original caller. 
- */ -.macro INVOKE_TRAMPOLINE c_name, cxx_name - .extern \cxx_name -ENTRY \c_name - GENERATE_GLOBAL_POINTER - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC - lw $a2, 64($sp) # pass caller Method* - move $t0, $sp # save $sp - addiu $sp, $sp, -32 # make space for extra args - .cfi_adjust_cfa_offset 32 - move $a3, rSELF # pass Thread::Current - .cfi_rel_offset 28, 12 - jal \cxx_name # (method_idx, this, caller, Thread*, $sp) - sw $t0, 16($sp) # pass $sp - addiu $sp, $sp, 32 # release out args - .cfi_adjust_cfa_offset -32 - move $a0, $v0 # save target Method* - move $t9, $v1 # save $v0->code_ - RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - beqz $v0, 1f - nop - jr $t9 - nop -1: - DELIVER_PENDING_EXCEPTION -END \c_name -.endm - -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck - -INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck - - /* - * Common invocation stub for portable and quick. 
- * On entry: - * a0 = method pointer - * a1 = argument array or NULL for no argument methods - * a2 = size of argument array in bytes - * a3 = (managed) thread pointer - * [sp + 16] = JValue* result - * [sp + 20] = result type char - */ - .type art_portable_invoke_stub, %function - .global art_portable_invoke_stub -art_portable_invoke_stub: -ENTRY art_quick_invoke_stub - GENERATE_GLOBAL_POINTER - sw $a0, 0($sp) # save out a0 - addiu $sp, $sp, -16 # spill s0, s1, fp, ra - .cfi_adjust_cfa_offset 16 - sw $ra, 12($sp) - .cfi_rel_offset 31, 12 - sw $fp, 8($sp) - .cfi_rel_offset 30, 8 - sw $s1, 4($sp) - .cfi_rel_offset 17, 4 - sw $s0, 0($sp) - .cfi_rel_offset 16, 0 - move $fp, $sp # save sp in fp - .cfi_def_cfa_register 30 - move $s1, $a3 # move managed thread pointer into s1 - addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval - addiu $t0, $a2, 16 # create space for method pointer in frame - srl $t0, $t0, 3 # shift the frame size right 3 - sll $t0, $t0, 3 # shift the frame size left 3 to align to 16 bytes - subu $sp, $sp, $t0 # reserve stack space for argument array - addiu $a0, $sp, 4 # pass stack pointer + method ptr as dest for memcpy - jal memcpy # (dest, src, bytes) - addiu $sp, $sp, -16 # make space for argument slots for memcpy - addiu $sp, $sp, 16 # restore stack after memcpy - lw $a0, 16($fp) # restore method* - lw $a1, 4($sp) # copy arg value for a1 - lw $a2, 8($sp) # copy arg value for a2 - lw $a3, 12($sp) # copy arg value for a3 - lw $t9, METHOD_CODE_OFFSET($a0) # get pointer to the code - jalr $t9 # call the method - sw $zero, 0($sp) # store NULL for method* at bottom of frame - move $sp, $fp # restore the stack - lw $s0, 0($sp) - lw $s1, 4($sp) - lw $fp, 8($sp) - lw $ra, 12($sp) - addiu $sp, $sp, 16 - .cfi_adjust_cfa_offset -16 - lw $t0, 16($sp) # get result pointer - lw $t1, 20($sp) # get result type char - li $t2, 68 # put char 'D' into t2 - beq $t1, $t2, 1f # branch if result type char == 'D' - li $t3, 70 # put char 'F' into 
t3 - beq $t1, $t3, 1f # branch if result type char == 'F' - sw $v0, 0($t0) # store the result - jr $ra - sw $v1, 4($t0) # store the other half of the result -1: - s.s $f0, 0($t0) # store floating point result - jr $ra - s.s $f1, 4($t0) # store other half of floating point result -END art_quick_invoke_stub - .size art_portable_invoke_stub, .-art_portable_invoke_stub - - /* - * Entry point of native methods when JNI bug compatibility is enabled. - */ - .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs - GENERATE_GLOBAL_POINTER - # save registers that may contain arguments and LR that will be crushed by a call - addiu $sp, $sp, -32 - .cfi_adjust_cfa_offset 32 - sw $ra, 28($sp) - .cfi_rel_offset 31, 28 - sw $a3, 24($sp) - .cfi_rel_offset 7, 28 - sw $a2, 20($sp) - .cfi_rel_offset 6, 28 - sw $a1, 16($sp) - .cfi_rel_offset 5, 28 - sw $a0, 12($sp) - .cfi_rel_offset 4, 28 - move $a0, rSELF # pass Thread::Current - jal artWorkAroundAppJniBugs # (Thread*, $sp) - move $a1, $sp # pass $sp - move $t9, $v0 # save target address - lw $a0, 12($sp) - lw $a1, 16($sp) - lw $a2, 20($sp) - lw $a3, 24($sp) - lw $ra, 28($sp) - jr $t9 # tail call into JNI routine - addiu $sp, $sp, 32 - .cfi_adjust_cfa_offset -32 -END art_quick_work_around_app_jni_bugs - - /* - * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on - * failure. - */ - .extern artHandleFillArrayDataFromCode -ENTRY art_quick_handle_fill_data_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC - move $a2, rSELF # pass Thread::Current - jal artHandleFillArrayDataFromCode # (Array*, const DexFile::Payload*, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_ZERO -END art_quick_handle_fill_data_from_code - - /* - * Entry from managed code that calls artLockObjectFromCode, may block for GC. 
- */ - .extern artLockObjectFromCode -ENTRY art_quick_lock_object_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block - move $a1, rSELF # pass Thread::Current - jal artLockObjectFromCode # (Object* obj, Thread*, $sp) - move $a2, $sp # pass $sp - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN -END art_quick_lock_object_from_code - - /* - * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. - */ - .extern artUnlockObjectFromCode -ENTRY art_quick_unlock_object_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC - move $a1, rSELF # pass Thread::Current - jal artUnlockObjectFromCode # (Object* obj, Thread*, $sp) - move $a2, $sp # pass $sp - RETURN_IF_ZERO -END art_quick_unlock_object_from_code - - /* - * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. - */ - .extern artCheckCastFromCode -ENTRY art_quick_check_cast_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC - move $a2, rSELF # pass Thread::Current - jal artCheckCastFromCode # (Class* a, Class* b, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_ZERO -END art_quick_check_cast_from_code - - /* - * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on - * failure. 
- */ - .extern artCanPutArrayElementFromCode -ENTRY art_quick_can_put_array_element_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC - move $a2, rSELF # pass Thread::Current - jal artCanPutArrayElementFromCode # (Object* element, Class* array_class, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_ZERO -END art_quick_can_put_array_element_from_code - - /* - * Entry from managed code when uninitialized static storage, this stub will run the class - * initializer and deliver the exception on error. On success the static storage base is - * returned. - */ - .extern artInitializeStaticStorageFromCode -ENTRY art_quick_initialize_static_storage_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a2, rSELF # pass Thread::Current - # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp) - jal artInitializeStaticStorageFromCode - move $a3, $sp # pass $sp - RETURN_IF_NONZERO -END art_quick_initialize_static_storage_from_code - - /* - * Entry from managed code when dex cache misses for a type_idx. - */ - .extern artInitializeTypeFromCode -ENTRY art_quick_initialize_type_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a2, rSELF # pass Thread::Current - # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp) - jal artInitializeTypeFromCode - move $a3, $sp # pass $sp - RETURN_IF_NONZERO -END art_quick_initialize_type_from_code - - /* - * Entry from managed code when type_idx needs to be checked for access and dex cache may also - * miss. 
- */ - .extern artInitializeTypeAndVerifyAccessFromCode -ENTRY art_quick_initialize_type_and_verify_access_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a2, rSELF # pass Thread::Current - # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp) - jal artInitializeTypeAndVerifyAccessFromCode - move $a3, $sp # pass $sp - RETURN_IF_NONZERO -END art_quick_initialize_type_and_verify_access_from_code - - /* - * Called by managed code to resolve a static field and load a 32-bit primitive value. - */ - .extern artGet32StaticFromCode -ENTRY art_quick_get32_static_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a1, 64($sp) # pass referrer's Method* - move $a2, rSELF # pass Thread::Current - jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_NO_EXCEPTION -END art_quick_get32_static_from_code - - /* - * Called by managed code to resolve a static field and load a 64-bit primitive value. - */ - .extern artGet64StaticFromCode -ENTRY art_quick_get64_static_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a1, 64($sp) # pass referrer's Method* - move $a2, rSELF # pass Thread::Current - jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_NO_EXCEPTION -END art_quick_get64_static_from_code - - /* - * Called by managed code to resolve a static field and load an object reference. 
- */ - .extern artGetObjStaticFromCode -ENTRY art_quick_get_obj_static_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a1, 64($sp) # pass referrer's Method* - move $a2, rSELF # pass Thread::Current - jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_NO_EXCEPTION -END art_quick_get_obj_static_from_code - - /* - * Called by managed code to resolve an instance field and load a 32-bit primitive value. - */ - .extern artGet32InstanceFromCode -ENTRY art_quick_get32_instance_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a2, 64($sp) # pass referrer's Method* - move $a3, rSELF # pass Thread::Current - jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) - sw $sp, 16($sp) # pass $sp - RETURN_IF_NO_EXCEPTION -END art_quick_get32_instance_from_code - - /* - * Called by managed code to resolve an instance field and load a 64-bit primitive value. - */ - .extern artGet64InstanceFromCode -ENTRY art_quick_get64_instance_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a2, 64($sp) # pass referrer's Method* - move $a3, rSELF # pass Thread::Current - jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) - sw $sp, 16($sp) # pass $sp - RETURN_IF_NO_EXCEPTION -END art_quick_get64_instance_from_code - - /* - * Called by managed code to resolve an instance field and load an object reference. 
- */ - .extern artGetObjInstanceFromCode -ENTRY art_quick_get_obj_instance_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a2, 64($sp) # pass referrer's Method* - move $a3, rSELF # pass Thread::Current - jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) - sw $sp, 16($sp) # pass $sp - RETURN_IF_NO_EXCEPTION -END art_quick_get_obj_instance_from_code - - /* - * Called by managed code to resolve a static field and store a 32-bit primitive value. - */ - .extern artSet32StaticFromCode -ENTRY art_quick_set32_static_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a2, 64($sp) # pass referrer's Method* - move $a3, rSELF # pass Thread::Current - jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) - sw $sp, 16($sp) # pass $sp - RETURN_IF_ZERO -END art_quick_set32_static_from_code - - /* - * Called by managed code to resolve a static field and store a 64-bit primitive value. - */ - .extern artSet32StaticFromCode -ENTRY art_quick_set64_static_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a1, 64($sp) # pass referrer's Method* - sw rSELF, 16($sp) # pass Thread::Current - jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*, $sp) - sw $sp, 20($sp) # pass $sp - RETURN_IF_ZERO -END art_quick_set64_static_from_code - - /* - * Called by managed code to resolve a static field and store an object reference. 
- */ - .extern artSetObjStaticFromCode -ENTRY art_quick_set_obj_static_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a2, 64($sp) # pass referrer's Method* - move $a3, rSELF # pass Thread::Current - jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) - sw $sp, 16($sp) # pass $sp - RETURN_IF_ZERO -END art_quick_set_obj_static_from_code - - /* - * Called by managed code to resolve an instance field and store a 32-bit primitive value. - */ - .extern artSet32InstanceFromCode -ENTRY art_quick_set32_instance_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a3, 64($sp) # pass referrer's Method* - sw rSELF, 16($sp) # pass Thread::Current - jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) - sw $sp, 20($sp) # pass $sp - RETURN_IF_ZERO -END art_quick_set32_instance_from_code - - /* - * Called by managed code to resolve an instance field and store a 64-bit primitive value. - */ - .extern artSet32InstanceFromCode -ENTRY art_quick_set64_instance_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - sw rSELF, 16($sp) # pass Thread::Current - jal artSet64InstanceFromCode # (field_idx, Object*, new_val, Thread*, $sp) - sw $sp, 20($sp) # pass $sp - RETURN_IF_ZERO -END art_quick_set64_instance_from_code - - /* - * Called by managed code to resolve an instance field and store an object reference. 
- */ - .extern artSetObjInstanceFromCode -ENTRY art_quick_set_obj_instance_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lw $a3, 64($sp) # pass referrer's Method* - sw rSELF, 16($sp) # pass Thread::Current - jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) - sw $sp, 20($sp) # pass $sp - RETURN_IF_ZERO -END art_quick_set_obj_instance_from_code - - /* - * Entry from managed code to resolve a string, this stub will allocate a String and deliver an - * exception on error. On success the String is returned. R0 holds the referring method, - * R1 holds the string index. The fast path check for hit in strings cache has already been - * performed. - */ - .extern artResolveStringFromCode -ENTRY art_quick_resolve_string_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a2, rSELF # pass Thread::Current - # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp) - jal artResolveStringFromCode - move $a3, $sp # pass $sp - RETURN_IF_NONZERO -END art_quick_resolve_string_from_code - - /* - * Called by managed code to allocate an object. - */ - .extern artAllocObjectFromCode -ENTRY art_quick_alloc_object_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a2, rSELF # pass Thread::Current - jal artAllocObjectFromCode # (uint32_t type_idx, Method* method, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_NONZERO -END art_quick_alloc_object_from_code - - /* - * Called by managed code to allocate an object when the caller doesn't know whether it has - * access to the created type. 
- */ - .extern artAllocObjectFromCodeWithAccessCheck -ENTRY art_quick_alloc_object_from_code_with_access_check - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a2, rSELF # pass Thread::Current - jal artAllocObjectFromCodeWithAccessCheck # (uint32_t type_idx, Method* method, Thread*, $sp) - move $a3, $sp # pass $sp - RETURN_IF_NONZERO -END art_quick_alloc_object_from_code_with_access_check - - /* - * Called by managed code to allocate an array. - */ - .extern artAllocArrayFromCode -ENTRY art_quick_alloc_array_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a3, rSELF # pass Thread::Current - # artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, $sp) - jal artAllocArrayFromCode - sw $sp, 16($sp) # pass $sp - RETURN_IF_NONZERO -END art_quick_alloc_array_from_code - - /* - * Called by managed code to allocate an array when the caller doesn't know whether it has - * access to the created type. - */ - .extern artAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_alloc_array_from_code_with_access_check - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a3, rSELF # pass Thread::Current - # artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, $sp) - jal artAllocArrayFromCodeWithAccessCheck - sw $sp, 16($sp) # pass $sp - RETURN_IF_NONZERO -END art_quick_alloc_array_from_code_with_access_check - - /* - * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. 
- */ - .extern artCheckAndAllocArrayFromCode -ENTRY art_quick_check_and_alloc_array_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a3, rSELF # pass Thread::Current - # artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , $sp) - jal artCheckAndAllocArrayFromCode - sw $sp, 16($sp) # pass $sp - RETURN_IF_NONZERO -END art_quick_check_and_alloc_array_from_code - - /* - * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. - */ - .extern artCheckAndAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_check_and_alloc_array_from_code_with_access_check - GENERATE_GLOBAL_POINTER - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - move $a3, rSELF # pass Thread::Current - # artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , $sp) - jal artCheckAndAllocArrayFromCodeWithAccessCheck - sw $sp, 16($sp) # pass $sp - RETURN_IF_NONZERO -END art_quick_check_and_alloc_array_from_code_with_access_check - - /* - * Called by managed code when the value in rSUSPEND has been decremented to 0. 
- */ - .extern artTestSuspendFromCode -ENTRY art_quick_test_suspend - GENERATE_GLOBAL_POINTER - lh $a0, THREAD_FLAGS_OFFSET(rSELF) - bnez $a0, 1f - addi rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL - jr $ra - nop -1: - move $a0, rSELF - SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl - jal artTestSuspendFromCode # (Thread*, $sp) - move $a1, $sp - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN -END art_quick_test_suspend - - .extern artPortableProxyInvokeHandler -ENTRY art_portable_proxy_invoke_handler - GENERATE_GLOBAL_POINTER - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - sw $a0, 0($sp) # place proxy method at bottom of frame - move $a2, rSELF # pass Thread::Current - jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) - move $a3, $sp # pass $sp - lw $ra, 60($sp) # restore $ra - jr $ra - addiu $sp, $sp, 64 # pop frame - .cfi_adjust_cfa_offset -64 -END art_portable_proxy_invoke_handler - - /* - * Called by managed code that is attempting to call a method on a proxy class. On entry - * r0 holds the proxy method; r1, r2 and r3 may contain arguments. 
- */ - .extern artQuickProxyInvokeHandler -ENTRY art_quick_proxy_invoke_handler - GENERATE_GLOBAL_POINTER - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - sw $a0, 0($sp) # place proxy method at bottom of frame - move $a2, rSELF # pass Thread::Current - jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) - move $a3, $sp # pass $sp - lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_ - lw $gp, 52($sp) # restore $gp - lw $ra, 60($sp) # restore $ra - bnez $t0, 1f - addiu $sp, $sp, 64 # pop frame - .cfi_adjust_cfa_offset -64 - jr $ra - nop -1: - DELIVER_PENDING_EXCEPTION -END art_quick_proxy_invoke_handler - - .extern artInterpreterEntry -ENTRY art_quick_interpreter_entry - GENERATE_GLOBAL_POINTER - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - sw $a0, 0($sp) # place proxy method at bottom of frame - move $a1, rSELF # pass Thread::Current - jal artInterpreterEntry # (Method* method, Thread*, SP) - move $a2, $sp # pass $sp - lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_ - lw $gp, 52($sp) # restore $gp - lw $ra, 60($sp) # restore $ra - bnez $t0, 1f - addiu $sp, $sp, 64 # pop frame - .cfi_adjust_cfa_offset -64 - jr $ra - nop -1: - DELIVER_PENDING_EXCEPTION -END art_quick_interpreter_entry - - /* - * Routine that intercepts method calls and returns. 
- */ - .extern artInstrumentationMethodEntryFromCode - .extern artInstrumentationMethodExitFromCode -ENTRY art_quick_instrumentation_entry_from_code - GENERATE_GLOBAL_POINTER - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - move $t0, $sp # remember bottom of caller's frame - addiu $sp, $sp, -32 # space for args, pad (3 words), arguments (5 words) - .cfi_adjust_cfa_offset 32 - sw $a0, 28($sp) # save arg0 - sw $ra, 16($sp) # pass $ra - move $a3, $t0 # pass $sp - jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, SP, LR) - move $a2, rSELF # pass Thread::Current - move $t9, $v0 # $t9 holds reference to code - lw $a0, 28($sp) # restore arg0 - addiu $sp, $sp, 32 # remove args - .cfi_adjust_cfa_offset -32 - RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - jalr $t9 # call method - nop -END art_quick_instrumentation_entry_from_code - /* intentional fallthrough */ - .global art_quick_instrumentation_exit_from_code -art_quick_instrumentation_exit_from_code: - .cfi_startproc - addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp - GENERATE_GLOBAL_POINTER - move $t0, $sp # remember bottom of caller's frame - SETUP_REF_ONLY_CALLEE_SAVE_FRAME - addiu $sp, $sp, -48 # save return values and set up args - .cfi_adjust_cfa_offset 48 - sw $v0, 32($sp) - .cfi_rel_offset 2, 0 - sw $v1, 36($sp) - .cfi_rel_offset 3, 4 - s.s $f0, 40($sp) - s.s $f1, 44($sp) - s.s $f0, 16($sp) # pass fpr result - s.s $f1, 20($sp) - move $a2, $v0 # pass gpr result - move $a3, $v1 - move $a1, $t0 # pass $sp - jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res) - move $a0, rSELF # pass Thread::Current - move $t0, $v0 # set aside returned link register - move $ra, $v1 # set link register for deoptimization - lw $v0, 32($sp) # restore return values - lw $v1, 36($sp) - l.s $f0, 40($sp) - l.s $f1, 44($sp) - jr $t0 # return - addiu $sp, $sp, 112 # 48 bytes of args + 64 bytes of callee save frame - .cfi_adjust_cfa_offset -112 -END art_quick_instrumentation_exit_from_code - - /* - 
* Instrumentation has requested that we deoptimize into the interpreter. The deoptimization - * will long jump to the upcall with a special exception of -1. - */ - .extern artDeoptimize - .extern artEnterInterpreterFromDeoptimize -ENTRY art_quick_deoptimize - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a0, rSELF # pass Thread::current - jal artDeoptimize # artDeoptimize(Thread*, SP) - # Returns caller method's frame size. - move $a1, $sp # pass $sp -END art_quick_deoptimize - - /* - * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable. - */ - .extern artThrowAbstractMethodErrorFromCode -ENTRY art_portable_abstract_method_error_stub - GENERATE_GLOBAL_POINTER - la $t9, artThrowAbstractMethodErrorFromCode - jr $t9 # (Method*, Thread*, SP) - move $a1, $s1 # pass Thread::Current -END art_portable_abstract_method_error_stub - - /* - * Quick abstract method error stub. $a0 contains method* on entry. - */ -ENTRY art_quick_abstract_method_error_stub - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a1, $s1 # pass Thread::Current - la $t9, artThrowAbstractMethodErrorFromCode - jr $t9 # (Method*, Thread*, SP) - move $a2, $sp # pass SP -END art_quick_abstract_method_error_stub - - /* - * Jni dlsym lookup stub. 
- */ - .extern artFindNativeMethod -ENTRY art_jni_dlsym_lookup_stub - GENERATE_GLOBAL_POINTER - addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra - .cfi_adjust_cfa_offset 32 - sw $ra, 16($sp) - .cfi_rel_offset 31, 16 - sw $a3, 12($sp) - .cfi_rel_offset 7, 12 - sw $a2, 8($sp) - .cfi_rel_offset 6, 8 - sw $a1, 4($sp) - .cfi_rel_offset 5, 4 - sw $a0, 0($sp) - .cfi_rel_offset 4, 0 - jal artFindNativeMethod # (Thread*) - move $a0, $s1 # pass Thread::Current() - lw $a0, 0($sp) # restore registers from stack - lw $a1, 4($sp) - lw $a2, 8($sp) - lw $a3, 12($sp) - lw $ra, 16($sp) - beq $v0, $zero, no_native_code_found - addiu $sp, $sp, 32 # restore the stack - .cfi_adjust_cfa_offset -32 - move $t9, $v0 # put method code result in $t9 - jr $t9 # leaf call to method's code - nop -no_native_code_found: - jr $ra - nop -END art_jni_dlsym_lookup_stub - - /* - * Long integer shift. This is different from the generic 32/64-bit - * binary operations because vAA/vBB are 64-bit but vCC (the shift - * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low - * 6 bits. - * On entry: - * $a0: low word - * $a1: high word - * $a2: shift count - */ -ENTRY art_quick_shl_long - /* shl-long vAA, vBB, vCC */ - sll $v0, $a0, $a2 # rlo<- alo << (shift&31) - not $v1, $a2 # rhi<- 31-shift (shift is 5b) - srl $a0, 1 - srl $a0, $v1 # alo<- alo >> (32-(shift&31)) - sll $v1, $a1, $a2 # rhi<- ahi << (shift&31) - or $v1, $a0 # rhi<- rhi | alo - andi $a2, 0x20 # shift< shift & 0x20 - movn $v1, $v0, $a2 # rhi<- rlo (if shift&0x20) - jr $ra - movn $v0, $zero, $a2 # rlo<- 0 (if shift&0x20) -END art_quick_shl_long - - /* - * Long integer shift. This is different from the generic 32/64-bit - * binary operations because vAA/vBB are 64-bit but vCC (the shift - * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low - * 6 bits. 
- * On entry: - * $a0: low word - * $a1: high word - * $a2: shift count - */ - .global art_quick_shr_long -ENTRY art_quick_shr_long - sra $v1, $a1, $a2 # rhi<- ahi >> (shift&31) - srl $v0, $a0, $a2 # rlo<- alo >> (shift&31) - sra $a3, $a1, 31 # $a3<- sign(ah) - not $a0, $a2 # alo<- 31-shift (shift is 5b) - sll $a1, 1 - sll $a1, $a0 # ahi<- ahi << (32-(shift&31)) - or $v0, $a1 # rlo<- rlo | ahi - andi $a2, 0x20 # shift & 0x20 - movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20) - jr $ra - movn $v1, $a3, $a2 # rhi<- sign(ahi) (if shift&0x20) -END art_quick_shr_long - - /* - * Long integer shift. This is different from the generic 32/64-bit - * binary operations because vAA/vBB are 64-bit but vCC (the shift - * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low - * 6 bits. - * On entry: - * r0: low word - * r1: high word - * r2: shift count - */ - /* ushr-long vAA, vBB, vCC */ - .global art_quick_ushr_long -ENTRY art_quick_ushr_long - srl $v1, $a1, $a2 # rhi<- ahi >> (shift&31) - srl $v0, $a0, $a2 # rlo<- alo >> (shift&31) - not $a0, $a2 # alo<- 31-shift (shift is 5b) - sll $a1, 1 - sll $a1, $a0 # ahi<- ahi << (32-(shift&31)) - or $v0, $a1 # rlo<- rlo | ahi - andi $a2, 0x20 # shift & 0x20 - movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20) - jr $ra - movn $v1, $zero, $a2 # rhi<- 0 (if shift&0x20) -END art_quick_ushr_long - -ENTRY art_quick_indexof - jr $ra - nop -END art_quick_indexof - -ENTRY art_quick_string_compareto - jr $ra - nop -END art_quick_string_compareto diff --git a/runtime/oat/runtime/oat_support_entrypoints.h b/runtime/oat/runtime/oat_support_entrypoints.h deleted file mode 100644 index 546ee01c6f..0000000000 --- a/runtime/oat/runtime/oat_support_entrypoints.h +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_ -#define ART_RUNTIME_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_ - -#include "dex_file-inl.h" -#include "runtime.h" - -#define ENTRYPOINT_OFFSET(x) \ - (static_cast(OFFSETOF_MEMBER(Thread, entrypoints_)) + \ - static_cast(OFFSETOF_MEMBER(EntryPoints, x))) - -namespace art { -namespace mirror { -class AbstractMethod; -class Class; -class Object; -} // namespace mirror -class DvmDex; -class MethodHelper; -class ShadowFrame; -class Thread; - -struct PACKED(4) EntryPoints { - // Alloc - void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t); - void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t); - void* (*pAllocObjectFromCode)(uint32_t, void*); - void* (*pAllocObjectFromCodeWithAccessCheck)(uint32_t, void*); - void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t); - void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t); - - // Cast - uint32_t (*pInstanceofNonTrivialFromCode)(const mirror::Class*, const mirror::Class*); - void (*pCanPutArrayElementFromCode)(void*, void*); - void (*pCheckCastFromCode)(void*, void*); - - // DexCache - void* (*pInitializeStaticStorage)(uint32_t, void*); - void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*); - void* (*pInitializeTypeFromCode)(uint32_t, void*); - void* (*pResolveStringFromCode)(void*, uint32_t); - - // Field - int (*pSet32Instance)(uint32_t, void*, int32_t); // field_idx, obj, src - int (*pSet32Static)(uint32_t, int32_t); - int (*pSet64Instance)(uint32_t, void*, int64_t); - int 
(*pSet64Static)(uint32_t, int64_t); - int (*pSetObjInstance)(uint32_t, void*, void*); - int (*pSetObjStatic)(uint32_t, void*); - int32_t (*pGet32Instance)(uint32_t, void*); - int32_t (*pGet32Static)(uint32_t); - int64_t (*pGet64Instance)(uint32_t, void*); - int64_t (*pGet64Static)(uint32_t); - void* (*pGetObjInstance)(uint32_t, void*); - void* (*pGetObjStatic)(uint32_t); - - // FillArray - void (*pHandleFillArrayDataFromCode)(void*, void*); - - // JNI - uint32_t (*pJniMethodStart)(Thread*); - uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self); - void (*pJniMethodEnd)(uint32_t cookie, Thread* self); - void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self); - mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self); - mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie, - jobject locked, Thread* self); - - // Locks - void (*pLockObjectFromCode)(void*); - void (*pUnlockObjectFromCode)(void*); - - // Math - int32_t (*pCmpgDouble)(double, double); - int32_t (*pCmpgFloat)(float, float); - int32_t (*pCmplDouble)(double, double); - int32_t (*pCmplFloat)(float, float); - double (*pFmod)(double, double); - double (*pSqrt)(double); - double (*pL2d)(int64_t); - float (*pFmodf)(float, float); - float (*pL2f)(int64_t); - int32_t (*pD2iz)(double); - int32_t (*pF2iz)(float); - int32_t (*pIdivmod)(int32_t, int32_t); - int64_t (*pD2l)(double); - int64_t (*pF2l)(float); - int64_t (*pLdiv)(int64_t, int64_t); - int64_t (*pLdivmod)(int64_t, int64_t); - int64_t (*pLmul)(int64_t, int64_t); - uint64_t (*pShlLong)(uint64_t, uint32_t); - uint64_t (*pShrLong)(uint64_t, uint32_t); - uint64_t (*pUshrLong)(uint64_t, uint32_t); - - // Interpreter - void (*pInterpreterToInterpreterEntry)(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - void (*pInterpreterToQuickEntry)(Thread* self, MethodHelper& mh, - const 
DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - - // Intrinsics - int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t); - int32_t (*pMemcmp16)(void*, void*, int32_t); - int32_t (*pStringCompareTo)(void*, void*); - void* (*pMemcpy)(void*, const void*, size_t); - - // Invocation - const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, - mirror::AbstractMethod**, Thread*); - const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, - mirror::AbstractMethod**, Thread*); - void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*); - void (*pInvokeInterfaceTrampoline)(uint32_t, void*); - void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*); - void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*); - void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*); - void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*); - - // Thread - void (*pCheckSuspendFromCode)(Thread*); // Stub that is called when the suspend count is non-zero - void (*pTestSuspendFromCode)(); // Stub that is periodically called to test the suspend count - - // Throws - void (*pDeliverException)(void*); - void (*pThrowArrayBoundsFromCode)(int32_t, int32_t); - void (*pThrowDivZeroFromCode)(); - void (*pThrowNoSuchMethodFromCode)(int32_t); - void (*pThrowNullPointerFromCode)(); - void (*pThrowStackOverflowFromCode)(void*); -}; - - -// JNI entrypoints. 
-extern uint32_t JniMethodStart(Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; -extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; -extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; -extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; -extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; - -extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; - -// Initialize an entry point data structure. -void InitEntryPoints(EntryPoints* points); - -// Change the debugger entry point in the data structure. -void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled); - -} // namespace art - -#endif // ART_RUNTIME_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_ diff --git a/runtime/oat/runtime/support_alloc.cc b/runtime/oat/runtime/support_alloc.cc deleted file mode 100644 index f66fc848d5..0000000000 --- a/runtime/oat/runtime/support_alloc.cc +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "mirror/class-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "runtime_support.h" - -namespace art { - -extern "C" mirror::Object* artAllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return AllocObjectFromCode(type_idx, method, self, false); -} - -extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, - mirror::AbstractMethod* method, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return AllocObjectFromCode(type_idx, method, self, true); -} - -extern "C" mirror::Array* artAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - int32_t component_count, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return AllocArrayFromCode(type_idx, method, component_count, self, false); -} - -extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, - mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return AllocArrayFromCode(type_idx, method, component_count, self, true); -} - -extern "C" mirror::Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, - mirror::AbstractMethod* method, - int32_t component_count, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - 
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false); -} - -extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, - mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_cast.cc b/runtime/oat/runtime/support_cast.cc deleted file mode 100644 index fe91e617bb..0000000000 --- a/runtime/oat/runtime/support_cast.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "mirror/class-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "runtime_support.h" - -namespace art { - -// Assignable test for code, won't throw. Null and equality tests already performed -extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(klass != NULL); - DCHECK(ref_class != NULL); - return klass->IsAssignableFrom(ref_class) ? 
1 : 0; -} - -// Check whether it is safe to cast one class to the other, throw exception and return -1 on failure -extern "C" int artCheckCastFromCode(mirror::Class* src_type, mirror::Class* dest_type, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(src_type->IsClass()) << PrettyClass(src_type); - DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); - if (LIKELY(dest_type->IsAssignableFrom(src_type))) { - return 0; // Success - } else { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - ThrowClassCastException(dest_type, src_type); - return -1; // Failure - } -} - -// Tests whether 'element' can be assigned into an array of type 'array_class'. -// Returns 0 on success and -1 if an exception is pending. -extern "C" int artCanPutArrayElementFromCode(const mirror::Object* element, - const mirror::Class* array_class, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(array_class != NULL); - // element can't be NULL as we catch this is screened in runtime_support - mirror::Class* element_class = element->GetClass(); - mirror::Class* component_type = array_class->GetComponentType(); - if (LIKELY(component_type->IsAssignableFrom(element_class))) { - return 0; // Success - } else { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - ThrowArrayStoreException(element_class, array_class); - return -1; // Failure - } -} - -} // namespace art diff --git a/runtime/oat/runtime/support_deoptimize.cc b/runtime/oat/runtime/support_deoptimize.cc deleted file mode 100644 index 43fc9d2a2d..0000000000 --- a/runtime/oat/runtime/support_deoptimize.cc +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "dex_file-inl.h" -#include "interpreter/interpreter.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "object_utils.h" -#include "stack.h" -#include "thread.h" -#include "verifier/method_verifier.h" - -namespace art { - -extern "C" void artDeoptimize(Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - self->SetException(ThrowLocation(), reinterpret_cast(-1)); - self->QuickDeliverException(); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_dexcache.cc b/runtime/oat/runtime/support_dexcache.cc deleted file mode 100644 index 0af7a6281d..0000000000 --- a/runtime/oat/runtime/support_dexcache.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "callee_save_frame.h" -#include "gc/accounting/card_table-inl.h" -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "runtime_support.h" - -namespace art { - -extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, - const mirror::AbstractMethod* referrer, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called to ensure static storage base is initialized for direct static field reads and writes. - // A class may be accessing another class' fields when it doesn't have access, as access has been - // given by inheritance. - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return ResolveVerifyAndClinit(type_idx, referrer, self, true, false); -} - -extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, - const mirror::AbstractMethod* referrer, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called when method->dex_cache_resolved_types_[] misses. - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return ResolveVerifyAndClinit(type_idx, referrer, self, false, false); -} - -extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, - const mirror::AbstractMethod* referrer, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called when caller isn't guaranteed to have access to a type and the dex cache may be - // unpopulated. 
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return ResolveVerifyAndClinit(type_idx, referrer, self, false, true); -} - -extern "C" mirror::String* artResolveStringFromCode(mirror::AbstractMethod* referrer, - int32_t string_idx, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - return ResolveStringFromCode(referrer, string_idx); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_field.cc b/runtime/oat/runtime/support_field.cc deleted file mode 100644 index c20326c63e..0000000000 --- a/runtime/oat/runtime/support_field.cc +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "callee_save_frame.h" -#include "dex_file-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/field-inl.h" -#include "runtime_support.h" - -#include - -namespace art { - -extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, - const mirror::AbstractMethod* referrer, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); - if (LIKELY(field != NULL)) { - return field->Get32(field->GetDeclaringClass()); - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int32_t), true); - if (LIKELY(field != NULL)) { - return field->Get32(field->GetDeclaringClass()); - } - return 0; // Will throw exception by checking with Thread::Current -} - -extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, - const mirror::AbstractMethod* referrer, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int64_t), true); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - return 0; // Will throw exception by checking with Thread::Current -} - -extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, - const mirror::AbstractMethod* referrer, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, - sizeof(mirror::Object*)); - if (LIKELY(field != NULL)) { - return 
field->GetObj(field->GetDeclaringClass()); - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticObjectRead, sizeof(mirror::Object*), true); - if (LIKELY(field != NULL)) { - return field->GetObj(field->GetDeclaringClass()); - } - return NULL; // Will throw exception by checking with Thread::Current -} - -extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, - const mirror::AbstractMethod* referrer, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); - if (LIKELY(field != NULL && obj != NULL)) { - return field->Get32(obj); - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int32_t), true); - if (LIKELY(field != NULL)) { - if (UNLIKELY(obj == NULL)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); - } else { - return field->Get32(obj); - } - } - return 0; // Will throw exception by checking with Thread::Current -} - -extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, - const mirror::AbstractMethod* referrer, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); - if (LIKELY(field != NULL && obj != NULL)) { - return field->Get64(obj); - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int64_t), true); - if (LIKELY(field != NULL)) { - if (UNLIKELY(obj == NULL)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - 
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); - } else { - return field->Get64(obj); - } - } - return 0; // Will throw exception by checking with Thread::Current -} - -extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, - const mirror::AbstractMethod* referrer, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::Object*)); - if (LIKELY(field != NULL && obj != NULL)) { - return field->GetObj(obj); - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectRead, sizeof(mirror::Object*), true); - if (LIKELY(field != NULL)) { - if (UNLIKELY(obj == NULL)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); - } else { - return field->GetObj(obj); - } - } - return NULL; // Will throw exception by checking with Thread::Current -} - -extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, - const mirror::AbstractMethod* referrer, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); - if (LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; // success - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int32_t), true); - if (LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; // success - } - return -1; // failure -} - -extern "C" int artSet64StaticFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, - uint64_t new_value, Thread* self, mirror::AbstractMethod** sp) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; // success - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int64_t), true); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; // success - } - return -1; // failure -} - -extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value, - const mirror::AbstractMethod* referrer, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, - sizeof(mirror::Object*)); - if (LIKELY(field != NULL)) { - if (LIKELY(!FieldHelper(field).IsPrimitiveType())) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; // success - } - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, StaticObjectWrite, sizeof(mirror::Object*), true); - if (LIKELY(field != NULL)) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; // success - } - return -1; // failure -} - -extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value, - const mirror::AbstractMethod* referrer, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); - if (LIKELY(field != NULL && obj != NULL)) { - field->Set32(obj, new_value); - return 0; // success - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int32_t), true); - if (LIKELY(field 
!= NULL)) { - if (UNLIKELY(obj == NULL)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); - } else { - field->Set32(obj, new_value); - return 0; // success - } - } - return -1; // failure -} - -extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); - mirror::AbstractMethod* referrer = - sp[callee_save->GetFrameSizeInBytes() / sizeof(mirror::AbstractMethod*)]; - mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, - sizeof(int64_t)); - if (LIKELY(field != NULL && obj != NULL)) { - field->Set64(obj, new_value); - return 0; // success - } - *sp = callee_save; - self->SetTopOfStack(sp, 0); - field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int64_t), true); - if (LIKELY(field != NULL)) { - if (UNLIKELY(obj == NULL)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); - } else { - field->Set64(obj, new_value); - return 0; // success - } - } - return -1; // failure -} - -extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, - mirror::Object* new_value, - const mirror::AbstractMethod* referrer, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, - sizeof(mirror::Object*)); - if (LIKELY(field != NULL && obj != NULL)) { - field->SetObj(obj, new_value); - return 0; // success - } - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectWrite, - sizeof(mirror::Object*), true); 
- if (LIKELY(field != NULL)) { - if (UNLIKELY(obj == NULL)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); - } else { - field->SetObj(obj, new_value); - return 0; // success - } - } - return -1; // failure -} - -} // namespace art diff --git a/runtime/oat/runtime/support_fillarray.cc b/runtime/oat/runtime/support_fillarray.cc deleted file mode 100644 index a0b06fb521..0000000000 --- a/runtime/oat/runtime/support_fillarray.cc +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "common_throws.h" -#include "dex_instruction.h" -#include "mirror/array.h" -#include "mirror/object-inl.h" - -namespace art { - -/* - * Fill the array with predefined constant values, throwing exceptions if the array is null or - * not of sufficient length. - * - * NOTE: When dealing with a raw dex file, the data to be copied uses - * little-endian ordering. Require that oat2dex do any required swapping - * so this routine can get by with a memcpy(). 
- * - * Format of the data: - * ushort ident = 0x0300 magic value - * ushort width width of each element in the table - * uint size number of elements in the table - * ubyte data[size*width] table of data values (may contain a single-byte - * padding at the end) - */ -extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array, - const Instruction::ArrayDataPayload* payload, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - DCHECK_EQ(payload->ident, static_cast(Instruction::kArrayDataSignature)); - if (UNLIKELY(array == NULL)) { - ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA"); - return -1; // Error - } - DCHECK(array->IsArrayInstance() && !array->IsObjectArray()); - if (UNLIKELY(static_cast(payload->element_count) > array->GetLength())) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", - "failed FILL_ARRAY_DATA; length=%d, index=%d", - array->GetLength(), payload->element_count); - return -1; // Error - } - uint32_t size_in_bytes = payload->element_count * payload->element_width; - memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes); - return 0; // Success -} - -} // namespace art diff --git a/runtime/oat/runtime/support_instrumentation.cc b/runtime/oat/runtime/support_instrumentation.cc deleted file mode 100644 index 7ecd296742..0000000000 --- a/runtime/oat/runtime/support_instrumentation.cc +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "instrumentation.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object-inl.h" -#include "runtime.h" -#include "thread-inl.h" - -namespace art { - -extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::AbstractMethod* method, - mirror::Object* this_object, - Thread* self, - mirror::AbstractMethod** sp, - uintptr_t lr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); - instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - const void* result = instrumentation->GetQuickCodeFor(method); - bool interpreter_entry = (result == GetInterpreterEntryPoint()); - instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object, - method, lr, interpreter_entry); - CHECK(result != NULL) << PrettyMethod(method); - return result; -} - -extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::AbstractMethod** sp, - uint64_t gpr_result, uint64_t fpr_result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) not the hand inlined below. - // We use the hand inline version to ensure the return_pc is assigned before verifying the - // stack. - // Be aware the store below may well stomp on an incoming argument. 
- Locks::mutator_lock_->AssertSharedHeld(self); - mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly); - *sp = callee_save; - uintptr_t* return_pc = reinterpret_cast(reinterpret_cast(sp) + - callee_save->GetReturnPcOffsetInBytes()); - CHECK_EQ(*return_pc, 0U); - self->SetTopOfStack(sp, 0); - self->VerifyStack(); - instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - uint64_t return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(self, return_pc, - gpr_result, - fpr_result); - self->VerifyStack(); - return return_or_deoptimize_pc; -} - -} // namespace art diff --git a/runtime/oat/runtime/support_interpreter.cc b/runtime/oat/runtime/support_interpreter.cc deleted file mode 100644 index 78b7e10b2a..0000000000 --- a/runtime/oat/runtime/support_interpreter.cc +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "argument_visitor.h" -#include "callee_save_frame.h" -#include "dex_file-inl.h" -#include "interpreter/interpreter.h" -#include "invoke_arg_array_builder.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "object_utils.h" - -namespace art { - -// Visits arguments on the stack placing them into the shadow frame. 
-class BuildShadowFrameVisitor : public QuickArgumentVisitor { - public: - BuildShadowFrameVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, - ShadowFrame& sf, size_t first_arg_reg) : - QuickArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) {} - - virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Primitive::Type type = GetParamPrimitiveType(); - switch (type) { - case Primitive::kPrimLong: // Fall-through. - case Primitive::kPrimDouble: - if (IsSplitLongOrDouble()) { - sf_.SetVRegLong(cur_reg_, ReadSplitLongParam()); - } else { - sf_.SetVRegLong(cur_reg_, *reinterpret_cast(GetParamAddress())); - } - ++cur_reg_; - break; - case Primitive::kPrimNot: - sf_.SetVRegReference(cur_reg_, *reinterpret_cast(GetParamAddress())); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - case Primitive::kPrimFloat: - sf_.SetVReg(cur_reg_, *reinterpret_cast(GetParamAddress())); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - break; - } - ++cur_reg_; - } - - private: - ShadowFrame& sf_; - size_t cur_reg_; - - DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor); -}; - -extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Ensure we don't get thread suspension until the object arguments are safely in the shadow - // frame. 
- const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame"); - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); - - MethodHelper mh(method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - uint16_t num_regs = code_item->registers_size_; - void* memory = alloca(ShadowFrame::ComputeSize(num_regs)); - ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL, // No last shadow coming from quick. - method, 0, memory)); - size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; - BuildShadowFrameVisitor shadow_frame_builder(mh, sp, *shadow_frame, first_arg_reg); - shadow_frame_builder.VisitArguments(); - // Push a transition back into managed code onto the linked list in thread. - ManagedStack fragment; - self->PushManagedStackFragment(&fragment); - self->PushShadowFrame(shadow_frame); - self->EndAssertNoThreadSuspension(old_cause); - - if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) { - // Ensure static method's class is initialized. - if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), - true, true)) { - DCHECK(Thread::Current()->IsExceptionPending()); - self->PopManagedStackFragment(fragment); - return 0; - } - } - - JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame); - // Pop transition. - self->PopManagedStackFragment(fragment); - return result.GetJ(); -} - -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* method = shadow_frame->GetMethod(); - // Ensure static methods are initialized. - if (method->IsStatic()) { - Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true); - } - uint16_t arg_offset = (code_item == NULL) ? 
0 : code_item->registers_size_ - code_item->ins_size_; - ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength()); - arg_array.BuildArgArray(shadow_frame, arg_offset); - method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_invoke.cc b/runtime/oat/runtime/support_invoke.cc deleted file mode 100644 index 6a95f3c8ff..0000000000 --- a/runtime/oat/runtime/support_invoke.cc +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "dex_instruction-inl.h" -#include "mirror/class-inl.h" -#include "mirror/dex_cache-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "runtime_support.h" - -namespace art { - -// Determine target of interface dispatch. This object is known non-null. 
-extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::AbstractMethod* interface_method, - mirror::Object* this_object, - mirror::AbstractMethod* caller_method, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* method; - if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex16)) { - method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method); - if (UNLIKELY(method == NULL)) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object, - caller_method); - return 0; // Failure. - } - } else { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); - DCHECK(interface_method == Runtime::Current()->GetResolutionMethod()); - // Determine method index from calling dex instruction. -#if defined(__arm__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... | | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | LR | - // | ... | callee saves - // | R3 | arg3 - // | R2 | arg2 - // | R1 | arg1 - // | R0 | - // | Method* | <- sp - DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp) + kPointerSize); - uintptr_t caller_pc = regs[10]; -#elif defined(__i386__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... 
| | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | Return | - // | EBP,ESI,EDI | callee saves - // | EBX | arg3 - // | EDX | arg2 - // | ECX | arg1 - // | EAX/Method* | <- sp - DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); - uintptr_t caller_pc = regs[7]; -#elif defined(__mips__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... | | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | RA | - // | ... | callee saves - // | A3 | arg3 - // | A2 | arg2 - // | A1 | arg1 - // | A0/Method* | <- sp - DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); - uintptr_t caller_pc = regs[15]; -#else - UNIMPLEMENTED(FATAL); - uintptr_t caller_pc = 0; -#endif - uint32_t dex_pc = caller_method->ToDexPc(caller_pc); - const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem(); - CHECK_LT(dex_pc, code->insns_size_in_code_units_); - const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); - Instruction::Code instr_code = instr->Opcode(); - CHECK(instr_code == Instruction::INVOKE_INTERFACE || - instr_code == Instruction::INVOKE_INTERFACE_RANGE) - << "Unexpected call into interface trampoline: " << instr->DumpString(NULL); - uint32_t dex_method_idx; - if (instr_code == Instruction::INVOKE_INTERFACE) { - dex_method_idx = instr->VRegB_35c(); - } else { - DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); - dex_method_idx = instr->VRegB_3rc(); - } - method = FindMethodFromCode(dex_method_idx, this_object, caller_method, self, - false, kInterface); - if (UNLIKELY(method == NULL)) { - CHECK(self->IsExceptionPending()); - return 0; // Failure. 
- } - } - const void* code = method->GetEntryPointFromCompiledCode(); - -#ifndef NDEBUG - // When we return, the caller will branch to this address, so it had better not be 0! - if (UNLIKELY(code == NULL)) { - MethodHelper mh(method); - LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) - << " location: " << mh.GetDexFile().GetLocation(); - } -#endif - - uint32_t method_uint = reinterpret_cast(method); - uint64_t code_uint = reinterpret_cast(code); - uint64_t result = ((code_uint << 32) | method_uint); - return result; -} - - -static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, - mirror::AbstractMethod* caller_method, - Thread* self, mirror::AbstractMethod** sp, bool access_check, - InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, - access_check, type); - if (UNLIKELY(method == NULL)) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); - method = FindMethodFromCode(method_idx, this_object, caller_method, self, access_check, type); - if (UNLIKELY(method == NULL)) { - CHECK(self->IsExceptionPending()); - return 0; // failure - } - } - DCHECK(!self->IsExceptionPending()); - const void* code = method->GetEntryPointFromCompiledCode(); - -#ifndef NDEBUG - // When we return, the caller will branch to this address, so it had better not be 0! 
- if (UNLIKELY(code == NULL)) { - MethodHelper mh(method); - LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) - << " location: " << mh.GetDexFile().GetLocation(); - } -#endif - - uint32_t method_uint = reinterpret_cast(method); - uint64_t code_uint = reinterpret_cast(code); - uint64_t result = ((code_uint << 32) | method_uint); - return result; -} - -// See comments in runtime_support_asm.S -extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx, - mirror::Object* this_object, - mirror::AbstractMethod* caller_method, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface); -} - - -extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx, - mirror::Object* this_object, - mirror::AbstractMethod* caller_method, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect); -} - -extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx, - mirror::Object* this_object, - mirror::AbstractMethod* caller_method, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic); -} - -extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx, - mirror::Object* this_object, - mirror::AbstractMethod* caller_method, - Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper); -} - -extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx, - mirror::Object* this_object, - mirror::AbstractMethod* caller_method, - Thread* self, - mirror::AbstractMethod** 
sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_jni.cc b/runtime/oat/runtime/support_jni.cc deleted file mode 100644 index 2d31160a4b..0000000000 --- a/runtime/oat/runtime/support_jni.cc +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "dex_file-inl.h" -#include "mirror/class-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "object_utils.h" -#include "runtime_support.h" -#include "scoped_thread_state_change.h" -#include "thread.h" - -namespace art { - -// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. 
-extern uint32_t JniMethodStart(Thread* self) { - JNIEnvExt* env = self->GetJniEnv(); - DCHECK(env != NULL); - uint32_t saved_local_ref_cookie = env->local_ref_cookie; - env->local_ref_cookie = env->locals.GetSegmentState(); - self->TransitionFromRunnableToSuspended(kNative); - return saved_local_ref_cookie; -} - -extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) { - self->DecodeJObject(to_lock)->MonitorEnter(self); - return JniMethodStart(self); -} - -static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { - JNIEnvExt* env = self->GetJniEnv(); - env->locals.SetSegmentState(env->local_ref_cookie); - env->local_ref_cookie = saved_local_ref_cookie; - self->PopSirt(); -} - -extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) { - self->TransitionFromSuspendedToRunnable(); - PopLocalReferences(saved_local_ref_cookie, self); -} - - -extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, - Thread* self) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); -} - -extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self) { - self->TransitionFromSuspendedToRunnable(); - mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. - if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, Thread* self) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. 
- mirror::Object* o = self->DecodeJObject(result); - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. - if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { - intptr_t value = *arg_ptr; - mirror::Object** value_as_jni_rep = reinterpret_cast(value); - mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL; - CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep)) - << value_as_work_around_rep; - *arg_ptr = reinterpret_cast(value_as_work_around_rep); -} - -extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(Thread::Current() == self); - // TODO: this code is specific to ARM - // On entry the stack pointed by sp is: - // | arg3 | <- Calling JNI method's frame (and extra bit for out args) - // | LR | - // | R3 | arg2 - // | R2 | arg1 - // | R1 | jclass/jobject - // | R0 | JNIEnv - // | unused | - // | unused | - // | unused | <- sp - mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL); - DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method); - intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack - // Fix up this/jclass argument - WorkAroundJniBugsForJobject(arg_ptr); - arg_ptr++; - // Fix up jobject arguments - MethodHelper mh(jni_method); - int reg_num = 2; // Current register being processed, -1 for stack arguments. 
- for (uint32_t i = 1; i < mh.GetShortyLength(); i++) { - char shorty_char = mh.GetShorty()[i]; - if (shorty_char == 'L') { - WorkAroundJniBugsForJobject(arg_ptr); - } - if (shorty_char == 'J' || shorty_char == 'D') { - if (reg_num == 2) { - arg_ptr = sp + 8; // skip to out arguments - reg_num = -1; - } else if (reg_num == 3) { - arg_ptr = sp + 10; // skip to out arguments plus 2 slots as long must be aligned - reg_num = -1; - } else { - DCHECK_EQ(reg_num, -1); - if ((reinterpret_cast(arg_ptr) & 7) == 4) { - arg_ptr += 3; // unaligned, pad and move through stack arguments - } else { - arg_ptr += 2; // aligned, move through stack arguments - } - } - } else { - if (reg_num == 2) { - arg_ptr++; // move through register arguments - reg_num++; - } else if (reg_num == 3) { - arg_ptr = sp + 8; // skip to outgoing stack arguments - reg_num = -1; - } else { - DCHECK_EQ(reg_num, -1); - arg_ptr++; // move through stack arguments - } - } - } - // Load expected destination, see Method::RegisterNative - const void* code = reinterpret_cast(jni_method->GetNativeGcMap()); - if (UNLIKELY(code == NULL)) { - code = GetJniDlsymLookupStub(); - jni_method->RegisterNative(self, code); - } - return code; -} - -} // namespace art diff --git a/runtime/oat/runtime/support_locks.cc b/runtime/oat/runtime/support_locks.cc deleted file mode 100644 index 79bb7a69f1..0000000000 --- a/runtime/oat/runtime/support_locks.cc +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "mirror/object-inl.h" - -namespace art { - -extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self, - mirror::AbstractMethod** sp) - UNLOCK_FUNCTION(monitor_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); - DCHECK(obj != NULL); // Assumed to have been checked before entry - // MonitorExit may throw exception - return obj->MonitorExit(self) ? 0 /* Success */ : -1 /* Failure */; -} - -extern "C" void artLockObjectFromCode(mirror::Object* obj, Thread* thread, - mirror::AbstractMethod** sp) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); - DCHECK(obj != NULL); // Assumed to have been checked before entry - obj->MonitorEnter(thread); // May block - DCHECK(thread->HoldsLock(obj)); - // Only possible exception is NPE and is handled before entry - DCHECK(!thread->IsExceptionPending()); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_math.cc b/runtime/oat/runtime/support_math.cc deleted file mode 100644 index 0bfe59dc2f..0000000000 --- a/runtime/oat/runtime/support_math.cc +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include - -namespace art { - -int CmplFloat(float a, float b) { - if (a == b) { - return 0; - } else if (a < b) { - return -1; - } else if (a > b) { - return 1; - } - return -1; -} - -int CmpgFloat(float a, float b) { - if (a == b) { - return 0; - } else if (a < b) { - return -1; - } else if (a > b) { - return 1; - } - return 1; -} - -int CmpgDouble(double a, double b) { - if (a == b) { - return 0; - } else if (a < b) { - return -1; - } else if (a > b) { - return 1; - } - return 1; -} - -int CmplDouble(double a, double b) { - if (a == b) { - return 0; - } else if (a < b) { - return -1; - } else if (a > b) { - return 1; - } - return -1; -} - -extern "C" int64_t artLmulFromCode(int64_t a, int64_t b) { - return a * b; -} - -extern "C" int64_t artLdivFromCode(int64_t a, int64_t b) { - return a / b; -} - -extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b) { - return a % b; -} - -} // namespace art diff --git a/runtime/oat/runtime/support_proxy.cc b/runtime/oat/runtime/support_proxy.cc deleted file mode 100644 index d4d0ca1034..0000000000 --- a/runtime/oat/runtime/support_proxy.cc +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "argument_visitor.h" -#include "dex_file-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "object_utils.h" -#include "reflection.h" -#include "runtime_support.h" -#include "scoped_thread_state_change.h" -#include "thread.h" -#include "well_known_classes.h" - -#include "ScopedLocalRef.h" - -namespace art { - -// Visits arguments on the stack placing them into the args vector, Object* arguments are converted -// to jobjects. -class BuildPortableArgumentVisitor : public PortableArgumentVisitor { - public: - BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, - ScopedObjectAccessUnchecked& soa, std::vector& args) : - PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} - - virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - jvalue val; - Primitive::Type type = GetParamPrimitiveType(); - switch (type) { - case Primitive::kPrimNot: { - mirror::Object* obj = *reinterpret_cast(GetParamAddress()); - val.l = soa_.AddLocalReference(obj); - break; - } - case Primitive::kPrimLong: // Fall-through. - case Primitive::kPrimDouble: - val.j = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - case Primitive::kPrimFloat: - val.i = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args_.push_back(val); - } - - private: - ScopedObjectAccessUnchecked& soa_; - std::vector& args_; - - DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor); -}; - -// Visits arguments on the stack placing them into the args vector, Object* arguments are converted -// to jobjects. 
-class BuildQuickArgumentVisitor : public QuickArgumentVisitor { - public: - BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, - ScopedObjectAccessUnchecked& soa, std::vector& args) : - QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} - - virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - jvalue val; - Primitive::Type type = GetParamPrimitiveType(); - switch (type) { - case Primitive::kPrimNot: { - mirror::Object* obj = *reinterpret_cast(GetParamAddress()); - val.l = soa_.AddLocalReference(obj); - break; - } - case Primitive::kPrimLong: // Fall-through. - case Primitive::kPrimDouble: - if (IsSplitLongOrDouble()) { - val.j = ReadSplitLongParam(); - } else { - val.j = *reinterpret_cast(GetParamAddress()); - } - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - case Primitive::kPrimFloat: - val.i = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args_.push_back(val); - } - - private: - ScopedObjectAccessUnchecked& soa_; - std::vector& args_; - - DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); -}; - -// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method -// which is responsible for recording callee save registers. We explicitly place into jobjects the -// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a -// field within the proxy object, which will box the primitive arguments and deal with error cases. 
-extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method, - mirror::Object* receiver, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - self->VerifyStack(); - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - // Create local ref. copies of proxy method and the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Placing arguments into args vector and remove the receiver. - MethodHelper proxy_mh(proxy_method); - std::vector args; - BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); - local_ref_visitor.VisitArguments(); - args.erase(args.begin()); - - // Convert proxy method into expected interface method. - mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code - // that performs allocations. - self->EndAssertNoThreadSuspension(old_cause); - JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), - rcvr_jobj, interface_method_jobj, args); - return result.GetJ(); -} - -// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method -// which is responsible for recording callee save registers. We explicitly place into jobjects the -// incoming reference arguments (so they survive GC). 
We invoke the invocation handler, which is a -// field within the proxy object, which will box the primitive arguments and deal with error cases. -extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method, - mirror::Object* receiver, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - // Register the top of the managed stack, making stack crawlable. - DCHECK_EQ(*sp, proxy_method); - self->SetTopOfStack(sp, 0); - DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), - Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - self->VerifyStack(); - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - // Create local ref. copies of proxy method and the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Placing arguments into args vector and remove the receiver. - MethodHelper proxy_mh(proxy_method); - std::vector args; - BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); - local_ref_visitor.VisitArguments(); - args.erase(args.begin()); - - // Convert proxy method into expected interface method. - mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code - // that performs allocations. 
- self->EndAssertNoThreadSuspension(old_cause); - JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), - rcvr_jobj, interface_method_jobj, args); - return result.GetJ(); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_stubs.cc b/runtime/oat/runtime/support_stubs.cc deleted file mode 100644 index f2af6d28dc..0000000000 --- a/runtime/oat/runtime/support_stubs.cc +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "dex_instruction-inl.h" -#include "mirror/class-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "object_utils.h" -#include "scoped_thread_state_change.h" - -// Architecture specific assembler helper to deliver exception. -extern "C" void art_quick_deliver_exception_from_code(void*); - -namespace art { - -// Lazily resolve a method for portable. Called by stub code. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** called_addr, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uint32_t dex_pc; - mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); - - ClassLinker* linker = Runtime::Current()->GetClassLinker(); - InvokeType invoke_type; - bool is_range; - if (called->IsRuntimeMethod()) { - const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); - CHECK_LT(dex_pc, code->insns_size_in_code_units_); - const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); - Instruction::Code instr_code = instr->Opcode(); - switch (instr_code) { - case Instruction::INVOKE_DIRECT: - invoke_type = kDirect; - is_range = false; - break; - case Instruction::INVOKE_DIRECT_RANGE: - invoke_type = kDirect; - is_range = true; - break; - case Instruction::INVOKE_STATIC: - invoke_type = kStatic; - is_range = false; - break; - case Instruction::INVOKE_STATIC_RANGE: - invoke_type = kStatic; - is_range = true; - break; - case Instruction::INVOKE_SUPER: - invoke_type = kSuper; - is_range = false; - break; - case Instruction::INVOKE_SUPER_RANGE: - invoke_type = kSuper; - is_range = true; - break; - case Instruction::INVOKE_VIRTUAL: - invoke_type = kVirtual; - is_range = false; - break; - case Instruction::INVOKE_VIRTUAL_RANGE: - invoke_type = kVirtual; - is_range = true; - break; - case Instruction::INVOKE_INTERFACE: - invoke_type = kInterface; - is_range = false; - break; - case Instruction::INVOKE_INTERFACE_RANGE: - invoke_type = kInterface; - is_range = true; - break; - default: - LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); - // Avoid used uninitialized warnings. - invoke_type = kDirect; - is_range = true; - } - uint32_t dex_method_idx = (is_range) ? 
instr->VRegB_3rc() : instr->VRegB_35c(); - called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); - // Refine called method based on receiver. - if (invoke_type == kVirtual) { - called = receiver->GetClass()->FindVirtualMethodForVirtual(called); - } else if (invoke_type == kInterface) { - called = receiver->GetClass()->FindVirtualMethodForInterface(called); - } - } else { - CHECK(called->IsStatic()) << PrettyMethod(called); - invoke_type = kStatic; - } - const void* code = NULL; - if (LIKELY(!thread->IsExceptionPending())) { - // Incompatible class change should have been handled in resolve method. - CHECK(!called->CheckIncompatibleClassChange(invoke_type)); - // Ensure that the called method's class is initialized. - mirror::Class* called_class = called->GetDeclaringClass(); - linker->EnsureInitialized(called_class, true, true); - if (LIKELY(called_class->IsInitialized())) { - code = called->GetEntryPointFromCompiledCode(); - // TODO: remove this after we solve the link issue. - { // for lazy link. - if (code == NULL) { - code = linker->GetOatCodeFor(called); - } - } - } else if (called_class->IsInitializing()) { - if (invoke_type == kStatic) { - // Class is still initializing, go to oat and grab code (trampoline must be left in place - // until class is initialized to stop races between threads). - code = linker->GetOatCodeFor(called); - } else { - // No trampoline for non-static methods. - code = called->GetEntryPointFromCompiledCode(); - // TODO: remove this after we solve the link issue. - { // for lazy link. - if (code == NULL) { - code = linker->GetOatCodeFor(called); - } - } - } - } else { - DCHECK(called_class->IsErroneous()); - } - } - if (LIKELY(code != NULL)) { - // Expect class to at least be initializing. - DCHECK(called->GetDeclaringClass()->IsInitializing()); - // Don't want infinite recursion. 
- DCHECK(code != GetResolutionTrampoline(linker)); - // Set up entry into main method - *called_addr = called; - } - return code; -} - -// Lazily resolve a method for quick. Called by stub code. -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if defined(__arm__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... | | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | LR | - // | ... | callee saves - // | R3 | arg3 - // | R2 | arg2 - // | R1 | arg1 - // | R0 | - // | Method* | <- sp - DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 48); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp) + kPointerSize); - uint32_t pc_offset = 10; - uintptr_t caller_pc = regs[pc_offset]; -#elif defined(__i386__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... | | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | Return | - // | EBP,ESI,EDI | callee saves - // | EBX | arg3 - // | EDX | arg2 - // | ECX | arg1 - // | EAX/Method* | <- sp - DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 32); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); - uintptr_t caller_pc = regs[7]; -#elif defined(__mips__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... | | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | RA | - // | ... 
| callee saves - // | A3 | arg3 - // | A2 | arg2 - // | A1 | arg1 - // | A0/Method* | <- sp - DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 64); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); - uint32_t pc_offset = 15; - uintptr_t caller_pc = regs[pc_offset]; -#else - UNIMPLEMENTED(FATAL); - mirror::AbstractMethod** caller_sp = NULL; - uintptr_t* regs = NULL; - uintptr_t caller_pc = 0; -#endif - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs); - // Start new JNI local reference state - JNIEnvExt* env = thread->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - - // Compute details about the called method (avoid GCs) - ClassLinker* linker = Runtime::Current()->GetClassLinker(); - mirror::AbstractMethod* caller = *caller_sp; - InvokeType invoke_type; - uint32_t dex_method_idx; -#if !defined(__i386__) - const char* shorty; - uint32_t shorty_len; -#endif - if (called->IsRuntimeMethod()) { - uint32_t dex_pc = caller->ToDexPc(caller_pc); - const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); - CHECK_LT(dex_pc, code->insns_size_in_code_units_); - const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); - Instruction::Code instr_code = instr->Opcode(); - bool is_range; - switch (instr_code) { - case Instruction::INVOKE_DIRECT: - invoke_type = kDirect; - is_range = false; - break; - case Instruction::INVOKE_DIRECT_RANGE: - invoke_type = kDirect; - is_range = true; - break; - case Instruction::INVOKE_STATIC: - invoke_type = kStatic; - is_range = false; - break; - case Instruction::INVOKE_STATIC_RANGE: - invoke_type = kStatic; - is_range = true; - break; - case Instruction::INVOKE_SUPER: - invoke_type = kSuper; - is_range = false; - break; - case Instruction::INVOKE_SUPER_RANGE: - invoke_type = kSuper; - is_range = true; - break; - case 
Instruction::INVOKE_VIRTUAL: - invoke_type = kVirtual; - is_range = false; - break; - case Instruction::INVOKE_VIRTUAL_RANGE: - invoke_type = kVirtual; - is_range = true; - break; - case Instruction::INVOKE_INTERFACE: - invoke_type = kInterface; - is_range = false; - break; - case Instruction::INVOKE_INTERFACE_RANGE: - invoke_type = kInterface; - is_range = true; - break; - default: - LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); - // Avoid used uninitialized warnings. - invoke_type = kDirect; - is_range = false; - } - dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); -#if !defined(__i386__) - shorty = linker->MethodShorty(dex_method_idx, caller, &shorty_len); -#endif - } else { - invoke_type = kStatic; - dex_method_idx = called->GetDexMethodIndex(); -#if !defined(__i386__) - MethodHelper mh(called); - shorty = mh.GetShorty(); - shorty_len = mh.GetShortyLength(); -#endif - } -#if !defined(__i386__) - // Discover shorty (avoid GCs) - size_t args_in_regs = 0; - for (size_t i = 1; i < shorty_len; i++) { - char c = shorty[i]; - args_in_regs = args_in_regs + (c == 'J' || c == 'D' ? 
2 : 1); - if (args_in_regs > 3) { - args_in_regs = 3; - break; - } - } - // Place into local references incoming arguments from the caller's register arguments - size_t cur_arg = 1; // skip method_idx in R0, first arg is in R1 - if (invoke_type != kStatic) { - mirror::Object* obj = reinterpret_cast(regs[cur_arg]); - cur_arg++; - if (args_in_regs < 3) { - // If we thought we had fewer than 3 arguments in registers, account for the receiver - args_in_regs++; - } - soa.AddLocalReference(obj); - } - size_t shorty_index = 1; // skip return value - // Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip - // R0) - while ((cur_arg - 1) < args_in_regs && shorty_index < shorty_len) { - char c = shorty[shorty_index]; - shorty_index++; - if (c == 'L') { - mirror::Object* obj = reinterpret_cast(regs[cur_arg]); - soa.AddLocalReference(obj); - } - cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); - } - // Place into local references incoming arguments from the caller's stack arguments - cur_arg += pc_offset + 1; // skip LR/RA, Method* and spills for R1-R3/A1-A3 and callee saves - while (shorty_index < shorty_len) { - char c = shorty[shorty_index]; - shorty_index++; - if (c == 'L') { - mirror::Object* obj = reinterpret_cast(regs[cur_arg]); - soa.AddLocalReference(obj); - } - cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); - } -#endif - // Resolve method filling in dex cache - if (called->IsRuntimeMethod()) { - called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); - } - const void* code = NULL; - if (LIKELY(!thread->IsExceptionPending())) { - // Incompatible class change should have been handled in resolve method. - CHECK(!called->CheckIncompatibleClassChange(invoke_type)); - // Refine called method based on receiver. 
- if (invoke_type == kVirtual) { - called = receiver->GetClass()->FindVirtualMethodForVirtual(called); - } else if (invoke_type == kInterface) { - called = receiver->GetClass()->FindVirtualMethodForInterface(called); - } - // Ensure that the called method's class is initialized. - mirror::Class* called_class = called->GetDeclaringClass(); - linker->EnsureInitialized(called_class, true, true); - if (LIKELY(called_class->IsInitialized())) { - code = called->GetEntryPointFromCompiledCode(); - } else if (called_class->IsInitializing()) { - if (invoke_type == kStatic) { - // Class is still initializing, go to oat and grab code (trampoline must be left in place - // until class is initialized to stop races between threads). - code = linker->GetOatCodeFor(called); - } else { - // No trampoline for non-static methods. - code = called->GetEntryPointFromCompiledCode(); - } - } else { - DCHECK(called_class->IsErroneous()); - } - } - if (UNLIKELY(code == NULL)) { - // Something went wrong in ResolveMethod or EnsureInitialized, - // go into deliver exception with the pending exception in r0 - CHECK(thread->IsExceptionPending()); - code = reinterpret_cast(art_quick_deliver_exception_from_code); - regs[0] = reinterpret_cast(thread->GetException(NULL)); - thread->ClearException(); - } else { - // Expect class to at least be initializing. - DCHECK(called->GetDeclaringClass()->IsInitializing()); - // Don't want infinite recursion. - DCHECK(code != GetResolutionTrampoline(linker)); - // Set up entry into main method - regs[0] = reinterpret_cast(called); - } - return code; -} - -// Called by the abstract method error stub. 
-extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if !defined(ART_USE_PORTABLE_COMPILER) - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); -#else - UNUSED(sp); -#endif - ThrowAbstractMethodError(method); - self->QuickDeliverException(); -} - -// Used by the JNI dlsym stub to find the native method to invoke if none is registered. -extern "C" void* artFindNativeMethod(Thread* self) { - Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. - DCHECK(Thread::Current() == self); - ScopedObjectAccess soa(self); - - mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); - DCHECK(method != NULL); - - // Lookup symbol address for method, on failure we'll return NULL with an - // exception set, otherwise we return the address of the method we found. - void* native_code = soa.Vm()->FindCodeForNativeMethod(method); - if (native_code == NULL) { - DCHECK(self->IsExceptionPending()); - return NULL; - } else { - // Register so that future calls don't come here - method->RegisterNative(self, native_code); - return native_code; - } -} - -} // namespace art diff --git a/runtime/oat/runtime/support_thread.cc b/runtime/oat/runtime/support_thread.cc deleted file mode 100644 index e7117147a9..0000000000 --- a/runtime/oat/runtime/support_thread.cc +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "runtime_support.h" -#include "thread.h" -#include "thread_list.h" - -namespace art { - -void CheckSuspendFromCode(Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called when thread->suspend_count_ != 0 on JNI return. JNI method acts as callee-save frame. - thread->VerifyStack(); - CheckSuspend(thread); -} - -extern "C" void artTestSuspendFromCode(Thread* thread, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called when suspend count check value is 0 and thread->suspend_count_ != 0 - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly); - CheckSuspend(thread); -} - -} // namespace art diff --git a/runtime/oat/runtime/support_throw.cc b/runtime/oat/runtime/support_throw.cc deleted file mode 100644 index 9588698bb2..0000000000 --- a/runtime/oat/runtime/support_throw.cc +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "callee_save_frame.h" -#include "mirror/object.h" -#include "object_utils.h" -#include "runtime_support.h" -#include "thread.h" -#include "well_known_classes.h" - -namespace art { - -// Deliver an exception that's pending on thread helping set up a callee save frame on the way. 
-extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll); - thread->QuickDeliverException(); -} - -// Called by generated call to throw an exception. -extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - /* - * exception may be NULL, in which case this routine should - * throw NPE. NOTE: this is a convenience for generated code, - * which previously did the null check inline and constructed - * and threw a NPE if NULL. This routine responsible for setting - * exception_ in thread and delivering the exception. - */ - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - if (exception == NULL) { - self->ThrowNewException(throw_location, "Ljava/lang/NullPointerException;", - "throw with null exception"); - } else { - self->SetException(throw_location, exception); - } - self->QuickDeliverException(); -} - -// Called by generated call to throw a NPE exception. -extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionFromDexPC(throw_location); - self->QuickDeliverException(); -} - -// Called by generated call to throw an arithmetic divide by zero exception. 
-extern "C" void artThrowDivZeroFromCode(Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - ThrowArithmeticExceptionDivideByZero(); - self->QuickDeliverException(); -} - -// Called by generated call to throw an array index out of bounds exception. -extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - ThrowArrayIndexOutOfBoundsException(index, length); - self->QuickDeliverException(); -} - -extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - ThrowStackOverflowError(self); - self->QuickDeliverException(); -} - -extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); - ThrowNoSuchMethodError(method_idx); - self->QuickDeliverException(); -} - -} // namespace art diff --git a/runtime/oat/runtime/x86/context_x86.cc b/runtime/oat/runtime/x86/context_x86.cc deleted file mode 100644 index c728ae97ec..0000000000 --- a/runtime/oat/runtime/x86/context_x86.cc +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "context_x86.h" - -#include "mirror/abstract_method.h" -#include "mirror/object-inl.h" -#include "stack.h" - -namespace art { -namespace x86 { - -static const uint32_t gZero = 0; - -void X86Context::Reset() { - for (int i = 0; i < kNumberOfCpuRegisters; i++) { - gprs_[i] = NULL; - } - gprs_[ESP] = &esp_; - // Initialize registers with easy to spot debug values. - esp_ = X86Context::kBadGprBase + ESP; - eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters; -} - -void X86Context::FillCalleeSaves(const StackVisitor& fr) { - mirror::AbstractMethod* method = fr.GetMethod(); - uint32_t core_spills = method->GetCoreSpillMask(); - size_t spill_count = __builtin_popcount(core_spills); - DCHECK_EQ(method->GetFpSpillMask(), 0u); - size_t frame_size = method->GetFrameSizeInBytes(); - if (spill_count > 0) { - // Lowest number spill is farthest away, walk registers and fill into context. - int j = 2; // Offset j to skip return address spill. - for (int i = 0; i < kNumberOfCpuRegisters; i++) { - if (((core_spills >> i) & 1) != 0) { - gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size); - j++; - } - } - } -} - -void X86Context::SmashCallerSaves() { - // This needs to be 0 because we want a null/zero return value. - gprs_[EAX] = const_cast(&gZero); - gprs_[EDX] = const_cast(&gZero); - gprs_[ECX] = NULL; - gprs_[EBX] = NULL; -} - -void X86Context::SetGPR(uint32_t reg, uintptr_t value) { - CHECK_LT(reg, static_cast(kNumberOfCpuRegisters)); - CHECK_NE(gprs_[reg], &gZero); - CHECK(gprs_[reg] != NULL); - *gprs_[reg] = value; -} - -void X86Context::DoLongJump() { -#if defined(__i386__) - // Array of GPR values, filled from the context backward for the long jump pop. We add a slot at - // the top for the stack pointer that doesn't get popped in a pop-all. 
- volatile uintptr_t gprs[kNumberOfCpuRegisters + 1]; - for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) { - gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != NULL ? *gprs_[i] : X86Context::kBadGprBase + i; - } - // We want to load the stack pointer one slot below so that the ret will pop eip. - uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - kWordSize; - gprs[kNumberOfCpuRegisters] = esp; - *(reinterpret_cast(esp)) = eip_; - __asm__ __volatile__( - "movl %0, %%esp\n\t" // ESP points to gprs. - "popal\n\t" // Load all registers except ESP and EIP with values in gprs. - "popl %%esp\n\t" // Load stack pointer. - "ret\n\t" // From higher in the stack pop eip. - : // output. - : "g"(&gprs[0]) // input. - :); // clobber. -#else - UNIMPLEMENTED(FATAL); -#endif -} - -} // namespace x86 -} // namespace art diff --git a/runtime/oat/runtime/x86/context_x86.h b/runtime/oat/runtime/x86/context_x86.h deleted file mode 100644 index 4ecfc51b04..0000000000 --- a/runtime/oat/runtime/x86/context_x86.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_OAT_RUNTIME_X86_CONTEXT_X86_H_ -#define ART_RUNTIME_OAT_RUNTIME_X86_CONTEXT_X86_H_ - -#include "constants_x86.h" -#include "oat/runtime/context.h" - -namespace art { -namespace x86 { - -class X86Context : public Context { - public: - X86Context() { - Reset(); - } - virtual ~X86Context() {} - - virtual void Reset(); - - virtual void FillCalleeSaves(const StackVisitor& fr); - - virtual void SetSP(uintptr_t new_sp) { - SetGPR(ESP, new_sp); - } - - virtual void SetPC(uintptr_t new_pc) { - eip_ = new_pc; - } - - virtual uintptr_t GetGPR(uint32_t reg) { - CHECK_LT(reg, static_cast(kNumberOfCpuRegisters)); - return *gprs_[reg]; - } - - virtual void SetGPR(uint32_t reg, uintptr_t value); - - virtual void SmashCallerSaves(); - virtual void DoLongJump(); - - private: - // Pointers to register locations, floating point registers are all caller save. Values are - // initialized to NULL or the special registers below. - uintptr_t* gprs_[kNumberOfCpuRegisters]; - // Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat - // special in that it cannot be encoded normally as a register operand to an instruction (except - // in 64bit addressing modes). - uintptr_t esp_, eip_; -}; -} // namespace x86 -} // namespace art - -#endif // ART_RUNTIME_OAT_RUNTIME_X86_CONTEXT_X86_H_ diff --git a/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc b/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc deleted file mode 100644 index 7dfb07c21e..0000000000 --- a/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "oat/runtime/oat_support_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. -extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Field entrypoints. 
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. -extern "C" double art_quick_fmod_from_code(double, double); -extern "C" float art_quick_fmodf_from_code(float, float); -extern "C" double art_quick_l2d_from_code(int64_t); -extern "C" float art_quick_l2f_from_code(int64_t); -extern "C" int64_t art_quick_d2l_from_code(double); -extern "C" int64_t art_quick_f2l_from_code(float); -extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t); -extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t); -extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t); - -// Interpreter entrypoints. 
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); -extern "C" void* art_quick_memcpy(void*, const void*, size_t); - -// Invoke entrypoints. -extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. 
-extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(EntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - - // Cast - points->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - 
points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - // points->pCmpgDouble = NULL; // Not needed on x86. - // points->pCmpgFloat = NULL; // Not needed on x86. - // points->pCmplDouble = NULL; // Not needed on x86. - // points->pCmplFloat = NULL; // Not needed on x86. - points->pFmod = art_quick_fmod_from_code; - points->pL2d = art_quick_l2d_from_code; - points->pFmodf = art_quick_fmodf_from_code; - points->pL2f = art_quick_l2f_from_code; - // points->pD2iz = NULL; // Not needed on x86. - // points->pF2iz = NULL; // Not needed on x86. 
- points->pIdivmod = art_quick_idivmod_from_code; - points->pD2l = art_quick_d2l_from_code; - points->pF2l = art_quick_f2l_from_code; - points->pLdiv = art_quick_ldiv_from_code; - points->pLdivmod = art_quick_ldivmod_from_code; - points->pLmul = art_quick_lmul_from_code; - points->pShlLong = art_quick_lshl_from_code; - points->pShrLong = art_quick_lshr_from_code; - points->pUshrLong = art_quick_lushr_from_code; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = art_quick_memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = art_quick_memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - 
points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/oat/runtime/x86/runtime_support_x86.S b/runtime/oat/runtime/x86/runtime_support_x86.S deleted file mode 100644 index ee6db0c3f8..0000000000 --- a/runtime/oat/runtime/x86/runtime_support_x86.S +++ /dev/null @@ -1,1211 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "asm_support.h" - -#if defined(__APPLE__) - // Mac OS' as(1) doesn't let you name macro parameters. - #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name - #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name - #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name - #define END_MACRO .endmacro - - // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names - // are mangled with an extra underscore prefix. The use of $x for arguments - // mean that literals need to be represented with $$x in macros. - #define SYMBOL(name) _ ## name - #define VAR(name,index) SYMBOL($index) - #define REG_VAR(name,index) %$index - #define CALL_MACRO(name,index) $index - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $$value -#else - // Regular gas(1) lets you name macro parameters. 
- #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 - #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 - #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 - #define END_MACRO .endm - - // Regular gas(1) uses \argument_name for macro arguments. - // We need to turn on alternate macro syntax so we can use & instead or the preprocessor - // will screw us by inserting a space between the \ and the name. Even in this mode there's - // no special meaning to $, so literals are still just $x. The use of altmacro means % is a - // special character meaning care needs to be taken when passing registers as macro arguments. - .altmacro - #define SYMBOL(name) name - #define VAR(name,index) name& - #define REG_VAR(name,index) %name - #define CALL_MACRO(name,index) name& - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $value -#endif - - /* Cache alignment for function entry */ -MACRO0(ALIGN_FUNCTION_ENTRY) - .balign 16 -END_MACRO - -MACRO1(DEFINE_FUNCTION, c_name) - .type VAR(c_name, 0), @function - .globl VAR(c_name, 0) - ALIGN_FUNCTION_ENTRY -VAR(c_name, 0): - .cfi_startproc -END_MACRO - -MACRO1(END_FUNCTION, c_name) - .cfi_endproc - .size \c_name, .-\c_name -END_MACRO - -MACRO1(PUSH, reg) - pushl REG_VAR(reg, 0) - .cfi_adjust_cfa_offset 4 - .cfi_rel_offset REG_VAR(reg, 0), 0 -END_MACRO - -MACRO1(POP, reg) - popl REG_VAR(reg,0) - .cfi_adjust_cfa_offset -4 - .cfi_restore REG_VAR(reg,0) -END_MACRO - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kSaveAll) - */ -MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME) - PUSH edi // Save callee saves (ebx is saved/restored by the upcall) - PUSH esi - PUSH ebp - subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method* - .cfi_adjust_cfa_offset 16 -END_MACRO - - /* - * Macro that 
sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kRefsOnly) - */ -MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME) - PUSH edi // Save callee saves (ebx is saved/restored by the upcall) - PUSH esi - PUSH ebp - subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method* - .cfi_adjust_cfa_offset 16 -END_MACRO - -MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME) - addl MACRO_LITERAL(28), %esp // Unwind stack up to return address - .cfi_adjust_cfa_offset -28 -END_MACRO - - /* - * Macro that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kRefsAndArgs) - */ -MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME) - PUSH edi // Save callee saves - PUSH esi - PUSH ebp - PUSH ebx // Save args - PUSH edx - PUSH ecx - PUSH eax // Align stack, eax will be clobbered by Method* -END_MACRO - -MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME) - addl MACRO_LITERAL(4), %esp // Remove padding - .cfi_adjust_cfa_offset -4 - POP ecx // Restore args except eax - POP edx - POP ebx - POP ebp // Restore callee saves - POP esi - POP edi -END_MACRO - - /* - * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending - * exception is Thread::Current()->exception_. 
- */ -MACRO0(DELIVER_PENDING_EXCEPTION) - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save callee saves for throw - mov %esp, %ecx - // Outgoing argument set up - subl MACRO_LITERAL(8), %esp // Alignment padding - .cfi_adjust_cfa_offset 8 - PUSH ecx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP) - int3 // unreached -END_MACRO - -MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context - mov %esp, %ecx - // Outgoing argument set up - subl MACRO_LITERAL(8), %esp // alignment padding - .cfi_adjust_cfa_offset 8 - PUSH ecx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - call VAR(cxx_name, 1) // cxx_name(Thread*, SP) - int3 // unreached - END_FUNCTION VAR(c_name, 0) -END_MACRO - -MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context - mov %esp, %ecx - // Outgoing argument set up - PUSH eax // alignment padding - PUSH ecx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP) - int3 // unreached - END_FUNCTION VAR(c_name, 0) -END_MACRO - -MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context - mov %esp, %edx - // Outgoing argument set up - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP) - int3 // unreached - END_FUNCTION VAR(c_name, 0) -END_MACRO - - /* - * 
Called by managed code to create and deliver a NullPointerException. - */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode - - /* - * Called by managed code to create and deliver an ArithmeticException. - */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode - - /* - * Called by managed code to create and deliver a StackOverflowError. - */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode - - /* - * Called by managed code, saves callee saves and then calls artThrowException - * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. - */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode - - /* - * Called by managed code to create and deliver a NoSuchMethodError. - */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode - - /* - * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds - * index, arg2 holds limit. - */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode - - /* - * All generated callsites for interface invokes and invocation slow paths will load arguments - * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain - * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the - * stack and call the appropriate C helper. - * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1. - * - * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting - * of the target Method* in r0 and method->code_ in r1. - * - * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the - * thread and we branch to another stub to deliver it. 
- * - * On success this wrapper will restore arguments and *jump* to the target, leaving the lr - * pointing back to the original caller. - */ -MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) - // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs) - // return address - PUSH edi - PUSH esi - PUSH ebp - PUSH ebx - PUSH edx - PUSH ecx - PUSH eax // <-- callee save Method* to go here - movl %esp, %edx // remember SP - // Outgoing argument set up - subl MACRO_LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - pushl 32(%edx) // pass caller Method* - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP) - movl %edx, %edi // save code pointer in EDI - addl MACRO_LITERAL(36), %esp // Pop arguments skip eax - .cfi_adjust_cfa_offset -36 - POP ecx // Restore args - POP edx - POP ebx - POP ebp // Restore callee saves. - POP esi - // Swap EDI callee save with code pointer. - xchgl %edi, (%esp) - testl %eax, %eax // Branch forward if exception pending. - jz 1f - // Tail call to intended method. 
- ret -1: - addl MACRO_LITERAL(4), %esp // Pop code pointer off stack - .cfi_adjust_cfa_offset -4 - DELIVER_PENDING_EXCEPTION - END_FUNCTION VAR(c_name, 0) -END_MACRO - -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck - -INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck -INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck - - /* - * Portable invocation stub. - * On entry: - * [sp] = return address - * [sp + 4] = method pointer - * [sp + 8] = argument array or NULL for no argument methods - * [sp + 12] = size of argument array in bytes - * [sp + 16] = (managed) thread pointer - * [sp + 20] = JValue* result - * [sp + 24] = result type char - */ -DEFINE_FUNCTION art_portable_invoke_stub - PUSH ebp // save ebp - PUSH ebx // save ebx - mov %esp, %ebp // copy value of stack pointer into base pointer - .cfi_def_cfa_register ebp - mov 20(%ebp), %ebx // get arg array size - addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame - andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes - subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp - subl %ebx, %esp // reserve stack space for argument array - lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy - pushl 20(%ebp) // push size of region to memcpy - pushl 16(%ebp) // push arg array as source of memcpy - pushl %eax // push stack pointer as destination of memcpy - call SYMBOL(memcpy) // (void*, const void*, size_t) - addl LITERAL(12), %esp // pop 
arguments to memcpy - mov 12(%ebp), %eax // move method pointer into eax - mov %eax, (%esp) // push method pointer onto stack - call *METHOD_CODE_OFFSET(%eax) // call the method - mov %ebp, %esp // restore stack pointer - POP ebx // pop ebx - POP ebp // pop ebp - mov 20(%esp), %ecx // get result pointer - cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' - je return_double_portable - cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' - je return_float_portable - mov %eax, (%ecx) // store the result - mov %edx, 4(%ecx) // store the other half of the result - ret -return_double_portable: - fstpl (%ecx) // store the floating point result as double - ret -return_float_portable: - fstps (%ecx) // store the floating point result as float - ret -END_FUNCTION art_portable_invoke_stub - - /* - * Quick invocation stub. - * On entry: - * [sp] = return address - * [sp + 4] = method pointer - * [sp + 8] = argument array or NULL for no argument methods - * [sp + 12] = size of argument array in bytes - * [sp + 16] = (managed) thread pointer - * [sp + 20] = JValue* result - * [sp + 24] = result type char - */ -DEFINE_FUNCTION art_quick_invoke_stub - PUSH ebp // save ebp - PUSH ebx // save ebx - mov %esp, %ebp // copy value of stack pointer into base pointer - .cfi_def_cfa_register ebp - mov 20(%ebp), %ebx // get arg array size - addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame - andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes - subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp - subl %ebx, %esp // reserve stack space for argument array - lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy - pushl 20(%ebp) // push size of region to memcpy - pushl 16(%ebp) // push arg array as source of memcpy - pushl %eax // push stack pointer as destination of memcpy - call SYMBOL(memcpy) // (void*, const void*, size_t) - addl LITERAL(12), %esp // pop arguments to memcpy - movl 
LITERAL(0), (%esp) // store NULL for method* - mov 12(%ebp), %eax // move method pointer into eax - mov 4(%esp), %ecx // copy arg1 into ecx - mov 8(%esp), %edx // copy arg2 into edx - mov 12(%esp), %ebx // copy arg3 into ebx - call *METHOD_CODE_OFFSET(%eax) // call the method - mov %ebp, %esp // restore stack pointer - POP ebx // pop ebx - POP ebp // pop ebp - mov 20(%esp), %ecx // get result pointer - cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' - je return_double_quick - cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' - je return_float_quick - mov %eax, (%ecx) // store the result - mov %edx, 4(%ecx) // store the other half of the result - ret -return_double_quick: -return_float_quick: - movsd %xmm0, (%ecx) // store the floating point result - ret -END_FUNCTION art_quick_invoke_stub - -MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - // Outgoing argument set up - subl MACRO_LITERAL(8), %esp // push padding - .cfi_adjust_cfa_offset 8 - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - call VAR(cxx_name, 1) // cxx_name(Thread*, SP) - addl MACRO_LITERAL(16), %esp // pop arguments - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) -END_MACRO - -MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - // Outgoing argument set up - PUSH eax // push padding - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP) - addl 
MACRO_LITERAL(16), %esp // pop arguments - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) -END_MACRO - -MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - // Outgoing argument set up - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP) - addl MACRO_LITERAL(16), %esp // pop arguments - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) -END_MACRO - -MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - // Outgoing argument set up - subl MACRO_LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH edx // pass arg3 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP) - addl MACRO_LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) -END_MACRO - -MACRO0(RETURN_IF_EAX_NOT_ZERO) - testl %eax, %eax // eax == 0 ? 
- jz 1f // if eax == 0 goto 1 - ret // return -1: // deliver exception on current thread - DELIVER_PENDING_EXCEPTION -END_MACRO - -MACRO0(RETURN_IF_EAX_ZERO) - testl %eax, %eax // eax == 0 ? - jnz 1f // if eax != 0 goto 1 - ret // return -1: // deliver exception on current thread - DELIVER_PENDING_EXCEPTION -END_MACRO - -MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION) - mov %fs:THREAD_EXCEPTION_OFFSET, %ebx // get exception field - testl %ebx, %ebx // ebx == 0 ? - jnz 1f // if ebx != 0 goto 1 - ret // return -1: // deliver exception on current thread - DELIVER_PENDING_EXCEPTION -END_MACRO - -TWO_ARG_DOWNCALL art_quick_alloc_object_from_code, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_alloc_object_from_code_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_alloc_array_from_code, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_alloc_array_from_code_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO - -TWO_ARG_DOWNCALL art_quick_resolve_string_from_code, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_initialize_static_storage_from_code, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_initialize_type_from_code, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access_from_code, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO - -ONE_ARG_DOWNCALL art_quick_lock_object_from_code, artLockObjectFromCode, ret -ONE_ARG_DOWNCALL art_quick_unlock_object_from_code, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO - -TWO_ARG_DOWNCALL 
art_quick_handle_fill_data_from_code, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO - -DEFINE_FUNCTION art_quick_is_assignable_from_code - PUSH eax // alignment padding - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b, Thread*, SP) - addl LITERAL(12), %esp // pop arguments - .cfi_adjust_cfa_offset -12 - ret -END_FUNCTION art_quick_is_assignable_from_code - -DEFINE_FUNCTION art_quick_memcpy - PUSH edx // pass arg3 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call SYMBOL(memcpy) // (void*, const void*, size_t) - addl LITERAL(12), %esp // pop arguments - .cfi_adjust_cfa_offset -12 - ret -END_FUNCTION art_quick_memcpy - -TWO_ARG_DOWNCALL art_quick_check_cast_from_code, artCheckCastFromCode, RETURN_IF_EAX_ZERO -TWO_ARG_DOWNCALL art_quick_can_put_array_element_from_code, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO - -NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret - -DEFINE_FUNCTION art_quick_fmod_from_code - subl LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass arg4 b.hi - PUSH edx // pass arg3 b.lo - PUSH ecx // pass arg2 a.hi - PUSH eax // pass arg1 a.lo - call SYMBOL(fmod) // (jdouble a, jdouble b) - fstpl (%esp) // pop return value off fp stack - movsd (%esp), %xmm0 // place into %xmm0 - addl LITERAL(28), %esp // pop arguments - .cfi_adjust_cfa_offset -28 - ret -END_FUNCTION art_quick_fmod_from_code - -DEFINE_FUNCTION art_quick_fmodf_from_code - PUSH eax // alignment padding - PUSH ecx // pass arg2 b - PUSH eax // pass arg1 a - call SYMBOL(fmodf) // (jfloat a, jfloat b) - fstps (%esp) // pop return value off fp stack - movss (%esp), %xmm0 // place into %xmm0 - addl LITERAL(12), %esp // pop arguments - .cfi_adjust_cfa_offset -12 - ret -END_FUNCTION art_quick_fmodf_from_code - -DEFINE_FUNCTION art_quick_l2d_from_code - PUSH ecx // push arg2 a.hi - PUSH eax // push arg1 a.lo - fildll (%esp) // load as integer and push into st0 - fstpl 
(%esp) // pop value off fp stack as double - movsd (%esp), %xmm0 // place into %xmm0 - addl LITERAL(8), %esp // pop arguments - .cfi_adjust_cfa_offset -8 - ret -END_FUNCTION art_quick_l2d_from_code - -DEFINE_FUNCTION art_quick_l2f_from_code - PUSH ecx // push arg2 a.hi - PUSH eax // push arg1 a.lo - fildll (%esp) // load as integer and push into st0 - fstps (%esp) // pop value off fp stack as a single - movss (%esp), %xmm0 // place into %xmm0 - addl LITERAL(8), %esp // pop argument - .cfi_adjust_cfa_offset -8 - ret -END_FUNCTION art_quick_l2f_from_code - -DEFINE_FUNCTION art_quick_d2l_from_code - PUSH eax // alignment padding - PUSH ecx // pass arg2 a.hi - PUSH eax // pass arg1 a.lo - call SYMBOL(art_d2l) // (jdouble a) - addl LITERAL(12), %esp // pop arguments - .cfi_adjust_cfa_offset -12 - ret -END_FUNCTION art_quick_d2l_from_code - -DEFINE_FUNCTION art_quick_f2l_from_code - subl LITERAL(8), %esp // alignment padding - .cfi_adjust_cfa_offset 8 - PUSH eax // pass arg1 a - call SYMBOL(art_f2l) // (jfloat a) - addl LITERAL(12), %esp // pop arguments - .cfi_adjust_cfa_offset -12 - ret -END_FUNCTION art_quick_f2l_from_code - -DEFINE_FUNCTION art_quick_idivmod_from_code - cmpl LITERAL(0x80000000), %eax - je check_arg2 // special case -args_ok: - cdq // edx:eax = sign extend eax - idiv %ecx // (edx,eax) = (edx:eax % ecx, edx:eax / ecx) - ret -check_arg2: - cmpl LITERAL(-1), %ecx - jne args_ok - xorl %edx, %edx - ret // eax already holds min int -END_FUNCTION art_quick_idivmod_from_code - -DEFINE_FUNCTION art_quick_ldiv_from_code - subl LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass arg4 b.hi - PUSH edx // pass arg3 b.lo - PUSH ecx // pass arg2 a.hi - PUSH eax // pass arg1 a.lo - call SYMBOL(artLdivFromCode) // (jlong a, jlong b) - addl LITERAL(28), %esp // pop arguments - .cfi_adjust_cfa_offset -28 - ret -END_FUNCTION art_quick_ldiv_from_code - -DEFINE_FUNCTION art_quick_ldivmod_from_code - subl LITERAL(12), %esp // alignment 
padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass arg4 b.hi - PUSH edx // pass arg3 b.lo - PUSH ecx // pass arg2 a.hi - PUSH eax // pass arg1 a.lo - call SYMBOL(artLdivmodFromCode) // (jlong a, jlong b) - addl LITERAL(28), %esp // pop arguments - .cfi_adjust_cfa_offset -28 - ret -END_FUNCTION art_quick_ldivmod_from_code - -DEFINE_FUNCTION art_quick_lmul_from_code - imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx) - imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx) - mul %edx // edx:eax = a.lo(eax) * b.lo(edx) - add %ebx, %ecx - add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi) - ret -END_FUNCTION art_quick_lmul_from_code - -DEFINE_FUNCTION art_quick_lshl_from_code - // ecx:eax << edx - xchg %edx, %ecx - shld %cl,%eax,%edx - shl %cl,%eax - test LITERAL(32), %cl - jz 1f - mov %eax, %edx - xor %eax, %eax -1: - ret -END_FUNCTION art_quick_lshl_from_code - -DEFINE_FUNCTION art_quick_lshr_from_code - // ecx:eax >> edx - xchg %edx, %ecx - shrd %cl,%edx,%eax - sar %cl,%edx - test LITERAL(32),%cl - jz 1f - mov %edx, %eax - sar LITERAL(31), %edx -1: - ret -END_FUNCTION art_quick_lshr_from_code - -DEFINE_FUNCTION art_quick_lushr_from_code - // ecx:eax >>> edx - xchg %edx, %ecx - shrd %cl,%edx,%eax - shr %cl,%edx - test LITERAL(32),%cl - jz 1f - mov %edx, %eax - xor %edx, %edx -1: - ret -END_FUNCTION art_quick_lushr_from_code - -DEFINE_FUNCTION art_quick_set32_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - subl LITERAL(8), %esp // alignment padding - .cfi_adjust_cfa_offset 8 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - mov 32(%ebx), %ebx // get referrer - PUSH ebx // pass referrer - PUSH edx // pass new_val - PUSH ecx // pass object - PUSH eax // pass field_idx - call SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - 
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set32_instance_from_code - -DEFINE_FUNCTION art_quick_set64_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - subl LITERAL(8), %esp // alignment padding - .cfi_adjust_cfa_offset 8 - PUSH esp // pass SP-8 - addl LITERAL(8), (%esp) // fix SP on stack by adding 8 - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ebx // pass high half of new_val - PUSH edx // pass low half of new_val - PUSH ecx // pass object - PUSH eax // pass field_idx - call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set64_instance_from_code - -DEFINE_FUNCTION art_quick_set_obj_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - subl LITERAL(8), %esp // alignment padding - .cfi_adjust_cfa_offset 8 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - mov 32(%ebx), %ebx // get referrer - PUSH ebx // pass referrer - PUSH edx // pass new_val - PUSH ecx // pass object - PUSH eax // pass field_idx - call SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set_obj_instance_from_code - -DEFINE_FUNCTION art_quick_get32_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember 
SP - mov 32(%esp), %edx // get referrer - subl LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH edx // pass referrer - PUSH ecx // pass object - PUSH eax // pass field_idx - call SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get32_instance_from_code - -DEFINE_FUNCTION art_quick_get64_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - mov 32(%esp), %edx // get referrer - subl LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH edx // pass referrer - PUSH ecx // pass object - PUSH eax // pass field_idx - call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get64_instance_from_code - -DEFINE_FUNCTION art_quick_get_obj_instance_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - mov 32(%esp), %edx // get referrer - subl LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH edx // pass referrer - PUSH ecx // pass object - PUSH eax // pass field_idx - call SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, 
referrer, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get_obj_instance_from_code - -DEFINE_FUNCTION art_quick_set32_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - mov 32(%esp), %edx // get referrer - subl LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH edx // pass referrer - PUSH ecx // pass new_val - PUSH eax // pass field_idx - call SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set32_static_from_code - -DEFINE_FUNCTION art_quick_set64_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - subl LITERAL(8), %esp // alignment padding - .cfi_adjust_cfa_offset 8 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - mov 32(%ebx), %ebx // get referrer - PUSH edx // pass high half of new_val - PUSH ecx // pass low half of new_val - PUSH ebx // pass referrer - PUSH eax // pass field_idx - call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - .cfi_adjust_cfa_offset -32 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set64_static_from_code - -DEFINE_FUNCTION art_quick_set_obj_static_from_code - 
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %ebx // remember SP - mov 32(%esp), %edx // get referrer - subl LITERAL(12), %esp // alignment padding - .cfi_adjust_cfa_offset 12 - PUSH ebx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH edx // pass referrer - PUSH ecx // pass new_val - PUSH eax // pass field_idx - call SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP) - addl LITERAL(32), %esp // pop arguments - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set_obj_static_from_code - -DEFINE_FUNCTION art_quick_get32_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - mov 32(%esp), %ecx // get referrer - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass referrer - PUSH eax // pass field_idx - call SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP) - addl LITERAL(16), %esp // pop arguments - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get32_static_from_code - -DEFINE_FUNCTION art_quick_get64_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - mov 32(%esp), %ecx // get referrer - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass referrer - PUSH eax // pass field_idx - call SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP) - addl LITERAL(16), %esp // pop arguments - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - 
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get64_static_from_code - -DEFINE_FUNCTION art_quick_get_obj_static_from_code - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - mov 32(%esp), %ecx // get referrer - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass referrer - PUSH eax // pass field_idx - call SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP) - addl LITERAL(16), %esp // pop arguments - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get_obj_static_from_code - -DEFINE_FUNCTION art_portable_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* - PUSH esp // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass receiver - PUSH eax // pass proxy method - call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) - movd %eax, %xmm0 // place return value also into floating point return value - movd %edx, %xmm1 - punpckldq %xmm1, %xmm0 - addl LITERAL(44), %esp // pop arguments - .cfi_adjust_cfa_offset -44 - ret -END_FUNCTION art_portable_proxy_invoke_handler - -DEFINE_FUNCTION art_quick_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* - PUSH esp // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass receiver - PUSH eax // pass proxy method - call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) - movd %eax, %xmm0 // place return value also into floating point return value - movd %edx, %xmm1 - punpckldq %xmm1, %xmm0 - addl LITERAL(44), %esp // pop arguments - .cfi_adjust_cfa_offset -44 - 
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_proxy_invoke_handler - -DEFINE_FUNCTION art_quick_interpreter_entry - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame - mov %esp, %edx // remember SP - PUSH eax // alignment padding - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH eax // pass method - call SYMBOL(artInterpreterEntry) // (method, Thread*, SP) - movd %eax, %xmm0 // place return value also into floating point return value - movd %edx, %xmm1 - punpckldq %xmm1, %xmm0 - addl LITERAL(44), %esp // pop arguments - .cfi_adjust_cfa_offset -44 - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_interpreter_entry - - /* - * Routine that intercepts method calls and returns. - */ -DEFINE_FUNCTION art_quick_instrumentation_entry_from_code - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - movl %esp, %edx // Save SP. - PUSH eax // Save eax which will be clobbered by the callee-save method. - subl LITERAL(8), %esp // Align stack. - .cfi_adjust_cfa_offset 8 - pushl 40(%esp) // Pass LR. - .cfi_adjust_cfa_offset 4 - PUSH edx // Pass SP. - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - .cfi_adjust_cfa_offset 4 - PUSH ecx // Pass receiver. - PUSH eax // Pass Method*. - call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR) - addl LITERAL(28), %esp // Pop arguments upto saved Method*. - movl 28(%esp), %edi // Restore edi. - movl %eax, 28(%esp) // Place code* over edi, just under return pc. - movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 32(%esp) - // Place instrumentation exit as return pc. - movl (%esp), %eax // Restore eax. - movl 8(%esp), %ecx // Restore ecx. - movl 12(%esp), %edx // Restore edx. - movl 16(%esp), %ebx // Restore ebx. - movl 20(%esp), %ebp // Restore ebp. - movl 24(%esp), %esi // Restore esi. - addl LITERAL(28), %esp // Wind stack back upto code*. 
- ret // Call method (and pop). -END_FUNCTION art_quick_instrumentation_entry_from_code - -DEFINE_FUNCTION art_quick_instrumentation_exit_from_code - pushl LITERAL(0) // Push a fake return PC as there will be none on the stack. - SETUP_REF_ONLY_CALLEE_SAVE_FRAME - mov %esp, %ecx // Remember SP - subl LITERAL(8), %esp // Save float return value. - .cfi_adjust_cfa_offset 8 - movd %xmm0, (%esp) - PUSH edx // Save gpr return value. - PUSH eax - subl LITERAL(8), %esp // Align stack - movd %xmm0, (%esp) - subl LITERAL(8), %esp // Pass float return value. - .cfi_adjust_cfa_offset 8 - movd %xmm0, (%esp) - PUSH edx // Pass gpr return value. - PUSH eax - PUSH ecx // Pass SP. - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current. - .cfi_adjust_cfa_offset 4 - call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result) - mov %eax, %ecx // Move returned link register. - addl LITERAL(32), %esp // Pop arguments. - .cfi_adjust_cfa_offset -32 - movl %edx, %ebx // Move returned link register for deopt - // (ebx is pretending to be our LR). - POP eax // Restore gpr return value. - POP edx - movd (%esp), %xmm0 // Restore fpr return value. - addl LITERAL(8), %esp - .cfi_adjust_cfa_offset -8 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - addl LITERAL(4), %esp // Remove fake return pc. - jmp *%ecx // Return. -END_FUNCTION art_quick_instrumentation_exit_from_code - - /* - * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization - * will long jump to the upcall with a special exception of -1. - */ -DEFINE_FUNCTION art_quick_deoptimize - pushl %ebx // Fake that we were called. - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - mov %esp, %ecx // Remember SP. - subl LITERAL(8), %esp // Align stack. - .cfi_adjust_cfa_offset 8 - PUSH ecx // Pass SP. - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - .cfi_adjust_cfa_offset 4 - call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP) - int3 // Unreachable. 
-END_FUNCTION art_quick_deoptimize - - /* - * Portable abstract method error stub. method* is at %esp + 4 on entry. - */ -DEFINE_FUNCTION art_portable_abstract_method_error_stub - PUSH ebp - movl %esp, %ebp // Remember SP. - .cfi_def_cfa_register ebp - subl LITERAL(12), %esp // Align stack. - PUSH esp // Pass sp (not used). - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - pushl 8(%ebp) // Pass Method*. - call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) - leave // Restore the stack and %ebp. - .cfi_def_cfa esp, 4 - .cfi_restore ebp - ret // Return to caller to handle pending exception. -END_FUNCTION art_portable_abstract_method_error_stub - - /* - * Quick abstract method error stub. %eax contains method* on entry. - */ -DEFINE_FUNCTION art_quick_abstract_method_error_stub - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - movl %esp, %ecx // Remember SP. - PUSH eax // Align frame. - PUSH ecx // Pass SP for Method*. - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - .cfi_adjust_cfa_offset 4 - PUSH eax // Pass Method*. - call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) - int3 // Unreachable. -END_FUNCTION art_quick_abstract_method_error_stub - - /* - * Portable resolution trampoline. - */ -DEFINE_FUNCTION art_jni_dlsym_lookup_stub - subl LITERAL(8), %esp // align stack - .cfi_adjust_cfa_offset 8 - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - call SYMBOL(artFindNativeMethod) // (Thread*) - addl LITERAL(12), %esp // restore the stack - .cfi_adjust_cfa_offset -12 - cmpl LITERAL(0), %eax // check if returned method code is null - je no_native_code_found // if null, jump to return to handle - jmp *%eax // otherwise, tail call to intended method -no_native_code_found: - ret -END_FUNCTION art_jni_dlsym_lookup_stub - - /* - * String's indexOf. 
- * - * On entry: - * eax: string object (known non-null) - * ecx: char to match (known <= 0xFFFF) - * edx: Starting offset in string data - */ -DEFINE_FUNCTION art_quick_indexof - PUSH edi // push callee save reg - mov STRING_COUNT_OFFSET(%eax), %ebx - mov STRING_VALUE_OFFSET(%eax), %edi - mov STRING_OFFSET_OFFSET(%eax), %eax - testl %edx, %edx // check if start < 0 - jl clamp_min -clamp_done: - cmpl %ebx, %edx // check if start >= count - jge not_found - lea STRING_DATA_OFFSET(%edi, %eax, 2), %edi // build a pointer to the start of string data - mov %edi, %eax // save a copy in eax to later compute result - lea (%edi, %edx, 2), %edi // build pointer to start of data to compare - subl %edx, %ebx // compute iteration count - /* - * At this point we have: - * eax: original start of string data - * ecx: char to compare - * ebx: length to compare - * edi: start of data to test - */ - mov %eax, %edx - mov %ecx, %eax // put char to match in %eax - mov %ebx, %ecx // put length to compare in %ecx - repne scasw // find %ax, starting at [%edi], up to length %ecx - jne not_found - subl %edx, %edi - sar LITERAL(1), %edi - decl %edi // index = ((curr_ptr - orig_ptr) / 2) - 1 - mov %edi, %eax - POP edi // pop callee save reg - ret - .balign 16 -not_found: - mov LITERAL(-1), %eax // return -1 (not found) - POP edi // pop callee save reg - ret -clamp_min: - xor %edx, %edx // clamp start to 0 - jmp clamp_done -END_FUNCTION art_quick_indexof - - /* - * String's compareTo. 
- * - * On entry: - * eax: this string object (known non-null) - * ecx: comp string object (known non-null) - */ -DEFINE_FUNCTION art_quick_string_compareto - PUSH esi // push callee save reg - PUSH edi // push callee save reg - mov STRING_COUNT_OFFSET(%eax), %edx - mov STRING_COUNT_OFFSET(%ecx), %ebx - mov STRING_VALUE_OFFSET(%eax), %esi - mov STRING_VALUE_OFFSET(%ecx), %edi - mov STRING_OFFSET_OFFSET(%eax), %eax - mov STRING_OFFSET_OFFSET(%ecx), %ecx - /* Build pointers to the start of string data */ - lea STRING_DATA_OFFSET(%esi, %eax, 2), %esi - lea STRING_DATA_OFFSET(%edi, %ecx, 2), %edi - /* Calculate min length and count diff */ - mov %edx, %ecx - mov %edx, %eax - subl %ebx, %eax - cmovg %ebx, %ecx - /* - * At this point we have: - * eax: value to return if first part of strings are equal - * ecx: minimum among the lengths of the two strings - * esi: pointer to this string data - * edi: pointer to comp string data - */ - repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx - jne not_equal - POP edi // pop callee save reg - POP esi // pop callee save reg - ret - .balign 16 -not_equal: - movzwl -2(%esi), %eax // get last compared char from this string - movzwl -2(%edi), %ecx // get last compared char from comp string - subl %ecx, %eax // return the difference - POP edi // pop callee save reg - POP esi // pop callee save reg - ret -END_FUNCTION art_quick_string_compareto - -MACRO1(UNIMPLEMENTED,name) - .globl VAR(name, 0) - ALIGN_FUNCTION_ENTRY -VAR(name, 0): - int3 -END_MACRO - - // TODO: implement these! 
-UNIMPLEMENTED art_quick_memcmp16 diff --git a/runtime/oat/utils/arm/assembler_arm.cc b/runtime/oat/utils/arm/assembler_arm.cc deleted file mode 100644 index 960a60dfad..0000000000 --- a/runtime/oat/utils/arm/assembler_arm.cc +++ /dev/null @@ -1,1895 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "assembler_arm.h" - -#include "base/logging.h" -#include "oat/runtime/oat_support_entrypoints.h" -#include "offsets.h" -#include "thread.h" -#include "utils.h" - -namespace art { -namespace arm { - -// Instruction encoding bits. 
-enum { - H = 1 << 5, // halfword (or byte) - L = 1 << 20, // load (or store) - S = 1 << 20, // set condition code (or leave unchanged) - W = 1 << 21, // writeback base register (or leave unchanged) - A = 1 << 21, // accumulate in multiply instruction (or not) - B = 1 << 22, // unsigned byte (or word) - N = 1 << 22, // long (or short) - U = 1 << 23, // positive (or negative) offset/index - P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing) - I = 1 << 25, // immediate shifter operand (or not) - - B0 = 1, - B1 = 1 << 1, - B2 = 1 << 2, - B3 = 1 << 3, - B4 = 1 << 4, - B5 = 1 << 5, - B6 = 1 << 6, - B7 = 1 << 7, - B8 = 1 << 8, - B9 = 1 << 9, - B10 = 1 << 10, - B11 = 1 << 11, - B12 = 1 << 12, - B16 = 1 << 16, - B17 = 1 << 17, - B18 = 1 << 18, - B19 = 1 << 19, - B20 = 1 << 20, - B21 = 1 << 21, - B22 = 1 << 22, - B23 = 1 << 23, - B24 = 1 << 24, - B25 = 1 << 25, - B26 = 1 << 26, - B27 = 1 << 27, - - // Instruction bit masks. - RdMask = 15 << 12, // in str instruction - CondMask = 15 << 28, - CoprocessorMask = 15 << 8, - OpCodeMask = 15 << 21, // in data-processing instructions - Imm24Mask = (1 << 24) - 1, - Off12Mask = (1 << 12) - 1, - - // ldrex/strex register field encodings. 
- kLdExRnShift = 16, - kLdExRtShift = 12, - kStrExRnShift = 16, - kStrExRdShift = 12, - kStrExRtShift = 0, -}; - - -static const char* kRegisterNames[] = { - "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", - "fp", "ip", "sp", "lr", "pc" -}; -std::ostream& operator<<(std::ostream& os, const Register& rhs) { - if (rhs >= R0 && rhs <= PC) { - os << kRegisterNames[rhs]; - } else { - os << "Register[" << static_cast(rhs) << "]"; - } - return os; -} - - -std::ostream& operator<<(std::ostream& os, const SRegister& rhs) { - if (rhs >= S0 && rhs < kNumberOfSRegisters) { - os << "s" << static_cast(rhs); - } else { - os << "SRegister[" << static_cast(rhs) << "]"; - } - return os; -} - - -std::ostream& operator<<(std::ostream& os, const DRegister& rhs) { - if (rhs >= D0 && rhs < kNumberOfDRegisters) { - os << "d" << static_cast(rhs); - } else { - os << "DRegister[" << static_cast(rhs) << "]"; - } - return os; -} - - -static const char* kConditionNames[] = { - "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT", - "LE", "AL", -}; -std::ostream& operator<<(std::ostream& os, const Condition& rhs) { - if (rhs >= EQ && rhs <= AL) { - os << kConditionNames[rhs]; - } else { - os << "Condition[" << static_cast(rhs) << "]"; - } - return os; -} - -void ArmAssembler::Emit(int32_t value) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - buffer_.Emit(value); -} - - -void ArmAssembler::EmitType01(Condition cond, - int type, - Opcode opcode, - int set_cc, - Register rn, - Register rd, - ShifterOperand so) { - CHECK_NE(rd, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = static_cast(cond) << kConditionShift | - type << kTypeShift | - static_cast(opcode) << kOpcodeShift | - set_cc << kSShift | - static_cast(rn) << kRnShift | - static_cast(rd) << kRdShift | - so.encoding(); - Emit(encoding); -} - - -void ArmAssembler::EmitType5(Condition cond, int offset, bool link) { - CHECK_NE(cond, kNoCondition); - int32_t encoding = 
static_cast(cond) << kConditionShift | - 5 << kTypeShift | - (link ? 1 : 0) << kLinkShift; - Emit(ArmAssembler::EncodeBranchOffset(offset, encoding)); -} - - -void ArmAssembler::EmitMemOp(Condition cond, - bool load, - bool byte, - Register rd, - Address ad) { - CHECK_NE(rd, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B26 | - (load ? L : 0) | - (byte ? B : 0) | - (static_cast(rd) << kRdShift) | - ad.encoding(); - Emit(encoding); -} - - -void ArmAssembler::EmitMemOpAddressMode3(Condition cond, - int32_t mode, - Register rd, - Address ad) { - CHECK_NE(rd, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B22 | - mode | - (static_cast(rd) << kRdShift) | - ad.encoding3(); - Emit(encoding); -} - - -void ArmAssembler::EmitMultiMemOp(Condition cond, - BlockAddressMode am, - bool load, - Register base, - RegList regs) { - CHECK_NE(base, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | - am | - (load ? 
L : 0) | - (static_cast(base) << kRnShift) | - regs; - Emit(encoding); -} - - -void ArmAssembler::EmitShiftImmediate(Condition cond, - Shift opcode, - Register rd, - Register rm, - ShifterOperand so) { - CHECK_NE(cond, kNoCondition); - CHECK_EQ(so.type(), 1U); - int32_t encoding = static_cast(cond) << kConditionShift | - static_cast(MOV) << kOpcodeShift | - static_cast(rd) << kRdShift | - so.encoding() << kShiftImmShift | - static_cast(opcode) << kShiftShift | - static_cast(rm); - Emit(encoding); -} - - -void ArmAssembler::EmitShiftRegister(Condition cond, - Shift opcode, - Register rd, - Register rm, - ShifterOperand so) { - CHECK_NE(cond, kNoCondition); - CHECK_EQ(so.type(), 0U); - int32_t encoding = static_cast(cond) << kConditionShift | - static_cast(MOV) << kOpcodeShift | - static_cast(rd) << kRdShift | - so.encoding() << kShiftRegisterShift | - static_cast(opcode) << kShiftShift | - B4 | - static_cast(rm); - Emit(encoding); -} - - -void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) { - if (label->IsBound()) { - EmitType5(cond, label->Position() - buffer_.Size(), link); - } else { - int position = buffer_.Size(); - // Use the offset field of the branch instruction for linking the sites. 
- EmitType5(cond, label->position_, link); - label->LinkTo(position); - } -} - -void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), AND, 0, rn, rd, so); -} - - -void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), EOR, 0, rn, rd, so); -} - - -void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), SUB, 0, rn, rd, so); -} - -void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), RSB, 0, rn, rd, so); -} - -void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), RSB, 1, rn, rd, so); -} - - -void ArmAssembler::add(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), ADD, 0, rn, rd, so); -} - - -void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), ADD, 1, rn, rd, so); -} - - -void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), SUB, 1, rn, rd, so); -} - - -void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), ADC, 0, rn, rd, so); -} - - -void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), SBC, 0, rn, rd, so); -} - - -void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), RSC, 0, rn, rd, so); -} - - -void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) { - CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker. 
- EmitType01(cond, so.type(), TST, 1, rn, R0, so); -} - - -void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) { - CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker. - EmitType01(cond, so.type(), TEQ, 1, rn, R0, so); -} - - -void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), CMP, 1, rn, R0, so); -} - - -void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), CMN, 1, rn, R0, so); -} - - -void ArmAssembler::orr(Register rd, Register rn, - ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), ORR, 0, rn, rd, so); -} - - -void ArmAssembler::orrs(Register rd, Register rn, - ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), ORR, 1, rn, rd, so); -} - - -void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), MOV, 0, R0, rd, so); -} - - -void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), MOV, 1, R0, rd, so); -} - - -void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so, - Condition cond) { - EmitType01(cond, so.type(), BIC, 0, rn, rd, so); -} - - -void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), MVN, 0, R0, rd, so); -} - - -void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) { - EmitType01(cond, so.type(), MVN, 1, R0, rd, so); -} - - -void ArmAssembler::clz(Register rd, Register rm, Condition cond) { - CHECK_NE(rd, kNoRegister); - CHECK_NE(rm, kNoRegister); - CHECK_NE(cond, kNoCondition); - CHECK_NE(rd, PC); - CHECK_NE(rm, PC); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B24 | B22 | B21 | (0xf << 16) | - (static_cast(rd) << kRdShift) | - (0xf << 8) | B4 | static_cast(rm); - Emit(encoding); -} - - -void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) { - CHECK_NE(cond, kNoCondition); 
- int32_t encoding = static_cast(cond) << kConditionShift | - B25 | B24 | ((imm16 >> 12) << 16) | - static_cast(rd) << kRdShift | (imm16 & 0xfff); - Emit(encoding); -} - - -void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) { - CHECK_NE(cond, kNoCondition); - int32_t encoding = static_cast(cond) << kConditionShift | - B25 | B24 | B22 | ((imm16 >> 12) << 16) | - static_cast(rd) << kRdShift | (imm16 & 0xfff); - Emit(encoding); -} - - -void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode, - Register rd, Register rn, - Register rm, Register rs) { - CHECK_NE(rd, kNoRegister); - CHECK_NE(rn, kNoRegister); - CHECK_NE(rm, kNoRegister); - CHECK_NE(rs, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = opcode | - (static_cast(cond) << kConditionShift) | - (static_cast(rn) << kRnShift) | - (static_cast(rd) << kRdShift) | - (static_cast(rs) << kRsShift) | - B7 | B4 | - (static_cast(rm) << kRmShift); - Emit(encoding); -} - - -void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) { - // Assembler registers rd, rn, rm are encoded as rn, rm, rs. - EmitMulOp(cond, 0, R0, rd, rn, rm); -} - - -void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra, - Condition cond) { - // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. - EmitMulOp(cond, B21, ra, rd, rn, rm); -} - - -void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra, - Condition cond) { - // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. - EmitMulOp(cond, B22 | B21, ra, rd, rn, rm); -} - - -void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn, - Register rm, Condition cond) { - // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. 
- EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm); -} - - -void ArmAssembler::ldr(Register rd, Address ad, Condition cond) { - EmitMemOp(cond, true, false, rd, ad); -} - - -void ArmAssembler::str(Register rd, Address ad, Condition cond) { - EmitMemOp(cond, false, false, rd, ad); -} - - -void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) { - EmitMemOp(cond, true, true, rd, ad); -} - - -void ArmAssembler::strb(Register rd, Address ad, Condition cond) { - EmitMemOp(cond, false, true, rd, ad); -} - - -void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) { - EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad); -} - - -void ArmAssembler::strh(Register rd, Address ad, Condition cond) { - EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad); -} - - -void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) { - EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad); -} - - -void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) { - EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad); -} - - -void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) { - CHECK_EQ(rd % 2, 0); - EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad); -} - - -void ArmAssembler::strd(Register rd, Address ad, Condition cond) { - CHECK_EQ(rd % 2, 0); - EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad); -} - - -void ArmAssembler::ldm(BlockAddressMode am, - Register base, - RegList regs, - Condition cond) { - EmitMultiMemOp(cond, am, true, base, regs); -} - - -void ArmAssembler::stm(BlockAddressMode am, - Register base, - RegList regs, - Condition cond) { - EmitMultiMemOp(cond, am, false, base, regs); -} - - -void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) { - CHECK_NE(rn, kNoRegister); - CHECK_NE(rt, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B24 | - B23 | - L | - (static_cast(rn) << kLdExRnShift) | - (static_cast(rt) << kLdExRtShift) | - B11 | B10 | B9 | B8 | B7 | 
B4 | B3 | B2 | B1 | B0; - Emit(encoding); -} - - -void ArmAssembler::strex(Register rd, - Register rt, - Register rn, - Condition cond) { - CHECK_NE(rn, kNoRegister); - CHECK_NE(rd, kNoRegister); - CHECK_NE(rt, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B24 | - B23 | - (static_cast(rn) << kStrExRnShift) | - (static_cast(rd) << kStrExRdShift) | - B11 | B10 | B9 | B8 | B7 | B4 | - (static_cast(rt) << kStrExRtShift); - Emit(encoding); -} - - -void ArmAssembler::clrex() { - int32_t encoding = (kSpecialCondition << kConditionShift) | - B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf; - Emit(encoding); -} - - -void ArmAssembler::nop(Condition cond) { - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B25 | B24 | B21 | (0xf << 12); - Emit(encoding); -} - - -void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) { - CHECK_NE(sn, kNoSRegister); - CHECK_NE(rt, kNoRegister); - CHECK_NE(rt, SP); - CHECK_NE(rt, PC); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B25 | - ((static_cast(sn) >> 1)*B16) | - (static_cast(rt)*B12) | B11 | B9 | - ((static_cast(sn) & 1)*B7) | B4; - Emit(encoding); -} - - -void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) { - CHECK_NE(sn, kNoSRegister); - CHECK_NE(rt, kNoRegister); - CHECK_NE(rt, SP); - CHECK_NE(rt, PC); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B25 | B20 | - ((static_cast(sn) >> 1)*B16) | - (static_cast(rt)*B12) | B11 | B9 | - ((static_cast(sn) & 1)*B7) | B4; - Emit(encoding); -} - - -void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2, - Condition cond) { - CHECK_NE(sm, kNoSRegister); - CHECK_NE(sm, S31); - CHECK_NE(rt, kNoRegister); - CHECK_NE(rt, SP); - CHECK_NE(rt, PC); - CHECK_NE(rt2, kNoRegister); - CHECK_NE(rt2, SP); - CHECK_NE(rt2, PC); - 
CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B22 | - (static_cast(rt2)*B16) | - (static_cast(rt)*B12) | B11 | B9 | - ((static_cast(sm) & 1)*B5) | B4 | - (static_cast(sm) >> 1); - Emit(encoding); -} - - -void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm, - Condition cond) { - CHECK_NE(sm, kNoSRegister); - CHECK_NE(sm, S31); - CHECK_NE(rt, kNoRegister); - CHECK_NE(rt, SP); - CHECK_NE(rt, PC); - CHECK_NE(rt2, kNoRegister); - CHECK_NE(rt2, SP); - CHECK_NE(rt2, PC); - CHECK_NE(rt, rt2); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B22 | B20 | - (static_cast(rt2)*B16) | - (static_cast(rt)*B12) | B11 | B9 | - ((static_cast(sm) & 1)*B5) | B4 | - (static_cast(sm) >> 1); - Emit(encoding); -} - - -void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2, - Condition cond) { - CHECK_NE(dm, kNoDRegister); - CHECK_NE(rt, kNoRegister); - CHECK_NE(rt, SP); - CHECK_NE(rt, PC); - CHECK_NE(rt2, kNoRegister); - CHECK_NE(rt2, SP); - CHECK_NE(rt2, PC); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B22 | - (static_cast(rt2)*B16) | - (static_cast(rt)*B12) | B11 | B9 | B8 | - ((static_cast(dm) >> 4)*B5) | B4 | - (static_cast(dm) & 0xf); - Emit(encoding); -} - - -void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm, - Condition cond) { - CHECK_NE(dm, kNoDRegister); - CHECK_NE(rt, kNoRegister); - CHECK_NE(rt, SP); - CHECK_NE(rt, PC); - CHECK_NE(rt2, kNoRegister); - CHECK_NE(rt2, SP); - CHECK_NE(rt2, PC); - CHECK_NE(rt, rt2); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B22 | B20 | - (static_cast(rt2)*B16) | - (static_cast(rt)*B12) | B11 | B9 | B8 | - ((static_cast(dm) >> 4)*B5) | B4 | - (static_cast(dm) & 0xf); - Emit(encoding); -} - - -void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) { - 
CHECK_NE(sd, kNoSRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B24 | B20 | - ((static_cast(sd) & 1)*B22) | - ((static_cast(sd) >> 1)*B12) | - B11 | B9 | ad.vencoding(); - Emit(encoding); -} - - -void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) { - CHECK_NE(static_cast(ad.encoding_ & (0xf << kRnShift)), PC); - CHECK_NE(sd, kNoSRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B24 | - ((static_cast(sd) & 1)*B22) | - ((static_cast(sd) >> 1)*B12) | - B11 | B9 | ad.vencoding(); - Emit(encoding); -} - - -void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) { - CHECK_NE(dd, kNoDRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B24 | B20 | - ((static_cast(dd) >> 4)*B22) | - ((static_cast(dd) & 0xf)*B12) | - B11 | B9 | B8 | ad.vencoding(); - Emit(encoding); -} - - -void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) { - CHECK_NE(static_cast(ad.encoding_ & (0xf << kRnShift)), PC); - CHECK_NE(dd, kNoDRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B24 | - ((static_cast(dd) >> 4)*B22) | - ((static_cast(dd) & 0xf)*B12) | - B11 | B9 | B8 | ad.vencoding(); - Emit(encoding); -} - - -void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode, - SRegister sd, SRegister sn, SRegister sm) { - CHECK_NE(sd, kNoSRegister); - CHECK_NE(sn, kNoSRegister); - CHECK_NE(sm, kNoSRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B25 | B11 | B9 | opcode | - ((static_cast(sd) & 1)*B22) | - ((static_cast(sn) >> 1)*B16) | - ((static_cast(sd) >> 1)*B12) | - ((static_cast(sn) & 1)*B7) | - ((static_cast(sm) & 1)*B5) | - (static_cast(sm) >> 1); - Emit(encoding); -} - - -void ArmAssembler::EmitVFPddd(Condition 
cond, int32_t opcode, - DRegister dd, DRegister dn, DRegister dm) { - CHECK_NE(dd, kNoDRegister); - CHECK_NE(dn, kNoDRegister); - CHECK_NE(dm, kNoDRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B25 | B11 | B9 | B8 | opcode | - ((static_cast(dd) >> 4)*B22) | - ((static_cast(dn) & 0xf)*B16) | - ((static_cast(dd) & 0xf)*B12) | - ((static_cast(dn) >> 4)*B7) | - ((static_cast(dm) >> 4)*B5) | - (static_cast(dm) & 0xf); - Emit(encoding); -} - - -void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm); -} - - -void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) { - EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm); -} - - -bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) { - uint32_t imm32 = bit_cast(s_imm); - if (((imm32 & ((1 << 19) - 1)) == 0) && - ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) || - (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) { - uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) | - ((imm32 >> 19) & ((1 << 6) -1)); - EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf), - sd, S0, S0); - return true; - } - return false; -} - - -bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) { - uint64_t imm64 = bit_cast(d_imm); - if (((imm64 & ((1LL << 48) - 1)) == 0) && - ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) || - (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) { - uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) | - ((imm64 >> 48) & ((1 << 6) -1)); - EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf), - dd, D0, D0); - return true; - } - return false; -} - - -void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm, - Condition cond) { - EmitVFPsss(cond, B21 | B20, sd, sn, sm); -} - - -void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm, - Condition cond) { - 
EmitVFPddd(cond, B21 | B20, dd, dn, dm); -} - - -void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm, - Condition cond) { - EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm); -} - - -void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm, - Condition cond) { - EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm); -} - - -void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm, - Condition cond) { - EmitVFPsss(cond, B21, sd, sn, sm); -} - - -void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm, - Condition cond) { - EmitVFPddd(cond, B21, dd, dn, dm); -} - - -void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm, - Condition cond) { - EmitVFPsss(cond, 0, sd, sn, sm); -} - - -void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm, - Condition cond) { - EmitVFPddd(cond, 0, dd, dn, dm); -} - - -void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm, - Condition cond) { - EmitVFPsss(cond, B6, sd, sn, sm); -} - - -void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm, - Condition cond) { - EmitVFPddd(cond, B6, dd, dn, dm); -} - - -void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm, - Condition cond) { - EmitVFPsss(cond, B23, sd, sn, sm); -} - - -void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm, - Condition cond) { - EmitVFPddd(cond, B23, dd, dn, dm); -} - - -void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm); -} - - -void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) { - EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm); -} - - -void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm); -} - - -void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) { - EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm); -} - - -void ArmAssembler::vsqrts(SRegister sd, SRegister sm, 
Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm); -} - -void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) { - EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm); -} - - -void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode, - SRegister sd, DRegister dm) { - CHECK_NE(sd, kNoSRegister); - CHECK_NE(dm, kNoDRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B25 | B11 | B9 | opcode | - ((static_cast(sd) & 1)*B22) | - ((static_cast(sd) >> 1)*B12) | - ((static_cast(dm) >> 4)*B5) | - (static_cast(dm) & 0xf); - Emit(encoding); -} - - -void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode, - DRegister dd, SRegister sm) { - CHECK_NE(dd, kNoDRegister); - CHECK_NE(sm, kNoSRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B25 | B11 | B9 | opcode | - ((static_cast(dd) >> 4)*B22) | - ((static_cast(dd) & 0xf)*B12) | - ((static_cast(sm) & 1)*B5) | - (static_cast(sm) >> 1); - Emit(encoding); -} - - -void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) { - EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm); -} - - -void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) { - EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm); -} - - -void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm); -} - - -void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) { - EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm); -} - - -void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm); -} - - -void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) { - EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | 
B6, dd, sm); -} - - -void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm); -} - - -void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) { - EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm); -} - - -void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm); -} - - -void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) { - EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm); -} - - -void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm); -} - - -void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) { - EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm); -} - - -void ArmAssembler::vcmpsz(SRegister sd, Condition cond) { - EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0); -} - - -void ArmAssembler::vcmpdz(DRegister dd, Condition cond) { - EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0); -} - - -void ArmAssembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 | - (static_cast(PC)*B12) | - B11 | B9 | B4; - Emit(encoding); -} - - -void ArmAssembler::svc(uint32_t imm24) { - CHECK(IsUint(24, imm24)) << imm24; - int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24; - Emit(encoding); -} - - -void ArmAssembler::bkpt(uint16_t imm16) { - int32_t encoding = (AL << kConditionShift) | B24 | B21 | - ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf); - Emit(encoding); -} - - -void ArmAssembler::b(Label* label, Condition cond) { - EmitBranch(cond, label, false); -} - - -void ArmAssembler::bl(Label* label, Condition cond) { - EmitBranch(cond, label, true); -} - - -void 
ArmAssembler::blx(Register rm, Condition cond) { - CHECK_NE(rm, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B24 | B21 | (0xfff << 8) | B5 | B4 | - (static_cast(rm) << kRmShift); - Emit(encoding); -} - -void ArmAssembler::bx(Register rm, Condition cond) { - CHECK_NE(rm, kNoRegister); - CHECK_NE(cond, kNoCondition); - int32_t encoding = (static_cast(cond) << kConditionShift) | - B24 | B21 | (0xfff << 8) | B4 | - (static_cast(rm) << kRmShift); - Emit(encoding); -} - -void ArmAssembler::MarkExceptionHandler(Label* label) { - EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0)); - Label l; - b(&l); - EmitBranch(AL, label, false); - Bind(&l); -} - - -void ArmAssembler::Bind(Label* label) { - CHECK(!label->IsBound()); - int bound_pc = buffer_.Size(); - while (label->IsLinked()) { - int32_t position = label->Position(); - int32_t next = buffer_.Load(position); - int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next); - buffer_.Store(position, encoded); - label->position_ = ArmAssembler::DecodeBranchOffset(next); - } - label->BindTo(bound_pc); -} - - -void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) { - // TODO: Consider using movw ip, <16 bits>. - while (!IsUint(8, data)) { - tst(R0, ShifterOperand(data & 0xFF), VS); - data >>= 8; - } - tst(R0, ShifterOperand(data), MI); -} - - -int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) { - // The offset is off by 8 due to the way the ARM CPUs read PC. - offset -= 8; - CHECK_ALIGNED(offset, 4); - CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset; - - // Properly preserve only the bits supported in the instruction. - offset >>= 2; - offset &= kBranchOffsetMask; - return (inst & ~kBranchOffsetMask) | offset; -} - - -int ArmAssembler::DecodeBranchOffset(int32_t inst) { - // Sign-extend, left-shift by 2, then add 8. 
- return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8); -} - -void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) { - AddConstant(rd, rd, value, cond); -} - - -void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value, - Condition cond) { - if (value == 0) { - if (rd != rn) { - mov(rd, ShifterOperand(rn), cond); - } - return; - } - // We prefer to select the shorter code sequence rather than selecting add for - // positive values and sub for negatives ones, which would slightly improve - // the readability of generated code for some constants. - ShifterOperand shifter_op; - if (ShifterOperand::CanHold(value, &shifter_op)) { - add(rd, rn, shifter_op, cond); - } else if (ShifterOperand::CanHold(-value, &shifter_op)) { - sub(rd, rn, shifter_op, cond); - } else { - CHECK(rn != IP); - if (ShifterOperand::CanHold(~value, &shifter_op)) { - mvn(IP, shifter_op, cond); - add(rd, rn, ShifterOperand(IP), cond); - } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) { - mvn(IP, shifter_op, cond); - sub(rd, rn, ShifterOperand(IP), cond); - } else { - movw(IP, Low16Bits(value), cond); - uint16_t value_high = High16Bits(value); - if (value_high != 0) { - movt(IP, value_high, cond); - } - add(rd, rn, ShifterOperand(IP), cond); - } - } -} - - -void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value, - Condition cond) { - ShifterOperand shifter_op; - if (ShifterOperand::CanHold(value, &shifter_op)) { - adds(rd, rn, shifter_op, cond); - } else if (ShifterOperand::CanHold(-value, &shifter_op)) { - subs(rd, rn, shifter_op, cond); - } else { - CHECK(rn != IP); - if (ShifterOperand::CanHold(~value, &shifter_op)) { - mvn(IP, shifter_op, cond); - adds(rd, rn, ShifterOperand(IP), cond); - } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) { - mvn(IP, shifter_op, cond); - subs(rd, rn, ShifterOperand(IP), cond); - } else { - movw(IP, Low16Bits(value), cond); - uint16_t value_high = High16Bits(value); - if (value_high 
!= 0) { - movt(IP, value_high, cond); - } - adds(rd, rn, ShifterOperand(IP), cond); - } - } -} - - -void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) { - ShifterOperand shifter_op; - if (ShifterOperand::CanHold(value, &shifter_op)) { - mov(rd, shifter_op, cond); - } else if (ShifterOperand::CanHold(~value, &shifter_op)) { - mvn(rd, shifter_op, cond); - } else { - movw(rd, Low16Bits(value), cond); - uint16_t value_high = High16Bits(value); - if (value_high != 0) { - movt(rd, value_high, cond); - } - } -} - - -bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) { - switch (type) { - case kLoadSignedByte: - case kLoadSignedHalfword: - case kLoadUnsignedHalfword: - case kLoadWordPair: - return IsAbsoluteUint(8, offset); // Addressing mode 3. - case kLoadUnsignedByte: - case kLoadWord: - return IsAbsoluteUint(12, offset); // Addressing mode 2. - case kLoadSWord: - case kLoadDWord: - return IsAbsoluteUint(10, offset); // VFP addressing mode. - default: - LOG(FATAL) << "UNREACHABLE"; - return false; - } -} - - -bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) { - switch (type) { - case kStoreHalfword: - case kStoreWordPair: - return IsAbsoluteUint(8, offset); // Addressing mode 3. - case kStoreByte: - case kStoreWord: - return IsAbsoluteUint(12, offset); // Addressing mode 2. - case kStoreSWord: - case kStoreDWord: - return IsAbsoluteUint(10, offset); // VFP addressing mode. - default: - LOG(FATAL) << "UNREACHABLE"; - return false; - } -} - - -// Implementation note: this method must emit at most one instruction when -// Address::CanHoldLoadOffset. 
-void ArmAssembler::LoadFromOffset(LoadOperandType type, - Register reg, - Register base, - int32_t offset, - Condition cond) { - if (!Address::CanHoldLoadOffset(type, offset)) { - CHECK(base != IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); - base = IP; - offset = 0; - } - CHECK(Address::CanHoldLoadOffset(type, offset)); - switch (type) { - case kLoadSignedByte: - ldrsb(reg, Address(base, offset), cond); - break; - case kLoadUnsignedByte: - ldrb(reg, Address(base, offset), cond); - break; - case kLoadSignedHalfword: - ldrsh(reg, Address(base, offset), cond); - break; - case kLoadUnsignedHalfword: - ldrh(reg, Address(base, offset), cond); - break; - case kLoadWord: - ldr(reg, Address(base, offset), cond); - break; - case kLoadWordPair: - ldrd(reg, Address(base, offset), cond); - break; - default: - LOG(FATAL) << "UNREACHABLE"; - } -} - -// Implementation note: this method must emit at most one instruction when -// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset. -void ArmAssembler::LoadSFromOffset(SRegister reg, - Register base, - int32_t offset, - Condition cond) { - if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) { - CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); - base = IP; - offset = 0; - } - CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset)); - vldrs(reg, Address(base, offset), cond); -} - -// Implementation note: this method must emit at most one instruction when -// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset. 
-void ArmAssembler::LoadDFromOffset(DRegister reg, - Register base, - int32_t offset, - Condition cond) { - if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) { - CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); - base = IP; - offset = 0; - } - CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset)); - vldrd(reg, Address(base, offset), cond); -} - -// Implementation note: this method must emit at most one instruction when -// Address::CanHoldStoreOffset. -void ArmAssembler::StoreToOffset(StoreOperandType type, - Register reg, - Register base, - int32_t offset, - Condition cond) { - if (!Address::CanHoldStoreOffset(type, offset)) { - CHECK(reg != IP); - CHECK(base != IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); - base = IP; - offset = 0; - } - CHECK(Address::CanHoldStoreOffset(type, offset)); - switch (type) { - case kStoreByte: - strb(reg, Address(base, offset), cond); - break; - case kStoreHalfword: - strh(reg, Address(base, offset), cond); - break; - case kStoreWord: - str(reg, Address(base, offset), cond); - break; - case kStoreWordPair: - strd(reg, Address(base, offset), cond); - break; - default: - LOG(FATAL) << "UNREACHABLE"; - } -} - -// Implementation note: this method must emit at most one instruction when -// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset. -void ArmAssembler::StoreSToOffset(SRegister reg, - Register base, - int32_t offset, - Condition cond) { - if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) { - CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); - base = IP; - offset = 0; - } - CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset)); - vstrs(reg, Address(base, offset), cond); -} - -// Implementation note: this method must emit at most one instruction when -// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset. 
-void ArmAssembler::StoreDToOffset(DRegister reg, - Register base, - int32_t offset, - Condition cond) { - if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) { - CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); - base = IP; - offset = 0; - } - CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset)); - vstrd(reg, Address(base, offset), cond); -} - -void ArmAssembler::Push(Register rd, Condition cond) { - str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond); -} - -void ArmAssembler::Pop(Register rd, Condition cond) { - ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond); -} - -void ArmAssembler::PushList(RegList regs, Condition cond) { - stm(DB_W, SP, regs, cond); -} - -void ArmAssembler::PopList(RegList regs, Condition cond) { - ldm(IA_W, SP, regs, cond); -} - -void ArmAssembler::Mov(Register rd, Register rm, Condition cond) { - if (rd != rm) { - mov(rd, ShifterOperand(rm), cond); - } -} - -void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm, - Condition cond) { - CHECK_NE(shift_imm, 0u); // Do not use Lsl if no shift is wanted. - mov(rd, ShifterOperand(rm, LSL, shift_imm), cond); -} - -void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm, - Condition cond) { - CHECK_NE(shift_imm, 0u); // Do not use Lsr if no shift is wanted. - if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax. - mov(rd, ShifterOperand(rm, LSR, shift_imm), cond); -} - -void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm, - Condition cond) { - CHECK_NE(shift_imm, 0u); // Do not use Asr if no shift is wanted. - if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax. - mov(rd, ShifterOperand(rm, ASR, shift_imm), cond); -} - -void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm, - Condition cond) { - CHECK_NE(shift_imm, 0u); // Use Rrx instruction. 
- mov(rd, ShifterOperand(rm, ROR, shift_imm), cond); -} - -void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) { - mov(rd, ShifterOperand(rm, ROR, 0), cond); -} - -void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, - const std::vector& callee_save_regs, - const std::vector& entry_spills) { - CHECK_ALIGNED(frame_size, kStackAlignment); - CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister()); - - // Push callee saves and link register. - RegList push_list = 1 << LR; - size_t pushed_values = 1; - for (size_t i = 0; i < callee_save_regs.size(); i++) { - Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister(); - push_list |= 1 << reg; - pushed_values++; - } - PushList(push_list); - - // Increase frame to required size. - CHECK_GT(frame_size, pushed_values * kPointerSize); // Must be at least space to push Method* - size_t adjust = frame_size - (pushed_values * kPointerSize); - IncreaseFrameSize(adjust); - - // Write out Method*. - StoreToOffset(kStoreWord, R0, SP, 0); - - // Write out entry spills. 
- for (size_t i = 0; i < entry_spills.size(); ++i) { - Register reg = entry_spills.at(i).AsArm().AsCoreRegister(); - StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize)); - } -} - -void ArmAssembler::RemoveFrame(size_t frame_size, - const std::vector& callee_save_regs) { - CHECK_ALIGNED(frame_size, kStackAlignment); - // Compute callee saves to pop and PC - RegList pop_list = 1 << PC; - size_t pop_values = 1; - for (size_t i = 0; i < callee_save_regs.size(); i++) { - Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister(); - pop_list |= 1 << reg; - pop_values++; - } - - // Decrease frame to start of callee saves - CHECK_GT(frame_size, pop_values * kPointerSize); - size_t adjust = frame_size - (pop_values * kPointerSize); - DecreaseFrameSize(adjust); - - // Pop callee saves and PC - PopList(pop_list); -} - -void ArmAssembler::IncreaseFrameSize(size_t adjust) { - AddConstant(SP, -adjust); -} - -void ArmAssembler::DecreaseFrameSize(size_t adjust) { - AddConstant(SP, adjust); -} - -void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) { - ArmManagedRegister src = msrc.AsArm(); - if (src.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (src.IsCoreRegister()) { - CHECK_EQ(4u, size); - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); - } else if (src.IsRegisterPair()) { - CHECK_EQ(8u, size); - StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value()); - StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), - SP, dest.Int32Value() + 4); - } else if (src.IsSRegister()) { - StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value()); - } else { - CHECK(src.IsDRegister()) << src; - StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value()); - } -} - -void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { - ArmManagedRegister src = msrc.AsArm(); - CHECK(src.IsCoreRegister()) << src; - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); -} - -void 
ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { - ArmManagedRegister src = msrc.AsArm(); - CHECK(src.IsCoreRegister()) << src; - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); -} - -void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc, - FrameOffset in_off, ManagedRegister mscratch) { - ArmManagedRegister src = msrc.AsArm(); - ArmManagedRegister scratch = mscratch.AsArm(); - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); -} - -void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); -} - -void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs) { - ArmManagedRegister dst = mdest.AsArm(); - CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst; - LoadFromOffset(kLoadWord, dst.AsCoreRegister(), - base.AsArm().AsCoreRegister(), offs.Int32Value()); -} - -void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { - ArmManagedRegister dst = mdest.AsArm(); - CHECK(dst.IsCoreRegister()) << dst; - LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value()); -} - -void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, - Offset offs) { - ArmManagedRegister dst = mdest.AsArm(); - CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst; - LoadFromOffset(kLoadWord, dst.AsCoreRegister(), - base.AsArm().AsCoreRegister(), offs.Int32Value()); -} - -void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - 
CHECK(scratch.IsCoreRegister()) << scratch; - LoadImmediate(scratch.AsCoreRegister(), imm); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); -} - -void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadImmediate(scratch.AsCoreRegister(), imm); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value()); -} - -static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst, - Register src_register, int32_t src_offset, size_t size) { - ArmManagedRegister dst = m_dst.AsArm(); - if (dst.IsNoRegister()) { - CHECK_EQ(0u, size) << dst; - } else if (dst.IsCoreRegister()) { - CHECK_EQ(4u, size) << dst; - assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset); - } else if (dst.IsRegisterPair()) { - CHECK_EQ(8u, size) << dst; - assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset); - assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4); - } else if (dst.IsSRegister()) { - assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset); - } else { - CHECK(dst.IsDRegister()) << dst; - assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset); - } -} - -void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) { - return EmitLoad(this, m_dst, SP, src.Int32Value(), size); -} - -void ArmAssembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) { - return EmitLoad(this, m_dst, TR, src.Int32Value(), size); -} - -void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) { - ArmManagedRegister dst = m_dst.AsArm(); - CHECK(dst.IsCoreRegister()) << dst; - LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value()); -} - -void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, - ThreadOffset 
thr_offs, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - TR, thr_offs.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), - SP, fr_offs.Int32Value()); -} - -void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - SP, fr_offs.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), - TR, thr_offs.Int32Value()); -} - -void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), - TR, thr_offs.Int32Value()); -} - -void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) { - StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value()); -} - -void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm"; -} - -void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm"; -} - -void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) { - ArmManagedRegister dst = m_dst.AsArm(); - ArmManagedRegister src = m_src.AsArm(); - if (!dst.Equals(src)) { - if (dst.IsCoreRegister()) { - CHECK(src.IsCoreRegister()) << src; - mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister())); - } else if (dst.IsDRegister()) { - CHECK(src.IsDRegister()) << src; - vmovd(dst.AsDRegister(), src.AsDRegister()); - } else if (dst.IsSRegister()) { - 
CHECK(src.IsSRegister()) << src; - vmovs(dst.AsSRegister(), src.AsSRegister()); - } else { - CHECK(dst.IsRegisterPair()) << dst; - CHECK(src.IsRegisterPair()) << src; - // Ensure that the first move doesn't clobber the input of the second - if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) { - mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow())); - mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh())); - } else { - mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh())); - mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow())); - } - } - } -} - -void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - CHECK(size == 4 || size == 8) << size; - if (size == 4) { - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); - } else if (size == 8) { - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); - } -} - -void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsArm().AsCoreRegister(); - CHECK_EQ(size, 4u); - LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value()); - StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value()); -} - -void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsArm().AsCoreRegister(); - CHECK_EQ(size, 4u); - LoadFromOffset(kLoadWord, 
scratch, SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value()); -} - -void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/, - ManagedRegister /*mscratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL); -} - -void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister mscratch, size_t size) { - CHECK_EQ(size, 4u); - Register scratch = mscratch.AsArm().AsCoreRegister(); - LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value()); - StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value()); -} - -void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/, - ManagedRegister /*scratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL); -} - - -void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) { - CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12); -#if ANDROID_SMP != 0 -#if defined(__ARM_HAVE_DMB) - int32_t encoding = 0xf57ff05f; // dmb - Emit(encoding); -#elif defined(__ARM_HAVE_LDREX_STREX) - LoadImmediate(R12, 0); - int32_t encoding = 0xee07cfba; // mcr p15, 0, r12, c7, c10, 5 - Emit(encoding); -#else - LoadImmediate(R12, 0xffff0fa0); // kuser_memory_barrier - blx(R12); -#endif -#endif -} - -void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg, - FrameOffset sirt_offset, - ManagedRegister min_reg, bool null_allowed) { - ArmManagedRegister out_reg = mout_reg.AsArm(); - ArmManagedRegister in_reg = min_reg.AsArm(); - CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg; - CHECK(out_reg.IsCoreRegister()) << out_reg; - if (null_allowed) { - // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is - // the address in the SIRT holding the reference. - // e.g. out_reg = (handle == 0) ? 
0 : (SP+handle_offset) - if (in_reg.IsNoRegister()) { - LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), - SP, sirt_offset.Int32Value()); - in_reg = out_reg; - } - cmp(in_reg.AsCoreRegister(), ShifterOperand(0)); - if (!out_reg.Equals(in_reg)) { - LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); - } - AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE); - } else { - AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL); - } -} - -void ArmAssembler::CreateSirtEntry(FrameOffset out_off, - FrameOffset sirt_offset, - ManagedRegister mscratch, - bool null_allowed) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - if (null_allowed) { - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, - sirt_offset.Int32Value()); - // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is - // the address in the SIRT holding the reference. - // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset) - cmp(scratch.AsCoreRegister(), ShifterOperand(0)); - AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE); - } else { - AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL); - } - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value()); -} - -void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg, - ManagedRegister min_reg) { - ArmManagedRegister out_reg = mout_reg.AsArm(); - ArmManagedRegister in_reg = min_reg.AsArm(); - CHECK(out_reg.IsCoreRegister()) << out_reg; - CHECK(in_reg.IsCoreRegister()) << in_reg; - Label null_arg; - if (!out_reg.Equals(in_reg)) { - LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); - } - cmp(in_reg.AsCoreRegister(), ShifterOperand(0)); - LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), - in_reg.AsCoreRegister(), 0, NE); -} - -void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void 
ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void ArmAssembler::Call(ManagedRegister mbase, Offset offset, - ManagedRegister mscratch) { - ArmManagedRegister base = mbase.AsArm(); - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(base.IsCoreRegister()) << base; - CHECK(scratch.IsCoreRegister()) << scratch; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - base.AsCoreRegister(), offset.Int32Value()); - blx(scratch.AsCoreRegister()); - // TODO: place reference map on call -} - -void ArmAssembler::Call(FrameOffset base, Offset offset, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - // Call *(*(SP + base) + offset) - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - SP, base.Int32Value()); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - scratch.AsCoreRegister(), offset.Int32Value()); - blx(scratch.AsCoreRegister()); - // TODO: place reference map on call -} - -void ArmAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) { - UNIMPLEMENTED(FATAL); -} - -void ArmAssembler::GetCurrentThread(ManagedRegister tr) { - mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR)); -} - -void ArmAssembler::GetCurrentThread(FrameOffset offset, - ManagedRegister /*scratch*/) { - StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL); -} - -void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) { - ArmManagedRegister scratch = mscratch.AsArm(); - ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust); - buffer_.EnqueueSlowPath(slow); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - TR, Thread::ExceptionOffset().Int32Value()); - cmp(scratch.AsCoreRegister(), ShifterOperand(0)); - b(slow->Entry(), NE); -} - -void ArmExceptionSlowPath::Emit(Assembler* sasm) { - ArmAssembler* sp_asm = down_cast(sasm); -#define __ sp_asm-> - __ Bind(&entry_); - if 
(stack_adjust_ != 0) { // Fix up the frame. - __ DecreaseFrameSize(stack_adjust_); - } - // Pass exception object as argument - // Don't care about preserving R0 as this call won't return - __ mov(R0, ShifterOperand(scratch_.AsCoreRegister())); - // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException)); - __ blx(R12); - // Call never returns - __ bkpt(0); -#undef __ -} - -} // namespace arm -} // namespace art diff --git a/runtime/oat/utils/arm/assembler_arm.h b/runtime/oat/utils/arm/assembler_arm.h deleted file mode 100644 index b8c79d21b9..0000000000 --- a/runtime/oat/utils/arm/assembler_arm.h +++ /dev/null @@ -1,659 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_UTILS_ARM_ASSEMBLER_ARM_H_ -#define ART_RUNTIME_OAT_UTILS_ARM_ASSEMBLER_ARM_H_ - -#include - -#include "base/logging.h" -#include "constants_arm.h" -#include "oat/utils/arm/managed_register_arm.h" -#include "oat/utils/assembler.h" -#include "offsets.h" -#include "utils.h" - -namespace art { -namespace arm { - -// Encodes Addressing Mode 1 - Data-processing operands defined in Section 5.1. 
-class ShifterOperand { - public: - // Data-processing operands - Uninitialized - ShifterOperand() { - type_ = -1; - } - - // Data-processing operands - Immediate - explicit ShifterOperand(uint32_t immediate) { - CHECK(immediate < (1 << kImmed8Bits)); - type_ = 1; - encoding_ = immediate; - } - - // Data-processing operands - Rotated immediate - ShifterOperand(uint32_t rotate, uint32_t immed8) { - CHECK((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits))); - type_ = 1; - encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift); - } - - // Data-processing operands - Register - explicit ShifterOperand(Register rm) { - type_ = 0; - encoding_ = static_cast(rm); - } - - // Data-processing operands - Logical shift/rotate by immediate - ShifterOperand(Register rm, Shift shift, uint32_t shift_imm) { - CHECK(shift_imm < (1 << kShiftImmBits)); - type_ = 0; - encoding_ = shift_imm << kShiftImmShift | - static_cast(shift) << kShiftShift | - static_cast(rm); - } - - // Data-processing operands - Logical shift/rotate by register - ShifterOperand(Register rm, Shift shift, Register rs) { - type_ = 0; - encoding_ = static_cast(rs) << kShiftRegisterShift | - static_cast(shift) << kShiftShift | (1 << 4) | - static_cast(rm); - } - - static bool CanHold(uint32_t immediate, ShifterOperand* shifter_op) { - // Avoid the more expensive test for frequent small immediate values. - if (immediate < (1 << kImmed8Bits)) { - shifter_op->type_ = 1; - shifter_op->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift); - return true; - } - // Note that immediate must be unsigned for the test to work correctly. 
- for (int rot = 0; rot < 16; rot++) { - uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot)); - if (imm8 < (1 << kImmed8Bits)) { - shifter_op->type_ = 1; - shifter_op->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift); - return true; - } - } - return false; - } - - private: - bool is_valid() const { return (type_ == 0) || (type_ == 1); } - - uint32_t type() const { - CHECK(is_valid()); - return type_; - } - - uint32_t encoding() const { - CHECK(is_valid()); - return encoding_; - } - - uint32_t type_; // Encodes the type field (bits 27-25) in the instruction. - uint32_t encoding_; - - friend class ArmAssembler; -#ifdef SOURCE_ASSEMBLER_SUPPORT - friend class BinaryAssembler; -#endif -}; - - -enum LoadOperandType { - kLoadSignedByte, - kLoadUnsignedByte, - kLoadSignedHalfword, - kLoadUnsignedHalfword, - kLoadWord, - kLoadWordPair, - kLoadSWord, - kLoadDWord -}; - - -enum StoreOperandType { - kStoreByte, - kStoreHalfword, - kStoreWord, - kStoreWordPair, - kStoreSWord, - kStoreDWord -}; - - -// Load/store multiple addressing mode. 
-enum BlockAddressMode { - // bit encoding P U W - DA = (0|0|0) << 21, // decrement after - IA = (0|4|0) << 21, // increment after - DB = (8|0|0) << 21, // decrement before - IB = (8|4|0) << 21, // increment before - DA_W = (0|0|1) << 21, // decrement after with writeback to base - IA_W = (0|4|1) << 21, // increment after with writeback to base - DB_W = (8|0|1) << 21, // decrement before with writeback to base - IB_W = (8|4|1) << 21 // increment before with writeback to base -}; - - -class Address { - public: - // Memory operand addressing mode - enum Mode { - // bit encoding P U W - Offset = (8|4|0) << 21, // offset (w/o writeback to base) - PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback - PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback - NegOffset = (8|0|0) << 21, // negative offset (w/o writeback to base) - NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback - NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback - }; - - explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) { - CHECK(IsAbsoluteUint(12, offset)); - if (offset < 0) { - encoding_ = (am ^ (1 << kUShift)) | -offset; // Flip U to adjust sign. - } else { - encoding_ = am | offset; - } - encoding_ |= static_cast(rn) << kRnShift; - } - - static bool CanHoldLoadOffset(LoadOperandType type, int offset); - static bool CanHoldStoreOffset(StoreOperandType type, int offset); - - private: - uint32_t encoding() const { return encoding_; } - - // Encoding for addressing mode 3. - uint32_t encoding3() const { - const uint32_t offset_mask = (1 << 12) - 1; - uint32_t offset = encoding_ & offset_mask; - CHECK_LT(offset, 256u); - return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf); - } - - // Encoding for vfp load/store addressing. 
- uint32_t vencoding() const { - const uint32_t offset_mask = (1 << 12) - 1; - uint32_t offset = encoding_ & offset_mask; - CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020. - CHECK_ALIGNED(offset, 2); // Multiple of 4. - int mode = encoding_ & ((8|4|1) << 21); - CHECK((mode == Offset) || (mode == NegOffset)); - uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2); - if (mode == Offset) { - vencoding |= 1 << 23; - } - return vencoding; - } - - uint32_t encoding_; - - friend class ArmAssembler; -}; - - -class ArmAssembler : public Assembler { - public: - ArmAssembler() {} - virtual ~ArmAssembler() {} - - // Data-processing instructions. - void and_(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void eor(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void sub(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - void subs(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void rsb(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - void rsbs(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void add(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void adds(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void adc(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void sbc(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void rsc(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void tst(Register rn, ShifterOperand so, Condition cond = AL); - - void teq(Register rn, ShifterOperand so, Condition cond = AL); - - void cmp(Register rn, ShifterOperand so, Condition cond = AL); - - void cmn(Register rn, ShifterOperand so, Condition cond = AL); - - void orr(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - void orrs(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void 
mov(Register rd, ShifterOperand so, Condition cond = AL); - void movs(Register rd, ShifterOperand so, Condition cond = AL); - - void bic(Register rd, Register rn, ShifterOperand so, Condition cond = AL); - - void mvn(Register rd, ShifterOperand so, Condition cond = AL); - void mvns(Register rd, ShifterOperand so, Condition cond = AL); - - // Miscellaneous data-processing instructions. - void clz(Register rd, Register rm, Condition cond = AL); - void movw(Register rd, uint16_t imm16, Condition cond = AL); - void movt(Register rd, uint16_t imm16, Condition cond = AL); - - // Multiply instructions. - void mul(Register rd, Register rn, Register rm, Condition cond = AL); - void mla(Register rd, Register rn, Register rm, Register ra, - Condition cond = AL); - void mls(Register rd, Register rn, Register rm, Register ra, - Condition cond = AL); - void umull(Register rd_lo, Register rd_hi, Register rn, Register rm, - Condition cond = AL); - - // Load/store instructions. - void ldr(Register rd, Address ad, Condition cond = AL); - void str(Register rd, Address ad, Condition cond = AL); - - void ldrb(Register rd, Address ad, Condition cond = AL); - void strb(Register rd, Address ad, Condition cond = AL); - - void ldrh(Register rd, Address ad, Condition cond = AL); - void strh(Register rd, Address ad, Condition cond = AL); - - void ldrsb(Register rd, Address ad, Condition cond = AL); - void ldrsh(Register rd, Address ad, Condition cond = AL); - - void ldrd(Register rd, Address ad, Condition cond = AL); - void strd(Register rd, Address ad, Condition cond = AL); - - void ldm(BlockAddressMode am, Register base, - RegList regs, Condition cond = AL); - void stm(BlockAddressMode am, Register base, - RegList regs, Condition cond = AL); - - void ldrex(Register rd, Register rn, Condition cond = AL); - void strex(Register rd, Register rt, Register rn, Condition cond = AL); - - // Miscellaneous instructions. 
- void clrex(); - void nop(Condition cond = AL); - - // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0. - void bkpt(uint16_t imm16); - void svc(uint32_t imm24); - - // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles). - void vmovsr(SRegister sn, Register rt, Condition cond = AL); - void vmovrs(Register rt, SRegister sn, Condition cond = AL); - void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL); - void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL); - void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL); - void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL); - void vmovs(SRegister sd, SRegister sm, Condition cond = AL); - void vmovd(DRegister dd, DRegister dm, Condition cond = AL); - - // Returns false if the immediate cannot be encoded. - bool vmovs(SRegister sd, float s_imm, Condition cond = AL); - bool vmovd(DRegister dd, double d_imm, Condition cond = AL); - - void vldrs(SRegister sd, Address ad, Condition cond = AL); - void vstrs(SRegister sd, Address ad, Condition cond = AL); - void vldrd(DRegister dd, Address ad, Condition cond = AL); - void vstrd(DRegister dd, Address ad, Condition cond = AL); - - void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); - void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); - void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); - void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); - void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); - void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); - void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); - void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); - void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); - void vmlsd(DRegister dd, DRegister dn, DRegister 
dm, Condition cond = AL); - void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL); - void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL); - - void vabss(SRegister sd, SRegister sm, Condition cond = AL); - void vabsd(DRegister dd, DRegister dm, Condition cond = AL); - void vnegs(SRegister sd, SRegister sm, Condition cond = AL); - void vnegd(DRegister dd, DRegister dm, Condition cond = AL); - void vsqrts(SRegister sd, SRegister sm, Condition cond = AL); - void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL); - - void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL); - void vcvtds(DRegister dd, SRegister sm, Condition cond = AL); - void vcvtis(SRegister sd, SRegister sm, Condition cond = AL); - void vcvtid(SRegister sd, DRegister dm, Condition cond = AL); - void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL); - void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL); - void vcvtus(SRegister sd, SRegister sm, Condition cond = AL); - void vcvtud(SRegister sd, DRegister dm, Condition cond = AL); - void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL); - void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL); - - void vcmps(SRegister sd, SRegister sm, Condition cond = AL); - void vcmpd(DRegister dd, DRegister dm, Condition cond = AL); - void vcmpsz(SRegister sd, Condition cond = AL); - void vcmpdz(DRegister dd, Condition cond = AL); - void vmstat(Condition cond = AL); // VMRS APSR_nzcv, FPSCR - - // Branch instructions. - void b(Label* label, Condition cond = AL); - void bl(Label* label, Condition cond = AL); - void blx(Register rm, Condition cond = AL); - void bx(Register rm, Condition cond = AL); - - // Macros. - // Add signed constant value to rd. May clobber IP. 
- void AddConstant(Register rd, int32_t value, Condition cond = AL); - void AddConstant(Register rd, Register rn, int32_t value, - Condition cond = AL); - void AddConstantSetFlags(Register rd, Register rn, int32_t value, - Condition cond = AL); - void AddConstantWithCarry(Register rd, Register rn, int32_t value, - Condition cond = AL); - - // Load and Store. May clobber IP. - void LoadImmediate(Register rd, int32_t value, Condition cond = AL); - void LoadSImmediate(SRegister sd, float value, Condition cond = AL); - void LoadDImmediate(DRegister dd, double value, - Register scratch, Condition cond = AL); - void MarkExceptionHandler(Label* label); - void LoadFromOffset(LoadOperandType type, - Register reg, - Register base, - int32_t offset, - Condition cond = AL); - void StoreToOffset(StoreOperandType type, - Register reg, - Register base, - int32_t offset, - Condition cond = AL); - void LoadSFromOffset(SRegister reg, - Register base, - int32_t offset, - Condition cond = AL); - void StoreSToOffset(SRegister reg, - Register base, - int32_t offset, - Condition cond = AL); - void LoadDFromOffset(DRegister reg, - Register base, - int32_t offset, - Condition cond = AL); - void StoreDToOffset(DRegister reg, - Register base, - int32_t offset, - Condition cond = AL); - - void Push(Register rd, Condition cond = AL); - void Pop(Register rd, Condition cond = AL); - - void PushList(RegList regs, Condition cond = AL); - void PopList(RegList regs, Condition cond = AL); - - void Mov(Register rd, Register rm, Condition cond = AL); - - // Convenience shift instructions. Use mov instruction with shifter operand - // for variants setting the status flags or using a register shift count. 
- void Lsl(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); - void Lsr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); - void Asr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); - void Ror(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL); - void Rrx(Register rd, Register rm, Condition cond = AL); - - // Encode a signed constant in tst instructions, only affecting the flags. - void EncodeUint32InTstInstructions(uint32_t data); - // ... and decode from a pc pointing to the start of encoding instructions. - static uint32_t DecodeUint32FromTstInstructions(uword pc); - static bool IsInstructionForExceptionHandling(uword pc); - - // Emit data (e.g. encoded instruction or immediate) to the - // instruction stream. - void Emit(int32_t value); - void Bind(Label* label); - - // - // Overridden common assembler high-level functionality - // - - // Emit code that will create an activation on the stack - virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, - const std::vector& callee_save_regs, - const std::vector& entry_spills); - - // Emit code that will remove an activation from the stack - virtual void RemoveFrame(size_t frame_size, - const std::vector& callee_save_regs); - - virtual void IncreaseFrameSize(size_t adjust); - virtual void DecreaseFrameSize(size_t adjust); - - // Store routines - virtual void Store(FrameOffset offs, ManagedRegister src, size_t size); - virtual void StoreRef(FrameOffset dest, ManagedRegister src); - virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src); - - virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister scratch); - - virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, - ManagedRegister scratch); - - virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch); - - virtual void StoreStackPointerToThread(ThreadOffset thr_offs); - - virtual 
void StoreSpanning(FrameOffset dest, ManagedRegister src, - FrameOffset in_off, ManagedRegister scratch); - - // Load routines - virtual void Load(ManagedRegister dest, FrameOffset src, size_t size); - - virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size); - - virtual void LoadRef(ManagedRegister dest, FrameOffset src); - - virtual void LoadRef(ManagedRegister dest, ManagedRegister base, - MemberOffset offs); - - virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, - Offset offs); - - virtual void LoadRawPtrFromThread(ManagedRegister dest, - ThreadOffset offs); - - // Copying routines - virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size); - - virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, - ManagedRegister scratch); - - virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, - ManagedRegister scratch); - - virtual void CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister scratch); - - virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size); - - virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister scratch, size_t size); - - virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void MemoryBarrier(ManagedRegister scratch); - - // Sign extension - virtual void SignExtend(ManagedRegister mreg, size_t size); - - // Zero extension - virtual void ZeroExtend(ManagedRegister mreg, size_t size); - - // 
Exploit fast access in managed code to Thread::Current() - virtual void GetCurrentThread(ManagedRegister tr); - virtual void GetCurrentThread(FrameOffset dest_offset, - ManagedRegister scratch); - - // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. in_reg holds a possibly stale reference - // that can be used to avoid loading the SIRT entry to see if the value is - // NULL. - virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, - ManagedRegister in_reg, bool null_allowed); - - // Set up out_off to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. - virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, - ManagedRegister scratch, bool null_allowed); - - // src holds a SIRT entry (Object**) load this into dst - virtual void LoadReferenceFromSirt(ManagedRegister dst, - ManagedRegister src); - - // Heap::VerifyObject on src. In some cases (such as a reference to this) we - // know that src may not be null. - virtual void VerifyObject(ManagedRegister src, bool could_be_null); - virtual void VerifyObject(FrameOffset src, bool could_be_null); - - // Call to address held at [base+offset] - virtual void Call(ManagedRegister base, Offset offset, - ManagedRegister scratch); - virtual void Call(FrameOffset base, Offset offset, - ManagedRegister scratch); - virtual void Call(ThreadOffset offset, ManagedRegister scratch); - - // Generate code to check if Thread::Current()->exception_ is non-null - // and branch to a ExceptionSlowPath if it is. 
- virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust); - - private: - void EmitType01(Condition cond, - int type, - Opcode opcode, - int set_cc, - Register rn, - Register rd, - ShifterOperand so); - - void EmitType5(Condition cond, int offset, bool link); - - void EmitMemOp(Condition cond, - bool load, - bool byte, - Register rd, - Address ad); - - void EmitMemOpAddressMode3(Condition cond, - int32_t mode, - Register rd, - Address ad); - - void EmitMultiMemOp(Condition cond, - BlockAddressMode am, - bool load, - Register base, - RegList regs); - - void EmitShiftImmediate(Condition cond, - Shift opcode, - Register rd, - Register rm, - ShifterOperand so); - - void EmitShiftRegister(Condition cond, - Shift opcode, - Register rd, - Register rm, - ShifterOperand so); - - void EmitMulOp(Condition cond, - int32_t opcode, - Register rd, - Register rn, - Register rm, - Register rs); - - void EmitVFPsss(Condition cond, - int32_t opcode, - SRegister sd, - SRegister sn, - SRegister sm); - - void EmitVFPddd(Condition cond, - int32_t opcode, - DRegister dd, - DRegister dn, - DRegister dm); - - void EmitVFPsd(Condition cond, - int32_t opcode, - SRegister sd, - DRegister dm); - - void EmitVFPds(Condition cond, - int32_t opcode, - DRegister dd, - SRegister sm); - - void EmitBranch(Condition cond, Label* label, bool link); - static int32_t EncodeBranchOffset(int offset, int32_t inst); - static int DecodeBranchOffset(int32_t inst); - int32_t EncodeTstOffset(int offset, int32_t inst); - int DecodeTstOffset(int32_t inst); - - // Returns whether or not the given register is used for passing parameters. 
- static int RegisterCompare(const Register* reg1, const Register* reg2) { - return *reg1 - *reg2; - } -}; - -// Slowpath entered when Thread::Current()->_exception is non-null -class ArmExceptionSlowPath : public SlowPath { - public: - explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust) - : scratch_(scratch), stack_adjust_(stack_adjust) { - } - virtual void Emit(Assembler *sp_asm); - private: - const ArmManagedRegister scratch_; - const size_t stack_adjust_; -}; - -} // namespace arm -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_ARM_ASSEMBLER_ARM_H_ diff --git a/runtime/oat/utils/arm/managed_register_arm.cc b/runtime/oat/utils/arm/managed_register_arm.cc deleted file mode 100644 index 57c23059de..0000000000 --- a/runtime/oat/utils/arm/managed_register_arm.cc +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "managed_register_arm.h" - -#include "globals.h" - -namespace art { -namespace arm { - -// We need all registers for caching of locals. -// Register R9 .. R15 are reserved. 
-static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1; -static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters; -static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters; -static const int kNumberOfAvailableOverlappingDRegisters = - kNumberOfOverlappingDRegisters; -static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs; - - -// Returns true if this managed-register overlaps the other managed-register. -bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const { - if (IsNoRegister() || other.IsNoRegister()) return false; - if (Equals(other)) return true; - if (IsRegisterPair()) { - Register low = AsRegisterPairLow(); - Register high = AsRegisterPairHigh(); - return ArmManagedRegister::FromCoreRegister(low).Overlaps(other) || - ArmManagedRegister::FromCoreRegister(high).Overlaps(other); - } - if (IsOverlappingDRegister()) { - if (other.IsDRegister()) return Equals(other); - if (other.IsSRegister()) { - SRegister low = AsOverlappingDRegisterLow(); - SRegister high = AsOverlappingDRegisterHigh(); - SRegister other_sreg = other.AsSRegister(); - return (low == other_sreg) || (high == other_sreg); - } - return false; - } - if (other.IsRegisterPair() || other.IsOverlappingDRegister()) { - return other.Overlaps(*this); - } - return false; -} - - -int ArmManagedRegister::AllocIdLow() const { - CHECK(IsOverlappingDRegister() || IsRegisterPair()); - const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfSRegIds); - int low; - if (r < kNumberOfOverlappingDRegIds) { - CHECK(IsOverlappingDRegister()); - low = (r * 2) + kNumberOfCoreRegIds; // Return a SRegister. - } else { - CHECK(IsRegisterPair()); - low = (r - kNumberOfDRegIds) * 2; // Return a Register. 
- if (low > 6) { - // we didn't got a pair higher than R6_R7, must be the dalvik special case - low = 1; - } - } - return low; -} - - -int ArmManagedRegister::AllocIdHigh() const { - return AllocIdLow() + 1; -} - - -void ArmManagedRegister::Print(std::ostream& os) const { - if (!IsValidManagedRegister()) { - os << "No Register"; - } else if (IsCoreRegister()) { - os << "Core: " << static_cast(AsCoreRegister()); - } else if (IsRegisterPair()) { - os << "Pair: " << static_cast(AsRegisterPairLow()) << ", " - << static_cast(AsRegisterPairHigh()); - } else if (IsSRegister()) { - os << "SRegister: " << static_cast(AsSRegister()); - } else if (IsDRegister()) { - os << "DRegister: " << static_cast(AsDRegister()); - } else { - os << "??: " << RegId(); - } -} - -std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg) { - reg.Print(os); - return os; -} - -std::ostream& operator<<(std::ostream& os, const RegisterPair& r) { - os << ArmManagedRegister::FromRegisterPair(r); - return os; -} - -} // namespace arm -} // namespace art diff --git a/runtime/oat/utils/arm/managed_register_arm.h b/runtime/oat/utils/arm/managed_register_arm.h deleted file mode 100644 index 01596bb6b1..0000000000 --- a/runtime/oat/utils/arm/managed_register_arm.h +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_ -#define ART_RUNTIME_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_ - -#include "base/logging.h" -#include "constants_arm.h" -#include "oat/utils/managed_register.h" - -namespace art { -namespace arm { - -// Values for register pairs. -enum RegisterPair { - R0_R1 = 0, - R2_R3 = 1, - R4_R5 = 2, - R6_R7 = 3, - R1_R2 = 4, // Dalvik style passing - kNumberOfRegisterPairs = 5, - kNoRegisterPair = -1, -}; - -std::ostream& operator<<(std::ostream& os, const RegisterPair& reg); - -const int kNumberOfCoreRegIds = kNumberOfCoreRegisters; -const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters; - -const int kNumberOfSRegIds = kNumberOfSRegisters; -const int kNumberOfSAllocIds = kNumberOfSRegisters; - -const int kNumberOfDRegIds = kNumberOfDRegisters; -const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters; -const int kNumberOfDAllocIds = kNumberOfDRegIds - kNumberOfOverlappingDRegIds; - -const int kNumberOfPairRegIds = kNumberOfRegisterPairs; - -const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfSRegIds + - kNumberOfDRegIds + kNumberOfPairRegIds; -const int kNumberOfAllocIds = - kNumberOfCoreAllocIds + kNumberOfSAllocIds + kNumberOfDAllocIds; - -// Register ids map: -// [0..R[ core registers (enum Register) -// [R..S[ single precision VFP registers (enum SRegister) -// [S..D[ double precision VFP registers (enum DRegister) -// [D..P[ core register pairs (enum RegisterPair) -// where -// R = kNumberOfCoreRegIds -// S = R + kNumberOfSRegIds -// D = S + kNumberOfDRegIds -// P = D + kNumberOfRegisterPairs - -// Allocation ids map: -// [0..R[ core registers (enum Register) -// [R..S[ single precision VFP registers (enum SRegister) -// [S..N[ non-overlapping double precision VFP registers (16-31 in enum -// DRegister, VFPv3-D32 only) -// where -// R = kNumberOfCoreAllocIds -// S = R + kNumberOfSAllocIds -// N = S + kNumberOfDAllocIds - - -// An instance of class 'ManagedRegister' represents a single 
ARM register or a -// pair of core ARM registers (enum RegisterPair). A single register is either a -// core register (enum Register), a VFP single precision register -// (enum SRegister), or a VFP double precision register (enum DRegister). -// 'ManagedRegister::NoRegister()' returns an invalid ManagedRegister. -// There is a one-to-one mapping between ManagedRegister and register id. -class ArmManagedRegister : public ManagedRegister { - public: - Register AsCoreRegister() const { - CHECK(IsCoreRegister()); - return static_cast(id_); - } - - SRegister AsSRegister() const { - CHECK(IsSRegister()); - return static_cast(id_ - kNumberOfCoreRegIds); - } - - DRegister AsDRegister() const { - CHECK(IsDRegister()); - return static_cast(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds); - } - - SRegister AsOverlappingDRegisterLow() const { - CHECK(IsOverlappingDRegister()); - DRegister d_reg = AsDRegister(); - return static_cast(d_reg * 2); - } - - SRegister AsOverlappingDRegisterHigh() const { - CHECK(IsOverlappingDRegister()); - DRegister d_reg = AsDRegister(); - return static_cast(d_reg * 2 + 1); - } - - RegisterPair AsRegisterPair() const { - CHECK(IsRegisterPair()); - Register reg_low = AsRegisterPairLow(); - if (reg_low == R1) { - return R1_R2; - } else { - return static_cast(reg_low / 2); - } - } - - Register AsRegisterPairLow() const { - CHECK(IsRegisterPair()); - // Appropriate mapping of register ids allows to use AllocIdLow(). - return FromRegId(AllocIdLow()).AsCoreRegister(); - } - - Register AsRegisterPairHigh() const { - CHECK(IsRegisterPair()); - // Appropriate mapping of register ids allows to use AllocIdHigh(). 
- return FromRegId(AllocIdHigh()).AsCoreRegister(); - } - - bool IsCoreRegister() const { - CHECK(IsValidManagedRegister()); - return (0 <= id_) && (id_ < kNumberOfCoreRegIds); - } - - bool IsSRegister() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - kNumberOfCoreRegIds; - return (0 <= test) && (test < kNumberOfSRegIds); - } - - bool IsDRegister() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds); - return (0 <= test) && (test < kNumberOfDRegIds); - } - - // Returns true if this DRegister overlaps SRegisters. - bool IsOverlappingDRegister() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds); - return (0 <= test) && (test < kNumberOfOverlappingDRegIds); - } - - bool IsRegisterPair() const { - CHECK(IsValidManagedRegister()); - const int test = - id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds + kNumberOfDRegIds); - return (0 <= test) && (test < kNumberOfPairRegIds); - } - - bool IsSameType(ArmManagedRegister test) const { - CHECK(IsValidManagedRegister() && test.IsValidManagedRegister()); - return - (IsCoreRegister() && test.IsCoreRegister()) || - (IsSRegister() && test.IsSRegister()) || - (IsDRegister() && test.IsDRegister()) || - (IsRegisterPair() && test.IsRegisterPair()); - } - - - // Returns true if the two managed-registers ('this' and 'other') overlap. - // Either managed-register may be the NoRegister. If both are the NoRegister - // then false is returned. 
- bool Overlaps(const ArmManagedRegister& other) const; - - void Print(std::ostream& os) const; - - static ArmManagedRegister FromCoreRegister(Register r) { - CHECK_NE(r, kNoRegister); - return FromRegId(r); - } - - static ArmManagedRegister FromSRegister(SRegister r) { - CHECK_NE(r, kNoSRegister); - return FromRegId(r + kNumberOfCoreRegIds); - } - - static ArmManagedRegister FromDRegister(DRegister r) { - CHECK_NE(r, kNoDRegister); - return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfSRegIds)); - } - - static ArmManagedRegister FromRegisterPair(RegisterPair r) { - CHECK_NE(r, kNoRegisterPair); - return FromRegId(r + (kNumberOfCoreRegIds + - kNumberOfSRegIds + kNumberOfDRegIds)); - } - - // Return a RegisterPair consisting of Register r_low and r_low + 1. - static ArmManagedRegister FromCoreRegisterPair(Register r_low) { - if (r_low != R1) { // not the dalvik special case - CHECK_NE(r_low, kNoRegister); - CHECK_EQ(0, (r_low % 2)); - const int r = r_low / 2; - CHECK_LT(r, kNumberOfPairRegIds); - return FromRegisterPair(static_cast(r)); - } else { - return FromRegisterPair(R1_R2); - } - } - - // Return a DRegister overlapping SRegister r_low and r_low + 1. - static ArmManagedRegister FromSRegisterPair(SRegister r_low) { - CHECK_NE(r_low, kNoSRegister); - CHECK_EQ(0, (r_low % 2)); - const int r = r_low / 2; - CHECK_LT(r, kNumberOfOverlappingDRegIds); - return FromDRegister(static_cast(r)); - } - - private: - bool IsValidManagedRegister() const { - return (0 <= id_) && (id_ < kNumberOfRegIds); - } - - int RegId() const { - CHECK(!IsNoRegister()); - return id_; - } - - int AllocId() const { - CHECK(IsValidManagedRegister() && - !IsOverlappingDRegister() && !IsRegisterPair()); - int r = id_; - if ((kNumberOfDAllocIds > 0) && IsDRegister()) { // VFPv3-D32 only. 
- r -= kNumberOfOverlappingDRegIds; - } - CHECK_LT(r, kNumberOfAllocIds); - return r; - } - - int AllocIdLow() const; - int AllocIdHigh() const; - - friend class ManagedRegister; - - explicit ArmManagedRegister(int reg_id) : ManagedRegister(reg_id) {} - - static ArmManagedRegister FromRegId(int reg_id) { - ArmManagedRegister reg(reg_id); - CHECK(reg.IsValidManagedRegister()); - return reg; - } -}; - -std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg); - -} // namespace arm - -inline arm::ArmManagedRegister ManagedRegister::AsArm() const { - arm::ArmManagedRegister reg(id_); - CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); - return reg; -} - -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_ diff --git a/runtime/oat/utils/arm/managed_register_arm_test.cc b/runtime/oat/utils/arm/managed_register_arm_test.cc deleted file mode 100644 index f5d4cc0d10..0000000000 --- a/runtime/oat/utils/arm/managed_register_arm_test.cc +++ /dev/null @@ -1,767 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "globals.h" -#include "managed_register_arm.h" -#include "gtest/gtest.h" - -namespace art { -namespace arm { - -TEST(ArmManagedRegister, NoRegister) { - ArmManagedRegister reg = ManagedRegister::NoRegister().AsArm(); - EXPECT_TRUE(reg.IsNoRegister()); - EXPECT_TRUE(!reg.Overlaps(reg)); -} - -TEST(ArmManagedRegister, CoreRegister) { - ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(R0, reg.AsCoreRegister()); - - reg = ArmManagedRegister::FromCoreRegister(R1); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(R1, reg.AsCoreRegister()); - - reg = ArmManagedRegister::FromCoreRegister(R8); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(R8, reg.AsCoreRegister()); - - reg = ArmManagedRegister::FromCoreRegister(R15); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(R15, reg.AsCoreRegister()); -} - - -TEST(ArmManagedRegister, SRegister) { - ArmManagedRegister reg = ArmManagedRegister::FromSRegister(S0); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(S0, reg.AsSRegister()); - - reg = 
ArmManagedRegister::FromSRegister(S1); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(S1, reg.AsSRegister()); - - reg = ArmManagedRegister::FromSRegister(S3); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(S3, reg.AsSRegister()); - - reg = ArmManagedRegister::FromSRegister(S15); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(S15, reg.AsSRegister()); - - reg = ArmManagedRegister::FromSRegister(S30); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(S30, reg.AsSRegister()); - - reg = ArmManagedRegister::FromSRegister(S31); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(S31, reg.AsSRegister()); -} - - -TEST(ArmManagedRegister, DRegister) { - ArmManagedRegister reg = ArmManagedRegister::FromDRegister(D0); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D0, reg.AsDRegister()); - EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow()); - EXPECT_EQ(S1, 
reg.AsOverlappingDRegisterHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S0))); - - reg = ArmManagedRegister::FromDRegister(D1); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D1, reg.AsDRegister()); - EXPECT_EQ(S2, reg.AsOverlappingDRegisterLow()); - EXPECT_EQ(S3, reg.AsOverlappingDRegisterHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S2))); - - reg = ArmManagedRegister::FromDRegister(D6); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D6, reg.AsDRegister()); - EXPECT_EQ(S12, reg.AsOverlappingDRegisterLow()); - EXPECT_EQ(S13, reg.AsOverlappingDRegisterHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S12))); - - reg = ArmManagedRegister::FromDRegister(D14); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D14, reg.AsDRegister()); - EXPECT_EQ(S28, reg.AsOverlappingDRegisterLow()); - EXPECT_EQ(S29, reg.AsOverlappingDRegisterHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S28))); - - reg = ArmManagedRegister::FromDRegister(D15); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D15, reg.AsDRegister()); - EXPECT_EQ(S30, reg.AsOverlappingDRegisterLow()); - EXPECT_EQ(S31, reg.AsOverlappingDRegisterHigh()); - 
EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S30))); - -#ifdef VFPv3_D32 - reg = ArmManagedRegister::FromDRegister(D16); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D16, reg.AsDRegister()); - - reg = ArmManagedRegister::FromDRegister(D18); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D18, reg.AsDRegister()); - - reg = ArmManagedRegister::FromDRegister(D30); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D30, reg.AsDRegister()); - - reg = ArmManagedRegister::FromDRegister(D31); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(D31, reg.AsDRegister()); -#endif // VFPv3_D32 -} - - -TEST(ArmManagedRegister, Pair) { - ArmManagedRegister reg = ArmManagedRegister::FromRegisterPair(R0_R1); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(R0_R1, reg.AsRegisterPair()); - EXPECT_EQ(R0, reg.AsRegisterPairLow()); - EXPECT_EQ(R1, reg.AsRegisterPairHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R0))); - - reg = ArmManagedRegister::FromRegisterPair(R1_R2); - EXPECT_TRUE(!reg.IsNoRegister()); - 
EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(R1_R2, reg.AsRegisterPair()); - EXPECT_EQ(R1, reg.AsRegisterPairLow()); - EXPECT_EQ(R2, reg.AsRegisterPairHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R1))); - - reg = ArmManagedRegister::FromRegisterPair(R2_R3); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(R2_R3, reg.AsRegisterPair()); - EXPECT_EQ(R2, reg.AsRegisterPairLow()); - EXPECT_EQ(R3, reg.AsRegisterPairHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R2))); - - reg = ArmManagedRegister::FromRegisterPair(R4_R5); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(R4_R5, reg.AsRegisterPair()); - EXPECT_EQ(R4, reg.AsRegisterPairLow()); - EXPECT_EQ(R5, reg.AsRegisterPairHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R4))); - - reg = ArmManagedRegister::FromRegisterPair(R6_R7); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCoreRegister()); - EXPECT_TRUE(!reg.IsSRegister()); - EXPECT_TRUE(!reg.IsDRegister()); - EXPECT_TRUE(!reg.IsOverlappingDRegister()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(R6_R7, reg.AsRegisterPair()); - EXPECT_EQ(R6, reg.AsRegisterPairLow()); - EXPECT_EQ(R7, reg.AsRegisterPairHigh()); - EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R6))); -} - - -TEST(ArmManagedRegister, Equals) { - ManagedRegister no_reg = ManagedRegister::NoRegister(); - EXPECT_TRUE(no_reg.Equals(ArmManagedRegister::NoRegister())); 
- EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_R0 = ArmManagedRegister::FromCoreRegister(R0); - EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_R1 = ArmManagedRegister::FromCoreRegister(R1); - EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_R8 = ArmManagedRegister::FromCoreRegister(R8); - EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S1))); - 
EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_S0 = ArmManagedRegister::FromSRegister(S0); - EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(reg_S0.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_S1 = ArmManagedRegister::FromSRegister(S1); - EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(reg_S1.Equals(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_S31 = ArmManagedRegister::FromSRegister(S31); - EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(reg_S31.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D1))); - 
EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_D0 = ArmManagedRegister::FromDRegister(D0); - EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(reg_D0.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_D15 = ArmManagedRegister::FromDRegister(D15); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(reg_D15.Equals(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - -#ifdef VFPv3_D32 - ArmManagedRegister reg_D16 = ArmManagedRegister::FromDRegister(D16); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D0))); - 
EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(reg_D16.Equals(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_D30 = ArmManagedRegister::FromDRegister(D30); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(reg_D30.Equals(ArmManagedRegister::FromDRegister(D30))); - EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - - ArmManagedRegister reg_D31 = ArmManagedRegister::FromDRegister(D30); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D30))); - 
EXPECT_TRUE(reg_D31.Equals(ArmManagedRegister::FromDRegister(D31))); - EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); -#endif // VFPv3_D32 - - ArmManagedRegister reg_R0R1 = ArmManagedRegister::FromRegisterPair(R0_R1); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R2_R3))); - - ArmManagedRegister reg_R4R5 = ArmManagedRegister::FromRegisterPair(R4_R5); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R4_R5))); - EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R6_R7))); - - ArmManagedRegister reg_R6R7 = ArmManagedRegister::FromRegisterPair(R6_R7); - 
EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::NoRegister())); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R4_R5))); - EXPECT_TRUE(reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R6_R7))); -} - - -TEST(ArmManagedRegister, Overlaps) { - ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromCoreRegister(R1); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromCoreRegister(R7); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromSRegister(S0); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromSRegister(S1); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromSRegister(S15); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromSRegister(S31); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromDRegister(D0); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromDRegister(D7); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromDRegister(D15); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - 
EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - -#ifdef VFPv3_D32 - reg = ArmManagedRegister::FromDRegister(D16); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromDRegister(D31); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); -#endif // VFPv3_D32 - - reg = ArmManagedRegister::FromRegisterPair(R0_R1); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); - - reg = ArmManagedRegister::FromRegisterPair(R4_R5); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7))); - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15))); -#ifdef VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16))); - 
EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31))); -#endif // VFPv3_D32 - EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1))); - EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5))); -} - -} // namespace arm -} // namespace art diff --git a/runtime/oat/utils/assembler.cc b/runtime/oat/utils/assembler.cc deleted file mode 100644 index 92ce0b8001..0000000000 --- a/runtime/oat/utils/assembler.cc +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "assembler.h" - -#include -#include - -#include "arm/assembler_arm.h" -#include "mips/assembler_mips.h" -#include "x86/assembler_x86.h" -#include "globals.h" -#include "memory_region.h" - -namespace art { - -static byte* NewContents(size_t capacity) { - return new byte[capacity]; -} - - -AssemblerBuffer::AssemblerBuffer() { - static const size_t kInitialBufferCapacity = 4 * KB; - contents_ = NewContents(kInitialBufferCapacity); - cursor_ = contents_; - limit_ = ComputeLimit(contents_, kInitialBufferCapacity); - fixup_ = NULL; - slow_path_ = NULL; -#ifndef NDEBUG - has_ensured_capacity_ = false; - fixups_processed_ = false; -#endif - - // Verify internal state. 
- CHECK_EQ(Capacity(), kInitialBufferCapacity); - CHECK_EQ(Size(), 0U); -} - - -AssemblerBuffer::~AssemblerBuffer() { - delete[] contents_; -} - - -void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) { - AssemblerFixup* fixup = fixup_; - while (fixup != NULL) { - fixup->Process(region, fixup->position()); - fixup = fixup->previous(); - } -} - - -void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) { - // Copy the instructions from the buffer. - MemoryRegion from(reinterpret_cast(contents()), Size()); - instructions.CopyFrom(0, from); - // Process fixups in the instructions. - ProcessFixups(instructions); -#ifndef NDEBUG - fixups_processed_ = true; -#endif -} - - -void AssemblerBuffer::ExtendCapacity() { - size_t old_size = Size(); - size_t old_capacity = Capacity(); - size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB); - - // Allocate the new data area and copy contents of the old one to it. - byte* new_contents = NewContents(new_capacity); - memmove(reinterpret_cast(new_contents), - reinterpret_cast(contents_), - old_size); - - // Compute the relocation delta and switch to the new contents area. - ptrdiff_t delta = new_contents - contents_; - contents_ = new_contents; - - // Update the cursor and recompute the limit. - cursor_ += delta; - limit_ = ComputeLimit(new_contents, new_capacity); - - // Verify internal state. 
- CHECK_EQ(Capacity(), new_capacity); - CHECK_EQ(Size(), old_size); -} - - -Assembler* Assembler::Create(InstructionSet instruction_set) { - switch (instruction_set) { - case kArm: - case kThumb2: - return new arm::ArmAssembler(); - case kMips: - return new mips::MipsAssembler(); - case kX86: - return new x86::X86Assembler(); - default: - LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; - return NULL; - } -} - -} // namespace art diff --git a/runtime/oat/utils/assembler.h b/runtime/oat/utils/assembler.h deleted file mode 100644 index 05e2732c5f..0000000000 --- a/runtime/oat/utils/assembler.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_UTILS_ASSEMBLER_H_ -#define ART_RUNTIME_OAT_UTILS_ASSEMBLER_H_ - -#include - -#include "base/logging.h" -#include "base/macros.h" -#include "constants_arm.h" -#include "constants_mips.h" -#include "constants_x86.h" -#include "instruction_set.h" -#include "managed_register.h" -#include "memory_region.h" -#include "offsets.h" - -namespace art { - -class Assembler; -class AssemblerBuffer; -class AssemblerFixup; - -namespace arm { - class ArmAssembler; -} -namespace mips { - class MipsAssembler; -} -namespace x86 { - class X86Assembler; -} - -class Label { - public: - Label() : position_(0) {} - - ~Label() { - // Assert if label is being destroyed with unresolved branches pending. 
- CHECK(!IsLinked()); - } - - // Returns the position for bound and linked labels. Cannot be used - // for unused labels. - int Position() const { - CHECK(!IsUnused()); - return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize; - } - - int LinkPosition() const { - CHECK(IsLinked()); - return position_ - kWordSize; - } - - bool IsBound() const { return position_ < 0; } - bool IsUnused() const { return position_ == 0; } - bool IsLinked() const { return position_ > 0; } - - private: - int position_; - - void Reinitialize() { - position_ = 0; - } - - void BindTo(int position) { - CHECK(!IsBound()); - position_ = -position - kPointerSize; - CHECK(IsBound()); - } - - void LinkTo(int position) { - CHECK(!IsBound()); - position_ = position + kPointerSize; - CHECK(IsLinked()); - } - - friend class arm::ArmAssembler; - friend class mips::MipsAssembler; - friend class x86::X86Assembler; - - DISALLOW_COPY_AND_ASSIGN(Label); -}; - - -// Assembler fixups are positions in generated code that require processing -// after the code has been copied to executable memory. This includes building -// relocation information. 
-class AssemblerFixup { - public: - virtual void Process(const MemoryRegion& region, int position) = 0; - virtual ~AssemblerFixup() {} - - private: - AssemblerFixup* previous_; - int position_; - - AssemblerFixup* previous() const { return previous_; } - void set_previous(AssemblerFixup* previous) { previous_ = previous; } - - int position() const { return position_; } - void set_position(int position) { position_ = position; } - - friend class AssemblerBuffer; -}; - -// Parent of all queued slow paths, emitted during finalization -class SlowPath { - public: - SlowPath() : next_(NULL) {} - virtual ~SlowPath() {} - - Label* Continuation() { return &continuation_; } - Label* Entry() { return &entry_; } - // Generate code for slow path - virtual void Emit(Assembler *sp_asm) = 0; - - protected: - // Entry branched to by fast path - Label entry_; - // Optional continuation that is branched to at the end of the slow path - Label continuation_; - // Next in linked list of slow paths - SlowPath *next_; - - friend class AssemblerBuffer; - DISALLOW_COPY_AND_ASSIGN(SlowPath); -}; - -class AssemblerBuffer { - public: - AssemblerBuffer(); - ~AssemblerBuffer(); - - // Basic support for emitting, loading, and storing. - template void Emit(T value) { - CHECK(HasEnsuredCapacity()); - *reinterpret_cast(cursor_) = value; - cursor_ += sizeof(T); - } - - template T Load(size_t position) { - CHECK_LE(position, Size() - static_cast(sizeof(T))); - return *reinterpret_cast(contents_ + position); - } - - template void Store(size_t position, T value) { - CHECK_LE(position, Size() - static_cast(sizeof(T))); - *reinterpret_cast(contents_ + position) = value; - } - - // Emit a fixup at the current location. 
- void EmitFixup(AssemblerFixup* fixup) { - fixup->set_previous(fixup_); - fixup->set_position(Size()); - fixup_ = fixup; - } - - void EnqueueSlowPath(SlowPath* slowpath) { - if (slow_path_ == NULL) { - slow_path_ = slowpath; - } else { - SlowPath* cur = slow_path_; - for ( ; cur->next_ != NULL ; cur = cur->next_) {} - cur->next_ = slowpath; - } - } - - void EmitSlowPaths(Assembler* sp_asm) { - SlowPath* cur = slow_path_; - SlowPath* next = NULL; - slow_path_ = NULL; - for ( ; cur != NULL ; cur = next) { - cur->Emit(sp_asm); - next = cur->next_; - delete cur; - } - } - - // Get the size of the emitted code. - size_t Size() const { - CHECK_GE(cursor_, contents_); - return cursor_ - contents_; - } - - byte* contents() const { return contents_; } - - // Copy the assembled instructions into the specified memory block - // and apply all fixups. - void FinalizeInstructions(const MemoryRegion& region); - - // To emit an instruction to the assembler buffer, the EnsureCapacity helper - // must be used to guarantee that the underlying data area is big enough to - // hold the emitted instruction. Usage: - // - // AssemblerBuffer buffer; - // AssemblerBuffer::EnsureCapacity ensured(&buffer); - // ... emit bytes for single instruction ... - -#ifndef NDEBUG - - class EnsureCapacity { - public: - explicit EnsureCapacity(AssemblerBuffer* buffer) { - if (buffer->cursor() >= buffer->limit()) { - buffer->ExtendCapacity(); - } - // In debug mode, we save the assembler buffer along with the gap - // size before we start emitting to the buffer. This allows us to - // check that any single generated instruction doesn't overflow the - // limit implied by the minimum gap size. - buffer_ = buffer; - gap_ = ComputeGap(); - // Make sure that extending the capacity leaves a big enough gap - // for any kind of instruction. - CHECK_GE(gap_, kMinimumGap); - // Mark the buffer as having ensured the capacity. - CHECK(!buffer->HasEnsuredCapacity()); // Cannot nest. 
- buffer->has_ensured_capacity_ = true; - } - - ~EnsureCapacity() { - // Unmark the buffer, so we cannot emit after this. - buffer_->has_ensured_capacity_ = false; - // Make sure the generated instruction doesn't take up more - // space than the minimum gap. - int delta = gap_ - ComputeGap(); - CHECK_LE(delta, kMinimumGap); - } - - private: - AssemblerBuffer* buffer_; - int gap_; - - int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); } - }; - - bool has_ensured_capacity_; - bool HasEnsuredCapacity() const { return has_ensured_capacity_; } - -#else - - class EnsureCapacity { - public: - explicit EnsureCapacity(AssemblerBuffer* buffer) { - if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity(); - } - }; - - // When building the C++ tests, assertion code is enabled. To allow - // asserting that the user of the assembler buffer has ensured the - // capacity needed for emitting, we add a dummy method in non-debug mode. - bool HasEnsuredCapacity() const { return true; } - -#endif - - // Returns the position in the instruction stream. - int GetPosition() { return cursor_ - contents_; } - - private: - // The limit is set to kMinimumGap bytes before the end of the data area. - // This leaves enough space for the longest possible instruction and allows - // for a single, fast space check per instruction. - static const int kMinimumGap = 32; - - byte* contents_; - byte* cursor_; - byte* limit_; - AssemblerFixup* fixup_; - bool fixups_processed_; - - // Head of linked list of slow paths - SlowPath* slow_path_; - - byte* cursor() const { return cursor_; } - byte* limit() const { return limit_; } - size_t Capacity() const { - CHECK_GE(limit_, contents_); - return (limit_ - contents_) + kMinimumGap; - } - - // Process the fixup chain starting at the given fixup. The offset is - // non-zero for fixups in the body if the preamble is non-empty. - void ProcessFixups(const MemoryRegion& region); - - // Compute the limit based on the data area and the capacity. 
See - // description of kMinimumGap for the reasoning behind the value. - static byte* ComputeLimit(byte* data, size_t capacity) { - return data + capacity - kMinimumGap; - } - - void ExtendCapacity(); - - friend class AssemblerFixup; -}; - -class Assembler { - public: - static Assembler* Create(InstructionSet instruction_set); - - // Emit slow paths queued during assembly - void EmitSlowPaths() { buffer_.EmitSlowPaths(this); } - - // Size of generated code - size_t CodeSize() const { return buffer_.Size(); } - - // Copy instructions out of assembly buffer into the given region of memory - void FinalizeInstructions(const MemoryRegion& region) { - buffer_.FinalizeInstructions(region); - } - - // Emit code that will create an activation on the stack - virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, - const std::vector& callee_save_regs, - const std::vector& entry_spills) = 0; - - // Emit code that will remove an activation from the stack - virtual void RemoveFrame(size_t frame_size, - const std::vector& callee_save_regs) = 0; - - virtual void IncreaseFrameSize(size_t adjust) = 0; - virtual void DecreaseFrameSize(size_t adjust) = 0; - - // Store routines - virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0; - virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0; - virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0; - - virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister scratch) = 0; - - virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, - ManagedRegister scratch) = 0; - - virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch) = 0; - - virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0; - - virtual void StoreSpanning(FrameOffset dest, ManagedRegister src, - FrameOffset in_off, ManagedRegister scratch) = 0; - - // Load routines - virtual void Load(ManagedRegister dest, 
FrameOffset src, size_t size) = 0; - - virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size) = 0; - - virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0; - - virtual void LoadRef(ManagedRegister dest, ManagedRegister base, - MemberOffset offs) = 0; - - virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, - Offset offs) = 0; - - virtual void LoadRawPtrFromThread(ManagedRegister dest, - ThreadOffset offs) = 0; - - // Copying routines - virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0; - - virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, - ManagedRegister scratch) = 0; - - virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, - ManagedRegister scratch) = 0; - - virtual void CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister scratch) = 0; - - virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void MemoryBarrier(ManagedRegister scratch) = 0; - - // Sign extension - virtual void SignExtend(ManagedRegister mreg, size_t size) = 0; - - // Zero extension - virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0; - - // Exploit fast access in managed code to Thread::Current() - virtual void 
GetCurrentThread(ManagedRegister tr) = 0; - virtual void GetCurrentThread(FrameOffset dest_offset, - ManagedRegister scratch) = 0; - - // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. in_reg holds a possibly stale reference - // that can be used to avoid loading the SIRT entry to see if the value is - // NULL. - virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, - ManagedRegister in_reg, bool null_allowed) = 0; - - // Set up out_off to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. - virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, - ManagedRegister scratch, bool null_allowed) = 0; - - // src holds a SIRT entry (Object**) load this into dst - virtual void LoadReferenceFromSirt(ManagedRegister dst, - ManagedRegister src) = 0; - - // Heap::VerifyObject on src. In some cases (such as a reference to this) we - // know that src may not be null. - virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0; - virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0; - - // Call to address held at [base+offset] - virtual void Call(ManagedRegister base, Offset offset, - ManagedRegister scratch) = 0; - virtual void Call(FrameOffset base, Offset offset, - ManagedRegister scratch) = 0; - virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0; - - // Generate code to check if Thread::Current()->exception_ is non-null - // and branch to a ExceptionSlowPath if it is. 
- virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0; - - virtual ~Assembler() {} - - protected: - Assembler() : buffer_() {} - - AssemblerBuffer buffer_; -}; - -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_ASSEMBLER_H_ diff --git a/runtime/oat/utils/managed_register.h b/runtime/oat/utils/managed_register.h deleted file mode 100644 index 4dd2acd8fe..0000000000 --- a/runtime/oat/utils/managed_register.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_UTILS_MANAGED_REGISTER_H_ -#define ART_RUNTIME_OAT_UTILS_MANAGED_REGISTER_H_ - -namespace art { - -namespace arm { -class ArmManagedRegister; -} -namespace mips { -class MipsManagedRegister; -} -namespace x86 { -class X86ManagedRegister; -} - -class ManagedRegister { - public: - // ManagedRegister is a value class. There exists no method to change the - // internal state. We therefore allow a copy constructor and an - // assignment-operator. - ManagedRegister(const ManagedRegister& other) : id_(other.id_) { } - - ManagedRegister& operator=(const ManagedRegister& other) { - id_ = other.id_; - return *this; - } - - arm::ArmManagedRegister AsArm() const; - mips::MipsManagedRegister AsMips() const; - x86::X86ManagedRegister AsX86() const; - - // It is valid to invoke Equals on and with a NoRegister. 
- bool Equals(const ManagedRegister& other) const { - return id_ == other.id_; - } - - bool IsNoRegister() const { - return id_ == kNoRegister; - } - - static ManagedRegister NoRegister() { - return ManagedRegister(); - } - - protected: - static const int kNoRegister = -1; - - ManagedRegister() : id_(kNoRegister) { } - explicit ManagedRegister(int reg_id) : id_(reg_id) { } - - int id_; -}; - -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_MANAGED_REGISTER_H_ diff --git a/runtime/oat/utils/mips/assembler_mips.cc b/runtime/oat/utils/mips/assembler_mips.cc deleted file mode 100644 index 25ba9b2219..0000000000 --- a/runtime/oat/utils/mips/assembler_mips.cc +++ /dev/null @@ -1,1023 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "assembler_mips.h" - -#include "base/casts.h" -#include "memory_region.h" -#include "oat/runtime/oat_support_entrypoints.h" -#include "thread.h" - -namespace art { -namespace mips { -#if 0 -class DirectCallRelocation : public AssemblerFixup { - public: - void Process(const MemoryRegion& region, int position) { - // Direct calls are relative to the following instruction on mips. 
- int32_t pointer = region.Load(position); - int32_t start = reinterpret_cast(region.start()); - int32_t delta = start + position + sizeof(int32_t); - region.Store(position, pointer - delta); - } -}; -#endif - -static const char* kRegisterNames[] = { - "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", - "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", - "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", - "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra", -}; -std::ostream& operator<<(std::ostream& os, const Register& rhs) { - if (rhs >= ZERO && rhs <= RA) { - os << kRegisterNames[rhs]; - } else { - os << "Register[" << static_cast(rhs) << "]"; - } - return os; -} - -std::ostream& operator<<(std::ostream& os, const FRegister& rhs) { - if (rhs >= F0 && rhs < kNumberOfFRegisters) { - os << "f" << static_cast(rhs); - } else { - os << "FRegister[" << static_cast(rhs) << "]"; - } - return os; -} - -std::ostream& operator<<(std::ostream& os, const DRegister& rhs) { - if (rhs >= D0 && rhs < kNumberOfDRegisters) { - os << "d" << static_cast(rhs); - } else { - os << "DRegister[" << static_cast(rhs) << "]"; - } - return os; -} - -void MipsAssembler::Emit(int32_t value) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - buffer_.Emit(value); -} - -void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) { - CHECK_NE(rs, kNoRegister); - CHECK_NE(rt, kNoRegister); - CHECK_NE(rd, kNoRegister); - int32_t encoding = opcode << kOpcodeShift | - static_cast(rs) << kRsShift | - static_cast(rt) << kRtShift | - static_cast(rd) << kRdShift | - shamt << kShamtShift | - funct; - Emit(encoding); -} - -void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) { - CHECK_NE(rs, kNoRegister); - CHECK_NE(rt, kNoRegister); - int32_t encoding = opcode << kOpcodeShift | - static_cast(rs) << kRsShift | - static_cast(rt) << kRtShift | - imm; - Emit(encoding); -} - -void MipsAssembler::EmitJ(int opcode, int address) { - int32_t encoding = 
opcode << kOpcodeShift | - address; - Emit(encoding); -} - -void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) { - CHECK_NE(ft, kNoFRegister); - CHECK_NE(fs, kNoFRegister); - CHECK_NE(fd, kNoFRegister); - int32_t encoding = opcode << kOpcodeShift | - fmt << kFmtShift | - static_cast(ft) << kFtShift | - static_cast(fs) << kFsShift | - static_cast(fd) << kFdShift | - funct; - Emit(encoding); -} - -void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) { - CHECK_NE(rt, kNoFRegister); - int32_t encoding = opcode << kOpcodeShift | - fmt << kFmtShift | - static_cast(rt) << kRtShift | - imm; - Emit(encoding); -} - -void MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) { - int offset; - if (label->IsBound()) { - offset = label->Position() - buffer_.Size(); - } else { - // Use the offset field of the branch instruction for linking the sites. - offset = label->position_; - label->LinkTo(buffer_.Size()); - } - if (equal) { - Beq(rt, rs, (offset >> 2) & kBranchOffsetMask); - } else { - Bne(rt, rs, (offset >> 2) & kBranchOffsetMask); - } -} - -void MipsAssembler::EmitJump(Label* label, bool link) { - int offset; - if (label->IsBound()) { - offset = label->Position() - buffer_.Size(); - } else { - // Use the offset field of the jump instruction for linking the sites. - offset = label->position_; - label->LinkTo(buffer_.Size()); - } - if (link) { - Jal((offset >> 2) & kJumpOffsetMask); - } else { - J((offset >> 2) & kJumpOffsetMask); - } -} - -int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) { - CHECK_ALIGNED(offset, 4); - CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset; - - // Properly preserve only the bits supported in the instruction. 
- offset >>= 2; - if (is_jump) { - offset &= kJumpOffsetMask; - return (inst & ~kJumpOffsetMask) | offset; - } else { - offset &= kBranchOffsetMask; - return (inst & ~kBranchOffsetMask) | offset; - } -} - -int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) { - // Sign-extend, then left-shift by 2. - if (is_jump) { - return (((inst & kJumpOffsetMask) << 6) >> 4); - } else { - return (((inst & kBranchOffsetMask) << 16) >> 14); - } -} - -void MipsAssembler::Bind(Label* label, bool is_jump) { - CHECK(!label->IsBound()); - int bound_pc = buffer_.Size(); - while (label->IsLinked()) { - int32_t position = label->Position(); - int32_t next = buffer_.Load(position); - int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4; - int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump); - buffer_.Store(position, encoded); - label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump); - } - label->BindTo(bound_pc); -} - -void MipsAssembler::Add(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x20); -} - -void MipsAssembler::Addu(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x21); -} - -void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) { - EmitI(0x8, rs, rt, imm16); -} - -void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) { - EmitI(0x9, rs, rt, imm16); -} - -void MipsAssembler::Sub(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x22); -} - -void MipsAssembler::Subu(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x23); -} - -void MipsAssembler::Mult(Register rs, Register rt) { - EmitR(0, rs, rt, static_cast(0), 0, 0x18); -} - -void MipsAssembler::Multu(Register rs, Register rt) { - EmitR(0, rs, rt, static_cast(0), 0, 0x19); -} - -void MipsAssembler::Div(Register rs, Register rt) { - EmitR(0, rs, rt, static_cast(0), 0, 0x1a); -} - -void MipsAssembler::Divu(Register rs, Register rt) { - EmitR(0, rs, rt, 
static_cast(0), 0, 0x1b); -} - -void MipsAssembler::And(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x24); -} - -void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) { - EmitI(0xc, rs, rt, imm16); -} - -void MipsAssembler::Or(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x25); -} - -void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) { - EmitI(0xd, rs, rt, imm16); -} - -void MipsAssembler::Xor(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x26); -} - -void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) { - EmitI(0xe, rs, rt, imm16); -} - -void MipsAssembler::Nor(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x27); -} - -void MipsAssembler::Sll(Register rd, Register rs, int shamt) { - EmitR(0, rs, static_cast(0), rd, shamt, 0x00); -} - -void MipsAssembler::Srl(Register rd, Register rs, int shamt) { - EmitR(0, rs, static_cast(0), rd, shamt, 0x02); -} - -void MipsAssembler::Sra(Register rd, Register rs, int shamt) { - EmitR(0, rs, static_cast(0), rd, shamt, 0x03); -} - -void MipsAssembler::Sllv(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x04); -} - -void MipsAssembler::Srlv(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x06); -} - -void MipsAssembler::Srav(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x07); -} - -void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) { - EmitI(0x20, rs, rt, imm16); -} - -void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) { - EmitI(0x21, rs, rt, imm16); -} - -void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) { - EmitI(0x23, rs, rt, imm16); -} - -void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) { - EmitI(0x24, rs, rt, imm16); -} - -void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) { - EmitI(0x25, rs, rt, imm16); -} - -void MipsAssembler::Lui(Register rt, uint16_t imm16) { 
- EmitI(0xf, static_cast(0), rt, imm16); -} - -void MipsAssembler::Mfhi(Register rd) { - EmitR(0, static_cast(0), static_cast(0), rd, 0, 0x10); -} - -void MipsAssembler::Mflo(Register rd) { - EmitR(0, static_cast(0), static_cast(0), rd, 0, 0x12); -} - -void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) { - EmitI(0x28, rs, rt, imm16); -} - -void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) { - EmitI(0x29, rs, rt, imm16); -} - -void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) { - EmitI(0x2b, rs, rt, imm16); -} - -void MipsAssembler::Slt(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x2a); -} - -void MipsAssembler::Sltu(Register rd, Register rs, Register rt) { - EmitR(0, rs, rt, rd, 0, 0x2b); -} - -void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) { - EmitI(0xa, rs, rt, imm16); -} - -void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) { - EmitI(0xb, rs, rt, imm16); -} - -void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) { - EmitI(0x4, rs, rt, imm16); - Nop(); -} - -void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) { - EmitI(0x5, rs, rt, imm16); - Nop(); -} - -void MipsAssembler::J(uint32_t address) { - EmitJ(0x2, address); - Nop(); -} - -void MipsAssembler::Jal(uint32_t address) { - EmitJ(0x2, address); - Nop(); -} - -void MipsAssembler::Jr(Register rs) { - EmitR(0, rs, static_cast(0), static_cast(0), 0, 0x08); - Nop(); -} - -void MipsAssembler::Jalr(Register rs) { - EmitR(0, rs, static_cast(0), RA, 0, 0x09); - Nop(); -} - -void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) { - EmitFR(0x11, 0x10, ft, fs, fd, 0x0); -} - -void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) { - EmitFR(0x11, 0x10, ft, fs, fd, 0x1); -} - -void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) { - EmitFR(0x11, 0x10, ft, fs, fd, 0x2); -} - -void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) { - 
EmitFR(0x11, 0x10, ft, fs, fd, 0x3); -} - -void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) { - EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), - static_cast(fd), 0x0); -} - -void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) { - EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), - static_cast(fd), 0x1); -} - -void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) { - EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), - static_cast(fd), 0x2); -} - -void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) { - EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), - static_cast(fd), 0x3); -} - -void MipsAssembler::MovS(FRegister fd, FRegister fs) { - EmitFR(0x11, 0x10, static_cast(0), fs, fd, 0x6); -} - -void MipsAssembler::MovD(DRegister fd, DRegister fs) { - EmitFR(0x11, 0x11, static_cast(0), static_cast(fs), - static_cast(fd), 0x6); -} - -void MipsAssembler::Mfc1(Register rt, FRegister fs) { - EmitFR(0x11, 0x00, static_cast(rt), fs, static_cast(0), 0x0); -} - -void MipsAssembler::Mtc1(FRegister ft, Register rs) { - EmitFR(0x11, 0x04, ft, static_cast(rs), static_cast(0), 0x0); -} - -void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) { - EmitI(0x31, rs, static_cast(ft), imm16); -} - -void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) { - EmitI(0x35, rs, static_cast(ft), imm16); -} - -void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) { - EmitI(0x39, rs, static_cast(ft), imm16); -} - -void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) { - EmitI(0x3d, rs, static_cast(ft), imm16); -} - -void MipsAssembler::Break() { - EmitR(0, static_cast(0), static_cast(0), - static_cast(0), 0, 0xD); -} - -void MipsAssembler::Nop() { - EmitR(0x0, static_cast(0), static_cast(0), static_cast(0), 0, 0x0); -} - -void MipsAssembler::Move(Register rt, Register rs) { - EmitI(0x8, rs, rt, 0); -} - -void MipsAssembler::Clear(Register rt) { - EmitR(0, 
static_cast(0), static_cast(0), rt, 0, 0x20); -} - -void MipsAssembler::Not(Register rt, Register rs) { - EmitR(0, static_cast(0), rs, rt, 0, 0x27); -} - -void MipsAssembler::Mul(Register rd, Register rs, Register rt) { - Mult(rs, rt); - Mflo(rd); -} - -void MipsAssembler::Div(Register rd, Register rs, Register rt) { - Div(rs, rt); - Mflo(rd); -} - -void MipsAssembler::Rem(Register rd, Register rs, Register rt) { - Div(rs, rt); - Mfhi(rd); -} - -void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) { - Addi(rt, rs, value); -} - -void MipsAssembler::LoadImmediate(Register rt, int32_t value) { - Addi(rt, ZERO, value); -} - -void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, - size_t size) { - MipsManagedRegister dst = m_dst.AsMips(); - if (dst.IsNoRegister()) { - CHECK_EQ(0u, size) << dst; - } else if (dst.IsCoreRegister()) { - CHECK_EQ(4u, size) << dst; - LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset); - } else if (dst.IsRegisterPair()) { - CHECK_EQ(8u, size) << dst; - LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset); - LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4); - } else if (dst.IsFRegister()) { - LoadSFromOffset(dst.AsFRegister(), src_register, src_offset); - } else { - CHECK(dst.IsDRegister()) << dst; - LoadDFromOffset(dst.AsDRegister(), src_register, src_offset); - } -} - -void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base, - int32_t offset) { - switch (type) { - case kLoadSignedByte: - Lb(reg, base, offset); - break; - case kLoadUnsignedByte: - Lbu(reg, base, offset); - break; - case kLoadSignedHalfword: - Lh(reg, base, offset); - break; - case kLoadUnsignedHalfword: - Lhu(reg, base, offset); - break; - case kLoadWord: - Lw(reg, base, offset); - break; - case kLoadWordPair: - LOG(FATAL) << "UNREACHABLE"; - break; - default: - LOG(FATAL) << "UNREACHABLE"; - } -} - -void 
MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) { - Lwc1(reg, base, offset); -} - -void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) { - Ldc1(reg, base, offset); -} - -void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base, - int32_t offset) { - switch (type) { - case kStoreByte: - Sb(reg, base, offset); - break; - case kStoreHalfword: - Sh(reg, base, offset); - break; - case kStoreWord: - Sw(reg, base, offset); - break; - case kStoreWordPair: - LOG(FATAL) << "UNREACHABLE"; - break; - default: - LOG(FATAL) << "UNREACHABLE"; - } -} - -void MipsAssembler::StoreFToOffset(FRegister reg, Register base, int32_t offset) { - Swc1(reg, base, offset); -} - -void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) { - Sdc1(reg, base, offset); -} - -void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, - const std::vector& callee_save_regs, - const std::vector& entry_spills) { - CHECK_ALIGNED(frame_size, kStackAlignment); - - // Increase frame to required size. - IncreaseFrameSize(frame_size); - - // Push callee saves and return address - int stack_offset = frame_size - kPointerSize; - StoreToOffset(kStoreWord, RA, SP, stack_offset); - for (int i = callee_save_regs.size() - 1; i >= 0; --i) { - stack_offset -= kPointerSize; - Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); - StoreToOffset(kStoreWord, reg, SP, stack_offset); - } - - // Write out Method*. - StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0); - - // Write out entry spills. 
- for (size_t i = 0; i < entry_spills.size(); ++i) { - Register reg = entry_spills.at(i).AsMips().AsCoreRegister(); - StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize)); - } -} - -void MipsAssembler::RemoveFrame(size_t frame_size, - const std::vector& callee_save_regs) { - CHECK_ALIGNED(frame_size, kStackAlignment); - - // Pop callee saves and return address - int stack_offset = frame_size - (callee_save_regs.size() * kPointerSize) - kPointerSize; - for (size_t i = 0; i < callee_save_regs.size(); ++i) { - Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); - LoadFromOffset(kLoadWord, reg, SP, stack_offset); - stack_offset += kPointerSize; - } - LoadFromOffset(kLoadWord, RA, SP, stack_offset); - - // Decrease frame to required size. - DecreaseFrameSize(frame_size); - - // Then jump to the return address. - Jr(RA); -} - -void MipsAssembler::IncreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - AddConstant(SP, SP, -adjust); -} - -void MipsAssembler::DecreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - AddConstant(SP, SP, adjust); -} - -void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) { - MipsManagedRegister src = msrc.AsMips(); - if (src.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (src.IsCoreRegister()) { - CHECK_EQ(4u, size); - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); - } else if (src.IsRegisterPair()) { - CHECK_EQ(8u, size); - StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value()); - StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), - SP, dest.Int32Value() + 4); - } else if (src.IsFRegister()) { - StoreFToOffset(src.AsFRegister(), SP, dest.Int32Value()); - } else { - CHECK(src.IsDRegister()); - StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value()); - } -} - -void MipsAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { - MipsManagedRegister src = msrc.AsMips(); - 
CHECK(src.IsCoreRegister()); - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); -} - -void MipsAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { - MipsManagedRegister src = msrc.AsMips(); - CHECK(src.IsCoreRegister()); - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); -} - -void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadImmediate(scratch.AsCoreRegister(), imm); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); -} - -void MipsAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm, - ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadImmediate(scratch.AsCoreRegister(), imm); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value()); -} - -void MipsAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), - S1, thr_offs.Int32Value()); -} - -void MipsAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) { - StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value()); -} - -void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc, - FrameOffset in_off, ManagedRegister mscratch) { - MipsManagedRegister src = msrc.AsMips(); - MipsManagedRegister scratch = mscratch.AsMips(); - StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); -} - -void 
MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { - return EmitLoad(mdest, SP, src.Int32Value(), size); -} - -void MipsAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) { - return EmitLoad(mdest, S1, src.Int32Value(), size); -} - -void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { - MipsManagedRegister dest = mdest.AsMips(); - CHECK(dest.IsCoreRegister()); - LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value()); -} - -void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs) { - MipsManagedRegister dest = mdest.AsMips(); - CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()); - LoadFromOffset(kLoadWord, dest.AsCoreRegister(), - base.AsMips().AsCoreRegister(), offs.Int32Value()); -} - -void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, - Offset offs) { - MipsManagedRegister dest = mdest.AsMips(); - CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()) << dest; - LoadFromOffset(kLoadWord, dest.AsCoreRegister(), - base.AsMips().AsCoreRegister(), offs.Int32Value()); -} - -void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, - ThreadOffset offs) { - MipsManagedRegister dest = mdest.AsMips(); - CHECK(dest.IsCoreRegister()); - LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value()); -} - -void MipsAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips"; -} - -void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips"; -} - -void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) { - MipsManagedRegister dest = mdest.AsMips(); - MipsManagedRegister src = msrc.AsMips(); - if (!dest.Equals(src)) { - if (dest.IsCoreRegister()) { - CHECK(src.IsCoreRegister()) << src; - Move(dest.AsCoreRegister(), src.AsCoreRegister()); - } 
else if (dest.IsFRegister()) { - CHECK(src.IsFRegister()) << src; - MovS(dest.AsFRegister(), src.AsFRegister()); - } else if (dest.IsDRegister()) { - CHECK(src.IsDRegister()) << src; - MovD(dest.AsDRegister(), src.AsDRegister()); - } else { - CHECK(dest.IsRegisterPair()) << dest; - CHECK(src.IsRegisterPair()) << src; - // Ensure that the first move doesn't clobber the input of the second - if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) { - Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); - Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); - } else { - Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); - Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); - } - } - } -} - -void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); -} - -void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, - ThreadOffset thr_offs, - ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - S1, thr_offs.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), - SP, fr_offs.Int32Value()); -} - -void MipsAssembler::CopyRawPtrToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - SP, fr_offs.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), - S1, thr_offs.Int32Value()); -} - -void MipsAssembler::Copy(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch, size_t size) { - MipsManagedRegister scratch = mscratch.AsMips(); 
- CHECK(scratch.IsCoreRegister()) << scratch; - CHECK(size == 4 || size == 8) << size; - if (size == 4) { - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); - } else if (size == 8) { - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); - } -} - -void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsMips().AsCoreRegister(); - CHECK_EQ(size, 4u); - LoadFromOffset(kLoadWord, scratch, src_base.AsMips().AsCoreRegister(), src_offset.Int32Value()); - StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value()); -} - -void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsMips().AsCoreRegister(); - CHECK_EQ(size, 4u); - LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value()); - StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value()); -} - -void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/, - ManagedRegister /*mscratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no arm implementation"; -#if 0 - Register scratch = mscratch.AsMips().AsCoreRegister(); - CHECK_EQ(size, 4u); - movl(scratch, Address(ESP, src_base)); - movl(scratch, Address(scratch, src_offset)); - movl(Address(ESP, dest), scratch); -#endif -} - -void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister mscratch, size_t size) { - CHECK_EQ(size, 4u); - Register scratch = 
mscratch.AsMips().AsCoreRegister(); - LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value()); - StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value()); -} - -void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/, - ManagedRegister /*mscratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no arm implementation"; -#if 0 - Register scratch = mscratch.AsMips().AsCoreRegister(); - CHECK_EQ(size, 4u); - CHECK_EQ(dest.Int32Value(), src.Int32Value()); - movl(scratch, Address(ESP, src)); - pushl(Address(scratch, src_offset)); - popl(Address(scratch, dest_offset)); -#endif -} - -void MipsAssembler::MemoryBarrier(ManagedRegister) { - UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED"; -#if 0 -#if ANDROID_SMP != 0 - mfence(); -#endif -#endif -} - -void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg, - FrameOffset sirt_offset, - ManagedRegister min_reg, bool null_allowed) { - MipsManagedRegister out_reg = mout_reg.AsMips(); - MipsManagedRegister in_reg = min_reg.AsMips(); - CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg; - CHECK(out_reg.IsCoreRegister()) << out_reg; - if (null_allowed) { - Label null_arg; - // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is - // the address in the SIRT holding the reference. - // e.g. out_reg = (handle == 0) ? 
0 : (SP+handle_offset) - if (in_reg.IsNoRegister()) { - LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), - SP, sirt_offset.Int32Value()); - in_reg = out_reg; - } - if (!out_reg.Equals(in_reg)) { - LoadImmediate(out_reg.AsCoreRegister(), 0); - } - EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); - AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); - Bind(&null_arg, false); - } else { - AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); - } -} - -void MipsAssembler::CreateSirtEntry(FrameOffset out_off, - FrameOffset sirt_offset, - ManagedRegister mscratch, - bool null_allowed) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - if (null_allowed) { - Label null_arg; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, - sirt_offset.Int32Value()); - // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is - // the address in the SIRT holding the reference. - // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset) - EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true); - AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); - Bind(&null_arg, false); - } else { - AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); - } - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value()); -} - -// Given a SIRT entry, load the associated reference. 
-void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg, - ManagedRegister min_reg) { - MipsManagedRegister out_reg = mout_reg.AsMips(); - MipsManagedRegister in_reg = min_reg.AsMips(); - CHECK(out_reg.IsCoreRegister()) << out_reg; - CHECK(in_reg.IsCoreRegister()) << in_reg; - Label null_arg; - if (!out_reg.Equals(in_reg)) { - LoadImmediate(out_reg.AsCoreRegister(), 0); - } - EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); - LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), - in_reg.AsCoreRegister(), 0); - Bind(&null_arg, false); -} - -void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) { - MipsManagedRegister base = mbase.AsMips(); - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(base.IsCoreRegister()) << base; - CHECK(scratch.IsCoreRegister()) << scratch; - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - base.AsCoreRegister(), offset.Int32Value()); - Jalr(scratch.AsCoreRegister()); - // TODO: place reference map on call -} - -void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; - // Call *(*(SP + base) + offset) - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - SP, base.Int32Value()); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - scratch.AsCoreRegister(), offset.Int32Value()); - Jalr(scratch.AsCoreRegister()); - // TODO: place reference map on call -} - -void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) { - UNIMPLEMENTED(FATAL) << "no arm implementation"; -#if 0 - fs()->call(Address::Absolute(offset)); -#endif -} - -void MipsAssembler::GetCurrentThread(ManagedRegister 
tr) { - Move(tr.AsMips().AsCoreRegister(), S1); -} - -void MipsAssembler::GetCurrentThread(FrameOffset offset, - ManagedRegister /*mscratch*/) { - StoreToOffset(kStoreWord, S1, SP, offset.Int32Value()); -} - -void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) { - MipsManagedRegister scratch = mscratch.AsMips(); - MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust); - buffer_.EnqueueSlowPath(slow); - LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), - S1, Thread::ExceptionOffset().Int32Value()); - EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false); -} - -void MipsExceptionSlowPath::Emit(Assembler* sasm) { - MipsAssembler* sp_asm = down_cast(sasm); -#define __ sp_asm-> - __ Bind(&entry_, false); - if (stack_adjust_ != 0) { // Fix up the frame. - __ DecreaseFrameSize(stack_adjust_); - } - // Pass exception object as argument - // Don't care about preserving A0 as this call won't return - __ Move(A0, scratch_.AsCoreRegister()); - // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException)); - __ Jr(T9); - // Call never returns - __ Break(); -#undef __ -} - -} // namespace mips -} // namespace art diff --git a/runtime/oat/utils/mips/assembler_mips.h b/runtime/oat/utils/mips/assembler_mips.h deleted file mode 100644 index 8f4a33a40e..0000000000 --- a/runtime/oat/utils/mips/assembler_mips.h +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_ -#define ART_RUNTIME_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_ - -#include - -#include "base/macros.h" -#include "constants_mips.h" -#include "globals.h" -#include "managed_register_mips.h" -#include "oat/utils/assembler.h" -#include "offsets.h" -#include "utils.h" - -namespace art { -namespace mips { -#if 0 -class Operand { - public: - uint8_t mod() const { - return (encoding_at(0) >> 6) & 3; - } - - Register rm() const { - return static_cast(encoding_at(0) & 7); - } - - ScaleFactor scale() const { - return static_cast((encoding_at(1) >> 6) & 3); - } - - Register index() const { - return static_cast((encoding_at(1) >> 3) & 7); - } - - Register base() const { - return static_cast(encoding_at(1) & 7); - } - - int8_t disp8() const { - CHECK_GE(length_, 2); - return static_cast(encoding_[length_ - 1]); - } - - int32_t disp32() const { - CHECK_GE(length_, 5); - int32_t value; - memcpy(&value, &encoding_[length_ - 4], sizeof(value)); - return value; - } - - bool IsRegister(Register reg) const { - return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only. - && ((encoding_[0] & 0x07) == reg); // Register codes match. - } - - protected: - // Operand can be sub classed (e.g: Address). 
- Operand() : length_(0) { } - - void SetModRM(int mod, Register rm) { - CHECK_EQ(mod & ~3, 0); - encoding_[0] = (mod << 6) | rm; - length_ = 1; - } - - void SetSIB(ScaleFactor scale, Register index, Register base) { - CHECK_EQ(length_, 1); - CHECK_EQ(scale & ~3, 0); - encoding_[1] = (scale << 6) | (index << 3) | base; - length_ = 2; - } - - void SetDisp8(int8_t disp) { - CHECK(length_ == 1 || length_ == 2); - encoding_[length_++] = static_cast(disp); - } - - void SetDisp32(int32_t disp) { - CHECK(length_ == 1 || length_ == 2); - int disp_size = sizeof(disp); - memmove(&encoding_[length_], &disp, disp_size); - length_ += disp_size; - } - - private: - byte length_; - byte encoding_[6]; - byte padding_; - - explicit Operand(Register reg) { SetModRM(3, reg); } - - // Get the operand encoding byte at the given index. - uint8_t encoding_at(int index) const { - CHECK_GE(index, 0); - CHECK_LT(index, length_); - return encoding_[index]; - } - - friend class MipsAssembler; - - DISALLOW_COPY_AND_ASSIGN(Operand); -}; - - -class Address : public Operand { - public: - Address(Register base, int32_t disp) { - Init(base, disp); - } - - Address(Register base, Offset disp) { - Init(base, disp.Int32Value()); - } - - Address(Register base, FrameOffset disp) { - CHECK_EQ(base, ESP); - Init(ESP, disp.Int32Value()); - } - - Address(Register base, MemberOffset disp) { - Init(base, disp.Int32Value()); - } - - void Init(Register base, int32_t disp) { - if (disp == 0 && base != EBP) { - SetModRM(0, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); - } else if (disp >= -128 && disp <= 127) { - SetModRM(1, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); - SetDisp8(disp); - } else { - SetModRM(2, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); - SetDisp32(disp); - } - } - - - Address(Register index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index, ESP); // Illegal addressing mode. 
- SetModRM(0, ESP); - SetSIB(scale, index, EBP); - SetDisp32(disp); - } - - Address(Register base, Register index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index, ESP); // Illegal addressing mode. - if (disp == 0 && base != EBP) { - SetModRM(0, ESP); - SetSIB(scale, index, base); - } else if (disp >= -128 && disp <= 127) { - SetModRM(1, ESP); - SetSIB(scale, index, base); - SetDisp8(disp); - } else { - SetModRM(2, ESP); - SetSIB(scale, index, base); - SetDisp32(disp); - } - } - - static Address Absolute(uword addr) { - Address result; - result.SetModRM(0, EBP); - result.SetDisp32(addr); - return result; - } - - static Address Absolute(ThreadOffset addr) { - return Absolute(addr.Int32Value()); - } - - private: - Address() {} - - DISALLOW_COPY_AND_ASSIGN(Address); -}; - -#endif - -enum LoadOperandType { - kLoadSignedByte, - kLoadUnsignedByte, - kLoadSignedHalfword, - kLoadUnsignedHalfword, - kLoadWord, - kLoadWordPair, - kLoadSWord, - kLoadDWord -}; - -enum StoreOperandType { - kStoreByte, - kStoreHalfword, - kStoreWord, - kStoreWordPair, - kStoreSWord, - kStoreDWord -}; - -class MipsAssembler : public Assembler { - public: - MipsAssembler() {} - virtual ~MipsAssembler() {} - - // Emit Machine Instructions. 
- void Add(Register rd, Register rs, Register rt); - void Addu(Register rd, Register rs, Register rt); - void Addi(Register rt, Register rs, uint16_t imm16); - void Addiu(Register rt, Register rs, uint16_t imm16); - void Sub(Register rd, Register rs, Register rt); - void Subu(Register rd, Register rs, Register rt); - void Mult(Register rs, Register rt); - void Multu(Register rs, Register rt); - void Div(Register rs, Register rt); - void Divu(Register rs, Register rt); - - void And(Register rd, Register rs, Register rt); - void Andi(Register rt, Register rs, uint16_t imm16); - void Or(Register rd, Register rs, Register rt); - void Ori(Register rt, Register rs, uint16_t imm16); - void Xor(Register rd, Register rs, Register rt); - void Xori(Register rt, Register rs, uint16_t imm16); - void Nor(Register rd, Register rs, Register rt); - - void Sll(Register rd, Register rs, int shamt); - void Srl(Register rd, Register rs, int shamt); - void Sra(Register rd, Register rs, int shamt); - void Sllv(Register rd, Register rs, Register rt); - void Srlv(Register rd, Register rs, Register rt); - void Srav(Register rd, Register rs, Register rt); - - void Lb(Register rt, Register rs, uint16_t imm16); - void Lh(Register rt, Register rs, uint16_t imm16); - void Lw(Register rt, Register rs, uint16_t imm16); - void Lbu(Register rt, Register rs, uint16_t imm16); - void Lhu(Register rt, Register rs, uint16_t imm16); - void Lui(Register rt, uint16_t imm16); - void Mfhi(Register rd); - void Mflo(Register rd); - - void Sb(Register rt, Register rs, uint16_t imm16); - void Sh(Register rt, Register rs, uint16_t imm16); - void Sw(Register rt, Register rs, uint16_t imm16); - - void Slt(Register rd, Register rs, Register rt); - void Sltu(Register rd, Register rs, Register rt); - void Slti(Register rt, Register rs, uint16_t imm16); - void Sltiu(Register rt, Register rs, uint16_t imm16); - - void Beq(Register rt, Register rs, uint16_t imm16); - void Bne(Register rt, Register rs, uint16_t imm16); - 
void J(uint32_t address); - void Jal(uint32_t address); - void Jr(Register rs); - void Jalr(Register rs); - - void AddS(FRegister fd, FRegister fs, FRegister ft); - void SubS(FRegister fd, FRegister fs, FRegister ft); - void MulS(FRegister fd, FRegister fs, FRegister ft); - void DivS(FRegister fd, FRegister fs, FRegister ft); - void AddD(DRegister fd, DRegister fs, DRegister ft); - void SubD(DRegister fd, DRegister fs, DRegister ft); - void MulD(DRegister fd, DRegister fs, DRegister ft); - void DivD(DRegister fd, DRegister fs, DRegister ft); - void MovS(FRegister fd, FRegister fs); - void MovD(DRegister fd, DRegister fs); - - void Mfc1(Register rt, FRegister fs); - void Mtc1(FRegister ft, Register rs); - void Lwc1(FRegister ft, Register rs, uint16_t imm16); - void Ldc1(DRegister ft, Register rs, uint16_t imm16); - void Swc1(FRegister ft, Register rs, uint16_t imm16); - void Sdc1(DRegister ft, Register rs, uint16_t imm16); - - void Break(); - void Nop(); - void Move(Register rt, Register rs); - void Clear(Register rt); - void Not(Register rt, Register rs); - void Mul(Register rd, Register rs, Register rt); - void Div(Register rd, Register rs, Register rt); - void Rem(Register rd, Register rs, Register rt); - - void AddConstant(Register rt, Register rs, int32_t value); - void LoadImmediate(Register rt, int32_t value); - - void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size); - void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset); - void LoadSFromOffset(FRegister reg, Register base, int32_t offset); - void LoadDFromOffset(DRegister reg, Register base, int32_t offset); - void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset); - void StoreFToOffset(FRegister reg, Register base, int32_t offset); - void StoreDToOffset(DRegister reg, Register base, int32_t offset); - -#if 0 - MipsAssembler* lock(); - - void mfence(); - - MipsAssembler* fs(); - - // - // Macros for 
High-level operations. - // - - void AddImmediate(Register reg, const Immediate& imm); - - void LoadDoubleConstant(XmmRegister dst, double value); - - void DoubleNegate(XmmRegister d); - void FloatNegate(XmmRegister f); - - void DoubleAbs(XmmRegister reg); - - void LockCmpxchgl(const Address& address, Register reg) { - lock()->cmpxchgl(address, reg); - } - - // - // Misc. functionality - // - int PreferredLoopAlignment() { return 16; } - void Align(int alignment, int offset); - - // Debugging and bringup support. - void Stop(const char* message); -#endif - - // Emit data (e.g. encoded instruction or immediate) to the instruction stream. - void Emit(int32_t value); - void EmitBranch(Register rt, Register rs, Label* label, bool equal); - void EmitJump(Label* label, bool link); - void Bind(Label* label, bool is_jump); - - // - // Overridden common assembler high-level functionality - // - - // Emit code that will create an activation on the stack - virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, - const std::vector& callee_save_regs, - const std::vector& entry_spills); - - // Emit code that will remove an activation from the stack - virtual void RemoveFrame(size_t frame_size, - const std::vector& callee_save_regs); - - virtual void IncreaseFrameSize(size_t adjust); - virtual void DecreaseFrameSize(size_t adjust); - - // Store routines - virtual void Store(FrameOffset offs, ManagedRegister msrc, size_t size); - virtual void StoreRef(FrameOffset dest, ManagedRegister msrc); - virtual void StoreRawPtr(FrameOffset dest, ManagedRegister msrc); - - virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister mscratch); - - virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, - ManagedRegister mscratch); - - virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch); - - virtual void StoreStackPointerToThread(ThreadOffset thr_offs); - - virtual void 
StoreSpanning(FrameOffset dest, ManagedRegister msrc, - FrameOffset in_off, ManagedRegister mscratch); - - // Load routines - virtual void Load(ManagedRegister mdest, FrameOffset src, size_t size); - - virtual void Load(ManagedRegister mdest, ThreadOffset src, size_t size); - - virtual void LoadRef(ManagedRegister dest, FrameOffset src); - - virtual void LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs); - - virtual void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, - Offset offs); - - virtual void LoadRawPtrFromThread(ManagedRegister mdest, - ThreadOffset offs); - - // Copying routines - virtual void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size); - - virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, - ManagedRegister mscratch); - - virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, - ManagedRegister mscratch); - - virtual void CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch); - - virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size); - - virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister mscratch, size_t size); - - virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister mscratch, size_t size); - - virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, - ManagedRegister mscratch, size_t size); - - virtual void Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister mscratch, size_t size); - - virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister mscratch, size_t size); - - virtual void MemoryBarrier(ManagedRegister); - - // Sign extension - virtual void SignExtend(ManagedRegister mreg, size_t size); - - // Zero extension - virtual void ZeroExtend(ManagedRegister mreg, size_t size); - - 
// Exploit fast access in managed code to Thread::Current() - virtual void GetCurrentThread(ManagedRegister tr); - virtual void GetCurrentThread(FrameOffset dest_offset, - ManagedRegister mscratch); - - // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. in_reg holds a possibly stale reference - // that can be used to avoid loading the SIRT entry to see if the value is - // NULL. - virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, - ManagedRegister in_reg, bool null_allowed); - - // Set up out_off to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. - virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, - ManagedRegister mscratch, bool null_allowed); - - // src holds a SIRT entry (Object**) load this into dst - virtual void LoadReferenceFromSirt(ManagedRegister dst, - ManagedRegister src); - - // Heap::VerifyObject on src. In some cases (such as a reference to this) we - // know that src may not be null. - virtual void VerifyObject(ManagedRegister src, bool could_be_null); - virtual void VerifyObject(FrameOffset src, bool could_be_null); - - // Call to address held at [base+offset] - virtual void Call(ManagedRegister base, Offset offset, - ManagedRegister mscratch); - virtual void Call(FrameOffset base, Offset offset, - ManagedRegister mscratch); - virtual void Call(ThreadOffset offset, ManagedRegister mscratch); - - // Generate code to check if Thread::Current()->exception_ is non-null - // and branch to a ExceptionSlowPath if it is. 
- virtual void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust); - - private: - void EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct); - void EmitI(int opcode, Register rs, Register rt, uint16_t imm); - void EmitJ(int opcode, int address); - void EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct); - void EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm); - - int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump); - int DecodeBranchOffset(int32_t inst, bool is_jump); - - DISALLOW_COPY_AND_ASSIGN(MipsAssembler); -}; - -// Slowpath entered when Thread::Current()->_exception is non-null -class MipsExceptionSlowPath : public SlowPath { - public: - explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust) - : scratch_(scratch), stack_adjust_(stack_adjust) {} - virtual void Emit(Assembler *sp_asm); - private: - const MipsManagedRegister scratch_; - const size_t stack_adjust_; -}; - -} // namespace mips -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_ diff --git a/runtime/oat/utils/mips/managed_register_mips.cc b/runtime/oat/utils/mips/managed_register_mips.cc deleted file mode 100644 index 195dafb0a1..0000000000 --- a/runtime/oat/utils/mips/managed_register_mips.cc +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "managed_register_mips.h" - -#include "globals.h" - -namespace art { -namespace mips { - -// These core registers are never available for allocation. -static const Register kReservedCoreRegistersArray[] = { S0, S1 }; - -// We need all registers for caching. -static const int kNumberOfAvailableCoreRegisters = (S7 - T0) + 1; -static const int kNumberOfAvailableFRegisters = kNumberOfFRegisters; -static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters; -static const int kNumberOfAvailableOverlappingDRegisters = - kNumberOfOverlappingDRegisters; -static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs; - -bool MipsManagedRegister::Overlaps(const MipsManagedRegister& other) const { - if (IsNoRegister() || other.IsNoRegister()) return false; - CHECK(IsValidManagedRegister()); - CHECK(other.IsValidManagedRegister()); - if (Equals(other)) return true; - if (IsRegisterPair()) { - Register low = AsRegisterPairLow(); - Register high = AsRegisterPairHigh(); - return MipsManagedRegister::FromCoreRegister(low).Overlaps(other) || - MipsManagedRegister::FromCoreRegister(high).Overlaps(other); - } - if (IsOverlappingDRegister()) { - if (other.IsDRegister()) return Equals(other); - if (other.IsFRegister()) { - FRegister low = AsOverlappingDRegisterLow(); - FRegister high = AsOverlappingDRegisterHigh(); - FRegister other_freg = other.AsFRegister(); - return (low == other_freg) || (high == other_freg); - } - return false; - } - if (other.IsRegisterPair() || other.IsOverlappingDRegister()) { - return other.Overlaps(*this); - } - return false; -} - - -int MipsManagedRegister::AllocIdLow() const { - CHECK(IsOverlappingDRegister() || IsRegisterPair()); - const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfFRegIds); - int low; - if (r < kNumberOfOverlappingDRegIds) { - CHECK(IsOverlappingDRegister()); - low = (r * 2) + kNumberOfCoreRegIds; // Return an FRegister. 
- } else { - CHECK(IsRegisterPair()); - low = (r - kNumberOfDRegIds) * 2 + 2; // Return a Register. - if (low >= 24) { - // we got a pair higher than S6_S7, must be the dalvik special case - low = 5; - } - } - return low; -} - - -int MipsManagedRegister::AllocIdHigh() const { - return AllocIdLow() + 1; -} - - -void MipsManagedRegister::Print(std::ostream& os) const { - if (!IsValidManagedRegister()) { - os << "No Register"; - } else if (IsCoreRegister()) { - os << "Core: " << static_cast(AsCoreRegister()); - } else if (IsRegisterPair()) { - os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh(); - } else if (IsFRegister()) { - os << "FRegister: " << static_cast(AsFRegister()); - } else if (IsDRegister()) { - os << "DRegister: " << static_cast(AsDRegister()); - } else { - os << "??: " << RegId(); - } -} - -std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg) { - reg.Print(os); - return os; -} - -std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) { - os << MipsManagedRegister::FromRegisterPair(reg); - return os; -} - -} // namespace mips -} // namespace art diff --git a/runtime/oat/utils/mips/managed_register_mips.h b/runtime/oat/utils/mips/managed_register_mips.h deleted file mode 100644 index b335ff9649..0000000000 --- a/runtime/oat/utils/mips/managed_register_mips.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_ -#define ART_RUNTIME_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_ - -#include "constants_mips.h" -#include "oat/utils/managed_register.h" - -namespace art { -namespace mips { - -// Values for register pairs. -enum RegisterPair { - V0_V1 = 0, - A0_A1 = 1, - A2_A3 = 2, - T0_T1 = 3, - T2_T3 = 4, - T4_T5 = 5, - T6_T7 = 6, - S0_S1 = 7, - S2_S3 = 8, - S4_S5 = 9, - S6_S7 = 10, - A1_A2 = 11, // Dalvik style passing - kNumberOfRegisterPairs = 12, - kNoRegisterPair = -1, -}; - -std::ostream& operator<<(std::ostream& os, const RegisterPair& reg); - -const int kNumberOfCoreRegIds = kNumberOfCoreRegisters; -const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters; - -const int kNumberOfFRegIds = kNumberOfFRegisters; -const int kNumberOfFAllocIds = kNumberOfFRegisters; - -const int kNumberOfDRegIds = kNumberOfDRegisters; -const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters; -const int kNumberOfDAllocIds = kNumberOfDRegisters; - -const int kNumberOfPairRegIds = kNumberOfRegisterPairs; - -const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfFRegIds + - kNumberOfDRegIds + kNumberOfPairRegIds; -const int kNumberOfAllocIds = - kNumberOfCoreAllocIds + kNumberOfFAllocIds + kNumberOfDAllocIds; - -// Register ids map: -// [0..R[ core registers (enum Register) -// [R..F[ single precision FP registers (enum FRegister) -// [F..D[ double precision FP registers (enum DRegister) -// [D..P[ core register pairs (enum RegisterPair) -// where -// R = kNumberOfCoreRegIds -// F = R + kNumberOfFRegIds -// D = F + kNumberOfDRegIds -// P = D + kNumberOfRegisterPairs - -// Allocation ids map: -// [0..R[ core registers (enum Register) -// [R..F[ single precision FP registers (enum FRegister) -// where -// R = kNumberOfCoreRegIds -// F = R + kNumberOfFRegIds - - -// An instance of class 'ManagedRegister' represents a single core register (enum -// Register), a single precision FP register (enum FRegister), a double 
precision -// FP register (enum DRegister), or a pair of core registers (enum RegisterPair). -// 'ManagedRegister::NoRegister()' provides an invalid register. -// There is a one-to-one mapping between ManagedRegister and register id. -class MipsManagedRegister : public ManagedRegister { - public: - Register AsCoreRegister() const { - CHECK(IsCoreRegister()); - return static_cast(id_); - } - - FRegister AsFRegister() const { - CHECK(IsFRegister()); - return static_cast(id_ - kNumberOfCoreRegIds); - } - - DRegister AsDRegister() const { - CHECK(IsDRegister()); - return static_cast(id_ - kNumberOfCoreRegIds - kNumberOfFRegIds); - } - - FRegister AsOverlappingDRegisterLow() const { - CHECK(IsOverlappingDRegister()); - DRegister d_reg = AsDRegister(); - return static_cast(d_reg * 2); - } - - FRegister AsOverlappingDRegisterHigh() const { - CHECK(IsOverlappingDRegister()); - DRegister d_reg = AsDRegister(); - return static_cast(d_reg * 2 + 1); - } - - Register AsRegisterPairLow() const { - CHECK(IsRegisterPair()); - // Appropriate mapping of register ids allows to use AllocIdLow(). - return FromRegId(AllocIdLow()).AsCoreRegister(); - } - - Register AsRegisterPairHigh() const { - CHECK(IsRegisterPair()); - // Appropriate mapping of register ids allows to use AllocIdHigh(). - return FromRegId(AllocIdHigh()).AsCoreRegister(); - } - - bool IsCoreRegister() const { - CHECK(IsValidManagedRegister()); - return (0 <= id_) && (id_ < kNumberOfCoreRegIds); - } - - bool IsFRegister() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - kNumberOfCoreRegIds; - return (0 <= test) && (test < kNumberOfFRegIds); - } - - bool IsDRegister() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds); - return (0 <= test) && (test < kNumberOfDRegIds); - } - - // Returns true if this DRegister overlaps FRegisters. 
- bool IsOverlappingDRegister() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds); - return (0 <= test) && (test < kNumberOfOverlappingDRegIds); - } - - bool IsRegisterPair() const { - CHECK(IsValidManagedRegister()); - const int test = - id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds); - return (0 <= test) && (test < kNumberOfPairRegIds); - } - - void Print(std::ostream& os) const; - - // Returns true if the two managed-registers ('this' and 'other') overlap. - // Either managed-register may be the NoRegister. If both are the NoRegister - // then false is returned. - bool Overlaps(const MipsManagedRegister& other) const; - - static MipsManagedRegister FromCoreRegister(Register r) { - CHECK_NE(r, kNoRegister); - return FromRegId(r); - } - - static MipsManagedRegister FromFRegister(FRegister r) { - CHECK_NE(r, kNoFRegister); - return FromRegId(r + kNumberOfCoreRegIds); - } - - static MipsManagedRegister FromDRegister(DRegister r) { - CHECK_NE(r, kNoDRegister); - return FromRegId(r + kNumberOfCoreRegIds + kNumberOfFRegIds); - } - - static MipsManagedRegister FromRegisterPair(RegisterPair r) { - CHECK_NE(r, kNoRegisterPair); - return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds)); - } - - private: - bool IsValidManagedRegister() const { - return (0 <= id_) && (id_ < kNumberOfRegIds); - } - - int RegId() const { - CHECK(!IsNoRegister()); - return id_; - } - - int AllocId() const { - CHECK(IsValidManagedRegister() && !IsOverlappingDRegister() && !IsRegisterPair()); - CHECK_LT(id_, kNumberOfAllocIds); - return id_; - } - - int AllocIdLow() const; - int AllocIdHigh() const; - - friend class ManagedRegister; - - explicit MipsManagedRegister(int reg_id) : ManagedRegister(reg_id) {} - - static MipsManagedRegister FromRegId(int reg_id) { - MipsManagedRegister reg(reg_id); - CHECK(reg.IsValidManagedRegister()); - return reg; - } -}; - -std::ostream& operator<<(std::ostream& 
os, const MipsManagedRegister& reg); - -} // namespace mips - -inline mips::MipsManagedRegister ManagedRegister::AsMips() const { - mips::MipsManagedRegister reg(id_); - CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); - return reg; -} - -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_ diff --git a/runtime/oat/utils/x86/assembler_x86.cc b/runtime/oat/utils/x86/assembler_x86.cc deleted file mode 100644 index fd8f152c54..0000000000 --- a/runtime/oat/utils/x86/assembler_x86.cc +++ /dev/null @@ -1,1859 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "assembler_x86.h" - -#include "base/casts.h" -#include "memory_region.h" -#include "oat/runtime/oat_support_entrypoints.h" -#include "thread.h" - -namespace art { -namespace x86 { - -class DirectCallRelocation : public AssemblerFixup { - public: - void Process(const MemoryRegion& region, int position) { - // Direct calls are relative to the following instruction on x86. 
- int32_t pointer = region.Load(position); - int32_t start = reinterpret_cast(region.start()); - int32_t delta = start + position + sizeof(int32_t); - region.Store(position, pointer - delta); - } -}; - -static const char* kRegisterNames[] = { - "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", -}; -std::ostream& operator<<(std::ostream& os, const Register& rhs) { - if (rhs >= EAX && rhs <= EDI) { - os << kRegisterNames[rhs]; - } else { - os << "Register[" << static_cast(rhs) << "]"; - } - return os; -} - -std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) { - return os << "XMM" << static_cast(reg); -} - -std::ostream& operator<<(std::ostream& os, const X87Register& reg) { - return os << "ST" << static_cast(reg); -} - -void X86Assembler::call(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xFF); - EmitRegisterOperand(2, reg); -} - - -void X86Assembler::call(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xFF); - EmitOperand(2, address); -} - - -void X86Assembler::call(Label* label) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xE8); - static const int kSize = 5; - EmitLabel(label, kSize); -} - - -void X86Assembler::pushl(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x50 + reg); -} - - -void X86Assembler::pushl(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xFF); - EmitOperand(6, address); -} - - -void X86Assembler::pushl(const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - if (imm.is_int8()) { - EmitUint8(0x6A); - EmitUint8(imm.value() & 0xFF); - } else { - EmitUint8(0x68); - EmitImmediate(imm); - } -} - - -void X86Assembler::popl(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x58 + reg); -} - - -void X86Assembler::popl(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - 
EmitUint8(0x8F); - EmitOperand(0, address); -} - - -void X86Assembler::movl(Register dst, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xB8 + dst); - EmitImmediate(imm); -} - - -void X86Assembler::movl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x89); - EmitRegisterOperand(src, dst); -} - - -void X86Assembler::movl(Register dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x8B); - EmitOperand(dst, src); -} - - -void X86Assembler::movl(const Address& dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x89); - EmitOperand(src, dst); -} - - -void X86Assembler::movl(const Address& dst, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xC7); - EmitOperand(0, dst); - EmitImmediate(imm); -} - -void X86Assembler::movl(const Address& dst, Label* lbl) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xC7); - EmitOperand(0, dst); - EmitLabel(lbl, dst.length_ + 5); -} - -void X86Assembler::movzxb(Register dst, ByteRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xB6); - EmitRegisterOperand(dst, src); -} - - -void X86Assembler::movzxb(Register dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xB6); - EmitOperand(dst, src); -} - - -void X86Assembler::movsxb(Register dst, ByteRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xBE); - EmitRegisterOperand(dst, src); -} - - -void X86Assembler::movsxb(Register dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xBE); - EmitOperand(dst, src); -} - - -void X86Assembler::movb(Register /*dst*/, const Address& /*src*/) { - LOG(FATAL) << "Use movzxb or movsxb instead."; -} - - -void 
X86Assembler::movb(const Address& dst, ByteRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x88); - EmitOperand(src, dst); -} - - -void X86Assembler::movb(const Address& dst, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xC6); - EmitOperand(EAX, dst); - CHECK(imm.is_int8()); - EmitUint8(imm.value() & 0xFF); -} - - -void X86Assembler::movzxw(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xB7); - EmitRegisterOperand(dst, src); -} - - -void X86Assembler::movzxw(Register dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xB7); - EmitOperand(dst, src); -} - - -void X86Assembler::movsxw(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xBF); - EmitRegisterOperand(dst, src); -} - - -void X86Assembler::movsxw(Register dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xBF); - EmitOperand(dst, src); -} - - -void X86Assembler::movw(Register /*dst*/, const Address& /*src*/) { - LOG(FATAL) << "Use movzxw or movsxw instead."; -} - - -void X86Assembler::movw(const Address& dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitOperandSizeOverride(); - EmitUint8(0x89); - EmitOperand(src, dst); -} - - -void X86Assembler::leal(Register dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x8D); - EmitOperand(dst, src); -} - - -void X86Assembler::cmovl(Condition condition, Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0x40 + condition); - EmitRegisterOperand(dst, src); -} - - -void X86Assembler::setb(Condition condition, Register dst) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0x90 + 
condition); - EmitOperand(0, Operand(dst)); -} - - -void X86Assembler::movss(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x10); - EmitOperand(dst, src); -} - - -void X86Assembler::movss(const Address& dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x11); - EmitOperand(src, dst); -} - - -void X86Assembler::movss(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x11); - EmitXmmRegisterOperand(src, dst); -} - - -void X86Assembler::movd(XmmRegister dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x66); - EmitUint8(0x0F); - EmitUint8(0x6E); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::movd(Register dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x66); - EmitUint8(0x0F); - EmitUint8(0x7E); - EmitOperand(src, Operand(dst)); -} - - -void X86Assembler::addss(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x58); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::addss(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x58); - EmitOperand(dst, src); -} - - -void X86Assembler::subss(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x5C); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::subss(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x5C); - EmitOperand(dst, src); -} - - -void X86Assembler::mulss(XmmRegister dst, XmmRegister 
src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x59); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::mulss(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x59); - EmitOperand(dst, src); -} - - -void X86Assembler::divss(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x5E); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::divss(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x5E); - EmitOperand(dst, src); -} - - -void X86Assembler::flds(const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitOperand(0, src); -} - - -void X86Assembler::fstps(const Address& dst) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitOperand(3, dst); -} - - -void X86Assembler::movsd(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x10); - EmitOperand(dst, src); -} - - -void X86Assembler::movsd(const Address& dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x11); - EmitOperand(src, dst); -} - - -void X86Assembler::movsd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x11); - EmitXmmRegisterOperand(src, dst); -} - - -void X86Assembler::addsd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x58); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::addsd(XmmRegister dst, const Address& src) { - 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x58); - EmitOperand(dst, src); -} - - -void X86Assembler::subsd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x5C); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::subsd(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x5C); - EmitOperand(dst, src); -} - - -void X86Assembler::mulsd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x59); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::mulsd(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x59); - EmitOperand(dst, src); -} - - -void X86Assembler::divsd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x5E); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::divsd(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x5E); - EmitOperand(dst, src); -} - - -void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x2A); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::cvtsi2sd(XmmRegister dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x2A); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::cvtss2si(Register dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - 
EmitUint8(0x2D); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x5A); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::cvtsd2si(Register dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x2D); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::cvttss2si(Register dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x2C); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::cvttsd2si(Register dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x2C); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x5A); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0xE6); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::comiss(XmmRegister a, XmmRegister b) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0x2F); - EmitXmmRegisterOperand(a, b); -} - - -void X86Assembler::comisd(XmmRegister a, XmmRegister b) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x66); - EmitUint8(0x0F); - EmitUint8(0x2F); - EmitXmmRegisterOperand(a, b); -} - - -void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF2); - EmitUint8(0x0F); - EmitUint8(0x51); - EmitXmmRegisterOperand(dst, src); -} - - -void 
X86Assembler::sqrtss(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF3); - EmitUint8(0x0F); - EmitUint8(0x51); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::xorpd(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x66); - EmitUint8(0x0F); - EmitUint8(0x57); - EmitOperand(dst, src); -} - - -void X86Assembler::xorpd(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x66); - EmitUint8(0x0F); - EmitUint8(0x57); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::xorps(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0x57); - EmitOperand(dst, src); -} - - -void X86Assembler::xorps(XmmRegister dst, XmmRegister src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0x57); - EmitXmmRegisterOperand(dst, src); -} - - -void X86Assembler::andpd(XmmRegister dst, const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x66); - EmitUint8(0x0F); - EmitUint8(0x54); - EmitOperand(dst, src); -} - - -void X86Assembler::fldl(const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xDD); - EmitOperand(0, src); -} - - -void X86Assembler::fstpl(const Address& dst) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xDD); - EmitOperand(3, dst); -} - - -void X86Assembler::fnstcw(const Address& dst) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitOperand(7, dst); -} - - -void X86Assembler::fldcw(const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitOperand(5, src); -} - - -void X86Assembler::fistpl(const Address& dst) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xDF); - EmitOperand(7, dst); -} - - -void 
X86Assembler::fistps(const Address& dst) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xDB); - EmitOperand(3, dst); -} - - -void X86Assembler::fildl(const Address& src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xDF); - EmitOperand(5, src); -} - - -void X86Assembler::fincstp() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitUint8(0xF7); -} - - -void X86Assembler::ffree(const Immediate& index) { - CHECK_LT(index.value(), 7); - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xDD); - EmitUint8(0xC0 + index.value()); -} - - -void X86Assembler::fsin() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitUint8(0xFE); -} - - -void X86Assembler::fcos() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitUint8(0xFF); -} - - -void X86Assembler::fptan() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xD9); - EmitUint8(0xF2); -} - - -void X86Assembler::xchgl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x87); - EmitRegisterOperand(dst, src); -} - -void X86Assembler::xchgl(Register reg, const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x87); - EmitOperand(reg, address); -} - - -void X86Assembler::cmpl(Register reg, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(7, Operand(reg), imm); -} - - -void X86Assembler::cmpl(Register reg0, Register reg1) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x3B); - EmitOperand(reg0, Operand(reg1)); -} - - -void X86Assembler::cmpl(Register reg, const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x3B); - EmitOperand(reg, address); -} - - -void X86Assembler::addl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x03); - 
EmitRegisterOperand(dst, src); -} - - -void X86Assembler::addl(Register reg, const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x03); - EmitOperand(reg, address); -} - - -void X86Assembler::cmpl(const Address& address, Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x39); - EmitOperand(reg, address); -} - - -void X86Assembler::cmpl(const Address& address, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(7, address, imm); -} - - -void X86Assembler::testl(Register reg1, Register reg2) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x85); - EmitRegisterOperand(reg1, reg2); -} - - -void X86Assembler::testl(Register reg, const Immediate& immediate) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - // For registers that have a byte variant (EAX, EBX, ECX, and EDX) - // we only test the byte register to keep the encoding short. - if (immediate.is_uint8() && reg < 4) { - // Use zero-extended 8-bit immediate. - if (reg == EAX) { - EmitUint8(0xA8); - } else { - EmitUint8(0xF6); - EmitUint8(0xC0 + reg); - } - EmitUint8(immediate.value() & 0xFF); - } else if (reg == EAX) { - // Use short form if the destination is EAX. 
- EmitUint8(0xA9); - EmitImmediate(immediate); - } else { - EmitUint8(0xF7); - EmitOperand(0, Operand(reg)); - EmitImmediate(immediate); - } -} - - -void X86Assembler::andl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x23); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::andl(Register dst, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(4, Operand(dst), imm); -} - - -void X86Assembler::orl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0B); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::orl(Register dst, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(1, Operand(dst), imm); -} - - -void X86Assembler::xorl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x33); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::addl(Register reg, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(0, Operand(reg), imm); -} - - -void X86Assembler::addl(const Address& address, Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x01); - EmitOperand(reg, address); -} - - -void X86Assembler::addl(const Address& address, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(0, address, imm); -} - - -void X86Assembler::adcl(Register reg, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(2, Operand(reg), imm); -} - - -void X86Assembler::adcl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x13); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::adcl(Register dst, const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x13); - EmitOperand(dst, address); -} - - -void 
X86Assembler::subl(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x2B); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::subl(Register reg, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(5, Operand(reg), imm); -} - - -void X86Assembler::subl(Register reg, const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x2B); - EmitOperand(reg, address); -} - - -void X86Assembler::cdq() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x99); -} - - -void X86Assembler::idivl(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF7); - EmitUint8(0xF8 | reg); -} - - -void X86Assembler::imull(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xAF); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::imull(Register reg, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x69); - EmitOperand(reg, Operand(reg)); - EmitImmediate(imm); -} - - -void X86Assembler::imull(Register reg, const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xAF); - EmitOperand(reg, address); -} - - -void X86Assembler::imull(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF7); - EmitOperand(5, Operand(reg)); -} - - -void X86Assembler::imull(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF7); - EmitOperand(5, address); -} - - -void X86Assembler::mull(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF7); - EmitOperand(4, Operand(reg)); -} - - -void X86Assembler::mull(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF7); - EmitOperand(4, address); -} - - -void X86Assembler::sbbl(Register dst, Register 
src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x1B); - EmitOperand(dst, Operand(src)); -} - - -void X86Assembler::sbbl(Register reg, const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitComplex(3, Operand(reg), imm); -} - - -void X86Assembler::sbbl(Register dst, const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x1B); - EmitOperand(dst, address); -} - - -void X86Assembler::incl(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x40 + reg); -} - - -void X86Assembler::incl(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xFF); - EmitOperand(0, address); -} - - -void X86Assembler::decl(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x48 + reg); -} - - -void X86Assembler::decl(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xFF); - EmitOperand(1, address); -} - - -void X86Assembler::shll(Register reg, const Immediate& imm) { - EmitGenericShift(4, reg, imm); -} - - -void X86Assembler::shll(Register operand, Register shifter) { - EmitGenericShift(4, operand, shifter); -} - - -void X86Assembler::shrl(Register reg, const Immediate& imm) { - EmitGenericShift(5, reg, imm); -} - - -void X86Assembler::shrl(Register operand, Register shifter) { - EmitGenericShift(5, operand, shifter); -} - - -void X86Assembler::sarl(Register reg, const Immediate& imm) { - EmitGenericShift(7, reg, imm); -} - - -void X86Assembler::sarl(Register operand, Register shifter) { - EmitGenericShift(7, operand, shifter); -} - - -void X86Assembler::shld(Register dst, Register src) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xA5); - EmitRegisterOperand(src, dst); -} - - -void X86Assembler::negl(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF7); - EmitOperand(3, Operand(reg)); -} - - 
-void X86Assembler::notl(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF7); - EmitUint8(0xD0 | reg); -} - - -void X86Assembler::enter(const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xC8); - CHECK(imm.is_uint16()); - EmitUint8(imm.value() & 0xFF); - EmitUint8((imm.value() >> 8) & 0xFF); - EmitUint8(0x00); -} - - -void X86Assembler::leave() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xC9); -} - - -void X86Assembler::ret() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xC3); -} - - -void X86Assembler::ret(const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xC2); - CHECK(imm.is_uint16()); - EmitUint8(imm.value() & 0xFF); - EmitUint8((imm.value() >> 8) & 0xFF); -} - - - -void X86Assembler::nop() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x90); -} - - -void X86Assembler::int3() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xCC); -} - - -void X86Assembler::hlt() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF4); -} - - -void X86Assembler::j(Condition condition, Label* label) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - if (label->IsBound()) { - static const int kShortSize = 2; - static const int kLongSize = 6; - int offset = label->Position() - buffer_.Size(); - CHECK_LE(offset, 0); - if (IsInt(8, offset - kShortSize)) { - EmitUint8(0x70 + condition); - EmitUint8((offset - kShortSize) & 0xFF); - } else { - EmitUint8(0x0F); - EmitUint8(0x80 + condition); - EmitInt32(offset - kLongSize); - } - } else { - EmitUint8(0x0F); - EmitUint8(0x80 + condition); - EmitLabelLink(label); - } -} - - -void X86Assembler::jmp(Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xFF); - EmitRegisterOperand(4, reg); -} - -void X86Assembler::jmp(const Address& address) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - 
EmitUint8(0xFF); - EmitOperand(4, address); -} - -void X86Assembler::jmp(Label* label) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - if (label->IsBound()) { - static const int kShortSize = 2; - static const int kLongSize = 5; - int offset = label->Position() - buffer_.Size(); - CHECK_LE(offset, 0); - if (IsInt(8, offset - kShortSize)) { - EmitUint8(0xEB); - EmitUint8((offset - kShortSize) & 0xFF); - } else { - EmitUint8(0xE9); - EmitInt32(offset - kLongSize); - } - } else { - EmitUint8(0xE9); - EmitLabelLink(label); - } -} - - -X86Assembler* X86Assembler::lock() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0xF0); - return this; -} - - -void X86Assembler::cmpxchgl(const Address& address, Register reg) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xB1); - EmitOperand(reg, address); -} - -void X86Assembler::mfence() { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x0F); - EmitUint8(0xAE); - EmitUint8(0xF0); -} - -X86Assembler* X86Assembler::fs() { - // TODO: fs is a prefix and not an instruction - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x64); - return this; -} - -void X86Assembler::AddImmediate(Register reg, const Immediate& imm) { - int value = imm.value(); - if (value > 0) { - if (value == 1) { - incl(reg); - } else if (value != 0) { - addl(reg, imm); - } - } else if (value < 0) { - value = -value; - if (value == 1) { - decl(reg); - } else if (value != 0) { - subl(reg, Immediate(value)); - } - } -} - - -void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) { - // TODO: Need to have a code constants table. 
- int64_t constant = bit_cast(value); - pushl(Immediate(High32Bits(constant))); - pushl(Immediate(Low32Bits(constant))); - movsd(dst, Address(ESP, 0)); - addl(ESP, Immediate(2 * kWordSize)); -} - - -void X86Assembler::FloatNegate(XmmRegister f) { - static const struct { - uint32_t a; - uint32_t b; - uint32_t c; - uint32_t d; - } float_negate_constant __attribute__((aligned(16))) = - { 0x80000000, 0x00000000, 0x80000000, 0x00000000 }; - xorps(f, Address::Absolute(reinterpret_cast(&float_negate_constant))); -} - - -void X86Assembler::DoubleNegate(XmmRegister d) { - static const struct { - uint64_t a; - uint64_t b; - } double_negate_constant __attribute__((aligned(16))) = - {0x8000000000000000LL, 0x8000000000000000LL}; - xorpd(d, Address::Absolute(reinterpret_cast(&double_negate_constant))); -} - - -void X86Assembler::DoubleAbs(XmmRegister reg) { - static const struct { - uint64_t a; - uint64_t b; - } double_abs_constant __attribute__((aligned(16))) = - {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL}; - andpd(reg, Address::Absolute(reinterpret_cast(&double_abs_constant))); -} - - -void X86Assembler::Align(int alignment, int offset) { - CHECK(IsPowerOfTwo(alignment)); - // Emit nop instruction until the real position is aligned. - while (((offset + buffer_.GetPosition()) & (alignment-1)) != 0) { - nop(); - } -} - - -void X86Assembler::Bind(Label* label) { - int bound = buffer_.Size(); - CHECK(!label->IsBound()); // Labels can only be bound once. - while (label->IsLinked()) { - int position = label->LinkPosition(); - int next = buffer_.Load(position); - buffer_.Store(position, bound - (position + 4)); - label->position_ = next; - } - label->BindTo(bound); -} - - -void X86Assembler::Stop(const char* message) { - // Emit the message address as immediate operand in the test rax instruction, - // followed by the int3 instruction. - // Execution can be resumed with the 'cont' command in gdb. 
- testl(EAX, Immediate(reinterpret_cast(message))); - int3(); -} - - -void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) { - CHECK_GE(reg_or_opcode, 0); - CHECK_LT(reg_or_opcode, 8); - const int length = operand.length_; - CHECK_GT(length, 0); - // Emit the ModRM byte updated with the given reg value. - CHECK_EQ(operand.encoding_[0] & 0x38, 0); - EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3)); - // Emit the rest of the encoded operand. - for (int i = 1; i < length; i++) { - EmitUint8(operand.encoding_[i]); - } -} - - -void X86Assembler::EmitImmediate(const Immediate& imm) { - EmitInt32(imm.value()); -} - - -void X86Assembler::EmitComplex(int reg_or_opcode, - const Operand& operand, - const Immediate& immediate) { - CHECK_GE(reg_or_opcode, 0); - CHECK_LT(reg_or_opcode, 8); - if (immediate.is_int8()) { - // Use sign-extended 8-bit immediate. - EmitUint8(0x83); - EmitOperand(reg_or_opcode, operand); - EmitUint8(immediate.value() & 0xFF); - } else if (operand.IsRegister(EAX)) { - // Use short form if the destination is eax. 
- EmitUint8(0x05 + (reg_or_opcode << 3)); - EmitImmediate(immediate); - } else { - EmitUint8(0x81); - EmitOperand(reg_or_opcode, operand); - EmitImmediate(immediate); - } -} - - -void X86Assembler::EmitLabel(Label* label, int instruction_size) { - if (label->IsBound()) { - int offset = label->Position() - buffer_.Size(); - CHECK_LE(offset, 0); - EmitInt32(offset - instruction_size); - } else { - EmitLabelLink(label); - } -} - - -void X86Assembler::EmitLabelLink(Label* label) { - CHECK(!label->IsBound()); - int position = buffer_.Size(); - EmitInt32(label->position_); - label->LinkTo(position); -} - - -void X86Assembler::EmitGenericShift(int reg_or_opcode, - Register reg, - const Immediate& imm) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - CHECK(imm.is_int8()); - if (imm.value() == 1) { - EmitUint8(0xD1); - EmitOperand(reg_or_opcode, Operand(reg)); - } else { - EmitUint8(0xC1); - EmitOperand(reg_or_opcode, Operand(reg)); - EmitUint8(imm.value() & 0xFF); - } -} - - -void X86Assembler::EmitGenericShift(int reg_or_opcode, - Register operand, - Register shifter) { - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - CHECK_EQ(shifter, ECX); - EmitUint8(0xD3); - EmitOperand(reg_or_opcode, Operand(operand)); -} - -void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, - const std::vector& spill_regs, - const std::vector& entry_spills) { - CHECK_ALIGNED(frame_size, kStackAlignment); - for (int i = spill_regs.size() - 1; i >= 0; --i) { - pushl(spill_regs.at(i).AsX86().AsCpuRegister()); - } - // return address then method on stack - addl(ESP, Immediate(-frame_size + (spill_regs.size() * kPointerSize) + - kPointerSize /*method*/ + kPointerSize /*return address*/)); - pushl(method_reg.AsX86().AsCpuRegister()); - for (size_t i = 0; i < entry_spills.size(); ++i) { - movl(Address(ESP, frame_size + kPointerSize + (i * kPointerSize)), - entry_spills.at(i).AsX86().AsCpuRegister()); - } -} - -void X86Assembler::RemoveFrame(size_t frame_size, - const 
std::vector& spill_regs) { - CHECK_ALIGNED(frame_size, kStackAlignment); - addl(ESP, Immediate(frame_size - (spill_regs.size() * kPointerSize) - kPointerSize)); - for (size_t i = 0; i < spill_regs.size(); ++i) { - popl(spill_regs.at(i).AsX86().AsCpuRegister()); - } - ret(); -} - -void X86Assembler::IncreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - addl(ESP, Immediate(-adjust)); -} - -void X86Assembler::DecreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - addl(ESP, Immediate(adjust)); -} - -void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) { - X86ManagedRegister src = msrc.AsX86(); - if (src.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (src.IsCpuRegister()) { - CHECK_EQ(4u, size); - movl(Address(ESP, offs), src.AsCpuRegister()); - } else if (src.IsRegisterPair()) { - CHECK_EQ(8u, size); - movl(Address(ESP, offs), src.AsRegisterPairLow()); - movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), - src.AsRegisterPairHigh()); - } else if (src.IsX87Register()) { - if (size == 4) { - fstps(Address(ESP, offs)); - } else { - fstpl(Address(ESP, offs)); - } - } else { - CHECK(src.IsXmmRegister()); - if (size == 4) { - movss(Address(ESP, offs), src.AsXmmRegister()); - } else { - movsd(Address(ESP, offs), src.AsXmmRegister()); - } - } -} - -void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { - X86ManagedRegister src = msrc.AsX86(); - CHECK(src.IsCpuRegister()); - movl(Address(ESP, dest), src.AsCpuRegister()); -} - -void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { - X86ManagedRegister src = msrc.AsX86(); - CHECK(src.IsCpuRegister()); - movl(Address(ESP, dest), src.AsCpuRegister()); -} - -void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister) { - movl(Address(ESP, dest), Immediate(imm)); -} - -void X86Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm, - ManagedRegister) { - 
fs()->movl(Address::Absolute(dest), Immediate(imm)); -} - -void X86Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - leal(scratch.AsCpuRegister(), Address(ESP, fr_offs)); - fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); -} - -void X86Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) { - fs()->movl(Address::Absolute(thr_offs), ESP); -} - -void X86Assembler::StoreLabelToThread(ThreadOffset thr_offs, Label* lbl) { - fs()->movl(Address::Absolute(thr_offs), lbl); -} - -void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/, - FrameOffset /*in_off*/, ManagedRegister /*scratch*/) { - UNIMPLEMENTED(FATAL); // this case only currently exists for ARM -} - -void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { - X86ManagedRegister dest = mdest.AsX86(); - if (dest.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (dest.IsCpuRegister()) { - CHECK_EQ(4u, size); - movl(dest.AsCpuRegister(), Address(ESP, src)); - } else if (dest.IsRegisterPair()) { - CHECK_EQ(8u, size); - movl(dest.AsRegisterPairLow(), Address(ESP, src)); - movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4))); - } else if (dest.IsX87Register()) { - if (size == 4) { - flds(Address(ESP, src)); - } else { - fldl(Address(ESP, src)); - } - } else { - CHECK(dest.IsXmmRegister()); - if (size == 4) { - movss(dest.AsXmmRegister(), Address(ESP, src)); - } else { - movsd(dest.AsXmmRegister(), Address(ESP, src)); - } - } -} - -void X86Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) { - X86ManagedRegister dest = mdest.AsX86(); - if (dest.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (dest.IsCpuRegister()) { - CHECK_EQ(4u, size); - fs()->movl(dest.AsCpuRegister(), Address::Absolute(src)); - } else if (dest.IsRegisterPair()) { - CHECK_EQ(8u, size); - 
fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src)); - fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset(src.Int32Value()+4))); - } else if (dest.IsX87Register()) { - if (size == 4) { - fs()->flds(Address::Absolute(src)); - } else { - fs()->fldl(Address::Absolute(src)); - } - } else { - CHECK(dest.IsXmmRegister()); - if (size == 4) { - fs()->movss(dest.AsXmmRegister(), Address::Absolute(src)); - } else { - fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src)); - } - } -} - -void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister()); - movl(dest.AsCpuRegister(), Address(ESP, src)); -} - -void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); - movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); -} - -void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, - Offset offs) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); - movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); -} - -void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest, - ThreadOffset offs) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister()); - fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs)); -} - -void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) { - X86ManagedRegister reg = mreg.AsX86(); - CHECK(size == 1 || size == 2) << size; - CHECK(reg.IsCpuRegister()) << reg; - if (size == 1) { - movsxb(reg.AsCpuRegister(), reg.AsByteRegister()); - } else { - movsxw(reg.AsCpuRegister(), reg.AsCpuRegister()); - } -} - -void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) { - X86ManagedRegister reg = mreg.AsX86(); - CHECK(size == 1 || size == 2) << size; - CHECK(reg.IsCpuRegister()) << reg; - 
if (size == 1) { - movzxb(reg.AsCpuRegister(), reg.AsByteRegister()); - } else { - movzxw(reg.AsCpuRegister(), reg.AsCpuRegister()); - } -} - -void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) { - X86ManagedRegister dest = mdest.AsX86(); - X86ManagedRegister src = msrc.AsX86(); - if (!dest.Equals(src)) { - if (dest.IsCpuRegister() && src.IsCpuRegister()) { - movl(dest.AsCpuRegister(), src.AsCpuRegister()); - } else if (src.IsX87Register() && dest.IsXmmRegister()) { - // Pass via stack and pop X87 register - subl(ESP, Immediate(16)); - if (size == 4) { - CHECK_EQ(src.AsX87Register(), ST0); - fstps(Address(ESP, 0)); - movss(dest.AsXmmRegister(), Address(ESP, 0)); - } else { - CHECK_EQ(src.AsX87Register(), ST0); - fstpl(Address(ESP, 0)); - movsd(dest.AsXmmRegister(), Address(ESP, 0)); - } - addl(ESP, Immediate(16)); - } else { - // TODO: x87, SSE - UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src; - } - } -} - -void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - movl(scratch.AsCpuRegister(), Address(ESP, src)); - movl(Address(ESP, dest), scratch.AsCpuRegister()); -} - -void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, - ThreadOffset thr_offs, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs)); - Store(fr_offs, scratch, 4); -} - -void X86Assembler::CopyRawPtrToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - Load(scratch, fr_offs, 4); - fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); -} - -void X86Assembler::Copy(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch, - size_t size) { - X86ManagedRegister scratch = 
mscratch.AsX86(); - if (scratch.IsCpuRegister() && size == 8) { - Load(scratch, src, 4); - Store(dest, scratch, 4); - Load(scratch, FrameOffset(src.Int32Value() + 4), 4); - Store(FrameOffset(dest.Int32Value() + 4), scratch, 4); - } else { - Load(scratch, src, size); - Store(dest, scratch, size); - } -} - -void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/, - ManagedRegister /*scratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL); -} - -void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister scratch, size_t size) { - CHECK(scratch.IsNoRegister()); - CHECK_EQ(size, 4u); - pushl(Address(ESP, src)); - popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset)); -} - -void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - CHECK_EQ(size, 4u); - movl(scratch, Address(ESP, src_base)); - movl(scratch, Address(scratch, src_offset)); - movl(Address(ESP, dest), scratch); -} - -void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size) { - CHECK_EQ(size, 4u); - CHECK(scratch.IsNoRegister()); - pushl(Address(src.AsX86().AsCpuRegister(), src_offset)); - popl(Address(dest.AsX86().AsCpuRegister(), dest_offset)); -} - -void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - CHECK_EQ(size, 4u); - CHECK_EQ(dest.Int32Value(), src.Int32Value()); - movl(scratch, Address(ESP, src)); - pushl(Address(scratch, src_offset)); - popl(Address(scratch, dest_offset)); -} - -void X86Assembler::MemoryBarrier(ManagedRegister) { -#if ANDROID_SMP != 0 - mfence(); -#endif -} - -void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg, - FrameOffset 
sirt_offset, - ManagedRegister min_reg, bool null_allowed) { - X86ManagedRegister out_reg = mout_reg.AsX86(); - X86ManagedRegister in_reg = min_reg.AsX86(); - CHECK(in_reg.IsCpuRegister()); - CHECK(out_reg.IsCpuRegister()); - VerifyObject(in_reg, null_allowed); - if (null_allowed) { - Label null_arg; - if (!out_reg.Equals(in_reg)) { - xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); - } - testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister()); - j(kZero, &null_arg); - leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset)); - Bind(&null_arg); - } else { - leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset)); - } -} - -void X86Assembler::CreateSirtEntry(FrameOffset out_off, - FrameOffset sirt_offset, - ManagedRegister mscratch, - bool null_allowed) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - if (null_allowed) { - Label null_arg; - movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset)); - testl(scratch.AsCpuRegister(), scratch.AsCpuRegister()); - j(kZero, &null_arg); - leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset)); - Bind(&null_arg); - } else { - leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset)); - } - Store(out_off, scratch, 4); -} - -// Given a SIRT entry, load the associated reference. 
-void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg, - ManagedRegister min_reg) { - X86ManagedRegister out_reg = mout_reg.AsX86(); - X86ManagedRegister in_reg = min_reg.AsX86(); - CHECK(out_reg.IsCpuRegister()); - CHECK(in_reg.IsCpuRegister()); - Label null_arg; - if (!out_reg.Equals(in_reg)) { - xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); - } - testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister()); - j(kZero, &null_arg); - movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0)); - Bind(&null_arg); -} - -void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) { - X86ManagedRegister base = mbase.AsX86(); - CHECK(base.IsCpuRegister()); - call(Address(base.AsCpuRegister(), offset.Int32Value())); - // TODO: place reference map on call -} - -void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - movl(scratch, Address(ESP, base)); - call(Address(scratch, offset)); -} - -void X86Assembler::Call(ThreadOffset offset, ManagedRegister /*mscratch*/) { - fs()->call(Address::Absolute(offset)); -} - -void X86Assembler::GetCurrentThread(ManagedRegister tr) { - fs()->movl(tr.AsX86().AsCpuRegister(), - Address::Absolute(Thread::SelfOffset())); -} - -void X86Assembler::GetCurrentThread(FrameOffset offset, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset())); - movl(Address(ESP, offset), scratch.AsCpuRegister()); -} - -void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { - X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust); - 
buffer_.EnqueueSlowPath(slow); - fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0)); - j(kNotEqual, slow->Entry()); -} - -void X86ExceptionSlowPath::Emit(Assembler *sasm) { - X86Assembler* sp_asm = down_cast(sasm); -#define __ sp_asm-> - __ Bind(&entry_); - // Note: the return value is dead - if (stack_adjust_ != 0) { // Fix up the frame. - __ DecreaseFrameSize(stack_adjust_); - } - // Pass exception as argument in EAX - __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset())); - __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException))); - // this call should never return - __ int3(); -#undef __ -} - -} // namespace x86 -} // namespace art diff --git a/runtime/oat/utils/x86/assembler_x86.h b/runtime/oat/utils/x86/assembler_x86.h deleted file mode 100644 index e0fbe0e7a3..0000000000 --- a/runtime/oat/utils/x86/assembler_x86.h +++ /dev/null @@ -1,646 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_OAT_UTILS_X86_ASSEMBLER_X86_H_ -#define ART_RUNTIME_OAT_UTILS_X86_ASSEMBLER_X86_H_ - -#include -#include "base/macros.h" -#include "constants_x86.h" -#include "globals.h" -#include "managed_register_x86.h" -#include "oat/utils/assembler.h" -#include "offsets.h" -#include "utils.h" - -namespace art { -namespace x86 { - -class Immediate { - public: - explicit Immediate(int32_t value) : value_(value) {} - - int32_t value() const { return value_; } - - bool is_int8() const { return IsInt(8, value_); } - bool is_uint8() const { return IsUint(8, value_); } - bool is_uint16() const { return IsUint(16, value_); } - - private: - const int32_t value_; - - DISALLOW_COPY_AND_ASSIGN(Immediate); -}; - - -class Operand { - public: - uint8_t mod() const { - return (encoding_at(0) >> 6) & 3; - } - - Register rm() const { - return static_cast(encoding_at(0) & 7); - } - - ScaleFactor scale() const { - return static_cast((encoding_at(1) >> 6) & 3); - } - - Register index() const { - return static_cast((encoding_at(1) >> 3) & 7); - } - - Register base() const { - return static_cast(encoding_at(1) & 7); - } - - int8_t disp8() const { - CHECK_GE(length_, 2); - return static_cast(encoding_[length_ - 1]); - } - - int32_t disp32() const { - CHECK_GE(length_, 5); - int32_t value; - memcpy(&value, &encoding_[length_ - 4], sizeof(value)); - return value; - } - - bool IsRegister(Register reg) const { - return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only. - && ((encoding_[0] & 0x07) == reg); // Register codes match. - } - - protected: - // Operand can be sub classed (e.g: Address). 
- Operand() : length_(0) { } - - void SetModRM(int mod, Register rm) { - CHECK_EQ(mod & ~3, 0); - encoding_[0] = (mod << 6) | rm; - length_ = 1; - } - - void SetSIB(ScaleFactor scale, Register index, Register base) { - CHECK_EQ(length_, 1); - CHECK_EQ(scale & ~3, 0); - encoding_[1] = (scale << 6) | (index << 3) | base; - length_ = 2; - } - - void SetDisp8(int8_t disp) { - CHECK(length_ == 1 || length_ == 2); - encoding_[length_++] = static_cast(disp); - } - - void SetDisp32(int32_t disp) { - CHECK(length_ == 1 || length_ == 2); - int disp_size = sizeof(disp); - memmove(&encoding_[length_], &disp, disp_size); - length_ += disp_size; - } - - private: - byte length_; - byte encoding_[6]; - byte padding_; - - explicit Operand(Register reg) { SetModRM(3, reg); } - - // Get the operand encoding byte at the given index. - uint8_t encoding_at(int index) const { - CHECK_GE(index, 0); - CHECK_LT(index, length_); - return encoding_[index]; - } - - friend class X86Assembler; - - DISALLOW_COPY_AND_ASSIGN(Operand); -}; - - -class Address : public Operand { - public: - Address(Register base, int32_t disp) { - Init(base, disp); - } - - Address(Register base, Offset disp) { - Init(base, disp.Int32Value()); - } - - Address(Register base, FrameOffset disp) { - CHECK_EQ(base, ESP); - Init(ESP, disp.Int32Value()); - } - - Address(Register base, MemberOffset disp) { - Init(base, disp.Int32Value()); - } - - void Init(Register base, int32_t disp) { - if (disp == 0 && base != EBP) { - SetModRM(0, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); - } else if (disp >= -128 && disp <= 127) { - SetModRM(1, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); - SetDisp8(disp); - } else { - SetModRM(2, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); - SetDisp32(disp); - } - } - - - Address(Register index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index, ESP); // Illegal addressing mode. 
- SetModRM(0, ESP); - SetSIB(scale, index, EBP); - SetDisp32(disp); - } - - Address(Register base, Register index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index, ESP); // Illegal addressing mode. - if (disp == 0 && base != EBP) { - SetModRM(0, ESP); - SetSIB(scale, index, base); - } else if (disp >= -128 && disp <= 127) { - SetModRM(1, ESP); - SetSIB(scale, index, base); - SetDisp8(disp); - } else { - SetModRM(2, ESP); - SetSIB(scale, index, base); - SetDisp32(disp); - } - } - - static Address Absolute(uword addr) { - Address result; - result.SetModRM(0, EBP); - result.SetDisp32(addr); - return result; - } - - static Address Absolute(ThreadOffset addr) { - return Absolute(addr.Int32Value()); - } - - private: - Address() {} - - DISALLOW_COPY_AND_ASSIGN(Address); -}; - - -class X86Assembler : public Assembler { - public: - X86Assembler() {} - virtual ~X86Assembler() {} - - /* - * Emit Machine Instructions. - */ - void call(Register reg); - void call(const Address& address); - void call(Label* label); - - void pushl(Register reg); - void pushl(const Address& address); - void pushl(const Immediate& imm); - - void popl(Register reg); - void popl(const Address& address); - - void movl(Register dst, const Immediate& src); - void movl(Register dst, Register src); - - void movl(Register dst, const Address& src); - void movl(const Address& dst, Register src); - void movl(const Address& dst, const Immediate& imm); - void movl(const Address& dst, Label* lbl); - - void movzxb(Register dst, ByteRegister src); - void movzxb(Register dst, const Address& src); - void movsxb(Register dst, ByteRegister src); - void movsxb(Register dst, const Address& src); - void movb(Register dst, const Address& src); - void movb(const Address& dst, ByteRegister src); - void movb(const Address& dst, const Immediate& imm); - - void movzxw(Register dst, Register src); - void movzxw(Register dst, const Address& src); - void movsxw(Register dst, Register src); - void movsxw(Register dst, const 
Address& src); - void movw(Register dst, const Address& src); - void movw(const Address& dst, Register src); - - void leal(Register dst, const Address& src); - - void cmovl(Condition condition, Register dst, Register src); - - void setb(Condition condition, Register dst); - - void movss(XmmRegister dst, const Address& src); - void movss(const Address& dst, XmmRegister src); - void movss(XmmRegister dst, XmmRegister src); - - void movd(XmmRegister dst, Register src); - void movd(Register dst, XmmRegister src); - - void addss(XmmRegister dst, XmmRegister src); - void addss(XmmRegister dst, const Address& src); - void subss(XmmRegister dst, XmmRegister src); - void subss(XmmRegister dst, const Address& src); - void mulss(XmmRegister dst, XmmRegister src); - void mulss(XmmRegister dst, const Address& src); - void divss(XmmRegister dst, XmmRegister src); - void divss(XmmRegister dst, const Address& src); - - void movsd(XmmRegister dst, const Address& src); - void movsd(const Address& dst, XmmRegister src); - void movsd(XmmRegister dst, XmmRegister src); - - void addsd(XmmRegister dst, XmmRegister src); - void addsd(XmmRegister dst, const Address& src); - void subsd(XmmRegister dst, XmmRegister src); - void subsd(XmmRegister dst, const Address& src); - void mulsd(XmmRegister dst, XmmRegister src); - void mulsd(XmmRegister dst, const Address& src); - void divsd(XmmRegister dst, XmmRegister src); - void divsd(XmmRegister dst, const Address& src); - - void cvtsi2ss(XmmRegister dst, Register src); - void cvtsi2sd(XmmRegister dst, Register src); - - void cvtss2si(Register dst, XmmRegister src); - void cvtss2sd(XmmRegister dst, XmmRegister src); - - void cvtsd2si(Register dst, XmmRegister src); - void cvtsd2ss(XmmRegister dst, XmmRegister src); - - void cvttss2si(Register dst, XmmRegister src); - void cvttsd2si(Register dst, XmmRegister src); - - void cvtdq2pd(XmmRegister dst, XmmRegister src); - - void comiss(XmmRegister a, XmmRegister b); - void comisd(XmmRegister a, 
XmmRegister b); - - void sqrtsd(XmmRegister dst, XmmRegister src); - void sqrtss(XmmRegister dst, XmmRegister src); - - void xorpd(XmmRegister dst, const Address& src); - void xorpd(XmmRegister dst, XmmRegister src); - void xorps(XmmRegister dst, const Address& src); - void xorps(XmmRegister dst, XmmRegister src); - - void andpd(XmmRegister dst, const Address& src); - - void flds(const Address& src); - void fstps(const Address& dst); - - void fldl(const Address& src); - void fstpl(const Address& dst); - - void fnstcw(const Address& dst); - void fldcw(const Address& src); - - void fistpl(const Address& dst); - void fistps(const Address& dst); - void fildl(const Address& src); - - void fincstp(); - void ffree(const Immediate& index); - - void fsin(); - void fcos(); - void fptan(); - - void xchgl(Register dst, Register src); - void xchgl(Register reg, const Address& address); - - void cmpl(Register reg, const Immediate& imm); - void cmpl(Register reg0, Register reg1); - void cmpl(Register reg, const Address& address); - - void cmpl(const Address& address, Register reg); - void cmpl(const Address& address, const Immediate& imm); - - void testl(Register reg1, Register reg2); - void testl(Register reg, const Immediate& imm); - - void andl(Register dst, const Immediate& imm); - void andl(Register dst, Register src); - - void orl(Register dst, const Immediate& imm); - void orl(Register dst, Register src); - - void xorl(Register dst, Register src); - - void addl(Register dst, Register src); - void addl(Register reg, const Immediate& imm); - void addl(Register reg, const Address& address); - - void addl(const Address& address, Register reg); - void addl(const Address& address, const Immediate& imm); - - void adcl(Register dst, Register src); - void adcl(Register reg, const Immediate& imm); - void adcl(Register dst, const Address& address); - - void subl(Register dst, Register src); - void subl(Register reg, const Immediate& imm); - void subl(Register reg, const Address& 
address); - - void cdq(); - - void idivl(Register reg); - - void imull(Register dst, Register src); - void imull(Register reg, const Immediate& imm); - void imull(Register reg, const Address& address); - - void imull(Register reg); - void imull(const Address& address); - - void mull(Register reg); - void mull(const Address& address); - - void sbbl(Register dst, Register src); - void sbbl(Register reg, const Immediate& imm); - void sbbl(Register reg, const Address& address); - - void incl(Register reg); - void incl(const Address& address); - - void decl(Register reg); - void decl(const Address& address); - - void shll(Register reg, const Immediate& imm); - void shll(Register operand, Register shifter); - void shrl(Register reg, const Immediate& imm); - void shrl(Register operand, Register shifter); - void sarl(Register reg, const Immediate& imm); - void sarl(Register operand, Register shifter); - void shld(Register dst, Register src); - - void negl(Register reg); - void notl(Register reg); - - void enter(const Immediate& imm); - void leave(); - - void ret(); - void ret(const Immediate& imm); - - void nop(); - void int3(); - void hlt(); - - void j(Condition condition, Label* label); - - void jmp(Register reg); - void jmp(const Address& address); - void jmp(Label* label); - - X86Assembler* lock(); - void cmpxchgl(const Address& address, Register reg); - - void mfence(); - - X86Assembler* fs(); - - // - // Macros for High-level operations. - // - - void AddImmediate(Register reg, const Immediate& imm); - - void LoadDoubleConstant(XmmRegister dst, double value); - - void DoubleNegate(XmmRegister d); - void FloatNegate(XmmRegister f); - - void DoubleAbs(XmmRegister reg); - - void LockCmpxchgl(const Address& address, Register reg) { - lock()->cmpxchgl(address, reg); - } - - // - // Misc. functionality - // - int PreferredLoopAlignment() { return 16; } - void Align(int alignment, int offset); - void Bind(Label* label); - - // Debugging and bringup support. 
- void Stop(const char* message); - - // - // Overridden common assembler high-level functionality - // - - // Emit code that will create an activation on the stack - virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg, - const std::vector& callee_save_regs, - const std::vector& entry_spills); - - // Emit code that will remove an activation from the stack - virtual void RemoveFrame(size_t frame_size, - const std::vector& callee_save_regs); - - virtual void IncreaseFrameSize(size_t adjust); - virtual void DecreaseFrameSize(size_t adjust); - - // Store routines - virtual void Store(FrameOffset offs, ManagedRegister src, size_t size); - virtual void StoreRef(FrameOffset dest, ManagedRegister src); - virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src); - - virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister scratch); - - virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, - ManagedRegister scratch); - - virtual void StoreStackOffsetToThread(ThreadOffset thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch); - - virtual void StoreStackPointerToThread(ThreadOffset thr_offs); - - void StoreLabelToThread(ThreadOffset thr_offs, Label* lbl); - - virtual void StoreSpanning(FrameOffset dest, ManagedRegister src, - FrameOffset in_off, ManagedRegister scratch); - - // Load routines - virtual void Load(ManagedRegister dest, FrameOffset src, size_t size); - - virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size); - - virtual void LoadRef(ManagedRegister dest, FrameOffset src); - - virtual void LoadRef(ManagedRegister dest, ManagedRegister base, - MemberOffset offs); - - virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, - Offset offs); - - virtual void LoadRawPtrFromThread(ManagedRegister dest, - ThreadOffset offs); - - // Copying routines - virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size); - - virtual void 
CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, - ManagedRegister scratch); - - virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, - ManagedRegister scratch); - - virtual void CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister scratch); - - virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size); - - virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister scratch, size_t size); - - virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister scratch, size_t size); - - virtual void MemoryBarrier(ManagedRegister); - - // Sign extension - virtual void SignExtend(ManagedRegister mreg, size_t size); - - // Zero extension - virtual void ZeroExtend(ManagedRegister mreg, size_t size); - - // Exploit fast access in managed code to Thread::Current() - virtual void GetCurrentThread(ManagedRegister tr); - virtual void GetCurrentThread(FrameOffset dest_offset, - ManagedRegister scratch); - - // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. in_reg holds a possibly stale reference - // that can be used to avoid loading the SIRT entry to see if the value is - // NULL. - virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, - ManagedRegister in_reg, bool null_allowed); - - // Set up out_off to hold a Object** into the SIRT, or to be NULL if the - // value is null and null_allowed. 
- virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, - ManagedRegister scratch, bool null_allowed); - - // src holds a SIRT entry (Object**) load this into dst - virtual void LoadReferenceFromSirt(ManagedRegister dst, - ManagedRegister src); - - // Heap::VerifyObject on src. In some cases (such as a reference to this) we - // know that src may not be null. - virtual void VerifyObject(ManagedRegister src, bool could_be_null); - virtual void VerifyObject(FrameOffset src, bool could_be_null); - - // Call to address held at [base+offset] - virtual void Call(ManagedRegister base, Offset offset, - ManagedRegister scratch); - virtual void Call(FrameOffset base, Offset offset, - ManagedRegister scratch); - virtual void Call(ThreadOffset offset, ManagedRegister scratch); - - // Generate code to check if Thread::Current()->exception_ is non-null - // and branch to a ExceptionSlowPath if it is. - virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust); - - private: - inline void EmitUint8(uint8_t value); - inline void EmitInt32(int32_t value); - inline void EmitRegisterOperand(int rm, int reg); - inline void EmitXmmRegisterOperand(int rm, XmmRegister reg); - inline void EmitFixup(AssemblerFixup* fixup); - inline void EmitOperandSizeOverride(); - - void EmitOperand(int rm, const Operand& operand); - void EmitImmediate(const Immediate& imm); - void EmitComplex(int rm, const Operand& operand, const Immediate& immediate); - void EmitLabel(Label* label, int instruction_size); - void EmitLabelLink(Label* label); - void EmitNearLabelLink(Label* label); - - void EmitGenericShift(int rm, Register reg, const Immediate& imm); - void EmitGenericShift(int rm, Register operand, Register shifter); - - DISALLOW_COPY_AND_ASSIGN(X86Assembler); -}; - -inline void X86Assembler::EmitUint8(uint8_t value) { - buffer_.Emit(value); -} - -inline void X86Assembler::EmitInt32(int32_t value) { - buffer_.Emit(value); -} - -inline void 
X86Assembler::EmitRegisterOperand(int rm, int reg) { - CHECK_GE(rm, 0); - CHECK_LT(rm, 8); - buffer_.Emit(0xC0 + (rm << 3) + reg); -} - -inline void X86Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) { - EmitRegisterOperand(rm, static_cast(reg)); -} - -inline void X86Assembler::EmitFixup(AssemblerFixup* fixup) { - buffer_.EmitFixup(fixup); -} - -inline void X86Assembler::EmitOperandSizeOverride() { - EmitUint8(0x66); -} - -// Slowpath entered when Thread::Current()->_exception is non-null -class X86ExceptionSlowPath : public SlowPath { - public: - explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {} - virtual void Emit(Assembler *sp_asm); - private: - const size_t stack_adjust_; -}; - -} // namespace x86 -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_X86_ASSEMBLER_X86_H_ diff --git a/runtime/oat/utils/x86/assembler_x86_test.cc b/runtime/oat/utils/x86/assembler_x86_test.cc deleted file mode 100644 index 5d8a3b1521..0000000000 --- a/runtime/oat/utils/x86/assembler_x86_test.cc +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "assembler_x86.h" - -#include "gtest/gtest.h" - -namespace art { - -TEST(AssemblerX86, CreateBuffer) { - AssemblerBuffer buffer; - AssemblerBuffer::EnsureCapacity ensured(&buffer); - buffer.Emit(0x42); - ASSERT_EQ(static_cast(1), buffer.Size()); - buffer.Emit(42); - ASSERT_EQ(static_cast(5), buffer.Size()); -} - -} // namespace art diff --git a/runtime/oat/utils/x86/managed_register_x86.cc b/runtime/oat/utils/x86/managed_register_x86.cc deleted file mode 100644 index 4697d06136..0000000000 --- a/runtime/oat/utils/x86/managed_register_x86.cc +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "managed_register_x86.h" - -#include "globals.h" - -namespace art { -namespace x86 { - -// These cpu registers are never available for allocation. -static const Register kReservedCpuRegistersArray[] = { ESP }; - - -// We reduce the number of available registers for allocation in debug-code -// mode in order to increase register pressure. - -// We need all registers for caching. -static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters; -static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters; -static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs; - - -// Define register pairs. -// This list must be kept in sync with the RegisterPair enum. 
-#define REGISTER_PAIR_LIST(P) \ - P(EAX, EDX) \ - P(EAX, ECX) \ - P(EAX, EBX) \ - P(EAX, EDI) \ - P(EDX, ECX) \ - P(EDX, EBX) \ - P(EDX, EDI) \ - P(ECX, EBX) \ - P(ECX, EDI) \ - P(EBX, EDI) - - -struct RegisterPairDescriptor { - RegisterPair reg; // Used to verify that the enum is in sync. - Register low; - Register high; -}; - - -static const RegisterPairDescriptor kRegisterPairs[] = { -#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high }, - REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION) -#undef REGISTER_PAIR_ENUMERATION -}; - -std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) { - os << X86ManagedRegister::FromRegisterPair(reg); - return os; -} - -bool X86ManagedRegister::Overlaps(const X86ManagedRegister& other) const { - if (IsNoRegister() || other.IsNoRegister()) return false; - CHECK(IsValidManagedRegister()); - CHECK(other.IsValidManagedRegister()); - if (Equals(other)) return true; - if (IsRegisterPair()) { - Register low = AsRegisterPairLow(); - Register high = AsRegisterPairHigh(); - return X86ManagedRegister::FromCpuRegister(low).Overlaps(other) || - X86ManagedRegister::FromCpuRegister(high).Overlaps(other); - } - if (other.IsRegisterPair()) { - return other.Overlaps(*this); - } - return false; -} - - -int X86ManagedRegister::AllocIdLow() const { - CHECK(IsRegisterPair()); - const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + - kNumberOfX87RegIds); - CHECK_EQ(r, kRegisterPairs[r].reg); - return kRegisterPairs[r].low; -} - - -int X86ManagedRegister::AllocIdHigh() const { - CHECK(IsRegisterPair()); - const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + - kNumberOfX87RegIds); - CHECK_EQ(r, kRegisterPairs[r].reg); - return kRegisterPairs[r].high; -} - - -void X86ManagedRegister::Print(std::ostream& os) const { - if (!IsValidManagedRegister()) { - os << "No Register"; - } else if (IsXmmRegister()) { - os << "XMM: " << static_cast(AsXmmRegister()); - } else if (IsX87Register()) { - os << "X87: " 
<< static_cast(AsX87Register()); - } else if (IsCpuRegister()) { - os << "CPU: " << static_cast(AsCpuRegister()); - } else if (IsRegisterPair()) { - os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh(); - } else { - os << "??: " << RegId(); - } -} - -std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg) { - reg.Print(os); - return os; -} - -} // namespace x86 -} // namespace art diff --git a/runtime/oat/utils/x86/managed_register_x86.h b/runtime/oat/utils/x86/managed_register_x86.h deleted file mode 100644 index b564a8396f..0000000000 --- a/runtime/oat/utils/x86/managed_register_x86.h +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_ -#define ART_RUNTIME_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_ - -#include "constants_x86.h" -#include "oat/utils/managed_register.h" - -namespace art { -namespace x86 { - -// Values for register pairs. -// The registers in kReservedCpuRegistersArray in x86.cc are not used in pairs. -// The table kRegisterPairs in x86.cc must be kept in sync with this enum. 
-enum RegisterPair { - EAX_EDX = 0, - EAX_ECX = 1, - EAX_EBX = 2, - EAX_EDI = 3, - EDX_ECX = 4, - EDX_EBX = 5, - EDX_EDI = 6, - ECX_EBX = 7, - ECX_EDI = 8, - EBX_EDI = 9, - kNumberOfRegisterPairs = 10, - kNoRegisterPair = -1, -}; - -std::ostream& operator<<(std::ostream& os, const RegisterPair& reg); - -const int kNumberOfCpuRegIds = kNumberOfCpuRegisters; -const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters; - -const int kNumberOfXmmRegIds = kNumberOfXmmRegisters; -const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters; - -const int kNumberOfX87RegIds = kNumberOfX87Registers; -const int kNumberOfX87AllocIds = kNumberOfX87Registers; - -const int kNumberOfPairRegIds = kNumberOfRegisterPairs; - -const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds + - kNumberOfX87RegIds + kNumberOfPairRegIds; -const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds + - kNumberOfX87RegIds; - -// Register ids map: -// [0..R[ cpu registers (enum Register) -// [R..X[ xmm registers (enum XmmRegister) -// [X..S[ x87 registers (enum X87Register) -// [S..P[ register pairs (enum RegisterPair) -// where -// R = kNumberOfCpuRegIds -// X = R + kNumberOfXmmRegIds -// S = X + kNumberOfX87RegIds -// P = X + kNumberOfRegisterPairs - -// Allocation ids map: -// [0..R[ cpu registers (enum Register) -// [R..X[ xmm registers (enum XmmRegister) -// [X..S[ x87 registers (enum X87Register) -// where -// R = kNumberOfCpuRegIds -// X = R + kNumberOfXmmRegIds -// S = X + kNumberOfX87RegIds - - -// An instance of class 'ManagedRegister' represents a single cpu register (enum -// Register), an xmm register (enum XmmRegister), or a pair of cpu registers -// (enum RegisterPair). -// 'ManagedRegister::NoRegister()' provides an invalid register. -// There is a one-to-one mapping between ManagedRegister and register id. 
-class X86ManagedRegister : public ManagedRegister { - public: - ByteRegister AsByteRegister() const { - CHECK(IsCpuRegister()); - CHECK_LT(AsCpuRegister(), ESP); // ESP, EBP, ESI and EDI cannot be encoded as byte registers. - return static_cast(id_); - } - - Register AsCpuRegister() const { - CHECK(IsCpuRegister()); - return static_cast(id_); - } - - XmmRegister AsXmmRegister() const { - CHECK(IsXmmRegister()); - return static_cast(id_ - kNumberOfCpuRegIds); - } - - X87Register AsX87Register() const { - CHECK(IsX87Register()); - return static_cast(id_ - - (kNumberOfCpuRegIds + kNumberOfXmmRegIds)); - } - - Register AsRegisterPairLow() const { - CHECK(IsRegisterPair()); - // Appropriate mapping of register ids allows to use AllocIdLow(). - return FromRegId(AllocIdLow()).AsCpuRegister(); - } - - Register AsRegisterPairHigh() const { - CHECK(IsRegisterPair()); - // Appropriate mapping of register ids allows to use AllocIdHigh(). - return FromRegId(AllocIdHigh()).AsCpuRegister(); - } - - bool IsCpuRegister() const { - CHECK(IsValidManagedRegister()); - return (0 <= id_) && (id_ < kNumberOfCpuRegIds); - } - - bool IsXmmRegister() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - kNumberOfCpuRegIds; - return (0 <= test) && (test < kNumberOfXmmRegIds); - } - - bool IsX87Register() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds); - return (0 <= test) && (test < kNumberOfX87RegIds); - } - - bool IsRegisterPair() const { - CHECK(IsValidManagedRegister()); - const int test = id_ - - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds); - return (0 <= test) && (test < kNumberOfPairRegIds); - } - - void Print(std::ostream& os) const; - - // Returns true if the two managed-registers ('this' and 'other') overlap. - // Either managed-register may be the NoRegister. If both are the NoRegister - // then false is returned. 
- bool Overlaps(const X86ManagedRegister& other) const; - - static X86ManagedRegister FromCpuRegister(Register r) { - CHECK_NE(r, kNoRegister); - return FromRegId(r); - } - - static X86ManagedRegister FromXmmRegister(XmmRegister r) { - CHECK_NE(r, kNoXmmRegister); - return FromRegId(r + kNumberOfCpuRegIds); - } - - static X86ManagedRegister FromX87Register(X87Register r) { - CHECK_NE(r, kNoX87Register); - return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds); - } - - static X86ManagedRegister FromRegisterPair(RegisterPair r) { - CHECK_NE(r, kNoRegisterPair); - return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + - kNumberOfX87RegIds)); - } - - private: - bool IsValidManagedRegister() const { - return (0 <= id_) && (id_ < kNumberOfRegIds); - } - - int RegId() const { - CHECK(!IsNoRegister()); - return id_; - } - - int AllocId() const { - CHECK(IsValidManagedRegister() && !IsRegisterPair()); - CHECK_LT(id_, kNumberOfAllocIds); - return id_; - } - - int AllocIdLow() const; - int AllocIdHigh() const; - - friend class ManagedRegister; - - explicit X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {} - - static X86ManagedRegister FromRegId(int reg_id) { - X86ManagedRegister reg(reg_id); - CHECK(reg.IsValidManagedRegister()); - return reg; - } -}; - -std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg); - -} // namespace x86 - -inline x86::X86ManagedRegister ManagedRegister::AsX86() const { - x86::X86ManagedRegister reg(id_); - CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); - return reg; -} - -} // namespace art - -#endif // ART_RUNTIME_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_ diff --git a/runtime/oat/utils/x86/managed_register_x86_test.cc b/runtime/oat/utils/x86/managed_register_x86_test.cc deleted file mode 100644 index 4fbafdadf9..0000000000 --- a/runtime/oat/utils/x86/managed_register_x86_test.cc +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the 
Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "globals.h" -#include "managed_register_x86.h" -#include "gtest/gtest.h" - -namespace art { -namespace x86 { - -TEST(X86ManagedRegister, NoRegister) { - X86ManagedRegister reg = ManagedRegister::NoRegister().AsX86(); - EXPECT_TRUE(reg.IsNoRegister()); - EXPECT_TRUE(!reg.Overlaps(reg)); -} - -TEST(X86ManagedRegister, CpuRegister) { - X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(EAX, reg.AsCpuRegister()); - - reg = X86ManagedRegister::FromCpuRegister(EBX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(EBX, reg.AsCpuRegister()); - - reg = X86ManagedRegister::FromCpuRegister(ECX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(ECX, reg.AsCpuRegister()); - - reg = X86ManagedRegister::FromCpuRegister(EDI); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(EDI, 
reg.AsCpuRegister()); -} - -TEST(X86ManagedRegister, XmmRegister) { - X86ManagedRegister reg = X86ManagedRegister::FromXmmRegister(XMM0); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(XMM0, reg.AsXmmRegister()); - - reg = X86ManagedRegister::FromXmmRegister(XMM1); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(XMM1, reg.AsXmmRegister()); - - reg = X86ManagedRegister::FromXmmRegister(XMM7); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(XMM7, reg.AsXmmRegister()); -} - -TEST(X86ManagedRegister, X87Register) { - X86ManagedRegister reg = X86ManagedRegister::FromX87Register(ST0); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(ST0, reg.AsX87Register()); - - reg = X86ManagedRegister::FromX87Register(ST1); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(ST1, reg.AsX87Register()); - - reg = X86ManagedRegister::FromX87Register(ST7); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(reg.IsX87Register()); - EXPECT_TRUE(!reg.IsRegisterPair()); - EXPECT_EQ(ST7, reg.AsX87Register()); -} - -TEST(X86ManagedRegister, RegisterPair) { - X86ManagedRegister reg = X86ManagedRegister::FromRegisterPair(EAX_EDX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - 
EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EAX, reg.AsRegisterPairLow()); - EXPECT_EQ(EDX, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(EAX_ECX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EAX, reg.AsRegisterPairLow()); - EXPECT_EQ(ECX, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(EAX_EBX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EAX, reg.AsRegisterPairLow()); - EXPECT_EQ(EBX, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(EAX_EDI); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EAX, reg.AsRegisterPairLow()); - EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(EDX_ECX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EDX, reg.AsRegisterPairLow()); - EXPECT_EQ(ECX, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(EDX_EBX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EDX, reg.AsRegisterPairLow()); - EXPECT_EQ(EBX, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(EDX_EDI); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - 
EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EDX, reg.AsRegisterPairLow()); - EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(ECX_EBX); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(ECX, reg.AsRegisterPairLow()); - EXPECT_EQ(EBX, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(ECX_EDI); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(ECX, reg.AsRegisterPairLow()); - EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); - - reg = X86ManagedRegister::FromRegisterPair(EBX_EDI); - EXPECT_TRUE(!reg.IsNoRegister()); - EXPECT_TRUE(!reg.IsCpuRegister()); - EXPECT_TRUE(!reg.IsXmmRegister()); - EXPECT_TRUE(!reg.IsX87Register()); - EXPECT_TRUE(reg.IsRegisterPair()); - EXPECT_EQ(EBX, reg.AsRegisterPairLow()); - EXPECT_EQ(EDI, reg.AsRegisterPairHigh()); -} - -TEST(X86ManagedRegister, Equals) { - X86ManagedRegister reg_eax = X86ManagedRegister::FromCpuRegister(EAX); - EXPECT_TRUE(reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - X86ManagedRegister 
reg_xmm0 = X86ManagedRegister::FromXmmRegister(XMM0); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - X86ManagedRegister reg_st0 = X86ManagedRegister::FromX87Register(ST0); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(reg_st0.Equals(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - X86ManagedRegister reg_pair = X86ManagedRegister::FromRegisterPair(EAX_EDX); - EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM7))); - 
EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI))); -} - -TEST(X86ManagedRegister, Overlaps) { - X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - reg = X86ManagedRegister::FromCpuRegister(EDX); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - reg = X86ManagedRegister::FromCpuRegister(EDI); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - 
EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - reg = X86ManagedRegister::FromCpuRegister(EBX); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - reg = X86ManagedRegister::FromXmmRegister(XMM0); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - reg = X86ManagedRegister::FromX87Register(ST0); - 
EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - reg = X86ManagedRegister::FromRegisterPair(EAX_EDX); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_ECX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - - reg = X86ManagedRegister::FromRegisterPair(EBX_EDI); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - 
EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX))); - - reg = X86ManagedRegister::FromRegisterPair(EDX_ECX); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX))); - EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI))); - EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX))); -} - -} // namespace x86 -} // namespace art diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc index ebb228e111..5d0dca9e4c 100644 --- a/runtime/oat_test.cc +++ b/runtime/oat_test.cc @@ -74,10 +74,11 @@ TEST_F(OatTest, WriteRead) { #else CompilerBackend compiler_backend = kQuick; #endif - compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, NULL, 2, true)); + InstructionSet insn_set = kIsTargetBuild ? 
kThumb2 : kX86; + compiler_driver_.reset(new CompilerDriver(compiler_backend, insn_set, false, NULL, 2, true)); jobject class_loader = NULL; if (compile) { - TimingLogger timings("OatTest::WriteRead", false); + base::TimingLogger timings("OatTest::WriteRead", false, false); compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); } @@ -96,7 +97,7 @@ TEST_F(OatTest, WriteRead) { ASSERT_TRUE(success); if (compile) { // OatWriter strips the code, regenerate to compare - TimingLogger timings("CommonTest::WriteRead", false); + base::TimingLogger timings("CommonTest::WriteRead", false, false); compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings); } UniquePtr oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false)); diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 0c13ad23ed..485c636b1c 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -28,11 +28,11 @@ #include #include +#include "arch/arm/registers_arm.h" +#include "arch/mips/registers_mips.h" +#include "arch/x86/registers_x86.h" #include "atomic.h" #include "class_linker.h" -#include "constants_arm.h" -#include "constants_mips.h" -#include "constants_x86.h" #include "debugger.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" @@ -134,10 +134,10 @@ Runtime::~Runtime() { delete java_vm_; Thread::Shutdown(); QuasiAtomic::Shutdown(); + verifier::MethodVerifier::Shutdown(); // TODO: acquire a static mutex on Runtime to avoid racing. 
CHECK(instance_ == NULL || instance_ == this); instance_ = NULL; - verifier::MethodVerifier::Shutdown(); } struct AbortState { diff --git a/runtime/runtime_support.cc b/runtime/runtime_support.cc deleted file mode 100644 index d28aad1e8f..0000000000 --- a/runtime/runtime_support.cc +++ /dev/null @@ -1,475 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runtime_support.h" - -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "gc/accounting/card_table-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/field-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/proxy.h" -#include "reflection.h" -#include "scoped_thread_state_change.h" -#include "ScopedLocalRef.h" -#include "well_known_classes.h" - -double art_l2d(int64_t l) { - return static_cast(l); -} - -float art_l2f(int64_t l) { - return static_cast(l); -} - -/* - * Float/double conversion requires clamping to min and max of integer form. If - * target doesn't support this normally, use these. 
- */ -int64_t art_d2l(double d) { - static const double kMaxLong = static_cast(static_cast(0x7fffffffffffffffULL)); - static const double kMinLong = static_cast(static_cast(0x8000000000000000ULL)); - if (d >= kMaxLong) { - return static_cast(0x7fffffffffffffffULL); - } else if (d <= kMinLong) { - return static_cast(0x8000000000000000ULL); - } else if (d != d) { // NaN case - return 0; - } else { - return static_cast(d); - } -} - -int64_t art_f2l(float f) { - static const float kMaxLong = static_cast(static_cast(0x7fffffffffffffffULL)); - static const float kMinLong = static_cast(static_cast(0x8000000000000000ULL)); - if (f >= kMaxLong) { - return static_cast(0x7fffffffffffffffULL); - } else if (f <= kMinLong) { - return static_cast(0x8000000000000000ULL); - } else if (f != f) { // NaN case - return 0; - } else { - return static_cast(f); - } -} - -int32_t art_d2i(double d) { - static const double kMaxInt = static_cast(static_cast(0x7fffffffUL)); - static const double kMinInt = static_cast(static_cast(0x80000000UL)); - if (d >= kMaxInt) { - return static_cast(0x7fffffffUL); - } else if (d <= kMinInt) { - return static_cast(0x80000000UL); - } else if (d != d) { // NaN case - return 0; - } else { - return static_cast(d); - } -} - -int32_t art_f2i(float f) { - static const float kMaxInt = static_cast(static_cast(0x7fffffffUL)); - static const float kMinInt = static_cast(static_cast(0x80000000UL)); - if (f >= kMaxInt) { - return static_cast(0x7fffffffUL); - } else if (f <= kMinInt) { - return static_cast(0x80000000UL); - } else if (f != f) { // NaN case - return 0; - } else { - return static_cast(f); - } -} - -namespace art { - -// Helper function to allocate array for FILLED_NEW_ARRAY. 
-mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* referrer, - int32_t component_count, Thread* self, - bool access_check) { - if (UNLIKELY(component_count < 0)) { - ThrowNegativeArraySizeException(component_count); - return NULL; // Failure - } - mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx); - if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve - klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer); - if (klass == NULL) { // Error - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - } - if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) { - if (klass->IsPrimitiveLong() || klass->IsPrimitiveDouble()) { - ThrowRuntimeException("Bad filled array request for type %s", - PrettyDescriptor(klass).c_str()); - } else { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(throw_location.GetMethod() == referrer); - self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;", - "Found type %s; filled-new-array not implemented for anything but \'int\'", - PrettyDescriptor(klass).c_str()); - } - return NULL; // Failure - } else { - if (access_check) { - mirror::Class* referrer_klass = referrer->GetDeclaringClass(); - if (UNLIKELY(!referrer_klass->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer_klass, klass); - return NULL; // Failure - } - } - DCHECK(klass->IsArrayClass()) << PrettyClass(klass); - return mirror::Array::Alloc(self, klass, component_count); - } -} - -mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, - Thread* self, FindFieldType type, size_t expected_size, - bool access_check) { - bool is_primitive; - bool is_set; - bool is_static; - switch (type) { - case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; - case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; - case 
InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; - case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; - case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; - case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; - case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; - case StaticPrimitiveWrite: // Keep GCC happy by having a default handler, fall-through. - default: is_primitive = true; is_set = true; is_static = true; break; - } - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - mirror::Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); - if (UNLIKELY(resolved_field == NULL)) { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind. - return NULL; // Failure. - } - mirror::Class* fields_class = resolved_field->GetDeclaringClass(); - if (access_check) { - if (UNLIKELY(resolved_field->IsStatic() != is_static)) { - ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer); - return NULL; - } - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(fields_class) || - !referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()))) { - // The referring class can't access the resolved field, this may occur as a result of a - // protected field being made public by a sub-class. Resort to the dex file to determine - // the correct class for the access check. 
- const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); - fields_class = class_linker->ResolveType(dex_file, - dex_file.GetFieldId(field_idx).class_idx_, - referring_class); - if (UNLIKELY(!referring_class->CanAccess(fields_class))) { - ThrowIllegalAccessErrorClass(referring_class, fields_class); - return NULL; // failure - } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()))) { - ThrowIllegalAccessErrorField(referring_class, resolved_field); - return NULL; // failure - } - } - if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) { - ThrowIllegalAccessErrorFinalField(referrer, resolved_field); - return NULL; // failure - } else { - FieldHelper fh(resolved_field); - if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || - fh.FieldSize() != expected_size)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(throw_location.GetMethod() == referrer); - self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", - "Attempted read of %zd-bit %s on field '%s'", - expected_size * (32 / sizeof(int32_t)), - is_primitive ? "primitive" : "non-primitive", - PrettyField(resolved_field, true).c_str()); - return NULL; // failure - } - } - } - if (!is_static) { - // instance fields must be being accessed on an initialized class - return resolved_field; - } else { - // If the class is initialized we're done. - if (fields_class->IsInitialized()) { - return resolved_field; - } else if (Runtime::Current()->GetClassLinker()->EnsureInitialized(fields_class, true, true)) { - // Otherwise let's ensure the class is initialized before resolving the field. 
- return resolved_field; - } else { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind - return NULL; // failure - } - } -} - -// Slow path method resolution -mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, - mirror::AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - bool is_direct = type == kStatic || type == kDirect; - mirror::AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); - if (UNLIKELY(resolved_method == NULL)) { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind. - return NULL; // Failure. - } else if (UNLIKELY(this_object == NULL && type != kStatic)) { - // Maintain interpreter-like semantics where NullPointerException is thrown - // after potential NoSuchMethodError from class linker. - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(referrer == throw_location.GetMethod()); - ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type); - return NULL; // Failure. - } else { - if (!access_check) { - if (is_direct) { - return resolved_method; - } else if (type == kInterface) { - mirror::AbstractMethod* interface_method = - this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - if (UNLIKELY(interface_method == NULL)) { - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, - referrer); - return NULL; // Failure. - } else { - return interface_method; - } - } else { - mirror::ObjectArray* vtable; - uint16_t vtable_index = resolved_method->GetMethodIndex(); - if (type == kSuper) { - vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable(); - } else { - vtable = this_object->GetClass()->GetVTable(); - } - // TODO: eliminate bounds check? 
- return vtable->Get(vtable_index); - } - } else { - // Incompatible class change should have been handled in resolve method. - if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) { - ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method, - referrer); - return NULL; // Failure. - } - mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(methods_class) || - !referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - // The referring class can't access the resolved method, this may occur as a result of a - // protected method being made public by implementing an interface that re-declares the - // method public. Resort to the dex file to determine the correct class for the access check - const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); - methods_class = class_linker->ResolveType(dex_file, - dex_file.GetMethodId(method_idx).class_idx_, - referring_class); - if (UNLIKELY(!referring_class->CanAccess(methods_class))) { - ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, - referrer, resolved_method, type); - return NULL; // Failure. - } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - ThrowIllegalAccessErrorMethod(referring_class, resolved_method); - return NULL; // Failure. - } - } - if (is_direct) { - return resolved_method; - } else if (type == kInterface) { - mirror::AbstractMethod* interface_method = - this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - if (UNLIKELY(interface_method == NULL)) { - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, - referrer); - return NULL; // Failure. 
- } else { - return interface_method; - } - } else { - mirror::ObjectArray* vtable; - uint16_t vtable_index = resolved_method->GetMethodIndex(); - if (type == kSuper) { - mirror::Class* super_class = referring_class->GetSuperClass(); - if (LIKELY(super_class != NULL)) { - vtable = referring_class->GetSuperClass()->GetVTable(); - } else { - vtable = NULL; - } - } else { - vtable = this_object->GetClass()->GetVTable(); - } - if (LIKELY(vtable != NULL && - vtable_index < static_cast(vtable->GetLength()))) { - return vtable->GetWithoutChecks(vtable_index); - } else { - // Behavior to agree with that of the verifier. - MethodHelper mh(resolved_method); - ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(), - mh.GetSignature()); - return NULL; // Failure. - } - } - } - } -} - -void ThrowStackOverflowError(Thread* self) { - CHECK(!self->IsHandlingStackOverflow()) << "Recursive stack overflow."; - - if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { - // Remove extra entry pushed onto second stack during method tracing. - Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false); - } - - self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. - JNIEnvExt* env = self->GetJniEnv(); - std::string msg("stack size "); - msg += PrettySize(self->GetStackSize()); - // Use low-level JNI routine and pre-baked error class to avoid class linking operations that - // would consume more stack. - int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError, - msg.c_str(), NULL); - if (rc != JNI_OK) { - // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME - // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError - // instead. 
- LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed."; - CHECK(self->IsExceptionPending()); - } - self->ResetDefaultStackEnd(); // Return to default stack size. -} - -JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, - jobject rcvr_jobj, jobject interface_method_jobj, - std::vector& args) { - DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy)); - - // Build argument array possibly triggering GC. - soa.Self()->AssertThreadSuspensionIsAllowable(); - jobjectArray args_jobj = NULL; - const JValue zero; - if (args.size() > 0) { - args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL); - if (args_jobj == NULL) { - CHECK(soa.Self()->IsExceptionPending()); - return zero; - } - for (size_t i = 0; i < args.size(); ++i) { - if (shorty[i + 1] == 'L') { - jobject val = args.at(i).l; - soa.Env()->SetObjectArrayElement(args_jobj, i, val); - } else { - JValue jv; - jv.SetJ(args.at(i).j); - mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); - if (val == NULL) { - CHECK(soa.Self()->IsExceptionPending()); - return zero; - } - soa.Decode* >(args_jobj)->Set(i, val); - } - } - } - - // Call InvocationHandler.invoke(Object proxy, Method method, Object[] args). - jobject inv_hand = soa.Env()->GetObjectField(rcvr_jobj, - WellKnownClasses::java_lang_reflect_Proxy_h); - jvalue invocation_args[3]; - invocation_args[0].l = rcvr_jobj; - invocation_args[1].l = interface_method_jobj; - invocation_args[2].l = args_jobj; - jobject result = - soa.Env()->CallObjectMethodA(inv_hand, - WellKnownClasses::java_lang_reflect_InvocationHandler_invoke, - invocation_args); - - // Unbox result and handle error conditions. - if (LIKELY(!soa.Self()->IsExceptionPending())) { - if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) { - // Do nothing. 
- return zero; - } else { - mirror::Object* result_ref = soa.Decode(result); - mirror::Object* rcvr = soa.Decode(rcvr_jobj); - mirror::AbstractMethod* interface_method = - soa.Decode(interface_method_jobj); - mirror::Class* result_type = MethodHelper(interface_method).GetReturnType(); - mirror::AbstractMethod* proxy_method; - if (interface_method->GetDeclaringClass()->IsInterface()) { - proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); - } else { - // Proxy dispatch to a method defined in Object. - DCHECK(interface_method->GetDeclaringClass()->IsObjectClass()); - proxy_method = interface_method; - } - ThrowLocation throw_location(rcvr, proxy_method, -1); - JValue result_unboxed; - if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) { - DCHECK(soa.Self()->IsExceptionPending()); - return zero; - } - return result_unboxed; - } - } else { - // In the case of checked exceptions that aren't declared, the exception must be wrapped by - // a UndeclaredThrowableException. 
- mirror::Throwable* exception = soa.Self()->GetException(NULL); - if (exception->IsCheckedException()) { - mirror::Object* rcvr = soa.Decode(rcvr_jobj); - mirror::SynthesizedProxyClass* proxy_class = - down_cast(rcvr->GetClass()); - mirror::AbstractMethod* interface_method = - soa.Decode(interface_method_jobj); - mirror::AbstractMethod* proxy_method = - rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); - int throws_index = -1; - size_t num_virt_methods = proxy_class->NumVirtualMethods(); - for (size_t i = 0; i < num_virt_methods; i++) { - if (proxy_class->GetVirtualMethod(i) == proxy_method) { - throws_index = i; - break; - } - } - CHECK_NE(throws_index, -1); - mirror::ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); - mirror::Class* exception_class = exception->GetClass(); - bool declares_exception = false; - for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { - mirror::Class* declared_exception = declared_exceptions->Get(i); - declares_exception = declared_exception->IsAssignableFrom(exception_class); - } - if (!declares_exception) { - ThrowLocation throw_location(rcvr, proxy_method, -1); - soa.Self()->ThrowNewWrappedException(throw_location, - "Ljava/lang/reflect/UndeclaredThrowableException;", - NULL); - } - } - return zero; - } -} - -} // namespace art diff --git a/runtime/runtime_support.h b/runtime/runtime_support.h deleted file mode 100644 index 43c678428b..0000000000 --- a/runtime/runtime_support.h +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_RUNTIME_SUPPORT_H_ -#define ART_RUNTIME_RUNTIME_SUPPORT_H_ - -#include "class_linker.h" -#include "common_throws.h" -#include "dex_file.h" -#include "indirect_reference_table.h" -#include "invoke_type.h" -#include "jni_internal.h" -#include "mirror/abstract_method.h" -#include "mirror/array.h" -#include "mirror/class-inl.h" -#include "mirror/throwable.h" -#include "object_utils.h" -#include "thread.h" - -extern "C" void art_interpreter_invoke_handler(); -extern "C" void art_jni_dlsym_lookup_stub(); -extern "C" void art_portable_abstract_method_error_stub(); -extern "C" void art_portable_proxy_invoke_handler(); -extern "C" void art_quick_abstract_method_error_stub(); -extern "C" void art_quick_deoptimize(); -extern "C" void art_quick_instrumentation_entry_from_code(void*); -extern "C" void art_quick_instrumentation_exit_from_code(); -extern "C" void art_quick_interpreter_entry(void*); -extern "C" void art_quick_proxy_invoke_handler(); -extern "C" void art_work_around_app_jni_bugs(); - -extern "C" double art_l2d(int64_t l); -extern "C" float art_l2f(int64_t l); -extern "C" int64_t art_d2l(double d); -extern "C" int32_t art_d2i(double d); -extern "C" int64_t art_f2l(float f); -extern "C" int32_t art_f2i(float f); - -namespace art { -namespace mirror { -class Class; -class Field; -class Object; -} - -// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it -// cannot be resolved, throw an error. If it can, use it to create an instance. 
-// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. -static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - Thread* self, - bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); - Runtime* runtime = Runtime::Current(); - if (UNLIKELY(klass == NULL)) { - klass = runtime->GetClassLinker()->ResolveType(type_idx, method); - if (klass == NULL) { - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - } - if (access_check) { - if (UNLIKELY(!klass->IsInstantiable())) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;", - PrettyDescriptor(klass).c_str()); - return NULL; // Failure - } - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); - return NULL; // Failure - } - } - if (!klass->IsInitialized() && - !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) { - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - return klass->AllocObject(self); -} - -// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If -// it cannot be resolved, throw an error. If it can, use it to create an array. -// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. 
-static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (UNLIKELY(component_count < 0)) { - ThrowNegativeArraySizeException(component_count); - return NULL; // Failure - } - mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); - if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve - klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); - if (klass == NULL) { // Error - DCHECK(Thread::Current()->IsExceptionPending()); - return NULL; // Failure - } - CHECK(klass->IsArrayClass()) << PrettyClass(klass); - } - if (access_check) { - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); - return NULL; // Failure - } - } - return mirror::Array::Alloc(self, klass, component_count); -} - -extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Type of find field operation for fast and slow case. -enum FindFieldType { - InstanceObjectRead, - InstanceObjectWrite, - InstancePrimitiveRead, - InstancePrimitiveWrite, - StaticObjectRead, - StaticObjectWrite, - StaticPrimitiveRead, - StaticPrimitiveWrite, -}; - -// Slow field find that can initialize classes and may throw exceptions. -extern mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, - Thread* self, FindFieldType type, size_t expected_size, - bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Fast path field resolution that can't initialize classes or throw exceptions. 
-static inline mirror::Field* FindFieldFast(uint32_t field_idx, - const mirror::AbstractMethod* referrer, - FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* resolved_field = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); - if (UNLIKELY(resolved_field == NULL)) { - return NULL; - } - mirror::Class* fields_class = resolved_field->GetDeclaringClass(); - // Check class is initiliazed or initializing. - if (UNLIKELY(!fields_class->IsInitializing())) { - return NULL; - } - // Check for incompatible class change. - bool is_primitive; - bool is_set; - bool is_static; - switch (type) { - case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; - case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; - case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; - case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; - case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; - case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; - case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; - case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break; - default: - LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. - is_primitive = true; - is_set = true; - is_static = true; - break; - } - if (UNLIKELY(resolved_field->IsStatic() != is_static)) { - // Incompatible class change. - return NULL; - } - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(fields_class) || - !referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()) || - (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) { - // Illegal access. 
- return NULL; - } - FieldHelper fh(resolved_field); - if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || - fh.FieldSize() != expected_size)) { - return NULL; - } - return resolved_field; -} - -// Fast path method resolution that can't throw exceptions. -static inline mirror::AbstractMethod* FindMethodFast(uint32_t method_idx, - mirror::Object* this_object, - const mirror::AbstractMethod* referrer, - bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - bool is_direct = type == kStatic || type == kDirect; - if (UNLIKELY(this_object == NULL && !is_direct)) { - return NULL; - } - mirror::AbstractMethod* resolved_method = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); - if (UNLIKELY(resolved_method == NULL)) { - return NULL; - } - if (access_check) { - // Check for incompatible class change errors and access. - bool icce = resolved_method->CheckIncompatibleClassChange(type); - if (UNLIKELY(icce)) { - return NULL; - } - mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(methods_class) || - !referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - // Potential illegal access, may need to refine the method's class. - return NULL; - } - } - if (type == kInterface) { // Most common form of slow path dispatch. 
- return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - } else if (is_direct) { - return resolved_method; - } else if (type == kSuper) { - return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()-> - Get(resolved_method->GetMethodIndex()); - } else { - DCHECK(type == kVirtual); - return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex()); - } -} - -extern mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, - mirror::AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, - const mirror::AbstractMethod* referrer, - Thread* self, bool can_run_clinit, - bool verify_access) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - mirror::Class* klass = class_linker->ResolveType(type_idx, referrer); - if (UNLIKELY(klass == NULL)) { - CHECK(self->IsExceptionPending()); - return NULL; // Failure - Indicate to caller to deliver exception - } - // Perform access check if necessary. - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referring_class, klass); - return NULL; // Failure - Indicate to caller to deliver exception - } - // If we're just implementing const-class, we shouldn't call . - if (!can_run_clinit) { - return klass; - } - // If we are the of this class, just return our storage. - // - // Do not set the DexCache InitializedStaticStorage, since that implies has finished - // running. 
- if (klass == referring_class && MethodHelper(referrer).IsClassInitializer()) { - return klass; - } - if (!class_linker->EnsureInitialized(klass, true, true)) { - CHECK(self->IsExceptionPending()); - return NULL; // Failure - Indicate to caller to deliver exception - } - referrer->GetDexCacheInitializedStaticStorage()->Set(type_idx, klass); - return klass; -} - -extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -static inline mirror::String* ResolveStringFromCode(const mirror::AbstractMethod* referrer, - uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - return class_linker->ResolveString(string_idx, referrer); -} - -static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(monitor_lock_) { - // Save any pending exception over monitor exit call. - mirror::Throwable* saved_exception = NULL; - ThrowLocation saved_throw_location; - if (UNLIKELY(self->IsExceptionPending())) { - saved_exception = self->GetException(&saved_throw_location); - self->ClearException(); - } - // Decode locked object and unlock, before popping local references. - self->DecodeJObject(locked)->MonitorExit(self); - if (UNLIKELY(self->IsExceptionPending())) { - LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" - << saved_exception->Dump() - << "\nEncountered second exception during implicit MonitorExit:\n" - << self->GetException(NULL)->Dump(); - } - // Restore pending exception. 
- if (saved_exception != NULL) { - self->SetException(saved_throw_location, saved_exception); - } -} - -static inline void CheckReferenceResult(mirror::Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (o == NULL) { - return; - } - mirror::AbstractMethod* m = self->GetCurrentMethod(NULL); - if (o == kInvalidIndirectRefObject) { - JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); - } - // Make sure that the result is an instance of the type this method was expected to return. - mirror::Class* return_type = MethodHelper(m).GetReturnType(); - - if (!o->InstanceOf(return_type)) { - JniAbortF(NULL, "attempt to return an instance of %s from %s", - PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str()); - } -} - -static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (;;) { - if (thread->ReadFlag(kCheckpointRequest)) { - thread->RunCheckpointFunction(); - thread->AtomicClearFlag(kCheckpointRequest); - } else if (thread->ReadFlag(kSuspendRequest)) { - thread->FullSuspendCheck(); - } else { - break; - } - } -} - -JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, - jobject rcvr_jobj, jobject interface_method_jobj, - std::vector& args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Entry point for deoptimization. -static inline uintptr_t GetDeoptimizationEntryPoint() { - return reinterpret_cast(art_quick_deoptimize); -} - -// Return address of instrumentation stub. -static inline void* GetInstrumentationEntryPoint() { - return reinterpret_cast(art_quick_instrumentation_entry_from_code); -} - -// The return_pc of instrumentation exit stub. -static inline uintptr_t GetInstrumentationExitPc() { - return reinterpret_cast(art_quick_instrumentation_exit_from_code); -} - -// Return address of interpreter stub. 
-static inline void* GetInterpreterEntryPoint() { - return reinterpret_cast(art_quick_interpreter_entry); -} - -static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) { - return class_linker->GetPortableResolutionTrampoline(); -} - -static inline const void* GetQuickResolutionTrampoline(ClassLinker* class_linker) { - return class_linker->GetQuickResolutionTrampoline(); -} - -// Return address of resolution trampoline stub for defined compiler. -static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) { -#if defined(ART_USE_PORTABLE_COMPILER) - return GetPortableResolutionTrampoline(class_linker); -#else - return GetQuickResolutionTrampoline(class_linker); -#endif -} - -static inline void* GetPortableAbstractMethodErrorStub() { - return reinterpret_cast(art_portable_abstract_method_error_stub); -} - -static inline void* GetQuickAbstractMethodErrorStub() { - return reinterpret_cast(art_quick_abstract_method_error_stub); -} - -// Return address of abstract method error stub for defined compiler. -static inline void* GetAbstractMethodErrorStub() { -#if defined(ART_USE_PORTABLE_COMPILER) - return GetPortableAbstractMethodErrorStub(); -#else - return GetQuickAbstractMethodErrorStub(); -#endif -} - -static inline void* GetJniDlsymLookupStub() { - return reinterpret_cast(art_jni_dlsym_lookup_stub); -} - -} // namespace art - -#endif // ART_RUNTIME_RUNTIME_SUPPORT_H_ diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc deleted file mode 100644 index 9d83f9e49e..0000000000 --- a/runtime/runtime_support_llvm.cc +++ /dev/null @@ -1,925 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runtime_support_llvm.h" - -#include "ScopedLocalRef.h" -#include "asm_support.h" -#include "class_linker.h" -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "dex_instruction.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/dex_cache-inl.h" -#include "mirror/field-inl.h" -#include "mirror/object.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "nth_caller_visitor.h" -#include "object_utils.h" -#include "reflection.h" -#include "runtime_support.h" -#include "runtime_support_llvm_func_list.h" -#include "scoped_thread_state_change.h" -#include "thread.h" -#include "thread_list.h" -#include "verifier/dex_gc_map.h" -#include "verifier/method_verifier.h" -#include "well_known_classes.h" - -#include -#include -#include -#include -#include - -namespace art { - -using ::art::mirror::AbstractMethod; - -class ShadowFrameCopyVisitor : public StackVisitor { - public: - explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), - top_frame_(NULL) {} - - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsShadowFrame()) { - ShadowFrame* cur_frame = GetCurrentShadowFrame(); - size_t num_regs = cur_frame->NumberOfVRegs(); - AbstractMethod* method = cur_frame->GetMethod(); - uint32_t dex_pc = cur_frame->GetDexPC(); - ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc); - - const uint8_t* gc_map = method->GetNativeGcMap(); - uint32_t gc_map_length = static_cast((gc_map[0] << 
24) | - (gc_map[1] << 16) | - (gc_map[2] << 8) | - (gc_map[3] << 0)); - verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length); - const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc); - for (size_t reg = 0; reg < num_regs; ++reg) { - if (TestBitmap(reg, reg_bitmap)) { - new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg)); - } else { - new_frame->SetVReg(reg, cur_frame->GetVReg(reg)); - } - } - - if (prev_frame_ != NULL) { - prev_frame_->SetLink(new_frame); - } else { - top_frame_ = new_frame; - } - prev_frame_ = new_frame; - } - return true; - } - - ShadowFrame* GetShadowFrameCopy() { - return top_frame_; - } - - private: - static bool TestBitmap(int reg, const uint8_t* reg_vector) { - return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; - } - - ShadowFrame* prev_frame_; - ShadowFrame* top_frame_; -}; - -} // namespace art - -extern "C" { -using ::art::CatchHandlerIterator; -using ::art::DexFile; -using ::art::FindFieldFast; -using ::art::FindMethodFast; -using ::art::InstanceObjectRead; -using ::art::InstanceObjectWrite; -using ::art::InstancePrimitiveRead; -using ::art::InstancePrimitiveWrite; -using ::art::Instruction; -using ::art::InvokeType; -using ::art::JNIEnvExt; -using ::art::JValue; -using ::art::Locks; -using ::art::MethodHelper; -using ::art::PrettyClass; -using ::art::PrettyMethod; -using ::art::Primitive; -using ::art::ResolveStringFromCode; -using ::art::Runtime; -using ::art::ScopedJniEnvLocalRefState; -using ::art::ScopedObjectAccessUnchecked; -using ::art::ShadowFrame; -using ::art::ShadowFrameCopyVisitor; -using ::art::StaticObjectRead; -using ::art::StaticObjectWrite; -using ::art::StaticPrimitiveRead; -using ::art::StaticPrimitiveWrite; -using ::art::Thread; -using ::art::Thread; -using ::art::ThrowArithmeticExceptionDivideByZero; -using ::art::ThrowArrayIndexOutOfBoundsException; -using ::art::ThrowArrayStoreException; -using ::art::ThrowClassCastException; -using ::art::ThrowLocation; -using 
::art::ThrowNoSuchMethodError; -using ::art::ThrowNullPointerException; -using ::art::ThrowNullPointerExceptionFromDexPC; -using ::art::ThrowStackOverflowError; -using ::art::kDirect; -using ::art::kInterface; -using ::art::kNative; -using ::art::kStatic; -using ::art::kSuper; -using ::art::kVirtual; -using ::art::mirror::AbstractMethod; -using ::art::mirror::Array; -using ::art::mirror::Class; -using ::art::mirror::Field; -using ::art::mirror::Object; -using ::art::mirror::Throwable; - -//---------------------------------------------------------------------------- -// Thread -//---------------------------------------------------------------------------- - -Thread* art_portable_get_current_thread_from_code() { -#if defined(__arm__) || defined(__i386__) - LOG(FATAL) << "UNREACHABLE"; -#endif - return Thread::Current(); -} - -void* art_portable_set_current_thread_from_code(void* thread_object_addr) { - // Hijacked to set r9 on ARM. - LOG(FATAL) << "UNREACHABLE"; - return NULL; -} - -void art_portable_lock_object_from_code(Object* obj, Thread* thread) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry - obj->MonitorEnter(thread); // May block - DCHECK(thread->HoldsLock(obj)); - // Only possible exception is NPE and is handled before entry - DCHECK(!thread->IsExceptionPending()); -} - -void art_portable_unlock_object_from_code(Object* obj, Thread* thread) - UNLOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry - // MonitorExit may throw exception - obj->MonitorExit(thread); -} - -void art_portable_test_suspend_from_code(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckSuspend(self); - if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) { - // Save out the shadow frame to the heap - ShadowFrameCopyVisitor visitor(self); - visitor.WalkStack(true); - self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy()); - 
self->SetDeoptimizationReturnValue(JValue()); - self->SetException(ThrowLocation(), reinterpret_cast(-1)); - } -} - -ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread, - ShadowFrame* new_shadow_frame, - AbstractMethod* method, - uint32_t num_vregs) { - ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame); - new_shadow_frame->SetMethod(method); - new_shadow_frame->SetNumberOfVRegs(num_vregs); - return old_frame; -} - -void art_portable_pop_shadow_frame_from_code(void*) { - LOG(FATAL) << "Implemented by IRBuilder."; -} - -void art_portable_mark_gc_card_from_code(void *, void*) { - LOG(FATAL) << "Implemented by IRBuilder."; -} - -//---------------------------------------------------------------------------- -// Exception -//---------------------------------------------------------------------------- - -bool art_portable_is_exception_pending_from_code() { - LOG(FATAL) << "Implemented by IRBuilder."; - return false; -} - -void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowArithmeticExceptionDivideByZero(); -} - -void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowArrayIndexOutOfBoundsException(index, length); -} - -void art_portable_throw_no_such_method_from_code(int32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowNoSuchMethodError(method_idx); -} - -void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // TODO: remove dex_pc argument from caller. 
- UNUSED(dex_pc); - Thread* self = Thread::Current(); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionFromDexPC(throw_location); -} - -void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowStackOverflowError(Thread::Current()); -} - -void art_portable_throw_exception_from_code(Throwable* exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread* self = Thread::Current(); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - if (exception == NULL) { - ThrowNullPointerException(NULL, "throw with null exception"); - } else { - self->SetException(throw_location, exception); - } -} - -void* art_portable_get_and_clear_exception(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(self->IsExceptionPending()); - // TODO: make this inline. - Throwable* exception = self->GetException(NULL); - self->ClearException(); - return exception; -} - -int32_t art_portable_find_catch_block_from_code(AbstractMethod* current_method, - uint32_t ti_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread* self = Thread::Current(); // TODO: make an argument. - ThrowLocation throw_location; - Throwable* exception = self->GetException(&throw_location); - // Check for special deoptimization exception. 
- if (UNLIKELY(reinterpret_cast(exception) == -1)) { - return -1; - } - Class* exception_type = exception->GetClass(); - MethodHelper mh(current_method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - DCHECK_LT(ti_offset, code_item->tries_size_); - const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); - - int iter_index = 0; - int result = -1; - uint32_t catch_dex_pc = -1; - // Iterate over the catch handlers associated with dex_pc - for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) { - uint16_t iter_type_idx = it.GetHandlerTypeIndex(); - // Catch all case - if (iter_type_idx == DexFile::kDexNoIndex16) { - catch_dex_pc = it.GetHandlerAddress(); - result = iter_index; - break; - } - // Does this catch exception type apply? - Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); - if (UNLIKELY(iter_exception_type == NULL)) { - // TODO: check, the verifier (class linker?) should take care of resolving all exception - // classes early. - LOG(WARNING) << "Unresolved exception class when finding catch block: " - << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); - } else if (iter_exception_type->IsAssignableFrom(exception_type)) { - catch_dex_pc = it.GetHandlerAddress(); - result = iter_index; - break; - } - ++iter_index; - } - if (result != -1) { - // Handler found. 
- Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, - throw_location, - current_method, - catch_dex_pc, - exception); - } - return result; -} - - -//---------------------------------------------------------------------------- -// Object Space -//---------------------------------------------------------------------------- - -Object* art_portable_alloc_object_from_code(uint32_t type_idx, AbstractMethod* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectFromCode(type_idx, referrer, thread, false); -} - -Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectFromCode(type_idx, referrer, thread, true); -} - -Object* art_portable_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocArrayFromCode(type_idx, referrer, length, self, false); -} - -Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocArrayFromCode(type_idx, referrer, length, self, true); -} - -Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); -} - -Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); -} - -static AbstractMethod* FindMethodHelper(uint32_t method_idx, - Object* this_object, - AbstractMethod* 
caller_method, - bool access_check, - InvokeType type, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = FindMethodFast(method_idx, - this_object, - caller_method, - access_check, - type); - if (UNLIKELY(method == NULL)) { - method = FindMethodFromCode(method_idx, this_object, caller_method, - thread, access_check, type); - if (UNLIKELY(method == NULL)) { - CHECK(thread->IsExceptionPending()); - return 0; // failure - } - } - DCHECK(!thread->IsExceptionPending()); - const void* code = method->GetEntryPointFromCompiledCode(); - - // When we return, the caller will branch to this address, so it had better not be 0! - if (UNLIKELY(code == NULL)) { - MethodHelper mh(method); - LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) - << " location: " << mh.GetDexFile().GetLocation(); - } - return method; -} - -Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); -} - -Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); -} - -Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); -} - -Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return 
FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); -} - -Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); -} - -Object* art_portable_find_interface_method_from_code(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); -} - -Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); -} - -Object* art_portable_initialize_type_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); -} - -Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called when caller isn't guaranteed to have access to a type and the dex cache may be - // unpopulated - return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); -} - -Object* art_portable_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveStringFromCode(referrer, string_idx); -} - -int32_t art_portable_set32_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, - referrer, - StaticPrimitiveWrite, - sizeof(uint32_t)); - if 
(LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, - referrer, - Thread::Current(), - StaticPrimitiveWrite, - sizeof(uint32_t), - true); - if (LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set64_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, - referrer, - Thread::Current(), - StaticPrimitiveWrite, - sizeof(uint64_t), - true); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set_obj_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); - if (LIKELY(field != NULL)) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectWrite, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - return field->Get32(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticPrimitiveRead, sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - 
return field->Get32(field->GetDeclaringClass()); - } - return 0; -} - -int64_t art_portable_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticPrimitiveRead, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - return 0; -} - -Object* art_portable_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); - if (LIKELY(field != NULL)) { - return field->GetObj(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectRead, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - return field->GetObj(field->GetDeclaringClass()); - } - return 0; -} - -int32_t art_portable_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, uint32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - field->Set32(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveWrite, sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - field->Set32(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); - if (LIKELY(field != 
NULL)) { - field->Set64(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveWrite, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - field->Set64(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); - if (LIKELY(field != NULL)) { - field->SetObj(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectWrite, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - field->SetObj(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - return field->Get32(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveRead, sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - return field->Get32(obj); - } - return 0; -} - -int64_t art_portable_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - return field->Get64(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveRead, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - return field->Get64(obj); - } - return 0; -} - -Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); - if (LIKELY(field != NULL)) { - return field->GetObj(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectRead, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - return field->GetObj(obj); - } - return 0; -} - -void art_portable_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc, - Array* array, uint32_t payload_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Test: Is array equal to null? (Guard NullPointerException) - if (UNLIKELY(array == NULL)) { - art_portable_throw_null_pointer_exception_from_code(dex_pc); - return; - } - - // Find the payload from the CodeItem - MethodHelper mh(method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - - DCHECK_GT(code_item->insns_size_in_code_units_, payload_offset); - - const Instruction::ArrayDataPayload* payload = - reinterpret_cast( - code_item->insns_ + payload_offset); - - DCHECK_EQ(payload->ident, - static_cast(Instruction::kArrayDataSignature)); - - // Test: Is array big enough? 
- uint32_t array_len = static_cast(array->GetLength()); - if (UNLIKELY(array_len < payload->element_count)) { - int32_t last_index = payload->element_count - 1; - art_portable_throw_array_bounds_from_code(array_len, last_index); - return; - } - - // Copy the data - size_t size = payload->element_width * payload->element_count; - memcpy(array->GetRawData(payload->element_width), payload->data, size); -} - - - -//---------------------------------------------------------------------------- -// Type checking, in the nature of casting -//---------------------------------------------------------------------------- - -int32_t art_portable_is_assignable_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(dest_type != NULL); - DCHECK(src_type != NULL); - return dest_type->IsAssignableFrom(src_type) ? 1 : 0; -} - -void art_portable_check_cast_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); - DCHECK(src_type->IsClass()) << PrettyClass(src_type); - if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { - ThrowClassCastException(dest_type, src_type); - } -} - -void art_portable_check_put_array_element_from_code(const Object* element, - const Object* array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (element == NULL) { - return; - } - DCHECK(array != NULL); - Class* array_class = array->GetClass(); - DCHECK(array_class != NULL); - Class* component_type = array_class->GetComponentType(); - Class* element_class = element->GetClass(); - if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { - ThrowArrayStoreException(element_class, array_class); - } - return; -} - -//---------------------------------------------------------------------------- -// JNI -//---------------------------------------------------------------------------- - -// Called on entry to JNI, transition out of Runnable 
and release share of mutator_lock_. -uint32_t art_portable_jni_method_start(Thread* self) - UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) { - JNIEnvExt* env = self->GetJniEnv(); - uint32_t saved_local_ref_cookie = env->local_ref_cookie; - env->local_ref_cookie = env->locals.GetSegmentState(); - self->TransitionFromRunnableToSuspended(kNative); - return saved_local_ref_cookie; -} - -uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) { - self->DecodeJObject(to_lock)->MonitorEnter(self); - return art_portable_jni_method_start(self); -} - -static inline void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { - JNIEnvExt* env = self->GetJniEnv(); - env->locals.SetSegmentState(env->local_ref_cookie); - env->local_ref_cookie = saved_local_ref_cookie; -} - -void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - PopLocalReferences(saved_local_ref_cookie, self); -} - - -void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, - jobject locked, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); -} - -Object* art_portable_jni_method_end_with_reference(jobject result, - uint32_t saved_local_ref_cookie, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - Object* o = self->DecodeJObject(result); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. 
- if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -Object* art_portable_jni_method_end_with_reference_synchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - Object* o = self->DecodeJObject(result); - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. - if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -// Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation -// handler which is a field within the proxy object receiver. The var args encode the arguments -// with the last argument being a pointer to a JValue to store the result in. -void art_portable_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - va_list ap; - va_start(ap, proxy_method); - - Object* receiver = va_arg(ap, Object*); - Thread* self = va_arg(ap, Thread*); - MethodHelper proxy_mh(proxy_method); - - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - self->VerifyStack(); - - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - - // Create local ref. copies of the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Convert proxy method into expected interface method. 
- AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // Record arguments and turn Object* arguments into jobject to survive GC. - std::vector args; - const size_t num_params = proxy_mh.NumArgs(); - for (size_t i = 1; i < num_params; ++i) { - jvalue val; - switch (proxy_mh.GetParamPrimitiveType(i)) { - case Primitive::kPrimNot: - val.l = soa.AddLocalReference(va_arg(ap, Object*)); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - val.i = va_arg(ap, jint); - break; - case Primitive::kPrimFloat: - // TODO: should this be jdouble? Floats aren't passed to var arg routines. - val.i = va_arg(ap, jint); - break; - case Primitive::kPrimDouble: - val.d = (va_arg(ap, jdouble)); - break; - case Primitive::kPrimLong: - val.j = (va_arg(ap, jlong)); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args.push_back(val); - } - self->EndAssertNoThreadSuspension(old_cause); - JValue* result_location = NULL; - const char* shorty = proxy_mh.GetShorty(); - if (shorty[0] != 'V') { - result_location = va_arg(ap, JValue*); - } - va_end(ap); - JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); - if (result_location != NULL) { - *result_location = result; - } -} - -//---------------------------------------------------------------------------- -// Memory barrier -//---------------------------------------------------------------------------- - -void art_portable_constructor_barrier() { - LOG(FATAL) << "Implemented by IRBuilder."; -} -} // extern "C" diff --git a/runtime/runtime_support_llvm.h 
b/runtime/runtime_support_llvm.h deleted file mode 100644 index 43ea953a96..0000000000 --- a/runtime/runtime_support_llvm.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ -#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ - -extern "C" { -//---------------------------------------------------------------------------- -// Runtime Support Function Lookup Callback -//---------------------------------------------------------------------------- -void* art_portable_find_runtime_support_func(void* context, const char* name); -} // extern "C" - -#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ diff --git a/runtime/runtime_support_llvm_func_list.h b/runtime/runtime_support_llvm_func_list.h deleted file mode 100644 index 8b635cbd2c..0000000000 --- a/runtime/runtime_support_llvm_func_list.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ -#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ - -#define RUNTIME_SUPPORT_FUNC_LIST(V) \ - V(LockObject, art_portable_lock_object_from_code) \ - V(UnlockObject, art_portable_unlock_object_from_code) \ - V(GetCurrentThread, art_portable_get_current_thread_from_code) \ - V(SetCurrentThread, art_portable_set_current_thread_from_code) \ - V(PushShadowFrame, art_portable_push_shadow_frame_from_code) \ - V(PopShadowFrame, art_portable_pop_shadow_frame_from_code) \ - V(TestSuspend, art_portable_test_suspend_from_code) \ - V(ThrowException, art_portable_throw_exception_from_code) \ - V(ThrowStackOverflowException, art_portable_throw_stack_overflow_from_code) \ - V(ThrowNullPointerException, art_portable_throw_null_pointer_exception_from_code) \ - V(ThrowDivZeroException, art_portable_throw_div_zero_from_code) \ - V(ThrowIndexOutOfBounds, art_portable_throw_array_bounds_from_code) \ - V(InitializeTypeAndVerifyAccess, art_portable_initialize_type_and_verify_access_from_code) \ - V(InitializeType, art_portable_initialize_type_from_code) \ - V(IsAssignable, art_portable_is_assignable_from_code) \ - V(CheckCast, art_portable_check_cast_from_code) \ - V(CheckPutArrayElement, art_portable_check_put_array_element_from_code) \ - V(AllocObject, art_portable_alloc_object_from_code) \ - V(AllocObjectWithAccessCheck, art_portable_alloc_object_from_code_with_access_check) \ - V(AllocArray, art_portable_alloc_array_from_code) \ - V(AllocArrayWithAccessCheck, art_portable_alloc_array_from_code_with_access_check) \ - V(CheckAndAllocArray, art_portable_check_and_alloc_array_from_code) \ - V(CheckAndAllocArrayWithAccessCheck, art_portable_check_and_alloc_array_from_code_with_access_check) \ - V(FindStaticMethodWithAccessCheck, art_portable_find_static_method_from_code_with_access_check) \ - 
V(FindDirectMethodWithAccessCheck, art_portable_find_direct_method_from_code_with_access_check) \ - V(FindVirtualMethodWithAccessCheck, art_portable_find_virtual_method_from_code_with_access_check) \ - V(FindSuperMethodWithAccessCheck, art_portable_find_super_method_from_code_with_access_check) \ - V(FindInterfaceMethodWithAccessCheck, art_portable_find_interface_method_from_code_with_access_check) \ - V(FindInterfaceMethod, art_portable_find_interface_method_from_code) \ - V(ResolveString, art_portable_resolve_string_from_code) \ - V(Set32Static, art_portable_set32_static_from_code) \ - V(Set64Static, art_portable_set64_static_from_code) \ - V(SetObjectStatic, art_portable_set_obj_static_from_code) \ - V(Get32Static, art_portable_get32_static_from_code) \ - V(Get64Static, art_portable_get64_static_from_code) \ - V(GetObjectStatic, art_portable_get_obj_static_from_code) \ - V(Set32Instance, art_portable_set32_instance_from_code) \ - V(Set64Instance, art_portable_set64_instance_from_code) \ - V(SetObjectInstance, art_portable_set_obj_instance_from_code) \ - V(Get32Instance, art_portable_get32_instance_from_code) \ - V(Get64Instance, art_portable_get64_instance_from_code) \ - V(GetObjectInstance, art_portable_get_obj_instance_from_code) \ - V(InitializeStaticStorage, art_portable_initialize_static_storage_from_code) \ - V(FillArrayData, art_portable_fill_array_data_from_code) \ - V(GetAndClearException, art_portable_get_and_clear_exception) \ - V(IsExceptionPending, art_portable_is_exception_pending_from_code) \ - V(FindCatchBlock, art_portable_find_catch_block_from_code) \ - V(MarkGCCard, art_portable_mark_gc_card_from_code) \ - V(ProxyInvokeHandler, art_portable_proxy_invoke_handler_from_code) \ - V(art_d2l, art_d2l) \ - V(art_d2i, art_d2i) \ - V(art_f2l, art_f2l) \ - V(art_f2i, art_f2i) \ - V(JniMethodStart, art_portable_jni_method_start) \ - V(JniMethodStartSynchronized, art_portable_jni_method_start_synchronized) \ - V(JniMethodEnd, art_portable_jni_method_end) 
\ - V(JniMethodEndSynchronized, art_portable_jni_method_end_synchronized) \ - V(JniMethodEndWithReference, art_portable_jni_method_end_with_reference) \ - V(JniMethodEndWithReferenceSynchronized, art_portable_jni_method_end_with_reference_synchronized) - -#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ -#undef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ // the guard in this file is just for cpplint diff --git a/runtime/runtime_support_test.cc b/runtime/runtime_support_test.cc deleted file mode 100644 index b827813146..0000000000 --- a/runtime/runtime_support_test.cc +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "runtime_support.h" - -#include "common_test.h" -#include - -namespace art { - -class RuntimeSupportTest : public CommonTest {}; - -TEST_F(RuntimeSupportTest, DoubleToLong) { - EXPECT_EQ(std::numeric_limits::max(), art_d2l(1.85e19)); - EXPECT_EQ(std::numeric_limits::min(), art_d2l(-1.85e19)); - EXPECT_EQ(0LL, art_d2l(0)); - EXPECT_EQ(1LL, art_d2l(1.0)); - EXPECT_EQ(10LL, art_d2l(10.0)); - EXPECT_EQ(100LL, art_d2l(100.0)); - EXPECT_EQ(-1LL, art_d2l(-1.0)); - EXPECT_EQ(-10LL, art_d2l(-10.0)); - EXPECT_EQ(-100LL, art_d2l(-100.0)); -} - -TEST_F(RuntimeSupportTest, FloatToLong) { - EXPECT_EQ(std::numeric_limits::max(), art_f2l(1.85e19)); - EXPECT_EQ(std::numeric_limits::min(), art_f2l(-1.85e19)); - EXPECT_EQ(0LL, art_f2l(0)); - EXPECT_EQ(1LL, art_f2l(1.0)); - EXPECT_EQ(10LL, art_f2l(10.0)); - EXPECT_EQ(100LL, art_f2l(100.0)); - EXPECT_EQ(-1LL, art_f2l(-1.0)); - EXPECT_EQ(-10LL, art_f2l(-10.0)); - EXPECT_EQ(-100LL, art_f2l(-100.0)); -} - -TEST_F(RuntimeSupportTest, DoubleToInt) { - EXPECT_EQ(std::numeric_limits::max(), art_d2i(4.3e9)); - EXPECT_EQ(std::numeric_limits::min(), art_d2i(-4.3e9)); - EXPECT_EQ(0L, art_d2i(0)); - EXPECT_EQ(1L, art_d2i(1.0)); - EXPECT_EQ(10L, art_d2i(10.0)); - EXPECT_EQ(100L, art_d2i(100.0)); - EXPECT_EQ(-1L, art_d2i(-1.0)); - EXPECT_EQ(-10L, art_d2i(-10.0)); - EXPECT_EQ(-100L, art_d2i(-100.0)); -} - -TEST_F(RuntimeSupportTest, FloatToInt) { - EXPECT_EQ(std::numeric_limits::max(), art_f2i(4.3e9)); - EXPECT_EQ(std::numeric_limits::min(), art_f2i(-4.3e9)); - EXPECT_EQ(0L, art_f2i(0)); - EXPECT_EQ(1L, art_f2i(1.0)); - EXPECT_EQ(10L, art_f2i(10.0)); - EXPECT_EQ(100L, art_f2i(100.0)); - EXPECT_EQ(-1L, art_f2i(-1.0)); - EXPECT_EQ(-10L, art_f2i(-10.0)); - EXPECT_EQ(-100L, art_f2i(-100.0)); -} - -} // namespace art diff --git a/runtime/stack.cc b/runtime/stack.cc index 286a2a6a5e..aeb15f09bd 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -16,7 +16,6 @@ #include "stack.h" -#include "oat/runtime/context.h" #include 
"mirror/abstract_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object.h" diff --git a/runtime/stack.h b/runtime/stack.h index 0b94f27c4b..de93846112 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -20,7 +20,7 @@ #include "dex_file.h" #include "instrumentation.h" #include "base/macros.h" -#include "oat/runtime/context.h" +#include "arch/context.h" #include #include diff --git a/runtime/thread.cc b/runtime/thread.cc index 0b3a5b4959..97a1410892 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -30,6 +30,7 @@ #include #include +#include "arch/context.h" #include "base/mutex.h" #include "class_linker.h" #include "class_linker-inl.h" @@ -37,6 +38,7 @@ #include "cutils/atomic-inline.h" #include "debugger.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "gc_map.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" @@ -50,11 +52,9 @@ #include "mirror/object_array-inl.h" #include "mirror/stack_trace_element.h" #include "monitor.h" -#include "oat/runtime/context.h" #include "object_utils.h" #include "reflection.h" #include "runtime.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" @@ -86,16 +86,23 @@ static void UnimplementedEntryPoint() { } #endif +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints); + void Thread::InitFunctionPointers() { #if !defined(__APPLE__) // The Mac GCC is too old to accept this code. // Insert a placeholder so we can easily tell if we call an unimplemented entry point. 
- uintptr_t* begin = reinterpret_cast(&entrypoints_); - uintptr_t* end = reinterpret_cast(reinterpret_cast(begin) + sizeof(entrypoints_)); + uintptr_t* begin = reinterpret_cast(&quick_entrypoints_); + uintptr_t* end = reinterpret_cast(reinterpret_cast(begin) + sizeof(quick_entrypoints_)); + for (uintptr_t* it = begin; it != end; ++it) { + *it = reinterpret_cast(UnimplementedEntryPoint); + } + begin = reinterpret_cast(&portable_entrypoints_); + end = reinterpret_cast(reinterpret_cast(begin) + sizeof(portable_entrypoints_)); for (uintptr_t* it = begin; it != end; ++it) { *it = reinterpret_cast(UnimplementedEntryPoint); } #endif - InitEntryPoints(&entrypoints_); + InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_); } void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) { @@ -1582,86 +1589,87 @@ struct EntryPointInfo { uint32_t offset; const char* name; }; -#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x } +#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x } +#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x } static const EntryPointInfo gThreadEntryPointInfo[] = { - ENTRY_POINT_INFO(pAllocArrayFromCode), - ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pAllocObjectFromCode), - ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), - ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), - ENTRY_POINT_INFO(pCanPutArrayElementFromCode), - ENTRY_POINT_INFO(pCheckCastFromCode), - ENTRY_POINT_INFO(pInitializeStaticStorage), - ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), - ENTRY_POINT_INFO(pInitializeTypeFromCode), - ENTRY_POINT_INFO(pResolveStringFromCode), - ENTRY_POINT_INFO(pSet32Instance), - ENTRY_POINT_INFO(pSet32Static), - ENTRY_POINT_INFO(pSet64Instance), - ENTRY_POINT_INFO(pSet64Static), - ENTRY_POINT_INFO(pSetObjInstance), - ENTRY_POINT_INFO(pSetObjStatic), - 
ENTRY_POINT_INFO(pGet32Instance), - ENTRY_POINT_INFO(pGet32Static), - ENTRY_POINT_INFO(pGet64Instance), - ENTRY_POINT_INFO(pGet64Static), - ENTRY_POINT_INFO(pGetObjInstance), - ENTRY_POINT_INFO(pGetObjStatic), - ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), - ENTRY_POINT_INFO(pJniMethodStart), - ENTRY_POINT_INFO(pJniMethodStartSynchronized), - ENTRY_POINT_INFO(pJniMethodEnd), - ENTRY_POINT_INFO(pJniMethodEndSynchronized), - ENTRY_POINT_INFO(pJniMethodEndWithReference), - ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), - ENTRY_POINT_INFO(pLockObjectFromCode), - ENTRY_POINT_INFO(pUnlockObjectFromCode), - ENTRY_POINT_INFO(pCmpgDouble), - ENTRY_POINT_INFO(pCmpgFloat), - ENTRY_POINT_INFO(pCmplDouble), - ENTRY_POINT_INFO(pCmplFloat), - ENTRY_POINT_INFO(pFmod), - ENTRY_POINT_INFO(pSqrt), - ENTRY_POINT_INFO(pL2d), - ENTRY_POINT_INFO(pFmodf), - ENTRY_POINT_INFO(pL2f), - ENTRY_POINT_INFO(pD2iz), - ENTRY_POINT_INFO(pF2iz), - ENTRY_POINT_INFO(pIdivmod), - ENTRY_POINT_INFO(pD2l), - ENTRY_POINT_INFO(pF2l), - ENTRY_POINT_INFO(pLdiv), - ENTRY_POINT_INFO(pLdivmod), - ENTRY_POINT_INFO(pLmul), - ENTRY_POINT_INFO(pShlLong), - ENTRY_POINT_INFO(pShrLong), - ENTRY_POINT_INFO(pUshrLong), - ENTRY_POINT_INFO(pInterpreterToInterpreterEntry), - ENTRY_POINT_INFO(pInterpreterToQuickEntry), - ENTRY_POINT_INFO(pIndexOf), - ENTRY_POINT_INFO(pMemcmp16), - ENTRY_POINT_INFO(pStringCompareTo), - ENTRY_POINT_INFO(pMemcpy), - ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode), - ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode), - ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), - ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pCheckSuspendFromCode), - ENTRY_POINT_INFO(pTestSuspendFromCode), - 
ENTRY_POINT_INFO(pDeliverException), - ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), - ENTRY_POINT_INFO(pThrowDivZeroFromCode), - ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), - ENTRY_POINT_INFO(pThrowNullPointerFromCode), - ENTRY_POINT_INFO(pThrowStackOverflowFromCode), + QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode), + QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode), + QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), + QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode), + QUICK_ENTRY_POINT_INFO(pCheckCastFromCode), + QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage), + QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), + QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode), + QUICK_ENTRY_POINT_INFO(pResolveStringFromCode), + QUICK_ENTRY_POINT_INFO(pSet32Instance), + QUICK_ENTRY_POINT_INFO(pSet32Static), + QUICK_ENTRY_POINT_INFO(pSet64Instance), + QUICK_ENTRY_POINT_INFO(pSet64Static), + QUICK_ENTRY_POINT_INFO(pSetObjInstance), + QUICK_ENTRY_POINT_INFO(pSetObjStatic), + QUICK_ENTRY_POINT_INFO(pGet32Instance), + QUICK_ENTRY_POINT_INFO(pGet32Static), + QUICK_ENTRY_POINT_INFO(pGet64Instance), + QUICK_ENTRY_POINT_INFO(pGet64Static), + QUICK_ENTRY_POINT_INFO(pGetObjInstance), + QUICK_ENTRY_POINT_INFO(pGetObjStatic), + QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), + QUICK_ENTRY_POINT_INFO(pJniMethodStart), + QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized), + QUICK_ENTRY_POINT_INFO(pJniMethodEnd), + QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized), + QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference), + QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), + QUICK_ENTRY_POINT_INFO(pLockObjectFromCode), + QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode), + QUICK_ENTRY_POINT_INFO(pCmpgDouble), + 
QUICK_ENTRY_POINT_INFO(pCmpgFloat), + QUICK_ENTRY_POINT_INFO(pCmplDouble), + QUICK_ENTRY_POINT_INFO(pCmplFloat), + QUICK_ENTRY_POINT_INFO(pFmod), + QUICK_ENTRY_POINT_INFO(pSqrt), + QUICK_ENTRY_POINT_INFO(pL2d), + QUICK_ENTRY_POINT_INFO(pFmodf), + QUICK_ENTRY_POINT_INFO(pL2f), + QUICK_ENTRY_POINT_INFO(pD2iz), + QUICK_ENTRY_POINT_INFO(pF2iz), + QUICK_ENTRY_POINT_INFO(pIdivmod), + QUICK_ENTRY_POINT_INFO(pD2l), + QUICK_ENTRY_POINT_INFO(pF2l), + QUICK_ENTRY_POINT_INFO(pLdiv), + QUICK_ENTRY_POINT_INFO(pLdivmod), + QUICK_ENTRY_POINT_INFO(pLmul), + QUICK_ENTRY_POINT_INFO(pShlLong), + QUICK_ENTRY_POINT_INFO(pShrLong), + QUICK_ENTRY_POINT_INFO(pUshrLong), + QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry), + QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry), + QUICK_ENTRY_POINT_INFO(pIndexOf), + QUICK_ENTRY_POINT_INFO(pMemcmp16), + QUICK_ENTRY_POINT_INFO(pStringCompareTo), + QUICK_ENTRY_POINT_INFO(pMemcpy), + QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode), + QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), + QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode), + QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode), + QUICK_ENTRY_POINT_INFO(pDeliverException), + QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), + QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode), + QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), + QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode), + QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode), + PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode), }; -#undef ENTRY_POINT_INFO +#undef QUICK_ENTRY_POINT_INFO void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) { 
CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets. @@ -1686,8 +1694,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_ #undef DO_THREAD_OFFSET size_t entry_point_count = arraysize(gThreadEntryPointInfo); - CHECK_EQ(entry_point_count * size_of_pointers, sizeof(EntryPoints)); - uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_); + CHECK_EQ(entry_point_count * size_of_pointers, + sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints)); + uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_); for (size_t i = 0; i < entry_point_count; ++i) { CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name; expected_offset += size_of_pointers; @@ -1709,7 +1718,7 @@ class CatchBlockStackVisitor : public StackVisitor { self_(self), exception_(exception), is_deoptimization_(is_deoptimization), to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location), handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0), - native_method_count_(0), + native_method_count_(0), clear_exception_(false), method_tracing_active_(is_deoptimization || Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()), instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) { @@ -1754,7 +1763,7 @@ class CatchBlockStackVisitor : public StackVisitor { dex_pc = GetDexPc(); } if (dex_pc != DexFile::kDexNoIndex) { - uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc); + uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception_); if (found_dex_pc != DexFile::kDexNoIndex) { handler_dex_pc_ = found_dex_pc; handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc); @@ -1820,8 +1829,13 @@ class CatchBlockStackVisitor : public StackVisitor { LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")"; } } - // Put exception back in root set and clear 
throw location. - self_->SetException(ThrowLocation(), exception_); + if (clear_exception_) { + // Exception was cleared as part of delivery. + DCHECK(!self_->IsExceptionPending()); + } else { + // Put exception back in root set with clear throw location. + self_->SetException(ThrowLocation(), exception_); + } self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_); // Do instrumentation events after allowing thread suspension again. instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); @@ -1864,6 +1878,8 @@ class CatchBlockStackVisitor : public StackVisitor { uint32_t handler_dex_pc_; // Number of native methods passed in crawl (equates to number of SIRTs to pop) uint32_t native_method_count_; + // Should the exception be cleared as the catch block has no move-exception? + bool clear_exception_; // Is method tracing active? const bool method_tracing_active_; // Support for nesting no thread suspension checks. diff --git a/runtime/thread.h b/runtime/thread.h index b9393a3052..ff0fe228c0 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -26,9 +26,10 @@ #include #include "base/macros.h" +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "globals.h" #include "jvalue.h" -#include "oat/runtime/oat_support_entrypoints.h" #include "locks.h" #include "offsets.h" #include "root_visitor.h" @@ -773,9 +774,10 @@ class PACKED(4) Thread { Closure* checkpoint_function_; public: - // Runtime support function pointers + // Entrypoint function pointers // TODO: move this near the top, since changing its offset requires all oats to be recompiled! - EntryPoints entrypoints_; + QuickEntryPoints quick_entrypoints_; + PortableEntryPoints portable_entrypoints_; private: // How many times has our pthread key's destructor been called? 
diff --git a/runtime/thread_arm.cc b/runtime/thread_arm.cc deleted file mode 100644 index 0ef26bff5e..0000000000 --- a/runtime/thread_arm.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "thread.h" - -#include "asm_support.h" -#include "base/macros.h" - -namespace art { - -void Thread::InitCpu() { - CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); - CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art diff --git a/runtime/thread_mips.cc b/runtime/thread_mips.cc deleted file mode 100644 index 0ef26bff5e..0000000000 --- a/runtime/thread_mips.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "thread.h" - -#include "asm_support.h" -#include "base/macros.h" - -namespace art { - -void Thread::InitCpu() { - CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); - CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art diff --git a/runtime/thread_x86.cc b/runtime/thread_x86.cc deleted file mode 100644 index c398b2877a..0000000000 --- a/runtime/thread_x86.cc +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "thread.h" - -#include -#include - -#include "asm_support.h" -#include "base/macros.h" -#include "thread.h" -#include "thread_list.h" - -#if defined(__APPLE__) -#include -#include -struct descriptor_table_entry_t { - uint16_t limit0; - uint16_t base0; - unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; - unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; -} __attribute__((packed)); -#define MODIFY_LDT_CONTENTS_DATA 0 -#else -#include -#endif - -namespace art { - -void Thread::InitCpu() { - static Mutex modify_ldt_lock("modify_ldt lock"); - MutexLock mu(Thread::Current(), modify_ldt_lock); - - const uintptr_t base = reinterpret_cast(this); - const size_t limit = kPageSize; - - const int contents = MODIFY_LDT_CONTENTS_DATA; - const int seg_32bit = 1; - const int read_exec_only = 0; - const int limit_in_pages = 0; - const int seg_not_present = 0; - const int useable = 1; - - int entry_number = -1; - -#if defined(__APPLE__) - descriptor_table_entry_t entry; - memset(&entry, 0, sizeof(entry)); - entry.limit0 = (limit & 0x0ffff); - entry.limit = (limit & 0xf0000) >> 16; - entry.base0 = (base & 0x0000ffff); - entry.base1 = (base & 0x00ff0000) >> 16; - entry.base2 = (base & 0xff000000) >> 24; - entry.type = ((read_exec_only ^ 1) << 1) | (contents << 2); - entry.s = 1; - entry.dpl = 0x3; - entry.p = seg_not_present ^ 1; - entry.avl = useable; - entry.l = 0; - entry.d = seg_32bit; - entry.g = limit_in_pages; - - entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast(&entry), 1); - if (entry_number == -1) { - PLOG(FATAL) << "i386_set_ldt failed"; - } -#else - // Read current LDT entries. - CHECK_EQ((size_t)LDT_ENTRY_SIZE, sizeof(uint64_t)); - std::vector ldt(LDT_ENTRIES); - size_t ldt_size(sizeof(uint64_t) * ldt.size()); - memset(&ldt[0], 0, ldt_size); - // TODO: why doesn't this return LDT_ENTRY_SIZE * LDT_ENTRIES for the main thread? - syscall(__NR_modify_ldt, 0, &ldt[0], ldt_size); - - // Find the first empty slot. 
- for (entry_number = 0; entry_number < LDT_ENTRIES && ldt[entry_number] != 0; ++entry_number) { - } - if (entry_number >= LDT_ENTRIES) { - LOG(FATAL) << "Failed to find a free LDT slot"; - } - - // Update LDT entry. - user_desc ldt_entry; - memset(&ldt_entry, 0, sizeof(ldt_entry)); - ldt_entry.entry_number = entry_number; - ldt_entry.base_addr = base; - ldt_entry.limit = limit; - ldt_entry.seg_32bit = seg_32bit; - ldt_entry.contents = contents; - ldt_entry.read_exec_only = read_exec_only; - ldt_entry.limit_in_pages = limit_in_pages; - ldt_entry.seg_not_present = seg_not_present; - ldt_entry.useable = useable; - CHECK_EQ(0, syscall(__NR_modify_ldt, 1, &ldt_entry, sizeof(ldt_entry))); - entry_number = ldt_entry.entry_number; -#endif - - // Change %fs to be new LDT entry. - uint16_t table_indicator = 1 << 2; // LDT - uint16_t rpl = 3; // Requested privilege level - uint16_t selector = (entry_number << 3) | table_indicator | rpl; - // TODO: use our assembler to generate code - __asm__ __volatile__("movw %w0, %%fs" - : // output - : "q"(selector) // input - :); // clobber - - // Allow easy indirection back to Thread*. - self_ = this; - - // Sanity check that reads from %fs point to this Thread*. - Thread* self_check; - // TODO: use our assembler to generate code - CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_)); - __asm__ __volatile__("movl %%fs:(%1), %0" - : "=r"(self_check) // output - : "r"(THREAD_SELF_OFFSET) // input - :); // clobber - CHECK_EQ(self_check, this); - - // Sanity check other offsets. 
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art diff --git a/runtime/trace.cc b/runtime/trace.cc index 177fd48f7d..2bce70f7c1 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -29,14 +29,14 @@ #include "mirror/dex_cache.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#if !defined(ART_USE_PORTABLE_COMPILER) -#include "oat/runtime/oat_support_entrypoints.h" -#endif #include "object_utils.h" #include "os.h" #include "scoped_thread_state_change.h" #include "thread.h" #include "thread_list.h" +#if !defined(ART_USE_PORTABLE_COMPILER) +#include "entrypoints/quick/quick_entrypoints.h" +#endif namespace art { diff --git a/runtime/vector_output_stream.cc b/runtime/vector_output_stream.cc deleted file mode 100644 index e5ff729036..0000000000 --- a/runtime/vector_output_stream.cc +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (C) 2013 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "vector_output_stream.h" - -#include "base/logging.h" - -namespace art { - -VectorOutputStream::VectorOutputStream(const std::string& location, std::vector& vector) - : OutputStream(location), offset_(vector.size()), vector_(vector) {} - -off_t VectorOutputStream::Seek(off_t offset, Whence whence) { - CHECK(whence == kSeekSet || whence == kSeekCurrent || whence == kSeekEnd) << whence; - off_t new_offset = 0; - switch (whence) { - case kSeekSet: { - new_offset = offset; - break; - } - case kSeekCurrent: { - new_offset = offset_ + offset; - break; - } - case kSeekEnd: { - new_offset = vector_.size() + offset; - break; - } - } - EnsureCapacity(new_offset); - offset_ = new_offset; - return offset_; -} - -} // namespace art diff --git a/runtime/vector_output_stream.h b/runtime/vector_output_stream.h deleted file mode 100644 index 7daa39ffa5..0000000000 --- a/runtime/vector_output_stream.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (C) 2013 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_VECTOR_OUTPUT_STREAM_H_ -#define ART_RUNTIME_VECTOR_OUTPUT_STREAM_H_ - -#include "output_stream.h" - -#include -#include -#include - -namespace art { - -class VectorOutputStream : public OutputStream { - public: - VectorOutputStream(const std::string& location, std::vector& vector); - - virtual ~VectorOutputStream() {} - - bool WriteFully(const void* buffer, int64_t byte_count) { - if (static_cast(offset_) == vector_.size()) { - const uint8_t* start = reinterpret_cast(buffer); - vector_.insert(vector_.end(), &start[0], &start[byte_count]); - offset_ += byte_count; - } else { - off_t new_offset = offset_ + byte_count; - EnsureCapacity(new_offset); - memcpy(&vector_[offset_], buffer, byte_count); - offset_ = new_offset; - } - return true; - } - - off_t Seek(off_t offset, Whence whence); - - private: - void EnsureCapacity(off_t new_offset) { - if (new_offset > static_cast(vector_.size())) { - vector_.resize(new_offset); - } - } - - off_t offset_; - std::vector& vector_; - - DISALLOW_COPY_AND_ASSIGN(VectorOutputStream); -}; - -} // namespace art - -#endif // ART_RUNTIME_VECTOR_OUTPUT_STREAM_H_ diff --git a/runtime/verifier/instruction_flags.cc b/runtime/verifier/instruction_flags.cc index 358791ddf6..f76c226e90 100644 --- a/runtime/verifier/instruction_flags.cc +++ b/runtime/verifier/instruction_flags.cc @@ -22,16 +22,17 @@ namespace art { namespace verifier { std::string InstructionFlags::ToString() const { - char encoding[6]; + char encoding[7]; if (!IsOpcode()) { - strncpy(encoding, "XXXXX", sizeof(encoding)); + strncpy(encoding, "XXXXXX", sizeof(encoding)); } else { - strncpy(encoding, "-----", sizeof(encoding)); - if (IsInTry()) encoding[kInTry] = 'T'; - if (IsBranchTarget()) encoding[kBranchTarget] = 'B'; + strncpy(encoding, "------", sizeof(encoding)); + if (IsVisited()) encoding[kVisited] = 'V'; + if (IsChanged()) encoding[kChanged] = 'C'; + if (IsInTry()) encoding[kInTry] = 'T'; + if (IsBranchTarget()) encoding[kBranchTarget] = 'B'; 
if (IsCompileTimeInfoPoint()) encoding[kCompileTimeInfoPoint] = 'G'; - if (IsVisited()) encoding[kVisited] = 'V'; - if (IsChanged()) encoding[kChanged] = 'C'; + if (IsReturn()) encoding[kReturn] = 'R'; } return encoding; } diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h index 9b2e595b9d..e50ba13932 100644 --- a/runtime/verifier/instruction_flags.h +++ b/runtime/verifier/instruction_flags.h @@ -93,6 +93,21 @@ class InstructionFlags { return IsVisited() || IsChanged(); } + void SetReturn() { + flags_ |= 1 << kReturn; + } + void ClearReturn() { + flags_ &= ~(1 << kReturn); + } + bool IsReturn() const { + return (flags_ & (1 << kReturn)) != 0; + } + + void SetCompileTimeInfoPointAndReturn() { + SetCompileTimeInfoPoint(); + SetReturn(); + } + std::string ToString() const; private: @@ -108,6 +123,8 @@ class InstructionFlags { kBranchTarget = 3, // Location of interest to the compiler for GC maps and verifier based method sharpening. kCompileTimeInfoPoint = 4, + // A return instruction. + kReturn = 5, }; // Size of instruction in code units. diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index acb6557d52..9f0d911aa5 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -102,7 +102,11 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const mirror::Class* kla error += dex_file.GetLocation(); return kHardFailure; } - return VerifyClass(&dex_file, kh.GetDexCache(), klass->GetClassLoader(), class_def_idx, error, allow_soft_failures); + return VerifyClass(&dex_file, + kh.GetDexCache(), + klass->GetClassLoader(), + class_def_idx, error, + allow_soft_failures); } MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, @@ -142,8 +146,15 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, // We couldn't resolve the method, but continue regardless. 
Thread::Current()->ClearException(); } - MethodVerifier::FailureKind result = VerifyMethod(method_idx, dex_file, dex_cache, class_loader, - class_def_idx, it.GetMethodCodeItem(), method, it.GetMemberAccessFlags(), allow_soft_failures); + MethodVerifier::FailureKind result = VerifyMethod(method_idx, + dex_file, + dex_cache, + class_loader, + class_def_idx, + it.GetMethodCodeItem(), + method, + it.GetMemberAccessFlags(), + allow_soft_failures); if (result != kNoFailure) { if (result == kHardFailure) { hard_fail = true; @@ -177,8 +188,15 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file, // We couldn't resolve the method, but continue regardless. Thread::Current()->ClearException(); } - MethodVerifier::FailureKind result = VerifyMethod(method_idx, dex_file, dex_cache, class_loader, - class_def_idx, it.GetMethodCodeItem(), method, it.GetMemberAccessFlags(), allow_soft_failures); + MethodVerifier::FailureKind result = VerifyMethod(method_idx, + dex_file, + dex_cache, + class_loader, + class_def_idx, + it.GetMethodCodeItem(), + method, + it.GetMemberAccessFlags(), + allow_soft_failures); if (result != kNoFailure) { if (result == kHardFailure) { hard_fail = true; @@ -282,7 +300,9 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_ca new_instance_count_(0), monitor_enter_count_(0), can_load_classes_(can_load_classes), - allow_soft_failures_(allow_soft_failures) { + allow_soft_failures_(allow_soft_failures), + has_check_casts_(false), + has_virtual_or_interface_invokes_(false) { } void MethodVerifier::FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc, @@ -470,6 +490,13 @@ bool MethodVerifier::ComputeWidthsAndCountOps() { new_instance_count++; } else if (opcode == Instruction::MONITOR_ENTER) { monitor_enter_count++; + } else if (opcode == Instruction::CHECK_CAST) { + has_check_casts_ = true; + } else if ((inst->Opcode() == Instruction::INVOKE_VIRTUAL) || + (inst->Opcode() == 
Instruction::INVOKE_VIRTUAL_RANGE) || + (inst->Opcode() == Instruction::INVOKE_INTERFACE) || + (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE)) { + has_virtual_or_interface_invokes_ = true; } size_t inst_size = inst->SizeInCodeUnits(); insn_flags_[dex_pc].SetLengthInCodeUnits(inst_size); @@ -506,7 +533,8 @@ bool MethodVerifier::ScanTryCatchBlocks() { return false; } if (!insn_flags_[start].IsOpcode()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'try' block starts inside an instruction (" << start << ")"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) + << "'try' block starts inside an instruction (" << start << ")"; return false; } for (uint32_t dex_pc = start; dex_pc < end; @@ -523,7 +551,8 @@ bool MethodVerifier::ScanTryCatchBlocks() { for (; iterator.HasNext(); iterator.Next()) { uint32_t dex_pc= iterator.GetHandlerAddress(); if (!insn_flags_[dex_pc].IsOpcode()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "exception handler starts at bad address (" << dex_pc << ")"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) + << "exception handler starts at bad address (" << dex_pc << ")"; return false; } insn_flags_[dex_pc].SetBranchTarget(); @@ -560,8 +589,10 @@ bool MethodVerifier::VerifyInstructions() { /* Flag instructions that are garbage collection points */ // All invoke points are marked as "Throw" points already. // We are relying on this to also count all the invokes as interesting. - if (inst->IsBranch() || inst->IsSwitch() || inst->IsThrow() || inst->IsReturn()) { + if (inst->IsBranch() || inst->IsSwitch() || inst->IsThrow()) { insn_flags_[dex_pc].SetCompileTimeInfoPoint(); + } else if (inst->IsReturn()) { + insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn(); } dex_pc += inst->SizeInCodeUnits(); inst = inst->Next(); @@ -727,11 +758,13 @@ bool MethodVerifier::CheckNewArray(uint32_t idx) { } if (bracket_count == 0) { /* The given class must be an array type. 
*/ - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't new-array class '" << descriptor << "' (not an array)"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) + << "can't new-array class '" << descriptor << "' (not an array)"; return false; } else if (bracket_count > 255) { /* It is illegal to create an array of more than 255 dimensions. */ - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't new-array class '" << descriptor << "' (exceeds limit)"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) + << "can't new-array class '" << descriptor << "' (exceeds limit)"; return false; } return true; @@ -749,7 +782,8 @@ bool MethodVerifier::CheckArrayData(uint32_t cur_offset) { if ((int32_t) cur_offset + array_data_offset < 0 || cur_offset + array_data_offset + 2 >= insn_count) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset - << ", data offset " << array_data_offset << ", count " << insn_count; + << ", data offset " << array_data_offset + << ", count " << insn_count; return false; } /* offset to array data table is a relative branch-style offset */ @@ -781,18 +815,22 @@ bool MethodVerifier::CheckBranchTarget(uint32_t cur_offset) { return false; } if (!selfOkay && offset == 0) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch offset of zero not allowed at" << reinterpret_cast(cur_offset); + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch offset of zero not allowed at" + << reinterpret_cast(cur_offset); return false; } // Check for 32-bit overflow. This isn't strictly necessary if we can depend on the runtime // to have identical "wrap-around" behavior, but it's unwise to depend on that. 
if (((int64_t) cur_offset + (int64_t) offset) != (int64_t) (cur_offset + offset)) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch target overflow " << reinterpret_cast(cur_offset) << " +" << offset; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch target overflow " + << reinterpret_cast(cur_offset) << " +" << offset; return false; } const uint32_t insn_count = code_item_->insns_size_in_code_units_; int32_t abs_offset = cur_offset + offset; - if (abs_offset < 0 || (uint32_t) abs_offset >= insn_count || !insn_flags_[abs_offset].IsOpcode()) { + if (abs_offset < 0 || + (uint32_t) abs_offset >= insn_count || + !insn_flags_[abs_offset].IsOpcode()) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> " << reinterpret_cast(abs_offset) << ") at " << reinterpret_cast(cur_offset); @@ -848,7 +886,8 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) { int32_t switch_offset = insns[1] | ((int32_t) insns[2]) << 16; if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 >= insn_count) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset - << ", switch offset " << switch_offset << ", count " << insn_count; + << ", switch offset " << switch_offset + << ", count " << insn_count; return false; } /* offset to switch table is a relative branch-style offset */ @@ -875,15 +914,16 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) { } uint32_t table_size = targets_offset + switch_count * 2; if (switch_insns[0] != expected_signature) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << StringPrintf("wrong signature for switch table (%x, wanted %x)", - switch_insns[0], expected_signature); + Fail(VERIFY_ERROR_BAD_CLASS_HARD) + << StringPrintf("wrong signature for switch table (%x, wanted %x)", + switch_insns[0], expected_signature); return false; } /* make sure the end of the switch is in range */ if (cur_offset + switch_offset + table_size > (uint32_t) insn_count) { - 
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch end: at " << cur_offset << ", switch offset " - << switch_offset << ", end " - << (cur_offset + switch_offset + table_size) + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch end: at " << cur_offset + << ", switch offset " << switch_offset + << ", end " << (cur_offset + switch_offset + table_size) << ", count " << insn_count; return false; } @@ -906,10 +946,13 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) { int32_t offset = (int32_t) switch_insns[targets_offset + targ * 2] | (int32_t) (switch_insns[targets_offset + targ * 2 + 1] << 16); int32_t abs_offset = cur_offset + offset; - if (abs_offset < 0 || abs_offset >= (int32_t) insn_count || !insn_flags_[abs_offset].IsOpcode()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset << " (-> " - << reinterpret_cast(abs_offset) << ") at " - << reinterpret_cast(cur_offset) << "[" << targ << "]"; + if (abs_offset < 0 || + abs_offset >= (int32_t) insn_count || + !insn_flags_[abs_offset].IsOpcode()) { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset + << " (-> " << reinterpret_cast(abs_offset) << ") at " + << reinterpret_cast(cur_offset) + << "[" << targ << "]"; return false; } insn_flags_[abs_offset].SetBranchTarget(); @@ -939,14 +982,15 @@ bool MethodVerifier::CheckVarArgRangeRegs(uint32_t vA, uint32_t vC) { // vA/vC are unsigned 8-bit/16-bit quantities for /range instructions, so there's no risk of // integer overflow when adding them here. 
if (vA + vC > registers_size) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC << " in range invoke (> " - << registers_size << ")"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC + << " in range invoke (> " << registers_size << ")"; return false; } return true; } -static const std::vector* CreateLengthPrefixedDexGcMap(const std::vector& gc_map) { +static const std::vector* CreateLengthPrefixedDexGcMap( + const std::vector& gc_map) { std::vector* length_prefixed_gc_map = new std::vector; length_prefixed_gc_map->reserve(gc_map.size() + 4); length_prefixed_gc_map->push_back((gc_map.size() & 0xff000000) >> 24); @@ -974,7 +1018,11 @@ bool MethodVerifier::VerifyCodeFlow() { << " insns_size=" << insns_size << ")"; } /* Create and initialize table holding register status */ - reg_table_.Init(kTrackCompilerInterestPoints, insn_flags_.get(), insns_size, registers_size, this); + reg_table_.Init(kTrackCompilerInterestPoints, + insn_flags_.get(), + insns_size, + registers_size, + this); work_line_.reset(new RegisterLine(registers_size, this)); @@ -994,27 +1042,37 @@ bool MethodVerifier::VerifyCodeFlow() { return false; } - /* Generate a register map and add it to the method. */ - UniquePtr > map(GenerateGcMap()); - if (map.get() == NULL) { - DCHECK_NE(failures_.size(), 0U); - return false; // Not a real failure, but a failure to encode - } - if (kIsDebugBuild) { - VerifyGcMap(*map); - } - MethodReference ref(dex_file_, dex_method_idx_); - const std::vector* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get())); - verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map); + // Compute information for compiler. + if (Runtime::Current()->IsCompiler()) { + MethodReference ref(dex_file_, dex_method_idx_); + bool compile = IsCandidateForCompilation(code_item_, method_access_flags_); + if (compile) { + /* Generate a register map and add it to the method. 
*/ + UniquePtr > map(GenerateGcMap()); + if (map.get() == NULL) { + DCHECK_NE(failures_.size(), 0U); + return false; // Not a real failure, but a failure to encode + } + if (kIsDebugBuild) { + VerifyGcMap(*map); + } + const std::vector* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get())); + verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map); + } - MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet(); - if (method_to_safe_casts != NULL) { - SetSafeCastMap(ref, method_to_safe_casts); - } + if (has_check_casts_) { + MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet(); + if (method_to_safe_casts != NULL) { + SetSafeCastMap(ref, method_to_safe_casts); + } + } - MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap(); - if (pc_to_concrete_method != NULL) { - SetDevirtMap(ref, pc_to_concrete_method); + if (has_virtual_or_interface_invokes_) { + MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap(); + if (pc_to_concrete_method != NULL) { + SetDevirtMap(ref, pc_to_concrete_method); + } + } } return true; } @@ -1154,13 +1212,15 @@ bool MethodVerifier::SetTypesFromSignature() { break; } default: - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected signature type char '" << descriptor << "'"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected signature type char '" + << descriptor << "'"; return false; } cur_arg++; } if (cur_arg != expected_args) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args << " arguments, found " << cur_arg; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args + << " arguments, found " << cur_arg; return false; } const char* descriptor = dex_file_->GetReturnTypeDescriptor(proto_id); @@ -1294,12 +1354,14 @@ bool MethodVerifier::CodeFlowVerifyMethod() { if (dead_start < 0) dead_start = insn_idx; } else if (dead_start >= 0) { - LogVerifyInfo() << "dead code " << reinterpret_cast(dead_start) << "-" << 
reinterpret_cast(insn_idx - 1); + LogVerifyInfo() << "dead code " << reinterpret_cast(dead_start) + << "-" << reinterpret_cast(insn_idx - 1); dead_start = -1; } } if (dead_start >= 0) { - LogVerifyInfo() << "dead code " << reinterpret_cast(dead_start) << "-" << reinterpret_cast(insn_idx - 1); + LogVerifyInfo() << "dead code " << reinterpret_cast(dead_start) + << "-" << reinterpret_cast(insn_idx - 1); } // To dump the state of the verify after a method, do something like: // if (PrettyMethod(dex_method_idx_, *dex_file_) == @@ -1456,7 +1518,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { /* check the method signature */ const RegType& return_type = GetMethodReturnType(); if (!return_type.IsCategory1Types()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type " << return_type; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type " + << return_type; } else { // Compilers may generate synthetic functions that write byte values into boolean fields. // Also, it may use integer values for boolean, byte, short, and character return types. @@ -1505,10 +1568,14 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { // Disallow returning uninitialized values and verify that the reference in vAA is an // instance of the "return_type" if (reg_type.IsUninitializedTypes()) { - Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "returning uninitialized object '" << reg_type << "'"; + Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "returning uninitialized object '" + << reg_type << "'"; } else if (!return_type.IsAssignableFrom(reg_type)) { - Fail(reg_type.IsUnresolvedTypes() ? VERIFY_ERROR_BAD_CLASS_SOFT : VERIFY_ERROR_BAD_CLASS_HARD) - << "returning '" << reg_type << "', but expected from declaration '" << return_type << "'"; + Fail(reg_type.IsUnresolvedTypes() ? 
+ VERIFY_ERROR_BAD_CLASS_SOFT : + VERIFY_ERROR_BAD_CLASS_HARD) + << "returning '" << reg_type << "', but expected from declaration '" + << return_type << "'"; } } } @@ -1728,7 +1795,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::THROW: { const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x()); if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) { - Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "thrown class " << res_type << " not instanceof Throwable"; + Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "thrown class " << res_type + << " not instanceof Throwable"; } break; } @@ -1750,7 +1818,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { /* array_type can be null if the reg type is Zero */ if (!array_type.IsZero()) { if (!array_type.IsArrayTypes()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type " << array_type; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type " + << array_type; } else { const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_); DCHECK(!component_type.IsConflict()); @@ -1790,8 +1859,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { mismatch = !reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes(); } if (mismatch) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to if-eq/if-ne (" << reg_type1 << "," << reg_type2 - << ") must both be references or integral"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to if-eq/if-ne (" << reg_type1 << "," + << reg_type2 << ") must both be references or integral"; } break; } @@ -1811,7 +1880,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::IF_NEZ: { const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t()); if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type << " unexpected 
as arg to if-eqz/if-nez"; + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type + << " unexpected as arg to if-eqz/if-nez"; } // Find previous instruction - its existence is a precondition to peephole optimization. @@ -2133,7 +2203,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_STATIC: case Instruction::INVOKE_STATIC_RANGE: { bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE); - mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_STATIC, is_range, false); + mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, + METHOD_STATIC, + is_range, + false); const char* descriptor; if (called_method == NULL) { uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); @@ -2155,7 +2228,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_INTERFACE: case Instruction::INVOKE_INTERFACE_RANGE: { bool is_range = (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE); - mirror::AbstractMethod* abs_method = VerifyInvocationArgs(inst, METHOD_INTERFACE, is_range, false); + mirror::AbstractMethod* abs_method = VerifyInvocationArgs(inst, + METHOD_INTERFACE, + is_range, + false); if (abs_method != NULL) { mirror::Class* called_interface = abs_method->GetDeclaringClass(); if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) { @@ -2319,7 +2395,11 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::MUL_FLOAT: case Instruction::DIV_FLOAT: case Instruction::REM_FLOAT: - work_line_->CheckBinaryOp(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false); + work_line_->CheckBinaryOp(inst, + reg_types_.Float(), + reg_types_.Float(), + reg_types_.Float(), + false); break; case Instruction::ADD_DOUBLE: case Instruction::SUB_DOUBLE: @@ -2337,15 +2417,27 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case 
Instruction::SHL_INT_2ADDR: case Instruction::SHR_INT_2ADDR: case Instruction::USHR_INT_2ADDR: - work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false); + work_line_->CheckBinaryOp2addr(inst, + reg_types_.Integer(), + reg_types_.Integer(), + reg_types_.Integer(), + false); break; case Instruction::AND_INT_2ADDR: case Instruction::OR_INT_2ADDR: case Instruction::XOR_INT_2ADDR: - work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), true); + work_line_->CheckBinaryOp2addr(inst, + reg_types_.Integer(), + reg_types_.Integer(), + reg_types_.Integer(), + true); break; case Instruction::DIV_INT_2ADDR: - work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false); + work_line_->CheckBinaryOp2addr(inst, + reg_types_.Integer(), + reg_types_.Integer(), + reg_types_.Integer(), + false); break; case Instruction::ADD_LONG_2ADDR: case Instruction::SUB_LONG_2ADDR: @@ -2370,7 +2462,11 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::MUL_FLOAT_2ADDR: case Instruction::DIV_FLOAT_2ADDR: case Instruction::REM_FLOAT_2ADDR: - work_line_->CheckBinaryOp2addr(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false); + work_line_->CheckBinaryOp2addr(inst, + reg_types_.Float(), + reg_types_.Float(), + reg_types_.Float(), + false); break; case Instruction::ADD_DOUBLE_2ADDR: case Instruction::SUB_DOUBLE_2ADDR: @@ -2650,6 +2746,20 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { // Make workline consistent with fallthrough computed from peephole optimization. work_line_->CopyFromLine(fallthrough_line.get()); } + if (insn_flags_[next_insn_idx].IsReturn()) { + // For returns we only care about the operand to the return, all other registers are dead. 
+ const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx); + Instruction::Code opcode = ret_inst->Opcode(); + if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) { + work_line_->MarkAllRegistersAsConflicts(); + } else { + if (opcode == Instruction::RETURN_WIDE) { + work_line_->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x()); + } else { + work_line_->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x()); + } + } + } RegisterLine* next_line = reg_table_.GetLine(next_insn_idx); if (next_line != NULL) { // Merge registers into what we have for the next instruction, @@ -3062,8 +3172,9 @@ mirror::AbstractMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instr for (size_t param_index = 0; param_index < params_size; param_index++) { if (actual_args >= expected_args) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invalid call to '" << PrettyMethod(res_method) - << "'. Expected " << expected_args << " arguments, processing argument " << actual_args - << " (where longs/doubles count twice)."; + << "'. Expected " << expected_args + << " arguments, processing argument " << actual_args + << " (where longs/doubles count twice)."; return NULL; } const char* descriptor = @@ -3216,7 +3327,8 @@ void MethodVerifier::VerifyAPut(const Instruction* inst, // The instruction agrees with the type of array, confirm the value to be stored does too // Note: we use the instruction type (rather than the component type) for aput-object as // incompatible classes will be caught at runtime as an array store exception - work_line_->VerifyRegisterType(inst->VRegA_23x(), is_primitive ? component_type : insn_type); + work_line_->VerifyRegisterType(inst->VRegA_23x(), + is_primitive ? 
component_type : insn_type); } } } @@ -3235,8 +3347,10 @@ mirror::Field* MethodVerifier::GetStaticField(int field_idx) { if (klass_type.IsUnresolvedTypes()) { return NULL; // Can't resolve Class so no more to do here, will do checking at runtime. } - mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx, - dex_cache_, class_loader_); + mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, + field_idx, + dex_cache_, + class_loader_); if (field == NULL) { LOG(INFO) << "Unable to resolve static field " << field_idx << " (" << dex_file_->GetFieldName(field_id) << ") in " @@ -3270,8 +3384,10 @@ mirror::Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int fie if (klass_type.IsUnresolvedTypes()) { return NULL; // Can't resolve Class so no more to do here } - mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx, - dex_cache_, class_loader_); + mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, + field_idx, + dex_cache_, + class_loader_); if (field == NULL) { LOG(INFO) << "Unable to resolve instance field " << field_idx << " (" << dex_file_->GetFieldName(field_id) << ") in " @@ -3302,8 +3418,8 @@ mirror::Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int fie // Field accesses through uninitialized references are only allowable for constructors where // the field is declared in this class Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "cannot access instance field " << PrettyField(field) - << " of a not fully initialized object within the context of " - << PrettyMethod(dex_method_idx_, *dex_file_); + << " of a not fully initialized object within the context" + << " of " << PrettyMethod(dex_method_idx_, *dex_file_); return NULL; } else if (!field_klass.IsAssignableFrom(obj_type)) { // Trying to access C1.field1 using reference of type C2, which is neither C1 or a sub-class @@ -3637,9 
+3753,28 @@ bool MethodVerifier::UpdateRegisters(uint32_t next_insn, const RegisterLine* mer * there's nothing to "merge". Copy the registers over and mark it as changed. (This is the * only way a register can transition out of "unknown", so this is not just an optimization.) */ - target_line->CopyFromLine(merge_line); + if (!insn_flags_[next_insn].IsReturn()) { + target_line->CopyFromLine(merge_line); + } else { + // For returns we only care about the operand to the return, all other registers are dead. + // Initialize them as conflicts so they don't add to GC and deoptimization information. + const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn); + Instruction::Code opcode = ret_inst->Opcode(); + if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) { + target_line->MarkAllRegistersAsConflicts(); + } else { + target_line->CopyFromLine(merge_line); + if (opcode == Instruction::RETURN_WIDE) { + target_line->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x()); + } else { + target_line->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x()); + } + } + } } else { - UniquePtr copy(gDebugVerify ? new RegisterLine(target_line->NumRegs(), this) : NULL); + UniquePtr copy(gDebugVerify ? 
+ new RegisterLine(target_line->NumRegs(), this) : + NULL); if (gDebugVerify) { copy->CopyFromLine(target_line); } @@ -3676,7 +3811,8 @@ const RegType& MethodVerifier::GetMethodReturnType() { const RegType& MethodVerifier::GetDeclaringClass() { if (declaring_class_ == NULL) { const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_); - const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_)); + const char* descriptor + = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_)); if (mirror_method_ != NULL) { mirror::Class* klass = mirror_method_->GetDeclaringClass(); declaring_class_ = ®_types_.FromClass(descriptor, klass, @@ -3909,6 +4045,7 @@ void MethodVerifier::VerifyGcMap(const std::vector& data) { } void MethodVerifier::SetDexGcMap(MethodReference ref, const std::vector& gc_map) { + DCHECK(Runtime::Current()->IsCompiler()); { WriterMutexLock mu(Thread::Current(), *dex_gc_maps_lock_); DexGcMapTable::iterator it = dex_gc_maps_->find(ref); @@ -3923,6 +4060,7 @@ void MethodVerifier::SetDexGcMap(MethodReference ref, const std::vector void MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* cast_set) { + DCHECK(Runtime::Current()->IsCompiler()); MutexLock mu(Thread::Current(), *safecast_map_lock_); SafeCastMap::iterator it = safecast_map_->find(ref); if (it != safecast_map_->end()) { @@ -3931,10 +4069,11 @@ void MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSe } safecast_map_->Put(ref, cast_set); - CHECK(safecast_map_->find(ref) != safecast_map_->end()); + DCHECK(safecast_map_->find(ref) != safecast_map_->end()); } bool MethodVerifier::IsSafeCast(MethodReference ref, uint32_t pc) { + DCHECK(Runtime::Current()->IsCompiler()); MutexLock mu(Thread::Current(), *safecast_map_lock_); SafeCastMap::const_iterator it = safecast_map_->find(ref); if (it == safecast_map_->end()) { @@ -3947,6 +4086,7 @@ bool MethodVerifier::IsSafeCast(MethodReference 
ref, uint32_t pc) { } const std::vector* MethodVerifier::GetDexGcMap(MethodReference ref) { + DCHECK(Runtime::Current()->IsCompiler()); ReaderMutexLock mu(Thread::Current(), *dex_gc_maps_lock_); DexGcMapTable::const_iterator it = dex_gc_maps_->find(ref); if (it == dex_gc_maps_->end()) { @@ -3959,6 +4099,7 @@ const std::vector* MethodVerifier::GetDexGcMap(MethodReference ref) { void MethodVerifier::SetDevirtMap(MethodReference ref, const PcToConcreteMethodMap* devirt_map) { + DCHECK(Runtime::Current()->IsCompiler()); WriterMutexLock mu(Thread::Current(), *devirt_maps_lock_); DevirtualizationMapTable::iterator it = devirt_maps_->find(ref); if (it != devirt_maps_->end()) { @@ -3967,11 +4108,12 @@ void MethodVerifier::SetDevirtMap(MethodReference ref, } devirt_maps_->Put(ref, devirt_map); - CHECK(devirt_maps_->find(ref) != devirt_maps_->end()); + DCHECK(devirt_maps_->find(ref) != devirt_maps_->end()); } const MethodReference* MethodVerifier::GetDevirtMap(const MethodReference& ref, uint32_t dex_pc) { + DCHECK(Runtime::Current()->IsCompiler()); ReaderMutexLock mu(Thread::Current(), *devirt_maps_lock_); DevirtualizationMapTable::const_iterator it = devirt_maps_->find(ref); if (it == devirt_maps_->end()) { @@ -3979,7 +4121,8 @@ const MethodReference* MethodVerifier::GetDevirtMap(const MethodReference& ref, } // Look up the PC in the map, get the concrete method to execute and return its reference. - MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc); + MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method + = it->second->find(dex_pc); if (pc_to_concrete_method != it->second->end()) { return &(pc_to_concrete_method->second); } else { @@ -4031,6 +4174,24 @@ std::vector MethodVerifier::DescribeVRegs(uint32_t dex_pc) { return result; } +bool MethodVerifier::IsCandidateForCompilation(const DexFile::CodeItem* code_item, + const uint32_t access_flags) { + // Don't compile class initializers, ever. 
+ if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { + return false; + } + + const Runtime* runtime = Runtime::Current(); + if (runtime->IsSmallMode() && runtime->UseCompileTimeClassPath()) { + // In Small mode, we only compile small methods. + const uint32_t code_size = code_item->insns_size_in_code_units_; + return (code_size < runtime->GetSmallModeMethodDexSizeLimit()); + } else { + // In normal mode, we compile everything. + return true; + } +} + ReaderWriterMutex* MethodVerifier::dex_gc_maps_lock_ = NULL; MethodVerifier::DexGcMapTable* MethodVerifier::dex_gc_maps_ = NULL; @@ -4044,65 +4205,79 @@ Mutex* MethodVerifier::rejected_classes_lock_ = NULL; MethodVerifier::RejectedClassesTable* MethodVerifier::rejected_classes_ = NULL; void MethodVerifier::Init() { - dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock"); - Thread* self = Thread::Current(); - { - WriterMutexLock mu(self, *dex_gc_maps_lock_); - dex_gc_maps_ = new MethodVerifier::DexGcMapTable; - } + if (Runtime::Current()->IsCompiler()) { + dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock"); + Thread* self = Thread::Current(); + { + WriterMutexLock mu(self, *dex_gc_maps_lock_); + dex_gc_maps_ = new MethodVerifier::DexGcMapTable; + } - safecast_map_lock_ = new Mutex("verifier Cast Elision lock"); - { - MutexLock mu(self, *safecast_map_lock_); - safecast_map_ = new MethodVerifier::SafeCastMap(); - } + safecast_map_lock_ = new Mutex("verifier Cast Elision lock"); + { + MutexLock mu(self, *safecast_map_lock_); + safecast_map_ = new MethodVerifier::SafeCastMap(); + } - devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock"); + devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock"); - { - WriterMutexLock mu(self, *devirt_maps_lock_); - devirt_maps_ = new MethodVerifier::DevirtualizationMapTable(); - } + { + WriterMutexLock mu(self, *devirt_maps_lock_); + devirt_maps_ = new 
MethodVerifier::DevirtualizationMapTable(); + } - rejected_classes_lock_ = new Mutex("verifier rejected classes lock"); - { - MutexLock mu(self, *rejected_classes_lock_); - rejected_classes_ = new MethodVerifier::RejectedClassesTable; + rejected_classes_lock_ = new Mutex("verifier rejected classes lock"); + { + MutexLock mu(self, *rejected_classes_lock_); + rejected_classes_ = new MethodVerifier::RejectedClassesTable; + } } art::verifier::RegTypeCache::Init(); } void MethodVerifier::Shutdown() { - Thread* self = Thread::Current(); - { - WriterMutexLock mu(self, *dex_gc_maps_lock_); - STLDeleteValues(dex_gc_maps_); - delete dex_gc_maps_; - dex_gc_maps_ = NULL; - } - delete dex_gc_maps_lock_; - dex_gc_maps_lock_ = NULL; - - { - WriterMutexLock mu(self, *devirt_maps_lock_); - STLDeleteValues(devirt_maps_); - delete devirt_maps_; - devirt_maps_ = NULL; - } - delete devirt_maps_lock_; - devirt_maps_lock_ = NULL; + if (Runtime::Current()->IsCompiler()) { + Thread* self = Thread::Current(); + { + WriterMutexLock mu(self, *dex_gc_maps_lock_); + STLDeleteValues(dex_gc_maps_); + delete dex_gc_maps_; + dex_gc_maps_ = NULL; + } + delete dex_gc_maps_lock_; + dex_gc_maps_lock_ = NULL; + + { + MutexLock mu(self, *safecast_map_lock_); + STLDeleteValues(safecast_map_); + delete safecast_map_; + safecast_map_ = NULL; + } + delete safecast_map_lock_; + safecast_map_lock_ = NULL; + + { + WriterMutexLock mu(self, *devirt_maps_lock_); + STLDeleteValues(devirt_maps_); + delete devirt_maps_; + devirt_maps_ = NULL; + } + delete devirt_maps_lock_; + devirt_maps_lock_ = NULL; - { - MutexLock mu(self, *rejected_classes_lock_); - delete rejected_classes_; - rejected_classes_ = NULL; + { + MutexLock mu(self, *rejected_classes_lock_); + delete rejected_classes_; + rejected_classes_ = NULL; + } + delete rejected_classes_lock_; + rejected_classes_lock_ = NULL; } - delete rejected_classes_lock_; - rejected_classes_lock_ = NULL; verifier::RegTypeCache::ShutDown(); } void 
MethodVerifier::AddRejectedClass(ClassReference ref) { + DCHECK(Runtime::Current()->IsCompiler()); { MutexLock mu(Thread::Current(), *rejected_classes_lock_); rejected_classes_->insert(ref); @@ -4111,6 +4286,7 @@ void MethodVerifier::AddRejectedClass(ClassReference ref) { } bool MethodVerifier::IsClassRejected(ClassReference ref) { + DCHECK(Runtime::Current()->IsCompiler()); MutexLock mu(Thread::Current(), *rejected_classes_lock_); return (rejected_classes_->find(ref) != rejected_classes_->end()); } diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index e1bcbb19e5..3f98a00adc 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -237,6 +237,9 @@ class MethodVerifier { // Describe VRegs at the given dex pc. std::vector DescribeVRegs(uint32_t dex_pc); + static bool IsCandidateForCompilation(const DexFile::CodeItem* code_item, + const uint32_t access_flags); + private: // Adds the given string to the beginning of the last failure message. void PrependToLastFailMessage(std::string); @@ -654,7 +657,7 @@ class MethodVerifier { LOCKS_EXCLUDED(devirt_maps_lock_); typedef std::set RejectedClassesTable; static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - static RejectedClassesTable* rejected_classes_; + static RejectedClassesTable* rejected_classes_ GUARDED_BY(rejected_classes_lock_); static void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); @@ -717,6 +720,13 @@ class MethodVerifier { // Converts soft failures to hard failures when false. Only false when the compiler isn't // running and the verifier is called from the class linker. const bool allow_soft_failures_; + + // Indicates if the method being verified contains at least one check-cast instruction. + bool has_check_casts_; + + // Indicates if the method being verified contains at least one invoke-virtual/range + // or invoke-interface/range. 
+ bool has_virtual_or_interface_invokes_; }; std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs); diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc index d2abaac6f7..7965c0641e 100644 --- a/runtime/verifier/register_line.cc +++ b/runtime/verifier/register_line.cc @@ -167,7 +167,7 @@ void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) { DCHECK(uninit_type.IsUninitializedTypes()); const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type); size_t changed = 0; - for (size_t i = 0; i < num_regs_; i++) { + for (uint32_t i = 0; i < num_regs_; i++) { if (GetRegisterType(i).Equals(uninit_type)) { line_[i] = init_type.GetId(); changed++; @@ -176,6 +176,31 @@ void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) { DCHECK_GT(changed, 0u); } +void RegisterLine::MarkAllRegistersAsConflicts() { + uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId(); + for (uint32_t i = 0; i < num_regs_; i++) { + line_[i] = conflict_type_id; + } +} + +void RegisterLine::MarkAllRegistersAsConflictsExcept(uint32_t vsrc) { + uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId(); + for (uint32_t i = 0; i < num_regs_; i++) { + if (i != vsrc) { + line_[i] = conflict_type_id; + } + } +} + +void RegisterLine::MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc) { + uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId(); + for (uint32_t i = 0; i < num_regs_; i++) { + if ((i != vsrc) && (i != (vsrc + 1))) { + line_[i] = conflict_type_id; + } + } +} + std::string RegisterLine::Dump() const { std::string result; for (size_t i = 0; i < num_regs_; i++) { diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h index cde7b9b0be..f3808776f2 100644 --- a/runtime/verifier/register_line.h +++ b/runtime/verifier/register_line.h @@ -140,6 +140,13 @@ class RegisterLine { void MarkRefsAsInitialized(const 
RegType& uninit_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + /* + * Update all registers to be Conflict except vsrc. + */ + void MarkAllRegistersAsConflicts(); + void MarkAllRegistersAsConflictsExcept(uint32_t vsrc); + void MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc); + /* * Check constraints on constructor return. Specifically, make sure that the "this" argument got * initialized. diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index 10ca5638d0..3b5d80d981 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -103,7 +103,9 @@ struct ReferenceMap2Visitor : public StackVisitor { // 0024: move-object v3, v2 // 0025: goto 0013 // Detaled dex instructions for ReferenceMap.java are at the end of this function. - CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x + // CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x + // We eliminate the non-live registers at a return, so only v3 is live: + CHECK_REGS_CONTAIN_REFS(3); // v3: y ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x18U))); CHECK(ref_bitmap); @@ -188,7 +190,7 @@ struct ReferenceMap2Visitor : public StackVisitor { // 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap], // |0010: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c -// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap], +// 0:[Conflict],1:[Conflict],2:[Conflict],3:[Reference: java.lang.Object],4:[Conflict],5:[Conflict],6:[Conflict],7:[Conflict],8:[Conflict], // |0013: return-object v3 // |0014: move-exception v0 -- cgit v1.2.3-59-g8ed1b