summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author Nicolas Geoffray <ngeoffray@google.com> 2021-07-12 15:53:27 +0100
committer Nicolas Geoffray <ngeoffray@google.com> 2021-07-13 12:14:13 +0000
commite4f983c808b0b7d2d9c39f34f67971bba1354b08 (patch)
tree80dd518ac4e7ea00670f2ebdb70efbe8b1f577cd
parent5f2f289b64f163e491b9b53364ce04874f82b4e3 (diff)
Remove mterp.
Test: test.py Change-Id: Id800bcf86965ab19cf1e79ecbfa8996a6a6c335e
-rw-r--r--libdexfile/dex/modifiers.h8
-rw-r--r--runtime/Android.bp62
-rw-r--r--runtime/art_method.h19
-rw-r--r--runtime/class_linker.cc2
-rw-r--r--runtime/instrumentation.cc6
-rw-r--r--runtime/interpreter/interpreter.cc61
-rw-r--r--runtime/interpreter/interpreter.h2
-rw-r--r--runtime/interpreter/interpreter_common.h94
-rw-r--r--runtime/interpreter/interpreter_mterp_impl.h45
-rw-r--r--runtime/interpreter/mterp/arm/arithmetic.S975
-rw-r--r--runtime/interpreter/mterp/arm/array.S250
-rw-r--r--runtime/interpreter/mterp/arm/control_flow.S200
-rw-r--r--runtime/interpreter/mterp/arm/floating_point.S482
-rw-r--r--runtime/interpreter/mterp/arm/invoke.S115
-rw-r--r--runtime/interpreter/mterp/arm/main.S777
-rw-r--r--runtime/interpreter/mterp/arm/object.S207
-rw-r--r--runtime/interpreter/mterp/arm/other.S433
-rw-r--r--runtime/interpreter/mterp/arm64/array.S235
-rw-r--r--runtime/interpreter/mterp/arm64/control_flow.S211
-rw-r--r--runtime/interpreter/mterp/arm64/invoke.S104
-rw-r--r--runtime/interpreter/mterp/arm64/main.S799
-rw-r--r--runtime/interpreter/mterp/arm64/object.S201
-rw-r--r--runtime/interpreter/mterp/arm64/other.S407
-rw-r--r--runtime/interpreter/mterp/arm64ng/arithmetic.S (renamed from runtime/interpreter/mterp/arm64/arithmetic.S)0
-rw-r--r--runtime/interpreter/mterp/arm64ng/floating_point.S (renamed from runtime/interpreter/mterp/arm64/floating_point.S)0
-rw-r--r--runtime/interpreter/mterp/common/gen_setup.py4
-rw-r--r--runtime/interpreter/mterp/mterp.cc921
-rw-r--r--runtime/interpreter/mterp/mterp.h53
-rw-r--r--runtime/interpreter/mterp/mterp_stub.cc52
-rw-r--r--runtime/interpreter/mterp/nterp.cc108
-rw-r--r--runtime/interpreter/mterp/x86/array.S215
-rw-r--r--runtime/interpreter/mterp/x86/control_flow.S208
-rw-r--r--runtime/interpreter/mterp/x86/invoke.S115
-rw-r--r--runtime/interpreter/mterp/x86/main.S806
-rw-r--r--runtime/interpreter/mterp/x86/object.S167
-rw-r--r--runtime/interpreter/mterp/x86/other.S379
-rw-r--r--runtime/interpreter/mterp/x86_64/array.S178
-rw-r--r--runtime/interpreter/mterp/x86_64/control_flow.S197
-rw-r--r--runtime/interpreter/mterp/x86_64/invoke.S109
-rw-r--r--runtime/interpreter/mterp/x86_64/main.S761
-rw-r--r--runtime/interpreter/mterp/x86_64/object.S152
-rw-r--r--runtime/interpreter/mterp/x86_64/other.S348
-rw-r--r--runtime/interpreter/mterp/x86_64ng/arithmetic.S (renamed from runtime/interpreter/mterp/x86_64/arithmetic.S)0
-rw-r--r--runtime/interpreter/mterp/x86_64ng/floating_point.S (renamed from runtime/interpreter/mterp/x86_64/floating_point.S)0
-rw-r--r--runtime/interpreter/mterp/x86ng/arithmetic.S (renamed from runtime/interpreter/mterp/x86/arithmetic.S)0
-rw-r--r--runtime/interpreter/mterp/x86ng/floating_point.S (renamed from runtime/interpreter/mterp/x86/floating_point.S)0
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc4
-rw-r--r--runtime/oat.h4
-rw-r--r--runtime/oat_quick_method_header.cc1
-rw-r--r--runtime/runtime-inl.h10
-rw-r--r--runtime/runtime.cc4
-rw-r--r--runtime/runtime.h15
-rw-r--r--runtime/thread.cc10
-rw-r--r--runtime/thread.h19
-rw-r--r--runtime/thread_list.cc1
-rw-r--r--tools/cpp-define-generator/globals.def9
-rw-r--r--tools/cpp-define-generator/thread.def2
57 files changed, 124 insertions, 10423 deletions
diff --git a/libdexfile/dex/modifiers.h b/libdexfile/dex/modifiers.h
index 60141df7e8..3c745b27d2 100644
--- a/libdexfile/dex/modifiers.h
+++ b/libdexfile/dex/modifiers.h
@@ -106,9 +106,9 @@ static constexpr uint32_t kAccNterpInvokeFastPathFlag = 0x00200000; // meth
static constexpr uint32_t kAccPublicApi = 0x10000000; // field, method
static constexpr uint32_t kAccCorePlatformApi = 0x20000000; // field, method
-// Non-intrinsics: Caches whether we can use fast-path in the interpreter invokes.
-// Intrinsics: These bits are part of the intrinsic ordinal.
-static constexpr uint32_t kAccFastInterpreterToInterpreterInvoke = 0x40000000; // method.
+// Not currently used, except for intrinsic methods where these bits
+// are part of the intrinsic ordinal.
+static constexpr uint32_t kAccMayBeUnusedBits = 0x40000000;
// Set by the compiler driver when compiling boot classes with intrinsic methods.
static constexpr uint32_t kAccIntrinsic = 0x80000000; // method (runtime)
@@ -127,7 +127,7 @@ static constexpr uint32_t kAccHiddenapiBits = kAccPublicApi | kAccCorePlatformAp
// which overlap are not valid when kAccIntrinsic is set.
static constexpr uint32_t kAccIntrinsicBits = kAccHiddenapiBits |
kAccSingleImplementation | kAccMustCountLocks | kAccCompileDontBother | kAccCopied |
- kAccPreviouslyWarm | kAccFastInterpreterToInterpreterInvoke;
+ kAccPreviouslyWarm | kAccMayBeUnusedBits;
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 3bea92da71..a801340f5e 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -291,9 +291,7 @@ libart_cc_defaults {
arch: {
arm: {
srcs: [
- "interpreter/mterp/mterp.cc",
"interpreter/mterp/nterp.cc",
- ":libart_mterp.arm",
":libart_mterp.armng",
"arch/arm/context_arm.cc",
"arch/arm/entrypoints_init_arm.cc",
@@ -308,9 +306,7 @@ libart_cc_defaults {
},
arm64: {
srcs: [
- "interpreter/mterp/mterp.cc",
"interpreter/mterp/nterp.cc",
- ":libart_mterp.arm64",
":libart_mterp.arm64ng",
"arch/arm64/context_arm64.cc",
"arch/arm64/entrypoints_init_arm64.cc",
@@ -324,9 +320,7 @@ libart_cc_defaults {
},
x86: {
srcs: [
- "interpreter/mterp/mterp.cc",
"interpreter/mterp/nterp.cc",
- ":libart_mterp.x86",
":libart_mterp.x86ng",
"arch/x86/context_x86.cc",
"arch/x86/entrypoints_init_x86.cc",
@@ -347,9 +341,7 @@ libart_cc_defaults {
srcs: [
// Note that the fault_handler_x86.cc is not a mistake. This file is
// shared between the x86 and x86_64 architectures.
- "interpreter/mterp/mterp.cc",
"interpreter/mterp/nterp.cc",
- ":libart_mterp.x86_64",
":libart_mterp.x86_64ng",
"arch/x86_64/context_x86_64.cc",
"arch/x86_64/entrypoints_init_x86_64.cc",
@@ -892,60 +884,10 @@ cc_library_headers {
}
genrule {
- name: "libart_mterp.arm",
- out: ["mterp_arm.S"],
- srcs: ["interpreter/mterp/arm/*.S"],
- tool_files: [
- "interpreter/mterp/gen_mterp.py",
- "interpreter/mterp/common/gen_setup.py",
- ":art_libdexfile_dex_instruction_list_header",
- ],
- cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
-}
-
-genrule {
- name: "libart_mterp.arm64",
- out: ["mterp_arm64.S"],
- srcs: ["interpreter/mterp/arm64/*.S"],
- tool_files: [
- "interpreter/mterp/gen_mterp.py",
- "interpreter/mterp/common/gen_setup.py",
- ":art_libdexfile_dex_instruction_list_header",
- ],
- cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
-}
-
-genrule {
- name: "libart_mterp.x86",
- out: ["mterp_x86.S"],
- srcs: ["interpreter/mterp/x86/*.S"],
- tool_files: [
- "interpreter/mterp/gen_mterp.py",
- "interpreter/mterp/common/gen_setup.py",
- ":art_libdexfile_dex_instruction_list_header",
- ],
- cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
-}
-
-genrule {
- name: "libart_mterp.x86_64",
- out: ["mterp_x86_64.S"],
- srcs: ["interpreter/mterp/x86_64/*.S"],
- tool_files: [
- "interpreter/mterp/gen_mterp.py",
- "interpreter/mterp/common/gen_setup.py",
- ":art_libdexfile_dex_instruction_list_header",
- ],
- cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
-}
-
-genrule {
name: "libart_mterp.x86ng",
out: ["mterp_x86ng.S"],
srcs: [
"interpreter/mterp/x86ng/*.S",
- "interpreter/mterp/x86/arithmetic.S",
- "interpreter/mterp/x86/floating_point.S",
],
tool_files: [
"interpreter/mterp/gen_mterp.py",
@@ -960,8 +902,6 @@ genrule {
out: ["mterp_x86_64ng.S"],
srcs: [
"interpreter/mterp/x86_64ng/*.S",
- "interpreter/mterp/x86_64/arithmetic.S",
- "interpreter/mterp/x86_64/floating_point.S",
],
tool_files: [
"interpreter/mterp/gen_mterp.py",
@@ -976,8 +916,6 @@ genrule {
out: ["mterp_arm64ng.S"],
srcs: [
"interpreter/mterp/arm64ng/*.S",
- "interpreter/mterp/arm64/arithmetic.S",
- "interpreter/mterp/arm64/floating_point.S",
],
tool_files: [
"interpreter/mterp/gen_mterp.py",
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 44a2f3dac1..5af14b67b8 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -326,23 +326,6 @@ class ArtMethod final {
bool IsSignaturePolymorphic() REQUIRES_SHARED(Locks::mutator_lock_);
- bool UseFastInterpreterToInterpreterInvoke() const {
- // The bit is applicable only if the method is not intrinsic.
- constexpr uint32_t mask = kAccFastInterpreterToInterpreterInvoke | kAccIntrinsic;
- return (GetAccessFlags() & mask) == kAccFastInterpreterToInterpreterInvoke;
- }
-
- void SetFastInterpreterToInterpreterInvokeFlag() REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(!IsIntrinsic());
- AddAccessFlags(kAccFastInterpreterToInterpreterInvoke);
- }
-
- void ClearFastInterpreterToInterpreterInvokeFlag() REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!IsIntrinsic()) {
- ClearAccessFlags(kAccFastInterpreterToInterpreterInvoke);
- }
- }
-
bool SkipAccessChecks() const {
// The kAccSkipAccessChecks flag value is used with a different meaning for native methods,
// so we need to check the kAccNative flag as well.
@@ -498,8 +481,6 @@ class ArtMethod final {
SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
entry_point_from_quick_compiled_code,
pointer_size);
- // We might want to invoke compiled code, so don't use the fast path.
- ClearFastInterpreterToInterpreterInvokeFlag();
}
static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index eba25ee502..f842265d29 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -151,8 +151,6 @@
#include "verifier/verifier_deps.h"
#include "well_known_classes.h"
-#include "interpreter/interpreter_mterp_impl.h"
-
namespace art {
using android::base::StringPrintf;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 5170a37174..d19f11d745 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -577,7 +577,7 @@ static void PotentiallyAddListenerTo(Instrumentation::InstrumentationEvent event
} else {
list.push_back(listener);
}
- Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
+ *has_listener = true;
}
void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
@@ -655,11 +655,11 @@ static void PotentiallyRemoveListenerFrom(Instrumentation::InstrumentationEvent
// Check if the list contains any non-null listener, and update 'has_listener'.
for (InstrumentationListener* l : list) {
if (l != nullptr) {
- Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
+ *has_listener = true;
return;
}
}
- Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = false; });
+ *has_listener = false;
}
void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 302551f8dd..580e555db0 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -23,13 +23,11 @@
#include "common_throws.h"
#include "dex/dex_file_types.h"
#include "interpreter_common.h"
-#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jvalue-inl.h"
#include "mirror/string-inl.h"
-#include "mterp/mterp.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "shadow_frame-inl.h"
@@ -233,17 +231,6 @@ static void InterpreterJni(Thread* self,
}
}
-enum InterpreterImplKind {
- kSwitchImplKind, // Switch-based interpreter implementation.
- kMterpImplKind // Assembly interpreter
-};
-
-#if ART_USE_CXX_INTERPRETER
-static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
-#else
-static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
-#endif
-
static JValue ExecuteSwitch(Thread* self,
const CodeItemDataAccessor& accessor,
ShadowFrame& shadow_frame,
@@ -278,13 +265,6 @@ static inline JValue Execute(
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
- // Check that we are using the right interpreter.
- if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
- // The flag might be currently being updated on all threads. Retry with lock.
- MutexLock tll_mu(self, *Locks::thread_list_lock_);
- DCHECK_EQ(self->UseMterp(), CanUseMterp());
- }
-
if (LIKELY(!from_deoptimize)) { // Entering the method, but not via deoptimization.
if (kIsDebugBuild) {
CHECK_EQ(shadow_frame.GetDexPC(), 0u);
@@ -365,40 +345,8 @@ static inline JValue Execute(
VLOG(interpreter) << "Interpreting " << method->PrettyMethod();
- // Note that mterp doesn't support non-compilable methods, nor methods on
- // which we must count locks.
- if (kInterpreterImplKind == kSwitchImplKind ||
- UNLIKELY(!Runtime::Current()->IsStarted()) ||
- !method->IsCompilable() ||
- method->MustCountLocks() ||
- Runtime::Current()->IsActiveTransaction()) {
- return ExecuteSwitch(
- self, accessor, shadow_frame, result_register, /*interpret_one_instruction=*/ false);
- }
-
- CHECK_EQ(kInterpreterImplKind, kMterpImplKind);
- while (true) {
- // Mterp does not support all instrumentation/debugging.
- if (!self->UseMterp()) {
- return ExecuteSwitch(
- self, accessor, shadow_frame, result_register, /*interpret_one_instruction=*/ false);
- }
- bool returned = ExecuteMterpImpl(self,
- accessor.Insns(),
- &shadow_frame,
- &result_register);
- if (returned) {
- return result_register;
- } else {
- // Mterp didn't like that instruction. Single-step it with the reference interpreter.
- result_register = ExecuteSwitch(
- self, accessor, shadow_frame, result_register, /*interpret_one_instruction=*/ true);
- if (shadow_frame.GetDexPC() == dex::kDexNoIndex) {
- // Single-stepped a return or an exception not handled locally. Return to caller.
- return result_register;
- }
- }
- }
+ return ExecuteSwitch(
+ self, accessor, shadow_frame, result_register, /*interpret_one_instruction=*/ false);
}
void EnterInterpreterFromInvoke(Thread* self,
@@ -691,14 +639,9 @@ void ArtInterpreterToInterpreterBridge(Thread* self,
}
void CheckInterpreterAsmConstants() {
- CheckMterpAsmConstants();
CheckNterpAsmConstants();
}
-void InitInterpreterTls(Thread* self) {
- InitMterpTls(self);
-}
-
bool PrevFrameWillRetry(Thread* self, const ShadowFrame& frame) {
ShadowFrame* prev_frame = frame.GetLink();
if (prev_frame == nullptr) {
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index ff69c42612..f7bc1a3d55 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -67,8 +67,6 @@ void ArtInterpreterToInterpreterBridge(Thread* self,
// One-time check of assembler constants.
void CheckInterpreterAsmConstants();
-void InitInterpreterTls(Thread* self);
-
// Returns true if the previous frame has the ForceRetryInstruction bit set. This is required for
// ForPopFrame to work correctly since that will cause the java function return with null/0 which
// might not be expected by the code being run.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 3bbfc55179..2619573400 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -46,7 +46,6 @@
#include "dex/dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
-#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
#include "jit/jit-inl.h"
#include "mirror/call_site.h"
@@ -57,7 +56,6 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
-#include "mterp/mterp.h"
#include "obj_ptr.h"
#include "stack.h"
#include "thread.h"
@@ -232,9 +230,7 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Make sure to check for async exceptions before anything else.
- if (is_mterp && self->UseMterp()) {
- DCHECK(!self->ObserveAsyncException());
- } else if (UNLIKELY(self->ObserveAsyncException())) {
+ if (UNLIKELY(self->ObserveAsyncException())) {
return false;
}
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
@@ -292,94 +288,6 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
}
}
- // Check whether we can use the fast path. The result is cached in the ArtMethod.
- // If the bit is not set, we explicitly recheck all the conditions.
- // If any of the conditions get falsified, it is important to clear the bit.
- bool use_fast_path = false;
- if (is_mterp && self->UseMterp()) {
- use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke();
- if (!use_fast_path) {
- use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method);
- if (use_fast_path) {
- called_method->SetFastInterpreterToInterpreterInvokeFlag();
- }
- }
- }
-
- if (use_fast_path) {
- DCHECK(Runtime::Current()->IsStarted());
- DCHECK(!Runtime::Current()->IsActiveTransaction());
- DCHECK(called_method->SkipAccessChecks());
- DCHECK(!called_method->IsNative());
- DCHECK(!called_method->IsProxyMethod());
- DCHECK(!called_method->IsIntrinsic());
- DCHECK(!(called_method->GetDeclaringClass()->IsStringClass() &&
- called_method->IsConstructor()));
- DCHECK(type != kStatic || called_method->GetDeclaringClass()->IsVisiblyInitialized());
-
- const uint16_t number_of_inputs =
- (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
- CodeItemDataAccessor accessor(called_method->DexInstructionData());
- uint32_t num_regs = accessor.RegistersSize();
- DCHECK_EQ(number_of_inputs, accessor.InsSize());
- DCHECK_GE(num_regs, number_of_inputs);
- size_t first_dest_reg = num_regs - number_of_inputs;
-
- if (UNLIKELY(!CheckStackOverflow(self, ShadowFrame::ComputeSize(num_regs)))) {
- return false;
- }
-
- if (jit != nullptr) {
- jit->AddSamples(self, called_method, 1, /* with_backedges */false);
- }
-
- // Create shadow frame on the stack.
- const char* old_cause = self->StartAssertNoThreadSuspension("DoFastInvoke");
- ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
- ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
- if (is_range) {
- size_t src = vregC;
- for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst, ++src) {
- *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(src);
- *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(src);
- }
- } else {
- uint32_t arg[Instruction::kMaxVarArgRegs];
- inst->GetVarArgs(arg, inst_data);
- for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst) {
- *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(arg[i]);
- *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(arg[i]);
- }
- }
- self->PushShadowFrame(new_shadow_frame);
- self->EndAssertNoThreadSuspension(old_cause);
-
- VLOG(interpreter) << "Interpreting " << called_method->PrettyMethod();
-
- DCheckStaticState(self, called_method);
- while (true) {
- // Mterp does not support all instrumentation/debugging.
- if (!self->UseMterp()) {
- *result =
- ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, false);
- break;
- }
- if (ExecuteMterpImpl(self, accessor.Insns(), new_shadow_frame, result)) {
- break;
- } else {
- // Mterp didn't like that instruction. Single-step it with the reference interpreter.
- *result = ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, true);
- if (new_shadow_frame->GetDexPC() == dex::kDexNoIndex) {
- break; // Single-stepped a return or an exception not handled locally.
- }
- }
- }
- self->PopShadowFrame();
-
- return !self->IsExceptionPending();
- }
-
return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
result);
}
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
deleted file mode 100644
index 892790b118..0000000000
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
-#define ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
-
-#include "base/locks.h"
-#include "base/macros.h"
-#include "dex/dex_file.h"
-#include "jvalue.h"
-#include "obj_ptr.h"
-
-namespace art {
-
-class ShadowFrame;
-class Thread;
-
-namespace interpreter {
-
-// Mterp does not support transactions or access check, thus no templated versions.
-extern "C" bool ExecuteMterpImpl(Thread* self,
- const uint16_t* dex_instructions,
- ShadowFrame* shadow_frame,
- JValue* result_register) REQUIRES_SHARED(Locks::mutator_lock_);
-
-// The entrypoint for nterp, which ArtMethods can directly point to.
-extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
-
-} // namespace interpreter
-} // namespace art
-
-#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
diff --git a/runtime/interpreter/mterp/arm/arithmetic.S b/runtime/interpreter/mterp/arm/arithmetic.S
deleted file mode 100644
index a6ba454882..0000000000
--- a/runtime/interpreter/mterp/arm/arithmetic.S
+++ /dev/null
@@ -1,975 +0,0 @@
-%def binop(preinstr="", result="r0", chkzero="0", instr=""):
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if $chkzero
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-%def binop2addr(preinstr="", result="r0", chkzero="0", instr=""):
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if $chkzero
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- $preinstr @ optional op; may set condition codes
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-%def binopLit16(result="r0", chkzero="0", instr=""):
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if $chkzero
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-%def binopLit8(extract="asr r1, r3, #8", result="r0", chkzero="0", instr=""):
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- $extract @ optional; typically r1<- ssssssCC (sign extended)
- .if $chkzero
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-%def binopWide(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
- GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
- .if $chkzero
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR $result0,$result1,r9 @ vAA/vAA+1<, $result0/$result1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-%def binopWide2addr(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
- GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
- .if $chkzero
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR $result0,$result1,r9 @ vAA/vAA+1<- $result0/$result1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-%def unop(preinstr="", instr=""):
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- $preinstr @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-%def unopNarrower(preinstr="", instr=""):
- /*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0/r1", where
- * "result" is a 32-bit quantity in r0.
- *
- * For: long-to-float
- *
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for op_move.)
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-%def unopWide(preinstr="", instr=""):
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-%def unopWider(preinstr="", instr=""):
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0", where
- * "result" is a 64-bit quantity in r0/r1.
- *
- * For: int-to-long, int-to-double, float-to-long, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- GET_VREG r0, r3 @ r0<- vB
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- $preinstr @ optional op; may set condition codes
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vA/vA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-%def op_add_int():
-% binop(instr="add r0, r0, r1")
-
-%def op_add_int_2addr():
-% binop2addr(instr="add r0, r0, r1")
-
-%def op_add_int_lit16():
-% binopLit16(instr="add r0, r0, r1")
-
-%def op_add_int_lit8():
-% binopLit8(extract="", instr="add r0, r0, r3, asr #8")
-
-%def op_add_long():
-% binopWide(preinstr="adds r0, r0, r2", instr="adc r1, r1, r3")
-
-%def op_add_long_2addr():
-% binopWide2addr(preinstr="adds r0, r0, r2", instr="adc r1, r1, r3")
-
-%def op_and_int():
-% binop(instr="and r0, r0, r1")
-
-%def op_and_int_2addr():
-% binop2addr(instr="and r0, r0, r1")
-
-%def op_and_int_lit16():
-% binopLit16(instr="and r0, r0, r1")
-
-%def op_and_int_lit8():
-% binopLit8(extract="", instr="and r0, r0, r3, asr #8")
-
-%def op_and_long():
-% binopWide(preinstr="and r0, r0, r2", instr="and r1, r1, r3")
-
-%def op_and_long_2addr():
-% binopWide2addr(preinstr="and r0, r0, r2", instr="and r1, r1, r3")
-
-%def op_cmp_long():
- /*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
- GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
- cmp r0, r2
- sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
- mov ip, #0
- mvnlt ip, #0 @ -1
- cmpeq r0, r2 @ For correct EQ/NE, we may need to repeat the first CMP
- orrne ip, #1
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG ip, r9 @ vAA<- ip
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_div_int():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-%def op_div_int_2addr():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-%def op_div_int_lit16():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-%def op_div_int_lit8():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-%def op_div_long():
-% binopWide(instr="bl __aeabi_ldivmod", chkzero="1")
-
-%def op_div_long_2addr():
-% binopWide2addr(instr="bl __aeabi_ldivmod", chkzero="1")
-
-%def op_int_to_byte():
-% unop(instr="sxtb r0, r0")
-
-%def op_int_to_char():
-% unop(instr="uxth r0, r0")
-
-%def op_int_to_long():
-% unopWider(instr="mov r1, r0, asr #31")
-
-%def op_int_to_short():
-% unop(instr="sxth r0, r0")
-
-%def op_long_to_int():
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-% op_move()
-
-%def op_mul_int():
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-% binop(instr="mul r0, r1, r0")
-
-%def op_mul_int_2addr():
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-% binop2addr(instr="mul r0, r1, r0")
-
-%def op_mul_int_lit16():
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-% binopLit16(instr="mul r0, r1, r0")
-
-%def op_mul_int_lit8():
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-% binopLit8(instr="mul r0, r1, r0")
-
-%def op_mul_long():
- /*
- * Signed 64-bit integer multiply.
- *
- * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
- * WX
- * x YZ
- * --------
- * ZW ZX
- * YW YX
- *
- * The low word of the result holds ZX, the high word holds
- * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
- * it doesn't fit in the low 64 bits.
- *
- * Unlike most ARM math operations, multiply instructions have
- * restrictions on using the same register more than once (Rd and Rm
- * cannot be the same).
- */
- /* mul-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
- GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST, lsr #8 @ r0<- AA
- add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
- CLEAR_SHADOW_PAIR r0, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r1, r2 , r0 @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_mul_long_2addr():
- /*
- * Signed 64-bit integer multiply, "/2addr" version.
- *
- * See op_mul_long for an explanation.
- *
- * We get a little tight on registers, so to avoid looking up &fp[A]
- * again we stuff it into rINST.
- */
- /* mul-long/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
- GET_VREG_WIDE_BY_ADDR r0, r1, rINST @ r0/r1<- vAA/vAA+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST @ r0<- &fp[A] (free up rINST)
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- add r2, r2, lr @ r2<- r2 + low(ZxW + (YxX))
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r1, r2, r0 @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_neg_int():
-% unop(instr="rsb r0, r0, #0")
-
-%def op_neg_long():
-% unopWide(preinstr="rsbs r0, r0, #0", instr="rsc r1, r1, #0")
-
-%def op_not_int():
-% unop(instr="mvn r0, r0")
-
-%def op_not_long():
-% unopWide(preinstr="mvn r0, r0", instr="mvn r1, r1")
-
-%def op_or_int():
-% binop(instr="orr r0, r0, r1")
-
-%def op_or_int_2addr():
-% binop2addr(instr="orr r0, r0, r1")
-
-%def op_or_int_lit16():
-% binopLit16(instr="orr r0, r0, r1")
-
-%def op_or_int_lit8():
-% binopLit8(extract="", instr="orr r0, r0, r3, asr #8")
-
-%def op_or_long():
-% binopWide(preinstr="orr r0, r0, r2", instr="orr r1, r1, r3")
-
-%def op_or_long_2addr():
-% binopWide2addr(preinstr="orr r0, r0, r2", instr="orr r1, r1, r3")
-
-%def op_rem_int():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-%def op_rem_int_2addr():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-%def op_rem_int_lit16():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-%def op_rem_int_lit8():
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-%def op_rem_long():
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-% binopWide(instr="bl __aeabi_ldivmod", result0="r2", result1="r3", chkzero="1")
-
-%def op_rem_long_2addr():
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-% binopWide2addr(instr="bl __aeabi_ldivmod", result0="r2", result1="r3", chkzero="1")
-
-%def op_rsub_int():
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-% binopLit16(instr="rsb r0, r0, r1")
-
-%def op_rsub_int_lit8():
-% binopLit8(extract="", instr="rsb r0, r0, r3, asr #8")
-
-%def op_shl_int():
-% binop(preinstr="and r1, r1, #31", instr="mov r0, r0, asl r1")
-
-%def op_shl_int_2addr():
-% binop2addr(preinstr="and r1, r1, #31", instr="mov r0, r0, asl r1")
-
-%def op_shl_int_lit8():
-% binopLit8(extract="ubfx r1, r3, #8, #5", instr="mov r0, r0, asl r1")
-
-%def op_shl_long():
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shl-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r2<- r2 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_shl_long_2addr():
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_shr_int():
-% binop(preinstr="and r1, r1, #31", instr="mov r0, r0, asr r1")
-
-%def op_shr_int_2addr():
-% binop2addr(preinstr="and r1, r1, #31", instr="mov r0, r0, asr r1")
-
-%def op_shr_int_lit8():
-% binopLit8(extract="ubfx r1, r3, #8, #5", instr="mov r0, r0, asr r1")
-
-%def op_shr_long():
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r0<- r0 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_shr_long_2addr():
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_sub_int():
-% binop(instr="sub r0, r0, r1")
-
-%def op_sub_int_2addr():
-% binop2addr(instr="sub r0, r0, r1")
-
-%def op_sub_long():
-% binopWide(preinstr="subs r0, r0, r2", instr="sbc r1, r1, r3")
-
-%def op_sub_long_2addr():
-% binopWide2addr(preinstr="subs r0, r0, r2", instr="sbc r1, r1, r3")
-
-%def op_ushr_int():
-% binop(preinstr="and r1, r1, #31", instr="mov r0, r0, lsr r1")
-
-%def op_ushr_int_2addr():
-% binop2addr(preinstr="and r1, r1, #31", instr="mov r0, r0, lsr r1")
-
-%def op_ushr_int_lit8():
-% binopLit8(extract="ubfx r1, r3, #8, #5", instr="mov r0, r0, lsr r1")
-
-%def op_ushr_long():
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* ushr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r0<- r0 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_ushr_long_2addr():
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_xor_int():
-% binop(instr="eor r0, r0, r1")
-
-%def op_xor_int_2addr():
-% binop2addr(instr="eor r0, r0, r1")
-
-%def op_xor_int_lit16():
-% binopLit16(instr="eor r0, r0, r1")
-
-%def op_xor_int_lit8():
-% binopLit8(extract="", instr="eor r0, r0, r3, asr #8")
-
-%def op_xor_long():
-% binopWide(preinstr="eor r0, r0, r2", instr="eor r1, r1, r3")
-
-%def op_xor_long_2addr():
-% binopWide2addr(preinstr="eor r0, r0, r2", instr="eor r1, r1, r3")
diff --git a/runtime/interpreter/mterp/arm/array.S b/runtime/interpreter/mterp/arm/array.S
deleted file mode 100644
index 7b3db6165b..0000000000
--- a/runtime/interpreter/mterp/arm/array.S
+++ /dev/null
@@ -1,250 +0,0 @@
-%def op_aget(load="ldr", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $load r2, [r0, #$data_offset] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_aget_boolean():
-% op_aget(load="ldrb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-% op_aget(load="ldrsb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-% op_aget(load="ldrh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- EXPORT_PC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- bl artAGetObjectFromMterp @ (array, index)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- PREFETCH_INST 2
- cmp r1, #0
- bne MterpException
- SET_VREG_OBJECT r0, r9
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_aget_short():
-% op_aget(load="ldrsh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r2, r3, r9 @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- $store r2, [r0, #$data_offset] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_aput_boolean():
-% op_aput(store="strb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-% op_aput(store="strb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-% op_aput(store="strh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- bl MterpAputObject
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_aput_short():
-% op_aput(store="strh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG_WIDE_BY_ADDR r2, r3, r9 @ r2/r3<- vAA/vAA+1
- GET_INST_OPCODE ip @ extract opcode from rINST
- strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_array_length():
- /*
- * Return the length of an array.
- */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r0, r1 @ r0<- vB (object ref)
- cmp r0, #0 @ is object null?
- beq common_errNullObject @ yup, fail
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r3, r2 @ vB<- length
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_fill_array_data():
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
- GET_VREG r0, r3 @ r0<- vAA (array object)
- add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
- bl MterpFillArrayData @ (obj, payload)
- cmp r0, #0 @ 0 means an exception is thrown
- beq MterpPossibleException @ exception?
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern $helper
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rSELF
- bl $helper
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_filled_new_array_range():
-% op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpNewArray
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/control_flow.S b/runtime/interpreter/mterp/arm/control_flow.S
deleted file mode 100644
index 2b4cf206b7..0000000000
--- a/runtime/interpreter/mterp/arm/control_flow.S
+++ /dev/null
@@ -1,200 +0,0 @@
-%def bincmp(condition=""):
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- b${condition} MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def zcmp(condition=""):
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- b${condition} MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_goto():
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-%def op_goto_16():
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S rINST, 1 @ rINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-%def op_goto_32():
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH r0, 1 @ r0<- aaaa (lo)
- FETCH r3, 2 @ r1<- AAAA (hi)
- orrs rINST, r0, r3, lsl #16 @ rINST<- AAAAaaaa
- b MterpCommonTakenBranch
-
-%def op_if_eq():
-% bincmp(condition="eq")
-
-%def op_if_eqz():
-% zcmp(condition="eq")
-
-%def op_if_ge():
-% bincmp(condition="ge")
-
-%def op_if_gez():
-% zcmp(condition="ge")
-
-%def op_if_gt():
-% bincmp(condition="gt")
-
-%def op_if_gtz():
-% zcmp(condition="gt")
-
-%def op_if_le():
-% bincmp(condition="le")
-
-%def op_if_lez():
-% zcmp(condition="le")
-
-%def op_if_lt():
-% bincmp(condition="lt")
-
-%def op_if_ltz():
-% zcmp(condition="lt")
-
-%def op_if_ne():
-% bincmp(condition="ne")
-
-%def op_if_nez():
-% zcmp(condition="ne")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_VREG r1, r3 @ r1<- vAA
- add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
- bl $func @ r0<- code-unit branch offset
- movs rINST, r0
- b MterpCommonTakenBranch
-
-%def op_return():
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA
- mov r1, #0
- b MterpReturn
-
-%def op_return_object():
-% op_return()
-
-%def op_return_void():
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r0, #0
- mov r1, #0
- b MterpReturn
-
-%def op_return_wide():
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
- GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1 <- vAA/vAA+1
- b MterpReturn
-
-%def op_sparse_switch():
-% op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r1, r2 @ r1<- vAA (exception object)
- cmp r1, #0 @ null object?
- beq common_errNullObject @ yes, throw an NPE instead
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
- b MterpException
diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S
deleted file mode 100644
index 035fc134dd..0000000000
--- a/runtime/interpreter/mterp/arm/floating_point.S
+++ /dev/null
@@ -1,482 +0,0 @@
-%def fbinop(instr=""):
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
- GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $instr @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_FLOAT s2, r9, lr @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-%def fbinop2addr(instr=""):
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG_FLOAT_BY_ADDR s0, r9 @ s0<- vA
- $instr @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_FLOAT_BY_ADDR s2, r9 @ vAA<- s2 No need to clear as it's 2addr
- GOTO_OPCODE ip @ jump to next instruction
-
-%def fbinopWide(instr=""):
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
- GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $instr @ s2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- SET_VREG_DOUBLE_BY_ADDR d2, r9 @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-%def fbinopWide2addr(instr=""):
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG_DOUBLE_BY_ADDR d0, r9 @ d0<- vA
- $instr @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_DOUBLE_BY_ADDR d2, r9 @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-%def funop(instr=""):
- /*
- * Generic 32-bit unary floating-point operation. Provide an "instr"
- * line that specifies an instruction that performs "s1 = op s0".
- *
- * for: int-to-float, float-to-int
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_FLOAT s1, r9, lr @ vA<- s1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def funopNarrower(instr=""):
- /*
- * Generic 64bit-to-32bit unary floating point operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- GET_VREG_DOUBLE_BY_ADDR d0, r3 @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_FLOAT s0, r9, lr @ vA<- s0
- GOTO_OPCODE ip @ jump to next instruction
-
-%def funopWider(instr=""):
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- SET_VREG_DOUBLE_BY_ADDR d0, r9 @ vA<- d0
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_add_double():
-% fbinopWide(instr="faddd d2, d0, d1")
-
-%def op_add_double_2addr():
-% fbinopWide2addr(instr="faddd d2, d0, d1")
-
-%def op_add_float():
-% fbinop(instr="fadds s2, s0, s1")
-
-%def op_add_float_2addr():
-% fbinop2addr(instr="fadds s2, s0, s1")
-
-%def op_cmpg_double():
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
- GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r1<- -1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_cmpg_float():
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
- GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r1<- -1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_cmpl_double():
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
- GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r1<- 1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_cmpl_float():
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
- GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r1<- 1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_div_double():
-% fbinopWide(instr="fdivd d2, d0, d1")
-
-%def op_div_double_2addr():
-% fbinopWide2addr(instr="fdivd d2, d0, d1")
-
-%def op_div_float():
-% fbinop(instr="fdivs s2, s0, s1")
-
-%def op_div_float_2addr():
-% fbinop2addr(instr="fdivs s2, s0, s1")
-
-%def op_double_to_float():
-% funopNarrower(instr="vcvt.f32.f64 s0, d0")
-
-%def op_double_to_int():
-% funopNarrower(instr="ftosizd s0, d0")
-
-%def op_double_to_long():
-% unopWide(instr="bl d2l_doconv")
-% add_helper(op_double_to_long_helper)
-
-%def op_double_to_long_helper():
-/*
- * Convert the double in r0/r1 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-d2l_doconv:
- ubfx r2, r1, #20, #11 @ grab the exponent
- movw r3, #0x43e
- cmp r2, r3 @ MINLONG < x > MAXLONG?
- bhs d2l_special_cases
- b __aeabi_d2lz @ tail call to convert double to long
-d2l_special_cases:
- movw r3, #0x7ff
- cmp r2, r3
- beq d2l_maybeNaN @ NaN?
-d2l_notNaN:
- adds r1, r1, r1 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if exp negative
- bx lr @ return
-d2l_maybeNaN:
- orrs r3, r0, r1, lsl #12
- beq d2l_notNaN @ if fraction is non-zero, it's a NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
-
-%def op_float_to_double():
-% funopWider(instr="vcvt.f64.f32 d0, s0")
-
-%def op_float_to_int():
-% funop(instr="ftosizs s1, s0")
-
-%def op_float_to_long():
-% unopWider(instr="bl f2l_doconv")
-% add_helper(op_float_to_long_helper)
-
-%def op_float_to_long_helper():
-/*
- * Convert the float in r0 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-f2l_doconv:
- ubfx r2, r0, #23, #8 @ grab the exponent
- cmp r2, #0xbe @ MININT < x > MAXINT?
- bhs f2l_special_cases
- b __aeabi_f2lz @ tail call to convert float to long
-f2l_special_cases:
- cmp r2, #0xff @ NaN or infinity?
- beq f2l_maybeNaN
-f2l_notNaN:
- adds r0, r0, r0 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if exp negative
- bx lr @ return
-f2l_maybeNaN:
- lsls r3, r0, #9
- beq f2l_notNaN @ if fraction is non-zero, it's a NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
-
-%def op_int_to_double():
-% funopWider(instr="fsitod d0, s0")
-
-%def op_int_to_float():
-% funop(instr="fsitos s1, s0")
-
-%def op_long_to_double():
- /*
- * Specialised 64-bit floating point operation.
- *
- * Note: The result will be returned in d2.
- *
- * For: long-to-double
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- GET_VREG_DOUBLE_BY_ADDR d0, r3 @ d0<- vBB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- vcvt.f64.s32 d1, s1 @ d1<- (double)(vAAh)
- vcvt.f64.u32 d2, s0 @ d2<- (double)(vAAl)
- vldr d3, constval$opcode
- vmla.f64 d2, d1, d3 @ d2<- vAAh*2^32 + vAAl
-
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_DOUBLE_BY_ADDR d2, r9 @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
- /* literal pool helper */
-constval${opcode}:
- .8byte 0x41f0000000000000
-
-%def op_long_to_float():
-% unopNarrower(instr="bl __aeabi_l2f")
-
-%def op_mul_double():
-% fbinopWide(instr="fmuld d2, d0, d1")
-
-%def op_mul_double_2addr():
-% fbinopWide2addr(instr="fmuld d2, d0, d1")
-
-%def op_mul_float():
-% fbinop(instr="fmuls s2, s0, s1")
-
-%def op_mul_float_2addr():
-% fbinop2addr(instr="fmuls s2, s0, s1")
-
-%def op_neg_double():
-% unopWide(instr="add r1, r1, #0x80000000")
-
-%def op_neg_float():
-% unop(instr="add r0, r0, #0x80000000")
-
-%def op_rem_double():
-/* EABI doesn't define a double remainder function, but libm does */
-% binopWide(instr="bl fmod")
-
-%def op_rem_double_2addr():
-/* EABI doesn't define a double remainder function, but libm does */
-% binopWide2addr(instr="bl fmod")
-
-%def op_rem_float():
-/* EABI doesn't define a float remainder function, but libm does */
-% binop(instr="bl fmodf")
-
-%def op_rem_float_2addr():
-/* EABI doesn't define a float remainder function, but libm does */
-% binop2addr(instr="bl fmodf")
-
-%def op_sub_double():
-% fbinopWide(instr="fsubd d2, d0, d1")
-
-%def op_sub_double_2addr():
-% fbinopWide2addr(instr="fsubd d2, d0, d1")
-
-%def op_sub_float():
-% fbinop(instr="fsubs s2, s0, s1")
-
-%def op_sub_float_2addr():
-% fbinop2addr(instr="fsubs s2, s0, s1")
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
deleted file mode 100644
index 376c93befb..0000000000
--- a/runtime/interpreter/mterp/arm/invoke.S
+++ /dev/null
@@ -1,115 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern $helper
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl $helper
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
- cmp r0, #0
- beq MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl $helper
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 4
- ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
- cmp r0, #0
- beq MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-%def op_invoke_custom():
-% invoke(helper="MterpInvokeCustom")
- /*
- * Handle an invoke-custom invocation.
- *
- * for: invoke-custom, invoke-custom/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
-
-%def op_invoke_custom_range():
-% invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-% invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-% invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-% invoke(helper="MterpInvokeInterface")
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_interface_range():
-% invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-% invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-% invoke(helper="MterpInvokeStatic")
-
-
-%def op_invoke_static_range():
-% invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-% invoke(helper="MterpInvokeSuper")
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_super_range():
-% invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-% invoke(helper="MterpInvokeVirtual")
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_virtual_range():
-% invoke(helper="MterpInvokeVirtualRange")
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
deleted file mode 100644
index 13ea84a6f3..0000000000
--- a/runtime/interpreter/mterp/arm/main.S
+++ /dev/null
@@ -1,777 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM EABI general notes:
-
-r0-r3 hold first 4 args to a method; they are not preserved across method calls
-r4-r8 are available for general use
-r9 is given special treatment in some situations, but not for us
-r10 (sl) seems to be generally available
-r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
-r12 (ip) is scratch -- not preserved across method calls
-r13 (sp) should be managed carefully in case a signal arrives
-r14 (lr) must be preserved
-r15 (pc) can be tinkered with directly
-
-r0 holds returns of <= 4 bytes
-r0-r1 hold returns of 8 bytes, low word in r0
-
-Callee must save/restore r4+ (except r12) if it modifies them. If VFP
-is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
-s0-s15 (d0-d7, q0-a3) do not need to be.
-
-Stack is "full descending". Only the arguments that don't fit in the first 4
-registers are placed on the stack. "sp" points at the first stacked argument
-(i.e. the 5th arg).
-
-VFP: single-precision results in s0, double-precision results in d0.
-
-In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
-64-bit quantities (long long, double) must be 64-bit aligned.
-*/
-
-/*
-Mterp and ARM notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- r4 rPC interpreted program counter, used for fetching instructions
- r5 rFP interpreted frame pointer, used for accessing locals and args
- r6 rSELF self (Thread) pointer
- r7 rINST first 16-bit code unit of current instruction
- r8 rIBASE interpreted instruction base pointer, used for computed goto
- r10 rPROFILE branch profiling countdown
- r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC r4
-#define CFI_DEX 4 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define rFP r5
-#define rSELF r6
-#define rINST r7
-#define rIBASE r8
-#define rPROFILE r10
-#define rREFS r11
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-.macro EXPORT_DEX_PC tmp
- ldr \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
- sub \tmp, rPC, \tmp
- asr \tmp, #1
- str \tmp, [rFP, #OFF_FP_DEX_PC]
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-.macro FETCH_INST
- ldrh rINST, [rPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh rINST, [rPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh rINST, [rPC, #((\count)*2)]
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
- add rPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg. Updates
- * rPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- *
- * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
- * bits that hold the shift distance are used for the half/byte/sign flags.
- * In some cases we can pre-double _reg for free, so we require a byte offset
- * here.
- */
-.macro FETCH_ADVANCE_INST_RB reg
- ldrh rINST, [rPC, \reg]!
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [rPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [rPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [rPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, rINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Because this only jumps within the
- * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
- */
-.macro GOTO_OPCODE reg
- add pc, rIBASE, \reg, lsl #${handler_size_bits}
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add pc, \base, \reg, lsl #${handler_size_bits}
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [rFP, \vreg, lsl #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [rFP, \vreg, lsl #2]
- mov \reg, #0
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_WIDE regLo, regHi, vreg
- add ip, rFP, \vreg, lsl #2
- strd \regLo, \regHi, [ip]
- mov \regLo, #0
- mov \regHi, #0
- add ip, rREFS, \vreg, lsl #2
- strd \regLo, \regHi, [ip]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [rFP, \vreg, lsl #2]
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_SHADOW reg, vreg
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_FLOAT reg, vreg, tmpreg
- add \tmpreg, rFP, \vreg, lsl #2
- fsts \reg, [\tmpreg]
- mov \tmpreg, #0
- str \tmpreg, [rREFS, \vreg, lsl #2]
-.endm
-
-/*
- * Clear the corresponding shadow regs for a vreg pair
- */
-.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
- mov \tmp1, #0
- add \tmp2, \vreg, #1
- SET_VREG_SHADOW \tmp1, \vreg
- SET_VREG_SHADOW \tmp1, \tmp2
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
-.endm
-
-.macro GET_VREG_WIDE_BY_ADDR reg0, reg1, addr
- ldmia \addr, {\reg0, \reg1}
-.endm
-.macro SET_VREG_WIDE_BY_ADDR reg0, reg1, addr
- stmia \addr, {\reg0, \reg1}
-.endm
-.macro GET_VREG_FLOAT_BY_ADDR reg, addr
- flds \reg, [\addr]
-.endm
-.macro SET_VREG_FLOAT_BY_ADDR reg, addr
- fsts \reg, [\addr]
-.endm
-.macro GET_VREG_DOUBLE_BY_ADDR reg, addr
- fldd \reg, [\addr]
-.endm
-.macro SET_VREG_DOUBLE_BY_ADDR reg, addr
- fstd \reg, [\addr]
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * function support macros.
- */
-.macro ENTRY name
- .arm
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
-.endm
-
-.macro END name
- .size \name, .-\name
-.endm
-
-// Macro to unpoison (negate) the reference for heap poisoning.
-.macro UNPOISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
- rsb \rRef, \rRef, #0
-#endif // USE_HEAP_POISONING
-.endm
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- .align 2
-
-/*
- * On entry:
- * r0 Thread* self/
- * r1 insns_
- * r2 ShadowFrame
- * r3 JValue* result_register
- *
- */
-
-ENTRY ExecuteMterpImpl
- .cfi_startproc
- stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
- .cfi_adjust_cfa_offset 40
- .cfi_rel_offset r3, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r6, 12
- .cfi_rel_offset r7, 16
- .cfi_rel_offset r8, 20
- .cfi_rel_offset r9, 24
- .cfi_rel_offset r10, 28
- .cfi_rel_offset fp, 32
- .cfi_rel_offset lr, 36
-
- /* Remember the return register */
- str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov rSELF, r0
- ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
- VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
- ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
- add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- bl MterpSetUpHotnessCountdown
- mov rPROFILE, r0 @ Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST @ load rINST from rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
- /* NOTE: no fallthrough */
- // cfi info continues, and covers the whole mterp implementation.
- END ExecuteMterpImpl
-
-%def dchecks_before_helper():
- // Call C++ to do debug checks and return to the handler using tail call.
- .extern MterpCheckBefore
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-%def opcode_pre():
-% add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
- #if !defined(NDEBUG)
- bl mterp_dchecks_before_helper
- #endif
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
- b MterpFallback
-
-%def helpers():
- ENTRY MterpHelpers
-
-%def footer():
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
- cmp r0, #0 @ Exception pending?
- beq MterpFallback @ If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException @ (self, shadow_frame)
- cmp r0, #0
- beq MterpExceptionReturn @ no local catch, back to caller.
- ldr r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr r1, [rFP, #OFF_FP_DEX_PC]
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
- cmp r0, #0
- beq MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp rINST, #0
-MterpCommonTakenBranch:
- bgt .L_forward_branch @ don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmp rPROFILE, #JIT_CHECK_OSR
- beq .L_osr_check
- subsgt rPROFILE, #1
- beq .L_add_batch @ counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- REFRESH_IBASE
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bne .L_suspend_request_pending
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov r0, rSELF
- bl MterpSuspendCheck @ (self)
- cmp r0, #0
- bne MterpFallback
- REFRESH_IBASE @ might have changed during suspend
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_no_count_backwards:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- bne .L_resume_backward_branch
-.L_osr_check:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_osr_forward
-.L_resume_forward_branch:
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_check_osr_forward:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add r1, rFP, #OFF_FP_SHADOWFRAME
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr r0, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov rPROFILE, r0 @ restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- bl MterpLogOSR
-#endif
- mov r0, #1 @ Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov r0, #0 @ signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov r0, #1 @ signal return to caller.
- b MterpDone
-MterpReturn:
- ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
- str r0, [r2]
- str r1, [r2, #4]
- mov r0, #1 @ signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp rPROFILE, #0
- bgt MterpProfileActive @ if > 0, we may have some counts to report.
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
-MterpProfileActive:
- mov rINST, r0 @ stash return value
- /* Report cached hotness counts */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov r0, rINST @ restore return value
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
- .cfi_endproc
- END MterpHelpers
-
-%def instruction_end():
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-%def instruction_start():
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-%def default_helper_prefix():
-% return "mterp_"
-
-%def opcode_start():
- ENTRY mterp_${opcode}
-%def opcode_end():
- END mterp_${opcode}
-%def helper_start(name):
- ENTRY ${name}
-%def helper_end(name):
- END ${name}
diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
deleted file mode 100644
index 8414a1f173..0000000000
--- a/runtime/interpreter/mterp/arm/object.S
+++ /dev/null
@@ -1,207 +0,0 @@
-%def field(helper=""):
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern $helper
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl $helper
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_check_cast():
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpCheckCast @ (index, &obj, method, self)
- PREFETCH_INST 2
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
- @ Fast-path which gets the field offset from thread-local cache.
- add r0, rSELF, #THREAD_INTERPRETER_CACHE_OFFSET @ cache address
- ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
- add r0, r0, r1, lsl #3 @ entry address within the cache
- ldrd r0, r1, [r0] @ entry key (pc) and value (offset)
- mov r2, rINST, lsr #12 @ B
- GET_VREG r2, r2 @ object we're operating on
- cmp r0, rPC
-% slow_path_label = add_helper(lambda: field(helper))
- bne ${slow_path_label} @ cache miss
- cmp r2, #0
- beq common_errNullObject @ null object
-% if is_wide:
- ldrd r0, r1, [r1, r2] @ r0,r1 <- obj.field
-% else:
- ${load} r0, [r2, r1] @ r0 <- obj.field
-% #endif
-% if is_object:
- UNPOISON_HEAP_REF r0
-#if defined(USE_READ_BARRIER)
-# if defined(USE_BAKER_READ_BARRIER)
- ldr ip, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cmp ip, #0
- bne .L_${opcode}_mark @ GC is active
-.L_${opcode}_marked:
-# else
- bl artReadBarrierMark @ r0 <- artReadBarrierMark(r0)
-# endif
-#endif
-% #endif
- ubfx r2, rINST, #8, #4 @ A
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-% if is_object:
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
-% elif is_wide:
- SET_VREG_WIDE r0, r1, r2 @ fp[A]<- r0, r1
-% else:
- SET_VREG r0, r2 @ fp[A]<- r0
-% #endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-% if is_object:
-#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-.L_${opcode}_mark:
- bl artReadBarrierMark @ r0 <- artReadBarrierMark(r0)
- b .L_${opcode}_marked
-#endif
-% #endif
-
-%def op_iget_boolean():
-% op_iget(load="ldrb", helper="MterpIGetU8")
-
-%def op_iget_byte():
-% op_iget(load="ldrsb", helper="MterpIGetI8")
-
-%def op_iget_char():
-% op_iget(load="ldrh", helper="MterpIGetU16")
-
-%def op_iget_object():
-% op_iget(is_object=True, helper="MterpIGetObj")
-
-%def op_iget_short():
-% op_iget(load="ldrsh", helper="MterpIGetI16")
-
-%def op_iget_wide():
-% op_iget(is_wide=True, helper="MterpIGetU64")
-
-%def op_instance_of():
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- FETCH r0, 1 @ r0<- CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpInstanceOf @ (index, &obj, method, self)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r9, rINST, #8, #4 @ r9<- A
- PREFETCH_INST 2
- cmp r1, #0 @ exception pending?
- bne MterpException
- ADVANCE 2 @ advance rPC
- SET_VREG r0, r9 @ vA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_iput(helper="MterpIPutU32"):
-% field(helper=helper)
-
-%def op_iput_boolean():
-% op_iput(helper="MterpIPutU8")
-
-%def op_iput_byte():
-% op_iput(helper="MterpIPutI8")
-
-%def op_iput_char():
-% op_iput(helper="MterpIPutU16")
-
-%def op_iput_object():
-% op_iput(helper="MterpIPutObj")
-
-%def op_iput_short():
-% op_iput(helper="MterpIPutI16")
-
-%def op_iput_wide():
-% op_iput(helper="MterpIPutU64")
-
-%def op_new_instance():
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rSELF
- mov r2, rINST
- bl MterpNewInstance @ (shadow_frame, self, inst_data)
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_sget(helper="MterpSGetU32"):
-% field(helper=helper)
-
-%def op_sget_boolean():
-% op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-% op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-% op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-% op_sget(helper="MterpSGetObj")
-
-%def op_sget_short():
-% op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-% op_sget(helper="MterpSGetU64")
-
-%def op_sput(helper="MterpSPutU32"):
-% field(helper=helper)
-
-%def op_sput_boolean():
-% op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-% op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-% op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-% op_sput(helper="MterpSPutObj")
-
-%def op_sput_short():
-% op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S
deleted file mode 100644
index 20f1acf97f..0000000000
--- a/runtime/interpreter/mterp/arm/other.S
+++ /dev/null
@@ -1,433 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl $helper @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-%def op_const():
- /* const vAA, #+BBBBbbbb */
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r3 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_const_16():
- /* const/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_const_4():
- /* const/4 vA, #+B */
- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
- ubfx r0, rINST, #8, #4 @ r0<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- SET_VREG r1, r0 @ fp[A]<- r1
- GOTO_OPCODE ip @ execute next instruction
-
-%def op_const_class():
-% const(helper="MterpConstClass")
-
-%def op_const_high16():
- /* const/high16 vAA, #+BBBB0000 */
- FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, r0, lsl #16 @ r0<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_const_method_handle():
-% const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-% const(helper="MterpConstMethodType")
-
-%def op_const_string():
-% const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r2, 2 @ r2<- BBBB (high)
- mov r1, rINST, lsr #8 @ r1<- AA
- orr r0, r0, r2, lsl #16 @ r1<- BBBBbbbb
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 @ advance rPC
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 3 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_const_wide():
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (low middle)
- FETCH r2, 3 @ r2<- hhhh (high middle)
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
- FETCH r3, 4 @ r3<- HHHH (high)
- mov r9, rINST, lsr #8 @ r9<- AA
- orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
- CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
- FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_const_wide_16():
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r1, r0, asr #31 @ r1<- ssssssss
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_const_wide_32():
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH r0, 1 @ r0<- 0000bbbb (low)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_S r2, 2 @ r2<- ssssBBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- mov r1, r0, asr #31 @ r1<- ssssssss
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_const_wide_high16():
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, #0 @ r0<- 00000000
- mov r1, r1, lsl #16 @ r1<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_monitor_enter():
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r1<- self
- bl artLockObjectFromCode
- cmp r0, #0
- bne MterpException
- FETCH_ADVANCE_INST 1
- ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
- cmp r0, #0
- beq MterpFallback
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_monitor_exit():
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r0<- self
- bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
- cmp r0, #0 @ failed?
- bne MterpException
- FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
- ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
- cmp r0, #0
- beq MterpFallback
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move(is_object="0"):
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
-
-%def op_move_16(is_object="0"):
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH r1, 2 @ r1<- BBBB
- FETCH r0, 1 @ r0<- AAAA
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AAAA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move_exception():
- /* move-exception vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r1, #0 @ r1<- 0
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
- GET_INST_OPCODE ip @ extract opcode from rINST
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move_from16(is_object="0"):
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH r1, 1 @ r1<- BBBB
- mov r0, rINST, lsr #8 @ r0<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move_object():
-% op_move(is_object="1")
-
-%def op_move_object_16():
-% op_move_16(is_object="1")
-
-%def op_move_object_from16():
-% op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
- /* for: move-result, move-result-object */
- /* op vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
- ldr r0, [r0] @ r0 <- result.i.
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
- .else
- SET_VREG r0, r2 @ fp[AA]<- r0
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move_result_object():
-% op_move_result(is_object="1")
-
-%def op_move_result_wide():
- /* move-result-wide vAA */
- mov rINST, rINST, lsr #8 @ rINST<- AA
- ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- retval.j
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[AA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move_wide():
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[B]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[A]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move_wide_16():
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 2 @ r3<- BBBB
- FETCH r2, 1 @ r2<- AAAA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR lr, r2 @ r2<- &fp[AAAA]
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[BBBB]
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
- SET_VREG_WIDE_BY_ADDR r0, r1, lr @ fp[AAAA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_move_wide_from16():
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 1 @ r3<- BBBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[BBBB]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[AA]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-%def op_nop():
- FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- GOTO_OPCODE ip @ execute it
-
-%def op_unused_3e():
-% unused()
-
-%def op_unused_3f():
-% unused()
-
-%def op_unused_40():
-% unused()
-
-%def op_unused_41():
-% unused()
-
-%def op_unused_42():
-% unused()
-
-%def op_unused_43():
-% unused()
-
-%def op_unused_73():
-% unused()
-
-%def op_unused_79():
-% unused()
-
-%def op_unused_7a():
-% unused()
-
-%def op_unused_e3():
-% unused()
-
-%def op_unused_e4():
-% unused()
-
-%def op_unused_e5():
-% unused()
-
-%def op_unused_e6():
-% unused()
-
-%def op_unused_e7():
-% unused()
-
-%def op_unused_e8():
-% unused()
-
-%def op_unused_e9():
-% unused()
-
-%def op_unused_ea():
-% unused()
-
-%def op_unused_eb():
-% unused()
-
-%def op_unused_ec():
-% unused()
-
-%def op_unused_ed():
-% unused()
-
-%def op_unused_ee():
-% unused()
-
-%def op_unused_ef():
-% unused()
-
-%def op_unused_f0():
-% unused()
-
-%def op_unused_f1():
-% unused()
-
-%def op_unused_f2():
-% unused()
-
-%def op_unused_f3():
-% unused()
-
-%def op_unused_f4():
-% unused()
-
-%def op_unused_f5():
-% unused()
-
-%def op_unused_f6():
-% unused()
-
-%def op_unused_f7():
-% unused()
-
-%def op_unused_f8():
-% unused()
-
-%def op_unused_f9():
-% unused()
-
-%def op_unused_fc():
-% unused()
-
-%def op_unused_fd():
-% unused()
diff --git a/runtime/interpreter/mterp/arm64/array.S b/runtime/interpreter/mterp/arm64/array.S
deleted file mode 100644
index 628f832e3a..0000000000
--- a/runtime/interpreter/mterp/arm64/array.S
+++ /dev/null
@@ -1,235 +0,0 @@
-%def op_aget(load="ldr", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #$shift // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $load w2, [x0, #$data_offset] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_aget_boolean():
-% op_aget(load="ldrb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-% op_aget(load="ldrsb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-% op_aget(load="ldrh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- FETCH_B w3, 1, 1 // w3<- CC
- EXPORT_PC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- bl artAGetObjectFromMterp // (array, index)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w9<- AA
- PREFETCH_INST 2
- cbnz w1, MterpException
- SET_VREG_OBJECT w0, w2
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_aget_short():
-% op_aget(load="ldrsh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // yes, bail
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x2, w4
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #$shift // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- $store w2, [x0, #$data_offset] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_aput_boolean():
-% op_aput(store="strb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-% op_aput(store="strb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-% op_aput(store="strh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- bl MterpAputObject
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_aput_short():
-% op_aput(store="strh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- GET_VREG_WIDE x1, w4
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_array_length():
- /*
- * Return the length of an array.
- */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w0, w1 // w0<- vB (object ref)
- cbz w0, common_errNullObject // yup, fail
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- array length
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w3, w2 // vB<- length
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_fill_array_data():
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x1, x0, x1, lsl #16 // x1<- ssssssssBBBBbbbb
- GET_VREG w0, w3 // w0<- vAA (array object)
- add x1, xPC, x1, lsl #1 // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
- bl MterpFillArrayData // (obj, payload)
- cbz w0, MterpPossibleException // exception?
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern $helper
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xSELF
- bl $helper
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_filled_new_array_range():
-% op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class//CCCC */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- mov x3, xSELF
- bl MterpNewArray
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/control_flow.S b/runtime/interpreter/mterp/arm64/control_flow.S
deleted file mode 100644
index 25313d3807..0000000000
--- a/runtime/interpreter/mterp/arm64/control_flow.S
+++ /dev/null
@@ -1,211 +0,0 @@
-%def bincmp(condition=""):
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.${condition} MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def zcmp(compare="1", branch=""):
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if ${compare}
- cmp w2, #0 // compare (vA, 0)
- .endif
- ${branch} MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_goto():
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx wINST, wINST, #8, #8 // wINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-%def op_goto_16():
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S wINST, 1 // wINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-%def op_goto_32():
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH w0, 1 // w0<- aaaa (lo)
- FETCH w1, 2 // w1<- AAAA (hi)
- orr wINST, w0, w1, lsl #16 // wINST<- AAAAaaaa
- b MterpCommonTakenBranchNoFlags
-
-%def op_if_eq():
-% bincmp(condition="eq")
-
-%def op_if_eqz():
-% zcmp(compare="0", branch="cbz w2,")
-
-%def op_if_ge():
-% bincmp(condition="ge")
-
-%def op_if_gez():
-% zcmp(compare="0", branch="tbz w2, #31,")
-
-%def op_if_gt():
-% bincmp(condition="gt")
-
-%def op_if_gtz():
-% zcmp(branch="b.gt")
-
-%def op_if_le():
-% bincmp(condition="le")
-
-%def op_if_lez():
-% zcmp(branch="b.le")
-
-%def op_if_lt():
-% bincmp(condition="lt")
-
-%def op_if_ltz():
-% zcmp(compare="0", branch="tbnz w2, #31,")
-
-%def op_if_ne():
-% bincmp(condition="ne")
-
-%def op_if_nez():
-% zcmp(compare="0", branch="cbnz w2,")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x0, x0, x1, lsl #16 // x0<- ssssssssBBBBbbbb
- GET_VREG w1, w3 // w1<- vAA
- add x0, xPC, x0, lsl #1 // x0<- PC + ssssssssBBBBbbbb*2
- bl $func // w0<- code-unit branch offset
- sxtw xINST, w0
- b MterpCommonTakenBranchNoFlags
-
-%def op_return():
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L${opcode}_check
-.L${opcode}_return:
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w0, w2 // r0<- vAA
- b MterpReturn
-.L${opcode}_check:
- bl MterpSuspendCheck // (self)
- b .L${opcode}_return
-
-%def op_return_object():
-% op_return()
-
-%def op_return_void():
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L${opcode}_check
-.L${opcode}_return:
- mov x0, #0
- b MterpReturn
-.L${opcode}_check:
- bl MterpSuspendCheck // (self)
- b .L${opcode}_return
-
-%def op_return_wide():
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L${opcode}_check
-.L${opcode}_return:
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x0, w2 // x0<- vAA
- b MterpReturn
-.L${opcode}_check:
- bl MterpSuspendCheck // (self)
- b .L${opcode}_return
-
-%def op_sparse_switch():
-% op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w1, w2 // r1<- vAA (exception object)
- cbz w1, common_errNullObject
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // thread->exception<- obj
- b MterpException
diff --git a/runtime/interpreter/mterp/arm64/invoke.S b/runtime/interpreter/mterp/arm64/invoke.S
deleted file mode 100644
index 4d75af60e1..0000000000
--- a/runtime/interpreter/mterp/arm64/invoke.S
+++ /dev/null
@@ -1,104 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern $helper
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl $helper
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
- cbz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl $helper
- cbz w0, MterpException
- FETCH_ADVANCE_INST 4
- ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
- cbz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-%def op_invoke_custom():
-% invoke(helper="MterpInvokeCustom")
-
-%def op_invoke_custom_range():
-% invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-% invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-% invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-% invoke(helper="MterpInvokeInterface")
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_interface_range():
-% invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-% invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-% invoke(helper="MterpInvokeStatic")
-
-
-%def op_invoke_static_range():
-% invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-% invoke(helper="MterpInvokeSuper")
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_super_range():
-% invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-% invoke(helper="MterpInvokeVirtual")
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_virtual_range():
-% invoke(helper="MterpInvokeVirtualRange")
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
deleted file mode 100644
index 18ae66ac40..0000000000
--- a/runtime/interpreter/mterp/arm64/main.S
+++ /dev/null
@@ -1,799 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via xFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM64 Runtime register usage conventions.
-
- r0 : w0 is 32-bit return register and x0 is 64-bit.
- r0-r7 : Argument registers.
- r8-r15 : Caller save registers (used as temporary registers).
- r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
- the linker, by the trampolines and other stubs (the backend uses
- these as temporary registers).
- r18 : Caller save register (used as temporary register).
- r19 : Pointer to thread-local storage.
- r20-r29: Callee save registers.
- r30 : (lr) is reserved (the link register).
- rsp : (sp) is reserved (the stack pointer).
- rzr : (zr) is reserved (the zero register).
-
- Floating-point registers
- v0-v31
-
- v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
- This is analogous to the C/C++ (hard-float) calling convention.
- v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
- Also used as temporary and codegen scratch registers.
-
- v0-v7 and v16-v31 : trashed across C calls.
- v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
-
- v16-v31: Used as codegen temp/scratch.
- v8-v15 : Can be used for promotion.
-
- Must maintain 16-byte stack alignment.
-
-Mterp notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- x20 xPC interpreted program counter, used for fetching instructions
- x21 xFP interpreted frame pointer, used for accessing locals and args
- x22 xSELF self (Thread) pointer
- x23 xINST first 16-bit code unit of current instruction
- x24 xIBASE interpreted instruction base pointer, used for computed goto
- x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- x26 wPROFILE jit profile hotness countdown
- x16 ip scratch reg
- x17 ip2 scratch reg (used by macros)
-
-Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
-codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of xFP */
-/* single-purpose registers, given names for clarity */
-#define xPC x20
-#define CFI_DEX 20 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define xFP x21
-#define xSELF x22
-#define xINST x23
-#define wINST w23
-#define xIBASE x24
-#define xREFS x25
-#define wPROFILE w26
-#define xPROFILE x26
-#define ip x16
-#define ip2 x17
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-/*
- * Fetch the next instruction from xPC into wINST. Does not advance xPC.
- */
-.macro FETCH_INST
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances xPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh wINST, [xPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to xPC and xINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
- * xINST ahead of possible exception point. Be sure to manually advance xPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh wINST, [xPC, #((\count)*2)]
-.endm
-
-/* Advance xPC by some number of code units. */
-.macro ADVANCE count
- add xPC, xPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
- add xPC, xPC, \reg, sxtw
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance xPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [xPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [xPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [xPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, xINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Clobbers reg
- */
-
-.macro GOTO_OPCODE reg
- add \reg, xIBASE, \reg, lsl #${handler_size_bits}
- br \reg
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add \reg, \base, \reg, lsl #${handler_size_bits}
- br \reg
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [xFP, \vreg, uxtw #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [xFP, \vreg, uxtw #2]
- str wzr, [xREFS, \vreg, uxtw #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [xFP, \vreg, uxtw #2]
- str \reg, [xREFS, \vreg, uxtw #2]
-.endm
-.macro SET_VREG_FLOAT reg, vreg
- str \reg, [xFP, \vreg, uxtw #2]
- str wzr, [xREFS, \vreg, uxtw #2]
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- */
-.macro GET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, uxtw #2
- ldr \reg, [ip2]
-.endm
-.macro SET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, uxtw #2
- str \reg, [ip2]
- add ip2, xREFS, \vreg, uxtw #2
- str xzr, [ip2]
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
- add ip2, xFP, \vreg, uxtw #2
- ldr \reg, [ip2]
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
- add ip2, xFP, \vreg, uxtw #2
- str \reg, [ip2]
- add ip2, xREFS, \vreg, uxtw #2
- str xzr, [ip2]
-.endm
-
-/*
- * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
- * Used to avoid an extra instruction in int-to-long.
- */
-.macro GET_VREG_S reg, vreg
- ldrsw \reg, [xFP, \vreg, uxtw #2]
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, xFP, \vreg, uxtw #2 /* WARNING: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * Save two registers to the stack.
- */
-.macro SAVE_TWO_REGS reg1, reg2, offset
- stp \reg1, \reg2, [sp, #(\offset)]
- .cfi_rel_offset \reg1, (\offset)
- .cfi_rel_offset \reg2, (\offset) + 8
-.endm
-
-/*
- * Restore two registers from the stack.
- */
-.macro RESTORE_TWO_REGS reg1, reg2, offset
- ldp \reg1, \reg2, [sp, #(\offset)]
- .cfi_restore \reg1
- .cfi_restore \reg2
-.endm
-
-/*
- * Increase frame size and save two registers to the bottom of the stack.
- */
-.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
- stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
- .cfi_adjust_cfa_offset (\frame_adjustment)
- .cfi_rel_offset \reg1, 0
- .cfi_rel_offset \reg2, 8
-.endm
-
-/*
- * Restore two registers from the bottom of the stack and decrease frame size.
- */
-.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
- ldp \reg1, \reg2, [sp], #(\frame_adjustment)
- .cfi_restore \reg1
- .cfi_restore \reg2
- .cfi_adjust_cfa_offset -(\frame_adjustment)
-.endm
-
-/*
- * function support macros.
- */
-.macro ENTRY name
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
-.endm
-
-.macro END name
- .size \name, .-\name
-.endm
-
-// Macro to unpoison (negate) the reference for heap poisoning.
-.macro UNPOISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
- neg \rRef, \rRef
-#endif // USE_HEAP_POISONING
-.endm
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- .text
-
-/*
- * Interpreter entry point.
- * On entry:
- * x0 Thread* self/
- * x1 insns_
- * x2 ShadowFrame
- * x3 JValue* result_register
- *
- */
-ENTRY ExecuteMterpImpl
- .cfi_startproc
- SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
- SAVE_TWO_REGS xIBASE, xREFS, 16
- SAVE_TWO_REGS xSELF, xINST, 32
- SAVE_TWO_REGS xPC, xFP, 48
- SAVE_TWO_REGS fp, lr, 64
- add fp, sp, #64
-
- /* Remember the return register */
- str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov xSELF, x0
- ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
- add xREFS, xFP, w0, uxtw #2 // point to reference array in shadow frame
- ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
- add xPC, x1, w0, uxtw #1 // Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- bl MterpSetUpHotnessCountdown
- mov wPROFILE, w0 // Starting hotness countdown to xPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST // load wINST from rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
- /* NOTE: no fallthrough */
- // cfi info continues, and covers the whole mterp implementation.
- END ExecuteMterpImpl
-
-%def dchecks_before_helper():
- // Call C++ to do debug checks and return to the handler using tail call.
- .extern MterpCheckBefore
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-%def opcode_pre():
-% add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
- #if !defined(NDEBUG)
- bl mterp_dchecks_before_helper
- #endif
-
-%def footer():
- .cfi_endproc
- END MterpHelpers
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
- b MterpFallback
-
-%def helpers():
- ENTRY MterpHelpers
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
- cbz x0, MterpFallback // If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException // (self, shadow_frame)
- cbz w0, MterpExceptionReturn // no local catch, back to caller.
- ldr x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr w1, [xFP, #OFF_FP_DEX_PC]
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
- cbz w0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * wINST <= signed offset
- * wPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp wINST, #0
- b.gt .L_forward_branch // don't add forward branches to hotness
- tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
- subs wPROFILE, wPROFILE, #1 // countdown
- b.eq .L_add_batch // counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- REFRESH_IBASE
- ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L_suspend_request_pending
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback
- REFRESH_IBASE // might have changed during suspend
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_no_count_backwards:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.ne .L_resume_backward_branch
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_osr_forward
-.L_resume_forward_branch:
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_check_osr_forward:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add x1, xFP, #OFF_FP_SHADOWFRAME
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr x0, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/*
- * Check for suspend check request. Assumes wINST already loaded, xPC advanced and
- * still needs to get the opcode and branch to it, and flags are in lr.
- */
-MterpCheckSuspendAndContinue:
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne check1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-check1:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- sxtw x2, wINST
- bl MterpLogOSR
-#endif
- mov x0, #1 // Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov x0, #0 // signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* xFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov x0, #1 // signal return to caller.
- b MterpDone
-MterpReturn:
- ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
- str x0, [x2]
- mov x0, #1 // signal return to caller.
-MterpDone:
-/*
- * At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp wPROFILE, #0
- bgt MterpProfileActive // if > 0, we may have some counts to report.
- .cfi_remember_state
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
- .cfi_restore_state // Reset unwind info so following code unwinds.
- .cfi_def_cfa_offset 80 // workaround for clang bug: 31975598
-
-MterpProfileActive:
- mov xINST, x0 // stash return value
- /* Report cached hotness counts */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov x0, xINST // restore return value
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
-
-
-%def instruction_end():
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-%def instruction_start():
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-%def default_helper_prefix():
-% return "mterp_"
-
-%def opcode_start():
- ENTRY mterp_${opcode}
-%def opcode_end():
- END mterp_${opcode}
-%def helper_start(name):
- ENTRY ${name}
-%def helper_end(name):
- END ${name}
diff --git a/runtime/interpreter/mterp/arm64/object.S b/runtime/interpreter/mterp/arm64/object.S
deleted file mode 100644
index fb1dac9363..0000000000
--- a/runtime/interpreter/mterp/arm64/object.S
+++ /dev/null
@@ -1,201 +0,0 @@
-%def field(helper=""):
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern $helper
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl $helper
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_check_cast():
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class//BBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpCheckCast // (index, &obj, method, self)
- PREFETCH_INST 2
- cbnz w0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
- // Fast-path which gets the field offset from thread-local cache.
- add x0, xSELF, #THREAD_INTERPRETER_CACHE_OFFSET // cache address
- ubfx x1, xPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 // entry index
- add x0, x0, x1, lsl #4 // entry address within the cache
- ldp x0, x1, [x0] // entry key (pc) and value (offset)
- lsr w2, wINST, #12 // B
- GET_VREG w2, w2 // object we're operating on
- cmp x0, xPC
-% slow_path_label = add_helper(lambda: field(helper))
- b.ne ${slow_path_label} // cache miss
- cbz w2, common_errNullObject // null object
-% if is_wide:
- ldr x0, [x2, x1] // x0<- obj.field
-% else:
- ${load} w0, [x2, x1] // w0<- obj.field
-% #endif
-% if is_object:
- UNPOISON_HEAP_REF w0
-#if defined(USE_READ_BARRIER)
-# if defined(USE_BAKER_READ_BARRIER)
- ldr w1, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz w1, .L_${opcode}_mark // GC is active.
-.L_${opcode}_marked:
-# else
- bl artReadBarrierMark // x0 <- artReadBarrierMark(x0)
-# endif
-#endif
-% #endif
- ubfx w2, wINST, #8, #4 // w2<- A
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-% if is_object:
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
-% elif is_wide:
- SET_VREG_WIDE x0, w2 // fp[A]<- x0
-% else:
- SET_VREG w0, w2 // fp[A]<- w0
-% #endif
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-% if is_object:
-#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-.L_${opcode}_mark:
- bl artReadBarrierMark // x0 <- artReadBarrierMark(x0)
- b .L_${opcode}_marked
-#endif
-% #endif
-
-%def op_iget_boolean():
-% op_iget(load="ldrb", helper="MterpIGetU8")
-
-%def op_iget_byte():
-% op_iget(load="ldrsb", helper="MterpIGetI8")
-
-%def op_iget_char():
-% op_iget(load="ldrh", helper="MterpIGetU16")
-
-%def op_iget_object():
-% op_iget(is_object=True, helper="MterpIGetObj")
-
-%def op_iget_short():
-% op_iget(load="ldrsh", helper="MterpIGetI16")
-
-%def op_iget_wide():
-% op_iget(is_wide=True, helper="MterpIGetU64")
-
-%def op_instance_of():
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class//CCCC */
- EXPORT_PC
- FETCH w0, 1 // w0<- CCCC
- lsr w1, wINST, #12 // w1<- B
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpInstanceOf // (index, &obj, method, self)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x1, MterpException
- ADVANCE 2 // advance rPC
- SET_VREG w0, w2 // vA<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_iput(helper="MterpIPutU32"):
-% field(helper=helper)
-
-%def op_iput_boolean():
-% op_iput(helper="MterpIPutU8")
-
-%def op_iput_byte():
-% op_iput(helper="MterpIPutI8")
-
-%def op_iput_char():
-% op_iput(helper="MterpIPutU16")
-
-%def op_iput_object():
-% op_iput(helper="MterpIPutObj")
-
-%def op_iput_short():
-% op_iput(helper="MterpIPutI16")
-
-%def op_iput_wide():
-% op_iput(helper="MterpIPutU64")
-
-%def op_new_instance():
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class//BBBB */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xSELF
- mov w2, wINST
- bl MterpNewInstance // (shadow_frame, self, inst_data)
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_sget(helper="MterpSGetU32"):
-% field(helper=helper)
-
-%def op_sget_boolean():
-% op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-% op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-% op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-% op_sget(helper="MterpSGetObj")
-
-%def op_sget_short():
-% op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-% op_sget(helper="MterpSGetU64")
-
-%def op_sput(helper="MterpSPutU32"):
-% field(helper=helper)
-
-%def op_sput_boolean():
-% op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-% op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-% op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-% op_sput(helper="MterpSPutObj")
-
-%def op_sput_short():
-% op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/arm64/other.S b/runtime/interpreter/mterp/arm64/other.S
deleted file mode 100644
index a8cf62e4fa..0000000000
--- a/runtime/interpreter/mterp/arm64/other.S
+++ /dev/null
@@ -1,407 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl $helper // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-%def op_const():
- /* const vAA, #+BBBBbbbb */
- lsr w3, wINST, #8 // w3<- AA
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w1, 2 // w1<- BBBB (high
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w3 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_const_16():
- /* const/16 vAA, #+BBBB */
- FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance xPC, load wINST
- SET_VREG w0, w3 // vAA<- w0
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_const_4():
- /* const/4 vA, #+B */
- sbfx w1, wINST, #12, #4 // w1<- sssssssB
- ubfx w0, wINST, #8, #4 // w0<- A
- FETCH_ADVANCE_INST 1 // advance xPC, load wINST
- GET_INST_OPCODE ip // ip<- opcode from xINST
- SET_VREG w1, w0 // fp[A]<- w1
- GOTO_OPCODE ip // execute next instruction
-
-%def op_const_class():
-% const(helper="MterpConstClass")
-
-%def op_const_high16():
- /* const/high16 vAA, #+BBBB0000 */
- FETCH w0, 1 // r0<- 0000BBBB (zero-extended)
- lsr w3, wINST, #8 // r3<- AA
- lsl w0, w0, #16 // r0<- BBBB0000
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- SET_VREG w0, w3 // vAA<- r0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_const_method_handle():
-% const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-% const(helper="MterpConstMethodType")
-
-%def op_const_string():
-% const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
- /* const/string vAA, String//BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w2, 2 // w2<- BBBB (high
- lsr w1, wINST, #8 // w1<- AA
- orr w0, w0, w2, lsl #16 // w1<- BBBBbbbb
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 // advance rPC
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 3 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_const_wide():
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH w0, 1 // w0<- bbbb (low)
- FETCH w1, 2 // w1<- BBBB (low middle)
- FETCH w2, 3 // w2<- hhhh (high middle)
- FETCH w3, 4 // w3<- HHHH (high)
- lsr w4, wINST, #8 // r4<- AA
- FETCH_ADVANCE_INST 5 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- orr x0, x0, x2, lsl #32 // w0<- hhhhBBBBbbbb
- orr x0, x0, x3, lsl #48 // w0<- HHHHhhhhBBBBbbbb
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_const_wide_16():
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S x0, 1 // x0<- ssssssssssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_const_wide_32():
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH w0, 1 // x0<- 000000000000bbbb (low)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_S x2, 2 // x2<- ssssssssssssBBBB (high)
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr x0, x0, x2, lsl #16 // x0<- ssssssssBBBBbbbb
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_const_wide_high16():
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- lsl x0, x0, #48
- SET_VREG_WIDE x0, w1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_monitor_enter():
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
- mov x1, xSELF // w1<- self
- bl artLockObjectFromCode
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1
- ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
- cbz w0, MterpFallback
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_monitor_exit():
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
- mov x1, xSELF // w0<- self
- bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
- ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
- cbz w0, MterpFallback
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move(is_object="0"):
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if $is_object
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
-
-%def op_move_16(is_object="0"):
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH w1, 2 // w1<- BBBB
- FETCH w0, 1 // w0<- AAAA
- FETCH_ADVANCE_INST 3 // advance xPC, load xINST
- GET_VREG w2, w1 // w2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from xINST
- .if $is_object
- SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
- .else
- SET_VREG w2, w0 // fp[AAAA]<- w2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move_exception():
- /* move-exception vAA */
- lsr w2, wINST, #8 // w2<- AA
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- mov x1, #0 // w1<- 0
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- SET_VREG_OBJECT w3, w2 // fp[AA]<- exception obj
- GET_INST_OPCODE ip // extract opcode from rINST
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // clear exception
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move_from16(is_object="0"):
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH w1, 1 // r1<- BBBB
- lsr w0, wINST, #8 // r0<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_VREG w2, w1 // r2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from wINST
- .if $is_object
- SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
- .else
- SET_VREG w2, w0 // fp[AA]<- r2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move_object():
-% op_move(is_object="1")
-
-%def op_move_object_16():
-% op_move_16(is_object="1")
-
-%def op_move_object_from16():
-% op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
- /* for: move-result, move-result-object */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr w0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- .if $is_object
- SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
- .else
- SET_VREG w0, w2 // fp[AA]<- r0
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move_result_object():
-% op_move_result(is_object="1")
-
-%def op_move_result_wide():
- /* for: move-result-wide */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr x0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w2 // fp[AA]<- r0
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move_wide():
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lsr w3, wINST, #12 // w3<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move_wide_16():
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 2 // w3<- BBBB
- FETCH w2, 1 // w2<- AAAA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- SET_VREG_WIDE x3, w2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_move_wide_from16():
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 1 // w3<- BBBB
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
-
-%def op_nop():
- FETCH_ADVANCE_INST 1 // advance to next instr, load rINST
- GET_INST_OPCODE ip // ip<- opcode from rINST
- GOTO_OPCODE ip // execute it
-
-%def op_unused_3e():
-% unused()
-
-%def op_unused_3f():
-% unused()
-
-%def op_unused_40():
-% unused()
-
-%def op_unused_41():
-% unused()
-
-%def op_unused_42():
-% unused()
-
-%def op_unused_43():
-% unused()
-
-%def op_unused_73():
-% unused()
-
-%def op_unused_79():
-% unused()
-
-%def op_unused_7a():
-% unused()
-
-%def op_unused_e3():
-% unused()
-
-%def op_unused_e4():
-% unused()
-
-%def op_unused_e5():
-% unused()
-
-%def op_unused_e6():
-% unused()
-
-%def op_unused_e7():
-% unused()
-
-%def op_unused_e8():
-% unused()
-
-%def op_unused_e9():
-% unused()
-
-%def op_unused_ea():
-% unused()
-
-%def op_unused_eb():
-% unused()
-
-%def op_unused_ec():
-% unused()
-
-%def op_unused_ed():
-% unused()
-
-%def op_unused_ee():
-% unused()
-
-%def op_unused_ef():
-% unused()
-
-%def op_unused_f0():
-% unused()
-
-%def op_unused_f1():
-% unused()
-
-%def op_unused_f2():
-% unused()
-
-%def op_unused_f3():
-% unused()
-
-%def op_unused_f4():
-% unused()
-
-%def op_unused_f5():
-% unused()
-
-%def op_unused_f6():
-% unused()
-
-%def op_unused_f7():
-% unused()
-
-%def op_unused_f8():
-% unused()
-
-%def op_unused_f9():
-% unused()
-
-%def op_unused_fc():
-% unused()
-
-%def op_unused_fd():
-% unused()
diff --git a/runtime/interpreter/mterp/arm64/arithmetic.S b/runtime/interpreter/mterp/arm64ng/arithmetic.S
index cf9dd869d0..cf9dd869d0 100644
--- a/runtime/interpreter/mterp/arm64/arithmetic.S
+++ b/runtime/interpreter/mterp/arm64ng/arithmetic.S
diff --git a/runtime/interpreter/mterp/arm64/floating_point.S b/runtime/interpreter/mterp/arm64ng/floating_point.S
index ad42db3f4b..ad42db3f4b 100644
--- a/runtime/interpreter/mterp/arm64/floating_point.S
+++ b/runtime/interpreter/mterp/arm64ng/floating_point.S
diff --git a/runtime/interpreter/mterp/common/gen_setup.py b/runtime/interpreter/mterp/common/gen_setup.py
index 4d18136496..ffe567bab3 100644
--- a/runtime/interpreter/mterp/common/gen_setup.py
+++ b/runtime/interpreter/mterp/common/gen_setup.py
@@ -22,8 +22,8 @@ import sys, re
from io import StringIO
out = StringIO() # File-like in-memory buffer.
-handler_size_bytes = "MTERP_HANDLER_SIZE"
-handler_size_bits = "MTERP_HANDLER_SIZE_LOG2"
+handler_size_bytes = "NTERP_HANDLER_SIZE"
+handler_size_bits = "NTERP_HANDLER_SIZE_LOG2"
opcode = ""
opnum = ""
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
deleted file mode 100644
index ae5e4703fd..0000000000
--- a/runtime/interpreter/mterp/mterp.cc
+++ /dev/null
@@ -1,921 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Mterp entry point and support functions.
- */
-#include "mterp.h"
-
-#include "base/quasi_atomic.h"
-#include "debugger.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "interpreter/interpreter_common.h"
-#include "interpreter/interpreter_intrinsics.h"
-#include "interpreter/shadow_frame-inl.h"
-#include "mirror/string-alloc-inl.h"
-
-namespace art {
-namespace interpreter {
-/*
- * Verify some constants used by the mterp interpreter.
- */
-void CheckMterpAsmConstants() {
- /*
- * If we're using computed goto instruction transitions, make sure
- * none of the handlers overflows the byte limit. This won't tell
- * which one did, but if any one is too big the total size will
- * overflow.
- */
- const int width = kMterpHandlerSize;
- int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
- (uintptr_t) artMterpAsmInstructionStart;
- if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
- LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
- << "(did an instruction handler exceed " << width << " bytes?)";
- }
-}
-
-void InitMterpTls(Thread* self) {
- self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
-}
-
-/*
- * Find the matching case. Returns the offset to the handler instructions.
- *
- * Returns 3 if we don't find a match (it's the size of the sparse-switch
- * instruction).
- */
-extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
- const int kInstrLen = 3;
- uint16_t size;
- const int32_t* keys;
- const int32_t* entries;
-
- /*
- * Sparse switch data format:
- * ushort ident = 0x0200 magic value
- * ushort size number of entries in the table; > 0
- * int keys[size] keys, sorted low-to-high; 32-bit aligned
- * int targets[size] branch targets, relative to switch opcode
- *
- * Total size is (2+size*4) 16-bit code units.
- */
-
- uint16_t signature = *switchData++;
- DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));
-
- size = *switchData++;
-
- /* The keys are guaranteed to be aligned on a 32-bit boundary;
- * we can treat them as a native int array.
- */
- keys = reinterpret_cast<const int32_t*>(switchData);
-
- /* The entries are guaranteed to be aligned on a 32-bit boundary;
- * we can treat them as a native int array.
- */
- entries = keys + size;
-
- /*
- * Binary-search through the array of keys, which are guaranteed to
- * be sorted low-to-high.
- */
- int lo = 0;
- int hi = size - 1;
- while (lo <= hi) {
- int mid = (lo + hi) >> 1;
-
- int32_t foundVal = keys[mid];
- if (testVal < foundVal) {
- hi = mid - 1;
- } else if (testVal > foundVal) {
- lo = mid + 1;
- } else {
- return entries[mid];
- }
- }
- return kInstrLen;
-}
-
-extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
- const int kInstrLen = 3;
-
- /*
- * Packed switch data format:
- * ushort ident = 0x0100 magic value
- * ushort size number of entries in the table
- * int first_key first (and lowest) switch case value
- * int targets[size] branch targets, relative to switch opcode
- *
- * Total size is (4+size*2) 16-bit code units.
- */
- uint16_t signature = *switchData++;
- DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));
-
- uint16_t size = *switchData++;
-
- int32_t firstKey = *switchData++;
- firstKey |= (*switchData++) << 16;
-
- int index = testVal - firstKey;
- if (index < 0 || index >= size) {
- return kInstrLen;
- }
-
- /*
- * The entries are guaranteed to be aligned on a 32-bit boundary;
- * we can treat them as a native int array.
- */
- const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
- return entries[index];
-}
-
-bool CanUseMterp()
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Runtime* const runtime = Runtime::Current();
- return
- !runtime->IsAotCompiler() &&
- !runtime->GetInstrumentation()->IsActive() &&
- // mterp only knows how to deal with the normal exits. It cannot handle any of the
- // non-standard force-returns.
- !runtime->AreNonStandardExitsEnabled() &&
- // An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
- // know how to deal with these so we could end up never dealing with it if we are in an
- // infinite loop.
- !runtime->AreAsyncExceptionsThrown() &&
- (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
-}
-
-#define MTERP_INVOKE(Name) \
-extern "C" size_t MterpInvoke##Name(Thread* self, \
- ShadowFrame* shadow_frame, \
- uint16_t* dex_pc_ptr, \
- uint16_t inst_data) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- JValue* result_register = shadow_frame->GetResultRegister(); \
- const Instruction* inst = Instruction::At(dex_pc_ptr); \
- if (shadow_frame->GetMethod()->SkipAccessChecks()) { \
- return DoInvoke<k##Name, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>( \
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u; \
- } else { \
- return DoInvoke<k##Name, /*is_range=*/ false, /*do_access_check=*/ true, /*is_mterp=*/ true>( \
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u; \
- } \
-} \
-extern "C" size_t MterpInvoke##Name##Range(Thread* self, \
- ShadowFrame* shadow_frame, \
- uint16_t* dex_pc_ptr, \
- uint16_t inst_data) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- JValue* result_register = shadow_frame->GetResultRegister(); \
- const Instruction* inst = Instruction::At(dex_pc_ptr); \
- if (shadow_frame->GetMethod()->SkipAccessChecks()) { \
- return DoInvoke<k##Name, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>( \
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u; \
- } else { \
- return DoInvoke<k##Name, /*is_range=*/ true, /*do_access_check=*/ true, /*is_mterp=*/ true>( \
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u; \
- } \
-}
-
-MTERP_INVOKE(Virtual)
-MTERP_INVOKE(Super)
-MTERP_INVOKE(Interface)
-MTERP_INVOKE(Direct)
-MTERP_INVOKE(Static)
-
-#undef MTERP_INVOKE
-
-extern "C" size_t MterpInvokeCustom(Thread* self,
- ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- uint16_t inst_data)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- JValue* result_register = shadow_frame->GetResultRegister();
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokeCustom</* is_range= */ false>(
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
-}
-
-extern "C" size_t MterpInvokePolymorphic(Thread* self,
- ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- uint16_t inst_data)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- JValue* result_register = shadow_frame->GetResultRegister();
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokePolymorphic</* is_range= */ false>(
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
-}
-
-extern "C" size_t MterpInvokeCustomRange(Thread* self,
- ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- uint16_t inst_data)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- JValue* result_register = shadow_frame->GetResultRegister();
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokeCustom</*is_range=*/ true>(
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
-}
-
-extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
- ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- uint16_t inst_data)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- JValue* result_register = shadow_frame->GetResultRegister();
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokePolymorphic</* is_range= */ true>(
- self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
-}
-
-extern "C" void MterpThreadFenceForConstructor() {
- QuasiAtomic::ThreadFenceForConstructor();
-}
-
-extern "C" size_t MterpConstString(uint32_t index,
- uint32_t tgt_vreg,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
- if (UNLIKELY(s == nullptr)) {
- return 1u;
- }
- shadow_frame->SetVRegReference(tgt_vreg, s);
- return 0u;
-}
-
-extern "C" size_t MterpConstClass(uint32_t index,
- uint32_t tgt_vreg,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Class> c =
- ResolveVerifyAndClinit(dex::TypeIndex(index),
- shadow_frame->GetMethod(),
- self,
- /* can_run_clinit= */ false,
- !shadow_frame->GetMethod()->SkipAccessChecks());
- if (UNLIKELY(c == nullptr)) {
- return 1u;
- }
- shadow_frame->SetVRegReference(tgt_vreg, c);
- return 0u;
-}
-
-extern "C" size_t MterpConstMethodHandle(uint32_t index,
- uint32_t tgt_vreg,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::MethodHandle> mh = ResolveMethodHandle(self, index, shadow_frame->GetMethod());
- if (UNLIKELY(mh == nullptr)) {
- return 1u;
- }
- shadow_frame->SetVRegReference(tgt_vreg, mh);
- return 0u;
-}
-
-extern "C" size_t MterpConstMethodType(uint32_t index,
- uint32_t tgt_vreg,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::MethodType> mt =
- ResolveMethodType(self, dex::ProtoIndex(index), shadow_frame->GetMethod());
- if (UNLIKELY(mt == nullptr)) {
- return 1u;
- }
- shadow_frame->SetVRegReference(tgt_vreg, mt);
- return 0u;
-}
-
-extern "C" size_t MterpCheckCast(uint32_t index,
- StackReference<mirror::Object>* vreg_addr,
- art::ArtMethod* method,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Class> c =
- ResolveVerifyAndClinit(dex::TypeIndex(index),
- method,
- self,
- /* can_run_clinit= */ false,
- !method->SkipAccessChecks());
- if (UNLIKELY(c == nullptr)) {
- return 1u;
- }
- // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
- ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
- if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
- ThrowClassCastException(c, obj->GetClass());
- return 1u;
- }
- return 0u;
-}
-
-extern "C" size_t MterpInstanceOf(uint32_t index,
- StackReference<mirror::Object>* vreg_addr,
- art::ArtMethod* method,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
- method,
- self,
- /* can_run_clinit= */ false,
- !method->SkipAccessChecks());
- if (UNLIKELY(c == nullptr)) {
- return 0u; // Caller will check for pending exception. Return value unimportant.
- }
- // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
- ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
- return (obj != nullptr) && obj->InstanceOf(c) ? 1u : 0u;
-}
-
-extern "C" size_t MterpFillArrayData(mirror::Object* obj,
- const Instruction::ArrayDataPayload* payload)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return FillArrayData(obj, payload) ? 1u : 0u;
-}
-
-extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- ObjPtr<mirror::Object> obj = nullptr;
- ObjPtr<mirror::Class> c =
- ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
- shadow_frame->GetMethod(),
- self,
- /* can_run_clinit= */ false,
- !shadow_frame->GetMethod()->SkipAccessChecks());
- if (LIKELY(c != nullptr)) {
- if (UNLIKELY(c->IsStringClass())) {
- gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- obj = mirror::String::AllocEmptyString(self, allocator_type);
- } else {
- obj = AllocObjectFromCode(c, self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
- }
- }
- if (UNLIKELY(obj == nullptr)) {
- return 0u;
- }
- obj->GetClass()->AssertInitializedOrInitializingInThread(self);
- shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
- return 1u;
-}
-
-extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- uint32_t inst_data)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- ObjPtr<mirror::Object> a = shadow_frame->GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- return 0u;
- }
- int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
- ObjPtr<mirror::Object> val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
- ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
- if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
- array->SetWithoutChecks<false>(index, val);
- return 1u;
- }
- return 0u;
-}
-
-extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- JValue* result_register = shadow_frame->GetResultRegister();
- bool res = false;
- if (shadow_frame->GetMethod()->SkipAccessChecks()) {
- res = DoFilledNewArray</*is_range=*/false,
- /*do_access_check=*/false,
- /*transaction_active=*/false>(inst,
- *shadow_frame,
- self,
- result_register);
- } else {
- res = DoFilledNewArray</*is_range=*/false,
- /*do_access_check=*/true,
- /*transaction_active=*/false>(inst,
- *shadow_frame,
- self,
- result_register);
- }
- return res ? 1u : 0u;
-}
-
-extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- JValue* result_register = shadow_frame->GetResultRegister();
- bool res = false;
- if (shadow_frame->GetMethod()->SkipAccessChecks()) {
- res = DoFilledNewArray</*is_range=*/true,
- /*do_access_check=*/false,
- /*transaction_active=*/false>(inst,
- *shadow_frame,
- self,
- result_register);
- } else {
- res = DoFilledNewArray</*is_range=*/true,
- /*do_access_check=*/true,
- /*transaction_active=*/false>(inst,
- *shadow_frame,
- self,
- result_register);
- }
- return res ? 1u : 0u;
-}
-
-extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- uint32_t inst_data, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
- gc::AllocatorType allocator = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- ObjPtr<mirror::Object> obj;
- if (shadow_frame->GetMethod()->SkipAccessChecks()) {
- obj = AllocArrayFromCode</*kAccessCheck=*/ false>(dex::TypeIndex(inst->VRegC_22c()),
- length,
- shadow_frame->GetMethod(),
- self,
- allocator);
- } else {
- obj = AllocArrayFromCode</*kAccessCheck=*/ true>(dex::TypeIndex(inst->VRegC_22c()),
- length,
- shadow_frame->GetMethod(),
- self,
- allocator);
- }
- if (UNLIKELY(obj == nullptr)) {
- return 0u;
- }
- shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
- return 1u;
-}
-
-extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(self->IsExceptionPending());
- const instrumentation::Instrumentation* const instrumentation =
- Runtime::Current()->GetInstrumentation();
- return MoveToExceptionHandler(self, *shadow_frame, instrumentation) ? 1u : 0u;
-}
-
-struct MterpCheckHelper {
- DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
-};
-DEFINE_RUNTIME_DEBUG_FLAG(MterpCheckHelper, kSlowMode);
-
-extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Check that we are using the right interpreter.
- if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
- // The flag might be currently being updated on all threads. Retry with lock.
- MutexLock tll_mu(self, *Locks::thread_list_lock_);
- DCHECK_EQ(self->UseMterp(), CanUseMterp());
- }
- DCHECK(!Runtime::Current()->IsActiveTransaction());
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- uint16_t inst_data = inst->Fetch16(0);
- if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
- self->AssertPendingException();
- } else {
- self->AssertNoPendingException();
- }
- if (kTraceExecutionEnabled) {
- uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetDexInstructions();
- TraceExecution(*shadow_frame, inst, dex_pc);
- }
- if (kTestExportPC) {
- // Save invalid dex pc to force segfault if improperly used.
- shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(kExportPCPoison));
- }
- if (MterpCheckHelper::kSlowMode) {
- shadow_frame->CheckConsistentVRegs();
- }
-}
-
-extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
-}
-
-extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
-}
-
-extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
-}
-
-extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
-}
-
-extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
-}
-
-extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
-}
-
-extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Suspend Pending?: "
- << self->IsExceptionPending();
-}
-
-extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
-}
-
-extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self);
- const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- uint16_t inst_data = inst->Fetch16(0);
- if (flags & kCheckpointRequest) {
- LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
- } else if (flags & kSuspendRequest) {
- LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
- } else if (flags & kEmptyCheckpointRequest) {
- LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
- }
-}
-
-extern "C" size_t MterpSuspendCheck(Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- self->AllowThreadSuspension();
- return !self->UseMterp();
-}
-
-// Execute single field access instruction (get/put, static/instance).
-// The template arguments reduce this to fairly small amount of code.
-// It requires the target object and field to be already resolved.
-template<typename PrimType, FindFieldType kAccessType>
-ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
- uint16_t inst_data,
- ShadowFrame* shadow_frame,
- ObjPtr<mirror::Object> obj,
- MemberOffset offset,
- bool is_volatile)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- static_assert(std::is_integral<PrimType>::value, "Unexpected primitive type");
- constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
- constexpr bool kIsPrimitive = (kAccessType & FindFieldFlags::PrimitiveBit) != 0;
- constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;
-
- uint16_t vRegA = kIsStatic ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
- if (kIsPrimitive) {
- if (kIsRead) {
- PrimType value = UNLIKELY(is_volatile)
- ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
- : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
- if (sizeof(PrimType) == sizeof(uint64_t)) {
- shadow_frame->SetVRegLong(vRegA, value); // Set two consecutive registers.
- } else {
- shadow_frame->SetVReg(vRegA, static_cast<int32_t>(value)); // Sign/zero extend.
- }
- } else { // Write.
- uint64_t value = (sizeof(PrimType) == sizeof(uint64_t))
- ? shadow_frame->GetVRegLong(vRegA)
- : shadow_frame->GetVReg(vRegA);
- if (UNLIKELY(is_volatile)) {
- obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
- } else {
- obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
- }
- }
- } else { // Object.
- if (kIsRead) {
- ObjPtr<mirror::Object> value = UNLIKELY(is_volatile)
- ? obj->GetFieldObjectVolatile<mirror::Object>(offset)
- : obj->GetFieldObject<mirror::Object>(offset);
- shadow_frame->SetVRegReference(vRegA, value);
- } else { // Write.
- ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
- if (UNLIKELY(is_volatile)) {
- obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
- } else {
- obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
- }
- }
- }
-}
-
-template<typename PrimType, FindFieldType kAccessType, bool do_access_checks>
-NO_INLINE bool MterpFieldAccessSlow(Instruction* inst,
- uint16_t inst_data,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
- constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;
-
- // Update the dex pc in shadow frame, just in case anything throws.
- shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
- ArtMethod* referrer = shadow_frame->GetMethod();
- uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* field = FindFieldFromCode<kAccessType, do_access_checks>(
- field_idx, referrer, self, sizeof(PrimType));
- if (UNLIKELY(field == nullptr)) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- constexpr bool kIsPrimitive = (kAccessType & FindFieldFlags::PrimitiveBit) != 0;
- if (!kIsPrimitive && !kIsRead) {
- uint16_t vRegA = kIsStatic ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
- ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
- if (value != nullptr && field->ResolveType() == nullptr) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- }
- ObjPtr<mirror::Object> obj = kIsStatic
- ? field->GetDeclaringClass().Ptr()
- : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(field, kIsRead);
- return false;
- }
- MterpFieldAccess<PrimType, kAccessType>(
- inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
- return true;
-}
-
-// This methods is called from assembly to handle field access instructions.
-template<typename PrimType, FindFieldType kAccessType, bool do_access_checks>
-ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
- uint16_t inst_data,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
-
- // Try to find the field in small thread-local cache first.
- InterpreterCache* tls_cache = self->GetInterpreterCache();
- size_t tls_value;
- if (LIKELY(tls_cache->Get(inst, &tls_value))) {
- // The meaning of the cache value is opcode-specific.
- // It is ArtFiled* for static fields and the raw offset for instance fields.
- size_t offset = kIsStatic
- ? reinterpret_cast<ArtField*>(tls_value)->GetOffset().SizeValue()
- : tls_value;
- if (kIsDebugBuild) {
- uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* field = FindFieldFromCode<kAccessType, do_access_checks>(
- field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
- DCHECK_EQ(offset, field->GetOffset().SizeValue());
- }
- ObjPtr<mirror::Object> obj = kIsStatic
- ? reinterpret_cast<ArtField*>(tls_value)->GetDeclaringClass()
- : ObjPtr<mirror::Object>(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
- if (LIKELY(obj != nullptr)) {
- MterpFieldAccess<PrimType, kAccessType>(
- inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
- return true;
- }
- }
-
- // This effectively inlines the fast path from ArtMethod::GetDexCache.
- ArtMethod* referrer = shadow_frame->GetMethod();
- if (LIKELY(!referrer->IsObsolete() && !do_access_checks)) {
- // Avoid read barriers, since we need only the pointer to the native (non-movable)
- // DexCache field array which we can get even through from-space objects.
- ObjPtr<mirror::Class> klass = referrer->GetDeclaringClass<kWithoutReadBarrier>();
- ObjPtr<mirror::DexCache> dex_cache =
- klass->GetDexCache<kDefaultVerifyFlags, kWithoutReadBarrier>();
-
- // Try to find the desired field in DexCache.
- uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* field = dex_cache->GetResolvedField(field_idx);
- if (LIKELY(field != nullptr)) {
- bool visibly_initialized = !kIsStatic || field->GetDeclaringClass()->IsVisiblyInitialized();
- if (LIKELY(visibly_initialized)) {
- DCHECK_EQ(field, (FindFieldFromCode<kAccessType, do_access_checks>(
- field_idx, referrer, self, sizeof(PrimType))));
- ObjPtr<mirror::Object> obj = kIsStatic
- ? field->GetDeclaringClass().Ptr()
- : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
- // We check if nterp is supported as nterp and mterp use the cache in an
- // incompatible way.
- if (!IsNterpSupported() && LIKELY(kIsStatic || obj != nullptr)) {
- // Only non-volatile fields are allowed in the thread-local cache.
- if (LIKELY(!field->IsVolatile())) {
- if (kIsStatic) {
- tls_cache->Set(inst, reinterpret_cast<uintptr_t>(field));
- } else {
- tls_cache->Set(inst, field->GetOffset().SizeValue());
- }
- }
- MterpFieldAccess<PrimType, kAccessType>(
- inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
- return true;
- }
- }
- }
- }
-
- // Slow path. Last and with identical arguments so that it becomes single instruction tail call.
- return MterpFieldAccessSlow<PrimType, kAccessType, do_access_checks>(
- inst, inst_data, shadow_frame, self);
-}
-
-#define MTERP_FIELD_ACCESSOR(Name, PrimType, AccessType) \
-extern "C" bool Name(Instruction* inst, uint16_t inst_data, ShadowFrame* sf, Thread* self) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- if (sf->GetMethod()->SkipAccessChecks()) { \
- return MterpFieldAccessFast<PrimType, AccessType, false>(inst, inst_data, sf, self); \
- } else { \
- return MterpFieldAccessFast<PrimType, AccessType, true>(inst, inst_data, sf, self); \
- } \
-}
-
-#define MTERP_FIELD_ACCESSORS_FOR_TYPE(Sufix, PrimType, Kind) \
- MTERP_FIELD_ACCESSOR(MterpIGet##Sufix, PrimType, Instance##Kind##Read) \
- MTERP_FIELD_ACCESSOR(MterpIPut##Sufix, PrimType, Instance##Kind##Write) \
- MTERP_FIELD_ACCESSOR(MterpSGet##Sufix, PrimType, Static##Kind##Read) \
- MTERP_FIELD_ACCESSOR(MterpSPut##Sufix, PrimType, Static##Kind##Write)
-
-MTERP_FIELD_ACCESSORS_FOR_TYPE(I8, int8_t, Primitive)
-MTERP_FIELD_ACCESSORS_FOR_TYPE(U8, uint8_t, Primitive)
-MTERP_FIELD_ACCESSORS_FOR_TYPE(I16, int16_t, Primitive)
-MTERP_FIELD_ACCESSORS_FOR_TYPE(U16, uint16_t, Primitive)
-MTERP_FIELD_ACCESSORS_FOR_TYPE(U32, uint32_t, Primitive)
-MTERP_FIELD_ACCESSORS_FOR_TYPE(U64, uint64_t, Primitive)
-MTERP_FIELD_ACCESSORS_FOR_TYPE(Obj, uint32_t, Object)
-
-// Check that the primitive type for Obj variant above is correct.
-// It really must be primitive type for the templates to compile.
-// In the case of objects, it is only used to get the field size.
-static_assert(kHeapReferenceSize == sizeof(uint32_t), "Unexpected kHeapReferenceSize");
-
-#undef MTERP_FIELD_ACCESSORS_FOR_TYPE
-#undef MTERP_FIELD_ACCESSOR
-
-extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
- int32_t index)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (UNLIKELY(arr == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- return nullptr;
- }
- ObjPtr<mirror::ObjectArray<mirror::Object>> array = arr->AsObjectArray<mirror::Object>();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- return array->GetWithoutChecks(index).Ptr();
- } else {
- return nullptr;
- }
-}
-
-extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
- uint32_t field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- return nullptr;
- }
- return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
-}
-
-/*
- * Create a hotness_countdown based on the current method hotness_count and profiling
- * mode. In short, determine how many hotness events we hit before reporting back
- * to the full instrumentation via MterpAddHotnessBatch. Called once on entry to the method,
- * and regenerated following batch updates.
- */
-extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint16_t hotness_count = method->GetCounter();
- int32_t countdown_value = jit::kJitHotnessDisabled;
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- int32_t warm_threshold = jit->WarmMethodThreshold();
- int32_t hot_threshold = jit->HotMethodThreshold();
- int32_t osr_threshold = jit->OSRMethodThreshold();
- if (hotness_count < warm_threshold) {
- countdown_value = warm_threshold - hotness_count;
- } else if (hotness_count < hot_threshold) {
- countdown_value = hot_threshold - hotness_count;
- } else if (hotness_count < osr_threshold) {
- countdown_value = osr_threshold - hotness_count;
- } else {
- countdown_value = jit::kJitCheckForOSR;
- }
- if (jit::Jit::ShouldUsePriorityThreadWeight(self)) {
- int32_t priority_thread_weight = jit->PriorityThreadWeight();
- countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
- }
- }
- /*
- * The actual hotness threshold may exceed the range of our int16_t countdown value. This is
- * not a problem, though. We can just break it down into smaller chunks.
- */
- countdown_value = std::min(countdown_value,
- static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
- shadow_frame->SetCachedHotnessCountdown(countdown_value);
- shadow_frame->SetHotnessCountdown(countdown_value);
- return countdown_value;
-}
-
-/*
- * Report a batch of hotness events to the instrumentation and then return the new
- * countdown value to the next time we should report.
- */
-extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
- ShadowFrame* shadow_frame,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
- jit->AddSamples(self, method, count, /*with_backedges=*/ true);
- }
- return MterpSetUpHotnessCountdown(method, shadow_frame, self);
-}
-
-extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
- ShadowFrame* shadow_frame,
- int32_t offset)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1;
- bool did_osr = false;
- /*
- * To reduce the cost of polling the compiler to determine whether the requested OSR
- * compilation has completed, only check every Nth time. NOTE: the "osr_countdown <= 0"
- * condition is satisfied either by the decrement below or the initial setting of
- * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1.
- */
- if (osr_countdown <= 0) {
- ArtMethod* method = shadow_frame->GetMethod();
- JValue* result = shadow_frame->GetResultRegister();
- uint32_t dex_pc = shadow_frame->GetDexPC();
- jit::Jit* jit = Runtime::Current()->GetJit();
- osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
- if (offset <= 0) {
- // Keep updating hotness in case a compilation request was dropped. Eventually it will retry.
- jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
- }
- did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
- }
- shadow_frame->SetCachedHotnessCountdown(osr_countdown);
- return did_osr ? 1u : 0u;
-}
-
-} // namespace interpreter
-} // namespace art
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
deleted file mode 100644
index bc057c2df0..0000000000
--- a/runtime/interpreter/mterp/mterp.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
-#define ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
-
-#include <cstddef>
-#include <cstdint>
-
-#include "base/globals.h"
-
-/*
- * Mterp assembly handler bases
- */
-extern "C" void* artMterpAsmInstructionStart[];
-extern "C" void* artMterpAsmInstructionEnd[];
-
-namespace art {
-
-class Thread;
-
-namespace interpreter {
-
-void InitMterpTls(Thread* self);
-void CheckMterpAsmConstants();
-bool CanUseMterp();
-
-// Poison value for TestExportPC. If we segfault with this value, it means that a mterp
-// handler for a recent opcode failed to export the Dalvik PC prior to a possible exit from
-// the mterp environment.
-constexpr uintptr_t kExportPCPoison = 0xdead00ff;
-// Set true to enable poison testing of ExportPC. Uses Alt interpreter.
-constexpr bool kTestExportPC = false;
-
-constexpr size_t kMterpHandlerSize = 128;
-
-} // namespace interpreter
-} // namespace art
-
-#endif // ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
diff --git a/runtime/interpreter/mterp/mterp_stub.cc b/runtime/interpreter/mterp/mterp_stub.cc
deleted file mode 100644
index ec02909654..0000000000
--- a/runtime/interpreter/mterp/mterp_stub.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "../interpreter_common.h"
-
-/*
- * Stub definitions for targets without mterp implementations.
- */
-
-namespace art {
-namespace interpreter {
-/*
- * Call this during initialization to verify that the values in asm-constants.h
- * are still correct.
- */
-void CheckMterpAsmConstants() {
- // Nothing to check when mterp is not implemented.
-}
-
-void InitMterpTls(Thread* self) {
- self->SetMterpDefaultIBase(nullptr);
- self->SetMterpCurrentIBase(nullptr);
- self->SetMterpAltIBase(nullptr);
-}
-
-/*
- * The platform-specific implementation must provide this.
- */
-extern "C" bool ExecuteMterpImpl(Thread* self,
- const uint16_t* dex_instructions,
- ShadowFrame* shadow_frame,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UNUSED(self); UNUSED(shadow_frame); UNUSED(code_item); UNUSED(result_register);
- UNIMPLEMENTED(art::FATAL);
- return false;
-}
-
-} // namespace interpreter
-} // namespace art
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index 5fc360932b..ddef31d792 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -39,13 +39,26 @@ bool IsNterpSupported() {
bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
- // Nterp shares the same restrictions as Mterp.
// If the runtime is interpreter only, we currently don't use nterp as some
// parts of the runtime (like instrumentation) make assumption on an
// interpreter-only runtime to always be in a switch-like interpreter.
- return IsNterpSupported() && CanUseMterp() && !instr->InterpretOnly();
+ return IsNterpSupported() &&
+ !instr->InterpretOnly() &&
+ !runtime->IsAotCompiler() &&
+ !runtime->GetInstrumentation()->IsActive() &&
+ // nterp only knows how to deal with the normal exits. It cannot handle any of the
+ // non-standard force-returns.
+ !runtime->AreNonStandardExitsEnabled() &&
+ // An async exception has been thrown. We need to go to the switch interpreter. nterp doesn't
+ // know how to deal with these so we could end up never dealing with it if we are in an
+ // infinite loop.
+ !runtime->AreAsyncExceptionsThrown() &&
+ (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}
+// The entrypoint for nterp, which ArtMethods can directly point to.
+extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+
const void* GetNterpEntryPoint() {
return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}
@@ -60,7 +73,7 @@ void CheckNterpAsmConstants() {
* which one did, but if any one is too big the total size will
* overflow.
*/
- const int width = kMterpHandlerSize;
+ const int width = kNterpHandlerSize;
ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
@@ -740,18 +753,99 @@ extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr,
return nullptr;
}
-extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal);
extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension sants("In nterp");
- return MterpDoPackedSwitch(switchData, testVal);
+ const int kInstrLen = 3;
+
+ /*
+ * Packed switch data format:
+ * ushort ident = 0x0100 magic value
+ * ushort size number of entries in the table
+ * int first_key first (and lowest) switch case value
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (4+size*2) 16-bit code units.
+ */
+ uint16_t signature = *switchData++;
+ DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));
+
+ uint16_t size = *switchData++;
+
+ int32_t firstKey = *switchData++;
+ firstKey |= (*switchData++) << 16;
+
+ int index = testVal - firstKey;
+ if (index < 0 || index >= size) {
+ return kInstrLen;
+ }
+
+ /*
+ * The entries are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
+ return entries[index];
}
-extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal);
+/*
+ * Find the matching case. Returns the offset to the handler instructions.
+ *
+ * Returns 3 if we don't find a match (it's the size of the sparse-switch
+ * instruction).
+ */
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension sants("In nterp");
- return MterpDoSparseSwitch(switchData, testVal);
+ const int kInstrLen = 3;
+ uint16_t size;
+ const int32_t* keys;
+ const int32_t* entries;
+
+ /*
+ * Sparse switch data format:
+ * ushort ident = 0x0200 magic value
+ * ushort size number of entries in the table; > 0
+ * int keys[size] keys, sorted low-to-high; 32-bit aligned
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (2+size*4) 16-bit code units.
+ */
+
+ uint16_t signature = *switchData++;
+ DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));
+
+ size = *switchData++;
+
+ /* The keys are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ keys = reinterpret_cast<const int32_t*>(switchData);
+
+ /* The entries are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ entries = keys + size;
+
+ /*
+ * Binary-search through the array of keys, which are guaranteed to
+ * be sorted low-to-high.
+ */
+ int lo = 0;
+ int hi = size - 1;
+ while (lo <= hi) {
+ int mid = (lo + hi) >> 1;
+
+ int32_t foundVal = keys[mid];
+ if (testVal < foundVal) {
+ hi = mid - 1;
+ } else if (testVal > foundVal) {
+ lo = mid + 1;
+ } else {
+ return entries[mid];
+ }
+ }
+ return kInstrLen;
}
extern "C" void NterpFree(void* val) {
diff --git a/runtime/interpreter/mterp/x86/array.S b/runtime/interpreter/mterp/x86/array.S
deleted file mode 100644
index de846a4728..0000000000
--- a/runtime/interpreter/mterp/x86/array.S
+++ /dev/null
@@ -1,215 +0,0 @@
-%def op_aget(load="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- $load $data_offset(%eax,%ecx,$shift), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aget_boolean():
-% op_aget(load="movzbl", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-% op_aget(load="movsbl", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-% op_aget(load="movzwl", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecs <- vCC (requested index)
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aget_short():
-% op_aget(load="movswl", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
-/*
- * Array get, 64 bits. vAA <- vBB[vCC].
- */
- /* aget-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- movq (%eax), %xmm0 # xmm0 <- vBB[vCC]
- SET_WIDE_FP_VREG %xmm0, rINST # vAA <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aput(reg="rINST", store="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal $data_offset(%eax,%ecx,$shift), %eax
- GET_VREG rINST, rINST
- $store $reg, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aput_boolean():
-% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpAputObject) # (array, index)
- RESTORE_IBASE
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aput_short():
-% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
-/*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- vAA
- movq %xmm0, (%eax) # vBB[vCC] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_array_length():
-/*
- * Return the length of an array.
- */
- mov rINST, %eax # eax <- BA
- sarl $$4, rINST # rINST <- B
- GET_VREG %ecx, rINST # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $$0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
- SET_VREG rINST, %eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_fill_array_data():
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- GET_VREG %eax, rINST # eax <- vAA (array object)
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern $helper
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp)
- call SYMBOL($helper)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_filled_new_array_range():
-% op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpNewArray)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/control_flow.S b/runtime/interpreter/mterp/x86/control_flow.S
deleted file mode 100644
index 08dc8660e0..0000000000
--- a/runtime/interpreter/mterp/x86/control_flow.S
+++ /dev/null
@@ -1,208 +0,0 @@
-%def bincmp(revcmp=""):
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $$0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $$4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- j${revcmp} 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def zcmp(revcmp=""):
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $$0, VREG_ADDRESS(rINST) # compare (vA, 0)
- j${revcmp} 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_goto():
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbl rINSTbl, rINST # rINST <- ssssssAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-%def op_goto_16():
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswl 2(rPC), rINST # rINST <- ssssAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-%def op_goto_32():
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- movl 2(rPC), rINST # rINST <- AAAAAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-%def op_if_eq():
-% bincmp(revcmp="ne")
-
-%def op_if_eqz():
-% zcmp(revcmp="ne")
-
-%def op_if_ge():
-% bincmp(revcmp="l")
-
-%def op_if_gez():
-% zcmp(revcmp="l")
-
-%def op_if_gt():
-% bincmp(revcmp="le")
-
-%def op_if_gtz():
-% zcmp(revcmp="le")
-
-%def op_if_le():
-% bincmp(revcmp="g")
-
-%def op_if_lez():
-% zcmp(revcmp="g")
-
-%def op_if_lt():
-% bincmp(revcmp="ge")
-
-%def op_if_ltz():
-% zcmp(revcmp="ge")
-
-%def op_if_ne():
-% bincmp(revcmp="e")
-
-%def op_if_nez():
-% zcmp(revcmp="e")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax, rINST # eax <- vAA
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
- movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call SYMBOL($func)
- REFRESH_IBASE
- testl %eax, %eax
- movl %eax, rINST
- jmp MterpCommonTakenBranch
-
-%def op_return():
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- vAA
- xorl %ecx, %ecx
- jmp MterpReturn
-
-%def op_return_object():
-% op_return()
-
-%def op_return_void():
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- xorl %eax, %eax
- xorl %ecx, %ecx
- jmp MterpReturn
-
-%def op_return_wide():
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- v[AA+0]
- GET_VREG_HIGH %ecx, rINST # ecx <- v[AA+1]
- jmp MterpReturn
-
-%def op_sparse_switch():
-% op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINST # eax<- vAA (exception object)
- testl %eax, %eax
- jz common_errNullObject
- movl rSELF,%ecx
- movl %eax, THREAD_EXCEPTION_OFFSET(%ecx)
- jmp MterpException
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
deleted file mode 100644
index 7f51880c5a..0000000000
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ /dev/null
@@ -1,115 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern $helper
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL($helper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- movl rSELF, %eax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
- jz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL($helper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- movl rSELF, %eax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
- jz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-%def op_invoke_custom():
-% invoke(helper="MterpInvokeCustom")
-
-%def op_invoke_custom_range():
-% invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-% invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-% invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-% invoke(helper="MterpInvokeInterface")
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_interface_range():
-% invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-% invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-% invoke(helper="MterpInvokeStatic")
-
-
-%def op_invoke_static_range():
-% invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-% invoke(helper="MterpInvokeSuper")
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_super_range():
-% invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-% invoke(helper="MterpInvokeVirtual")
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_virtual_range():
-% invoke(helper="MterpInvokeVirtualRange")
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
deleted file mode 100644
index 2afef5202d..0000000000
--- a/runtime/interpreter/mterp/x86/main.S
+++ /dev/null
@@ -1,806 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86 ABI general notes:
-
-Caller save set:
- eax, edx, ecx, st(0)-st(7)
-Callee save set:
- ebx, esi, edi, ebp
-Return regs:
- 32-bit in eax
- 64-bit in edx:eax (low-order 32 in eax)
- fp on top of fp stack st(0)
-
-Parameters passed on stack, pushed right-to-left. On entry to target, first
-parm is at 4(%esp). Traditional entry code is:
-
-functEntry:
- push %ebp # save old frame pointer
- mov %ebp,%esp # establish new frame pointer
- sub FrameSize,%esp # Allocate storage for spill, locals & outs
-
-Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPC esi interpreted program counter, used for fetching instructions
- rFP edi interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE edx base of instruction handler table
- rREFS ebp base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define LITERAL(value) $$(value)
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $$(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $$value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushl \_reg
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popl \_reg
- .cfi_adjust_cfa_offset -4
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address + 4 * 4 for spills
- */
-#define FRAME_SIZE 28
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 (FRAME_SIZE + 16 + 16)
-#define IN_ARG2 (FRAME_SIZE + 16 + 12)
-#define IN_ARG1 (FRAME_SIZE + 16 + 8)
-#define IN_ARG0 (FRAME_SIZE + 16 + 4)
-/* Spill offsets relative to %esp */
-#define LOCAL0 (FRAME_SIZE - 4)
-#define LOCAL1 (FRAME_SIZE - 8)
-#define LOCAL2 (FRAME_SIZE - 12)
-/* Out Arg offsets, relative to %esp */
-#define OUT_ARG3 ( 12)
-#define OUT_ARG2 ( 8)
-#define OUT_ARG1 ( 4)
-#define OUT_ARG0 ( 0) /* <- ExecuteMterpImpl esp + 0 */
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF IN_ARG0(%esp)
-#define rPC %esi
-#define CFI_DEX 6 // DWARF register number of the register holding dex-pc (esi).
-#define CFI_TMP 0 // DWARF register number of the first argument register (eax).
-#define rFP %edi
-#define rINST %ebx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %edx
-#define rREFS %ebp
-#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
- */
-.macro RESTORE_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * If rSELF is already loaded then we can use it from known reg.
- */
-.macro RESTORE_IBASE_FROM_SELF _reg
- movl THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb MACRO_LITERAL(\_opnum), rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwl (rPC), rINST
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(${handler_size_bits}), %eax
- addl rIBASE, %eax
- jmp *%eax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leal 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl VREG_ADDRESS(\_vreg), \_reg
-.endm
-
-/* Read wide value to xmm. */
-.macro GET_WIDE_FP_VREG _reg _vreg
- movq VREG_ADDRESS(\_vreg), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, VREG_ADDRESS(\_vreg)
- movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
-.endm
-
-/* Write wide value from xmm. xmm is clobbered. */
-.macro SET_WIDE_FP_VREG _reg _vreg
- movq \_reg, VREG_ADDRESS(\_vreg)
- pxor \_reg, \_reg
- movq \_reg, VREG_REF_ADDRESS(\_vreg)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, VREG_ADDRESS(\_vreg)
- movl \_reg, VREG_REF_ADDRESS(\_vreg)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl VREG_HIGH_ADDRESS(\_vreg), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, VREG_HIGH_ADDRESS(\_vreg)
- movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
- movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
-.endm
-
-.macro GET_VREG_XMMs _xmmreg _vreg
- movss VREG_ADDRESS(\_vreg), \_xmmreg
-.endm
-.macro GET_VREG_XMMd _xmmreg _vreg
- movsd VREG_ADDRESS(\_vreg), \_xmmreg
-.endm
-.macro SET_VREG_XMMs _xmmreg _vreg
- movss \_xmmreg, VREG_ADDRESS(\_vreg)
-.endm
-.macro SET_VREG_XMMd _xmmreg _vreg
- movsd \_xmmreg, VREG_ADDRESS(\_vreg)
-.endm
-
-/*
- * function support macros.
- */
-.macro ENTRY name
- .text
- ASM_HIDDEN SYMBOL(\name)
- .global SYMBOL(\name)
- FUNCTION_TYPE(\name)
-SYMBOL(\name):
-.endm
-
-.macro END name
- SIZE(\name,\name)
-.endm
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- *
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
-ENTRY ExecuteMterpImpl
- .cfi_startproc
- .cfi_def_cfa esp, 4
-
- /* Spill callee save regs */
- PUSH %ebp
- PUSH %edi
- PUSH %esi
- PUSH %ebx
-
- /* Allocate frame */
- subl $$FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Load ShadowFrame pointer */
- movl IN_ARG2(%esp), %edx
-
- /* Remember the return register */
- movl IN_ARG3(%esp), %eax
- movl %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
-
- /* Remember the code_item */
- movl IN_ARG1(%esp), %ecx
- movl %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
- leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
- leal (rFP, %eax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
- lea (%ecx, %eax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Set up for backwards branches & osr profiling */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpSetUpHotnessCountdown)
-
- /* Starting ibase */
- REFRESH_IBASE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
- // cfi info continues, and covers the whole mterp implementation.
- END ExecuteMterpImpl
-
-%def dchecks_before_helper():
- // Call C++ to do debug checks and return to the handler using tail call.
- .extern MterpCheckBefore
- popl %eax # Return address (the instuction handler).
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- pushl %eax # Return address for the tail call.
- jmp SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
-
-%def opcode_pre():
-% add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
- #if !defined(NDEBUG)
- call SYMBOL(mterp_dchecks_before_helper)
- REFRESH_IBASE
- #endif
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
-
-%def helpers():
- ENTRY MterpHelpers
-
-%def footer():
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG0(%esp)
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG0(%esp)
- movl THREAD_FLAGS_OFFSET(%eax), %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movl rSELF, %eax
- testl $$-1, THREAD_EXCEPTION_OFFSET(%eax)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movl OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
- movl OFF_FP_DEX_PC(rFP), %ecx
- lea (%eax, %ecx, 2), rPC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- movl rSELF, %eax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
- jz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpw $$JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decw rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- REFRESH_IBASE
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp) # rSELF in eax
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl $$2, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpLogOSR)
-#endif
- movl $$1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xor %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $$1, %eax
- jmp MterpDone
-MterpReturn:
- movl OFF_FP_RESULT_REGISTER(rFP), %edx
- movl %eax, (%edx)
- movl %ecx, 4(%edx)
- mov $$1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmpw $$0, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addl $$FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %ebx
- POP %esi
- POP %edi
- POP %ebp
- ret
- .cfi_endproc
- END MterpHelpers
-
-%def instruction_end():
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
-%def instruction_start():
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
-
-%def default_helper_prefix():
-% return "mterp_"
-
-%def opcode_start():
- ENTRY mterp_${opcode}
-%def opcode_end():
- END mterp_${opcode}
-%def helper_start(name):
- ENTRY ${name}
-%def helper_end(name):
- END ${name}
diff --git a/runtime/interpreter/mterp/x86/object.S b/runtime/interpreter/mterp/x86/object.S
deleted file mode 100644
index 6ab1931783..0000000000
--- a/runtime/interpreter/mterp/x86/object.S
+++ /dev/null
@@ -1,167 +0,0 @@
-%def field(helper=""):
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern $helper
- REFRESH_INST ${opnum} # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL($helper)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_check_cast():
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- leal VREG_ADDRESS(rINST), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_iget(is_object="0", helper="MterpIGetU32"):
-% field(helper=helper)
-
-%def op_iget_boolean():
-% op_iget(helper="MterpIGetU8")
-
-%def op_iget_byte():
-% op_iget(helper="MterpIGetI8")
-
-%def op_iget_char():
-% op_iget(helper="MterpIGetU16")
-
-%def op_iget_object():
-% op_iget(is_object="1", helper="MterpIGetObj")
-
-%def op_iget_short():
-% op_iget(helper="MterpIGetI16")
-
-%def op_iget_wide():
-% op_iget(helper="MterpIGetU64")
-
-%def op_instance_of():
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, %eax # eax <- BA
- sarl $$4, %eax # eax <- B
- leal VREG_ADDRESS(%eax), %ecx # Get object address
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- andb $$0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_iput(is_object="0", helper="MterpIPutU32"):
-% field(helper=helper)
-
-%def op_iput_boolean():
-% op_iput(helper="MterpIPutU8")
-
-%def op_iput_byte():
-% op_iput(helper="MterpIPutI8")
-
-%def op_iput_char():
-% op_iput(helper="MterpIPutU16")
-
-%def op_iput_object():
-% op_iput(is_object="1", helper="MterpIPutObj")
-
-%def op_iput_short():
-% op_iput(helper="MterpIPutI16")
-
-%def op_iput_wide():
-% op_iput(helper="MterpIPutU64")
-
-%def op_new_instance():
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpNewInstance)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_sget(is_object="0", helper="MterpSGetU32"):
-% field(helper=helper)
-
-%def op_sget_boolean():
-% op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-% op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-% op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-% op_sget(is_object="1", helper="MterpSGetObj")
-
-%def op_sget_short():
-% op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-% op_sget(helper="MterpSGetU64")
-
-%def op_sput(is_object="0", helper="MterpSPutU32"):
-% field(helper=helper)
-
-%def op_sput_boolean():
-% op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-% op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-% op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-% op_sput(is_object="1", helper="MterpSPutObj")
-
-%def op_sput_short():
-% op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/x86/other.S b/runtime/interpreter/mterp/x86/other.S
deleted file mode 100644
index b0fd1ec3b0..0000000000
--- a/runtime/interpreter/mterp/x86/other.S
+++ /dev/null
@@ -1,379 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-%def op_const():
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINST # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_const_16():
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINST # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_const_4():
- /* const/4 vA, #+B */
- movsx rINSTbl, %eax # eax <-ssssssBx
- movl $$0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $$4, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_const_class():
-% const(helper="MterpConstClass")
-
-%def op_const_high16():
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $$16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINST # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_const_method_handle():
-% const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-% const(helper="MterpConstMethodType")
-
-%def op_const_string():
-% const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_const_wide():
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movl 2(rPC), %eax # eax <- lsw
- movzbl rINSTbl, %ecx # ecx <- AA
- movl 6(rPC), rINST # rINST <- msw
- SET_VREG %eax, %ecx
- SET_VREG_HIGH rINST, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-%def op_const_wide_16():
- /* const-wide/16 vAA, #+BBBB */
- movswl 2(rPC), %eax # eax <- ssssBBBB
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_const_wide_32():
- /* const-wide/32 vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # eax <- BBBBbbbb
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_const_wide_high16():
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $$16, %eax # eax <- BBBB0000
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- xorl %eax, %eax
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_monitor_enter():
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artLockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC 1
- movl rSELF, %eax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
- jz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-%def op_monitor_exit():
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC 1
- movl rSELF, %eax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
- jz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-%def op_move(is_object="0"):
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $$0xf, %al # eax <- A
- shrl $$4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if $is_object
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_16(is_object="0"):
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwl 4(rPC), %ecx # ecx <- BBBB
- movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST, %ecx
- .if $is_object
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_move_exception():
- /* move-exception vAA */
- movl rSELF, %ecx
- movl THREAD_EXCEPTION_OFFSET(%ecx), %eax
- SET_VREG_OBJECT %eax, rINST # fp[AA] <- exception object
- movl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_from16(is_object="0"):
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzx rINSTbl, %eax # eax <- AA
- movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST, rINST # rINST <- fp[BBBB]
- .if $is_object
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_move_object():
-% op_move(is_object="1")
-
-%def op_move_object_16():
-% op_move_16(is_object="1")
-
-%def op_move_object_from16():
-% op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
- /* for: move-result, move-result-object */
- /* op vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl (%eax), %eax # r0 <- result.i.
- .if $is_object
- SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINST # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_result_object():
-% op_move_result(is_object="1")
-
-%def op_move_result_wide():
- /* move-result-wide vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl 4(%eax), %ecx # Get high
- movl (%eax), %eax # Get low
- SET_VREG %eax, rINST # v[AA+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[AA+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_wide():
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %ecx # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_wide_16():
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 4(rPC), %ecx # ecx<- BBBB
- movzwl 2(rPC), %eax # eax<- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_move_wide_from16():
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- movzbl rINSTbl, %eax # eax <- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_nop():
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_unused_3e():
-% unused()
-
-%def op_unused_3f():
-% unused()
-
-%def op_unused_40():
-% unused()
-
-%def op_unused_41():
-% unused()
-
-%def op_unused_42():
-% unused()
-
-%def op_unused_43():
-% unused()
-
-%def op_unused_73():
-% unused()
-
-%def op_unused_79():
-% unused()
-
-%def op_unused_7a():
-% unused()
-
-%def op_unused_e3():
-% unused()
-
-%def op_unused_e4():
-% unused()
-
-%def op_unused_e5():
-% unused()
-
-%def op_unused_e6():
-% unused()
-
-%def op_unused_e7():
-% unused()
-
-%def op_unused_e8():
-% unused()
-
-%def op_unused_e9():
-% unused()
-
-%def op_unused_ea():
-% unused()
-
-%def op_unused_eb():
-% unused()
-
-%def op_unused_ec():
-% unused()
-
-%def op_unused_ed():
-% unused()
-
-%def op_unused_ee():
-% unused()
-
-%def op_unused_ef():
-% unused()
-
-%def op_unused_f0():
-% unused()
-
-%def op_unused_f1():
-% unused()
-
-%def op_unused_f2():
-% unused()
-
-%def op_unused_f3():
-% unused()
-
-%def op_unused_f4():
-% unused()
-
-%def op_unused_f5():
-% unused()
-
-%def op_unused_f6():
-% unused()
-
-%def op_unused_f7():
-% unused()
-
-%def op_unused_f8():
-% unused()
-
-%def op_unused_f9():
-% unused()
-
-%def op_unused_fc():
-% unused()
-
-%def op_unused_fd():
-% unused()
diff --git a/runtime/interpreter/mterp/x86_64/array.S b/runtime/interpreter/mterp/x86_64/array.S
deleted file mode 100644
index e49c09778c..0000000000
--- a/runtime/interpreter/mterp/x86_64/array.S
+++ /dev/null
@@ -1,178 +0,0 @@
-%def op_aget(load="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0"):
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if $wide
- movq $data_offset(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- $load $data_offset(%rax,%rcx,$shift), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aget_boolean():
-% op_aget(load="movzbl", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-% op_aget(load="movsbl", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-% op_aget(load="movzwl", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG OUT_32_ARG0, %rax # eax <- vBB (array object)
- GET_VREG OUT_32_ARG1, %rcx # ecx <- vCC (requested index)
- EXPORT_PC
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aget_short():
-% op_aget(load="movswl", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
-% op_aget(load="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1")
-
-%def op_aput(reg="rINST", store="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0"):
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if $wide
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- $store $reg, $data_offset(%rax,%rcx,$shift)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aput_boolean():
-% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST ${opnum}
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpAputObject) # (array, index)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_aput_short():
-% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
-% op_aput(reg="rINSTq", store="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1")
-
-%def op_array_length():
-/*
- * Return the length of an array.
- */
- movl rINST, %eax # eax <- BA
- sarl $$4, rINST # rINST <- B
- GET_VREG %ecx, rINSTq # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $$0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
- SET_VREG rINST, %rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_fill_array_data():
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movslq 2(rPC), %rcx # rcx <- ssssssssBBBBbbbb
- leaq (rPC,%rcx,2), OUT_ARG1 # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG0, rINSTq # OUT_ARG0 <- vAA (array object)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern $helper
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- movq rSELF, OUT_ARG2
- call SYMBOL($helper)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_filled_new_array_range():
-% op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST ${opnum}
- movq rINSTq, OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpNewArray)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/control_flow.S b/runtime/interpreter/mterp/x86_64/control_flow.S
deleted file mode 100644
index 3b52c62edf..0000000000
--- a/runtime/interpreter/mterp/x86_64/control_flow.S
+++ /dev/null
@@ -1,197 +0,0 @@
-%def bincmp(revcmp=""):
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- j${revcmp} 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def zcmp(revcmp=""):
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $$0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- j${revcmp} 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_goto():
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-%def op_goto_16():
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-%def op_goto_32():
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Because we need the SF bit set, we'll use an adds
- * to convert from Dalvik offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-%def op_if_eq():
-% bincmp(revcmp="ne")
-
-%def op_if_eqz():
-% zcmp(revcmp="ne")
-
-%def op_if_ge():
-% bincmp(revcmp="l")
-
-%def op_if_gez():
-% zcmp(revcmp="l")
-
-%def op_if_gt():
-% bincmp(revcmp="le")
-
-%def op_if_gtz():
-% zcmp(revcmp="le")
-
-%def op_if_le():
-% bincmp(revcmp="g")
-
-%def op_if_lez():
-% zcmp(revcmp="g")
-
-%def op_if_lt():
-% bincmp(revcmp="ge")
-
-%def op_if_ltz():
-% zcmp(revcmp="ge")
-
-%def op_if_ne():
-% bincmp(revcmp="e")
-
-%def op_if_nez():
-% zcmp(revcmp="e")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movslq 2(rPC), OUT_ARG0 # rcx <- ssssssssBBBBbbbb
- leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
- call SYMBOL($func)
- testl %eax, %eax
- movslq %eax, rINSTq
- jmp MterpCommonTakenBranch
-
-%def op_return():
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINSTq # eax <- vAA
- jmp MterpReturn
-
-%def op_return_object():
-% op_return()
-
-%def op_return_void():
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- xorq %rax, %rax
- jmp MterpReturn
-
-%def op_return_wide():
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_WIDE_VREG %rax, rINSTq # eax <- v[AA]
- jmp MterpReturn
-
-%def op_sparse_switch():
-% op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINSTq # eax<- vAA (exception object)
- testb %al, %al
- jz common_errNullObject
- movq rSELF, %rcx
- movq %rax, THREAD_EXCEPTION_OFFSET(%rcx)
- jmp MterpException
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
deleted file mode 100644
index a7230962c1..0000000000
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ /dev/null
@@ -1,109 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern $helper
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST ${opnum}
- movl rINST, OUT_32_ARG3
- call SYMBOL($helper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- movq rSELF, %rax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
- jz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST ${opnum}
- movl rINST, OUT_32_ARG3
- call SYMBOL($helper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- movq rSELF, %rax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
- jz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-%def op_invoke_custom():
-% invoke(helper="MterpInvokeCustom")
-
-%def op_invoke_custom_range():
-% invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-% invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-% invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-% invoke(helper="MterpInvokeInterface")
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_interface_range():
-% invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-% invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-% invoke(helper="MterpInvokeStatic")
-
-
-%def op_invoke_static_range():
-% invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-% invoke(helper="MterpInvokeSuper")
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_super_range():
-% invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-% invoke(helper="MterpInvokeVirtual")
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_virtual_range():
-% invoke(helper="MterpInvokeVirtualRange")
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
deleted file mode 100644
index 4aeebadc29..0000000000
--- a/runtime/interpreter/mterp/x86_64/main.S
+++ /dev/null
@@ -1,761 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86_64 ABI general notes:
-
-Caller save set:
- rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
-Callee save set:
- rbx, rbp, r12-r15
-Return regs:
- 32-bit in eax
- 64-bit in rax
- fp on xmm0
-
-First 8 fp parameters came in xmm0-xmm7.
-First 6 non-fp parameters came in rdi, rsi, rdx, rcx, r8, r9.
-Other parameters passed on stack, pushed right-to-left. On entry to target, first
-param is at 8(%esp). Traditional entry code is:
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86_64 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPROFILE rbp countdown register for jit profiling
- rPC r12 interpreted program counter, used for fetching instructions
- rFP r13 interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE r14 base of instruction handler table
- rREFS r15 base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define LITERAL(value) $$(value)
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $$(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $$value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushq \_reg
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popq \_reg
- .cfi_adjust_cfa_offset -8
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 8 bytes for return address + 6 * 8 for spills.
- */
-#define FRAME_SIZE 8
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 %rcx
-#define IN_ARG2 %rdx
-#define IN_ARG1 %rsi
-#define IN_ARG0 %rdi
-/* Spill offsets relative to %esp */
-#define SELF_SPILL (FRAME_SIZE - 8)
-/* Out Args */
-#define OUT_ARG3 %rcx
-#define OUT_ARG2 %rdx
-#define OUT_ARG1 %rsi
-#define OUT_ARG0 %rdi
-#define OUT_32_ARG3 %ecx
-#define OUT_32_ARG2 %edx
-#define OUT_32_ARG1 %esi
-#define OUT_32_ARG0 %edi
-#define OUT_FP_ARG1 %xmm1
-#define OUT_FP_ARG0 %xmm0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF SELF_SPILL(%rsp)
-#define rPC %r12
-#define CFI_DEX 12 // DWARF register number of the register holding dex-pc (rPC).
-#define CFI_TMP 5 // DWARF register number of the first argument register (rdi).
-#define rFP %r13
-#define rINST %ebx
-#define rINSTq %rbx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %r14
-#define rREFS %r15
-#define rPROFILE %ebp
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- */
-.macro REFRESH_IBASE_REG self_reg
- movq THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
-.endm
-.macro REFRESH_IBASE
- movq rSELF, rIBASE
- REFRESH_IBASE_REG rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb $$\_opnum, rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwq (rPC), rINSTq
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(${handler_size_bits}), %eax
- addq rIBASE, %rax
- jmp *%rax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leaq 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl VREG_ADDRESS(\_vreg), \_reg
-.endm
-
-/* Read wide value. */
-.macro GET_WIDE_VREG _reg _vreg
- movq VREG_ADDRESS(\_vreg), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, VREG_ADDRESS(\_vreg)
- movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
-.endm
-
-/* Write wide value. reg is clobbered. */
-.macro SET_WIDE_VREG _reg _vreg
- movq \_reg, VREG_ADDRESS(\_vreg)
- xorq \_reg, \_reg
- movq \_reg, VREG_REF_ADDRESS(\_vreg)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, VREG_ADDRESS(\_vreg)
- movl \_reg, VREG_REF_ADDRESS(\_vreg)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl VREG_HIGH_ADDRESS(\_vreg), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, VREG_HIGH_ADDRESS(\_vreg)
- movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
- movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
-.endm
-
-.macro GET_VREG_XMMs _xmmreg _vreg
- movss VREG_ADDRESS(\_vreg), \_xmmreg
-.endm
-.macro GET_VREG_XMMd _xmmreg _vreg
- movsd VREG_ADDRESS(\_vreg), \_xmmreg
-.endm
-.macro SET_VREG_XMMs _xmmreg _vreg
- movss \_xmmreg, VREG_ADDRESS(\_vreg)
-.endm
-.macro SET_VREG_XMMd _xmmreg _vreg
- movsd \_xmmreg, VREG_ADDRESS(\_vreg)
-.endm
-
-/*
- * function support macros.
- */
-.macro ENTRY name
- .text
- ASM_HIDDEN SYMBOL(\name)
- .global SYMBOL(\name)
- FUNCTION_TYPE(\name)
-SYMBOL(\name):
-.endm
-
-.macro END name
- SIZE(\name,\name)
-.endm
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- *
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
-
-ENTRY ExecuteMterpImpl
- .cfi_startproc
- .cfi_def_cfa rsp, 8
-
- /* Spill callee save regs */
- PUSH %rbx
- PUSH %rbp
- PUSH %r12
- PUSH %r13
- PUSH %r14
- PUSH %r15
-
- /* Allocate frame */
- subq $$FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Remember the return register */
- movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
-
- /* Remember the code_item */
- movq IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
- leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
- leaq (rFP, %rax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
- leaq (IN_ARG1, %rax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- movq IN_ARG0, rSELF
- REFRESH_IBASE_REG IN_ARG0
-
- /* Set up for backwards branches & osr profiling */
- movq IN_ARG0, OUT_ARG2 /* Set up OUT_ARG2 before clobbering IN_ARG0 */
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpSetUpHotnessCountdown)
- movswl %ax, rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
- // cfi info continues, and covers the whole mterp implementation.
- END ExecuteMterpImpl
-
-%def dchecks_before_helper():
- // Call C++ to do debug checks and return to the handler using tail call.
- .extern MterpCheckBefore
- popq %rax # Return address (the instuction handler).
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- pushq %rax # Return address for the tail call.
- jmp SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
-
-%def opcode_pre():
-% add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
- #if !defined(NDEBUG)
- call SYMBOL(mterp_dchecks_before_helper)
- #endif
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
-
-%def helpers():
- ENTRY MterpHelpers
-
-%def footer():
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movq OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
- mov OFF_FP_DEX_PC(rFP), %ecx
- leaq (%rax, %rcx, 2), rPC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- movq rSELF, %rax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
- jz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpl $$JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decl rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movq rSELF, %rax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
- REFRESH_IBASE_REG %rax
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movswl %ax, rPROFILE
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl $$2, OUT_32_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl rINST, OUT_32_ARG2
- call SYMBOL(MterpLogOSR)
-#endif
- movl $$1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xorl %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $$1, %eax
- jmp MterpDone
-MterpReturn:
- movq OFF_FP_RESULT_REGISTER(rFP), %rdx
- movq %rax, (%rdx)
- movl $$1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- testl rPROFILE, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addq $$FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %r15
- POP %r14
- POP %r13
- POP %r12
- POP %rbp
- POP %rbx
- ret
- .cfi_endproc
- END MterpHelpers
-
-%def instruction_end():
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
-%def instruction_start():
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
-
-%def default_helper_prefix():
-% return "mterp_"
-
-%def opcode_start():
- ENTRY mterp_${opcode}
-%def opcode_end():
- END mterp_${opcode}
-%def helper_start(name):
- ENTRY ${name}
-%def helper_end(name):
- END ${name}
diff --git a/runtime/interpreter/mterp/x86_64/object.S b/runtime/interpreter/mterp/x86_64/object.S
deleted file mode 100644
index 643c6dafe2..0000000000
--- a/runtime/interpreter/mterp/x86_64/object.S
+++ /dev/null
@@ -1,152 +0,0 @@
-%def field(helper=""):
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern $helper
- REFRESH_INST ${opnum} # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL($helper)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_check_cast():
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
- leaq VREG_ADDRESS(rINSTq), OUT_ARG1
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_iget(is_object="0", helper="MterpIGetU32"):
-% field(helper=helper)
-
-%def op_iget_boolean():
-% op_iget(helper="MterpIGetU8")
-
-%def op_iget_byte():
-% op_iget(helper="MterpIGetI8")
-
-%def op_iget_char():
-% op_iget(helper="MterpIGetU16")
-
-%def op_iget_object():
-% op_iget(is_object="1", helper="MterpIGetObj")
-
-%def op_iget_short():
-% op_iget(helper="MterpIGetI16")
-
-%def op_iget_wide():
-% op_iget(helper="MterpIGetU64")
-
-%def op_instance_of():
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- CCCC
- movl rINST, %eax # eax <- BA
- sarl $$4, %eax # eax <- B
- leaq VREG_ADDRESS(%rax), OUT_ARG1 # Get object address
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movsbl %al, %eax
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- andb $$0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_iput(is_object="0", helper="MterpIPutU32"):
-% field(helper=helper)
-
-%def op_iput_boolean():
-% op_iput(helper="MterpIPutU8")
-
-%def op_iput_byte():
-% op_iput(helper="MterpIPutI8")
-
-%def op_iput_char():
-% op_iput(helper="MterpIPutU16")
-
-%def op_iput_object():
-% op_iput(is_object="1", helper="MterpIPutObj")
-
-%def op_iput_short():
-% op_iput(helper="MterpIPutI16")
-
-%def op_iput_wide():
-% op_iput(helper="MterpIPutU64")
-
-%def op_new_instance():
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rSELF, OUT_ARG1
- REFRESH_INST ${opnum}
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpNewInstance)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_sget(is_object="0", helper="MterpSGetU32"):
-% field(helper=helper)
-
-%def op_sget_boolean():
-% op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-% op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-% op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-% op_sget(is_object="1", helper="MterpSGetObj")
-
-%def op_sget_short():
-% op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-% op_sget(helper="MterpSGetU64")
-
-%def op_sput(is_object="0", helper="MterpSPutU32"):
-% field(helper=helper)
-
-%def op_sput_boolean():
-% op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-% op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-% op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-% op_sput(is_object="1", helper="MterpSPutObj")
-
-%def op_sput_short():
-% op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/x86_64/other.S b/runtime/interpreter/mterp/x86_64/other.S
deleted file mode 100644
index 83e14f0fd7..0000000000
--- a/runtime/interpreter/mterp/x86_64/other.S
+++ /dev/null
@@ -1,348 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-%def op_const():
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINSTq # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_const_16():
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINSTq # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_const_4():
- /* const/4 vA, #+B */
- movsbl rINSTbl, %eax # eax <-ssssssBx
- movl $$0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $$4, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_const_class():
-% const(helper="MterpConstClass")
-
-%def op_const_high16():
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $$16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINSTq # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_const_method_handle():
-% const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-% const(helper="MterpConstMethodType")
-
-%def op_const_string():
-% const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- BBBB
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_const_wide():
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movq 2(rPC), %rax # rax <- HHHHhhhhBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-%def op_const_wide_16():
- /* const-wide/16 vAA, #+BBBB */
- movswq 2(rPC), %rax # rax <- ssssBBBB
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_const_wide_32():
- /* const-wide/32 vAA, #+BBBBbbbb */
- movslq 2(rPC), %rax # eax <- ssssssssBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_const_wide_high16():
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwq 2(rPC), %rax # eax <- 0000BBBB
- salq $$48, %rax # eax <- BBBB0000
- SET_WIDE_VREG %rax, rINSTq # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_monitor_enter():
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artLockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC 1
- movq rSELF, %rax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
- jz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-%def op_monitor_exit():
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC 1
- movq rSELF, %rax
- cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
- jz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-%def op_move(is_object="0"):
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $$0xf, %al # eax <- A
- shrl $$4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if $is_object
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_16(is_object="0"):
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwq 4(rPC), %rcx # ecx <- BBBB
- movzwq 2(rPC), %rax # eax <- AAAA
- GET_VREG %edx, %rcx
- .if $is_object
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_move_exception():
- /* move-exception vAA */
- movq rSELF, %rcx
- movl THREAD_EXCEPTION_OFFSET(%rcx), %eax
- SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- exception object
- movl $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_from16(is_object="0"):
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzwq 2(rPC), %rax # eax <- BBBB
- GET_VREG %edx, %rax # edx <- fp[BBBB]
- .if $is_object
- SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %edx, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_move_object():
-% op_move(is_object="1")
-
-%def op_move_object_16():
-% op_move_16(is_object="1")
-
-%def op_move_object_from16():
-% op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
- /* for: move-result, move-result-object */
- /* op vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movl (%rax), %eax # r0 <- result.i.
- .if $is_object
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_result_object():
-% op_move_result(is_object="1")
-
-%def op_move_result_wide():
- /* move-result-wide vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movq (%rax), %rdx # Get wide
- SET_WIDE_VREG %rdx, rINSTq # v[AA] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_wide():
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movl rINST, %ecx # ecx <- BA
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_WIDE_VREG %rdx, rINSTq # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rcx # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_move_wide_16():
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwq 4(rPC), %rcx # ecx<- BBBB
- movzwq 2(rPC), %rax # eax<- AAAA
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rax # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-%def op_move_wide_from16():
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, rINSTq # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-%def op_nop():
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-%def op_unused_3e():
-% unused()
-
-%def op_unused_3f():
-% unused()
-
-%def op_unused_40():
-% unused()
-
-%def op_unused_41():
-% unused()
-
-%def op_unused_42():
-% unused()
-
-%def op_unused_43():
-% unused()
-
-%def op_unused_73():
-% unused()
-
-%def op_unused_79():
-% unused()
-
-%def op_unused_7a():
-% unused()
-
-%def op_unused_e3():
-% unused()
-
-%def op_unused_e4():
-% unused()
-
-%def op_unused_e5():
-% unused()
-
-%def op_unused_e6():
-% unused()
-
-%def op_unused_e7():
-% unused()
-
-%def op_unused_e8():
-% unused()
-
-%def op_unused_e9():
-% unused()
-
-%def op_unused_ea():
-% unused()
-
-%def op_unused_eb():
-% unused()
-
-%def op_unused_ec():
-% unused()
-
-%def op_unused_ed():
-% unused()
-
-%def op_unused_ee():
-% unused()
-
-%def op_unused_ef():
-% unused()
-
-%def op_unused_f0():
-% unused()
-
-%def op_unused_f1():
-% unused()
-
-%def op_unused_f2():
-% unused()
-
-%def op_unused_f3():
-% unused()
-
-%def op_unused_f4():
-% unused()
-
-%def op_unused_f5():
-% unused()
-
-%def op_unused_f6():
-% unused()
-
-%def op_unused_f7():
-% unused()
-
-%def op_unused_f8():
-% unused()
-
-%def op_unused_f9():
-% unused()
-
-%def op_unused_fc():
-% unused()
-
-%def op_unused_fd():
-% unused()
diff --git a/runtime/interpreter/mterp/x86_64/arithmetic.S b/runtime/interpreter/mterp/x86_64ng/arithmetic.S
index 0ef7a8369c..0ef7a8369c 100644
--- a/runtime/interpreter/mterp/x86_64/arithmetic.S
+++ b/runtime/interpreter/mterp/x86_64ng/arithmetic.S
diff --git a/runtime/interpreter/mterp/x86_64/floating_point.S b/runtime/interpreter/mterp/x86_64ng/floating_point.S
index 599b3f4965..599b3f4965 100644
--- a/runtime/interpreter/mterp/x86_64/floating_point.S
+++ b/runtime/interpreter/mterp/x86_64ng/floating_point.S
diff --git a/runtime/interpreter/mterp/x86/arithmetic.S b/runtime/interpreter/mterp/x86ng/arithmetic.S
index 973e5b8a0f..973e5b8a0f 100644
--- a/runtime/interpreter/mterp/x86/arithmetic.S
+++ b/runtime/interpreter/mterp/x86ng/arithmetic.S
diff --git a/runtime/interpreter/mterp/x86/floating_point.S b/runtime/interpreter/mterp/x86ng/floating_point.S
index 0b3c06caf1..0b3c06caf1 100644
--- a/runtime/interpreter/mterp/x86/floating_point.S
+++ b/runtime/interpreter/mterp/x86ng/floating_point.S
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 050233dac5..e8069d9e51 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -197,9 +197,7 @@ static uint32_t EnableDebugFeatures(uint32_t runtime_flags) {
if ((runtime_flags & DEBUG_ALWAYS_JIT) != 0) {
jit::JitOptions* jit_options = runtime->GetJITOptions();
CHECK(jit_options != nullptr);
- Runtime::Current()->DoAndMaybeSwitchInterpreter([=]() {
- jit_options->SetJitAtFirstUse();
- });
+ jit_options->SetJitAtFirstUse();
runtime_flags &= ~DEBUG_ALWAYS_JIT;
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 99268c0bca..811fda786e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
- // Last oat version changed reason: StaticFieldVarHandle introduction.
- static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '9', '6', '\0' } };
+ // Last oat version changed reason: Removal of mterp.
+ static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '9', '7', '\0' } };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 543ff720ef..639e9e8446 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -19,7 +19,6 @@
#include "arch/instruction_set.h"
#include "art_method.h"
#include "dex/dex_file_types.h"
-#include "interpreter/interpreter_mterp_impl.h"
#include "interpreter/mterp/nterp.h"
#include "nterp_helpers.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 2ffaf98103..0daa7769ed 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -26,7 +26,6 @@
#include "base/mutex.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "gc_root-inl.h"
-#include "interpreter/mterp/mterp.h"
#include "obj_ptr-inl.h"
#include "thread_list.h"
@@ -89,15 +88,6 @@ inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
return reinterpret_cast64<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
}
-template<typename Action>
-void Runtime::DoAndMaybeSwitchInterpreter(Action lamda) {
- MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_);
- lamda();
- Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
- thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
- }, nullptr);
-}
-
} // namespace art
#endif // ART_RUNTIME_RUNTIME_INL_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2ce6764259..2573b3a684 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -882,7 +882,7 @@ bool Runtime::Start() {
self->TransitionFromRunnableToSuspended(kNative);
- DoAndMaybeSwitchInterpreter([=](){ started_ = true; });
+ started_ = true;
if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
ScopedObjectAccess soa(self);
@@ -2943,7 +2943,7 @@ void Runtime::CreateJit() {
}
jit::Jit* jit = jit::Jit::Create(jit_code_cache_.get(), jit_options_.get());
- DoAndMaybeSwitchInterpreter([=](){ jit_.reset(jit); });
+ jit_.reset(jit);
if (jit == nullptr) {
LOG(WARNING) << "Failed to allocate JIT";
// Release JIT code cache resources (several MB of memory).
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 080ba6382c..d30dd87f25 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -781,7 +781,7 @@ class Runtime {
}
void SetNonStandardExitsEnabled() {
- DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
+ non_standard_exits_enabled_ = true;
}
bool AreAsyncExceptionsThrown() const {
@@ -789,20 +789,9 @@ class Runtime {
}
void SetAsyncExceptionsThrown() {
- DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
+ async_exceptions_thrown_ = true;
}
- // Change state and re-check which interpreter should be used.
- //
- // This must be called whenever there is an event that forces
- // us to use different interpreter (e.g. debugger is attached).
- //
- // Changing the state using the lamda gives us some multihreading safety.
- // It ensures that two calls do not interfere with each other and
- // it makes it possible to DCHECK that thread local flag is correct.
- template<typename Action>
- static void DoAndMaybeSwitchInterpreter(Action lamda);
-
// Returns the build fingerprint, if set. Otherwise an empty string is returned.
std::string GetFingerprint() {
return fingerprint_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 61a751c14f..4ade7c7c64 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -74,7 +74,6 @@
#include "indirect_reference_table-inl.h"
#include "instrumentation.h"
#include "interpreter/interpreter.h"
-#include "interpreter/mterp/mterp.h"
#include "interpreter/shadow_frame-inl.h"
#include "java_frame_root_info.h"
#include "jni/java_vm_ext.h"
@@ -944,10 +943,6 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
RemoveSuspendTrigger();
InitCardTable();
InitTid();
- {
- ScopedTrace trace2("InitInterpreterTls");
- interpreter::InitInterpreterTls(this);
- }
#ifdef __BIONIC__
__get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
@@ -2325,14 +2320,9 @@ Thread::Thread(bool daemon)
tlsPtr_.flip_function = nullptr;
tlsPtr_.thread_local_mark_stack = nullptr;
tls32_.is_transitioning_to_runnable = false;
- tls32_.use_mterp = false;
ResetTlab();
}
-void Thread::NotifyInTheadList() {
- tls32_.use_mterp = interpreter::CanUseMterp();
-}
-
bool Thread::CanLoadClasses() const {
return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable();
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 676bfd81de..5ffaa9f3aa 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -704,13 +704,6 @@ class Thread {
}
template<PointerSize pointer_size>
- static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
- return ThreadOffset<pointer_size>(
- OFFSETOF_MEMBER(Thread, tls32_) +
- OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
- }
-
- template<PointerSize pointer_size>
static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
@@ -1147,10 +1140,6 @@ class Thread {
tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
}
- bool UseMterp() const {
- return tls32_.use_mterp.load();
- }
-
void ResetQuickAllocEntryPointsForThread();
// Returns the remaining space in the TLAB.
@@ -1367,9 +1356,6 @@ class Thread {
// observed to be set at the same time by instrumentation.
void DeleteJPeer(JNIEnv* env);
- void NotifyInTheadList()
- REQUIRES_SHARED(Locks::thread_list_lock_);
-
// Attaches the calling native thread to the runtime, returning the new native peer.
// Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
template <typename PeerAction>
@@ -1571,7 +1557,6 @@ class Thread {
disable_thread_flip_count(0),
user_code_suspend_count(0),
force_interpreter_count(0),
- use_mterp(0),
make_visibly_initialized_counter(0),
define_class_counter(0) {}
@@ -1656,10 +1641,6 @@ class Thread {
// thread must remain in interpreted code as much as possible.
uint32_t force_interpreter_count;
- // True if everything is in the ideal state for fast interpretation.
- // False if we need to switch to the C++ interpreter to handle special cases.
- std::atomic<bool32_t> use_mterp;
-
// Counter for calls to initialize a class that's initialized but not visibly initialized.
// When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
// make initialized classes visibly initialized. This is needed because we usually make
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 84b7384c46..8a48300f42 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1287,7 +1287,6 @@ void ThreadList::Register(Thread* self) {
}
self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
}
- self->NotifyInTheadList();
}
void ThreadList::Unregister(Thread* self) {
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
index 50f9b3317c..ae8f5aabe5 100644
--- a/tools/cpp-define-generator/globals.def
+++ b/tools/cpp-define-generator/globals.def
@@ -22,7 +22,6 @@
#include "dex/modifiers.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
-#include "interpreter/mterp/mterp.h"
#include "interpreter/mterp/nterp.h"
#include "jit/jit.h"
#include "mirror/object.h"
@@ -61,10 +60,10 @@ ASM_DEFINE(JIT_HOTNESS_DISABLE,
art::jit::kJitHotnessDisabled)
ASM_DEFINE(MIN_LARGE_OBJECT_THRESHOLD,
art::gc::Heap::kMinLargeObjectThreshold)
-ASM_DEFINE(MTERP_HANDLER_SIZE,
- art::interpreter::kMterpHandlerSize)
-ASM_DEFINE(MTERP_HANDLER_SIZE_LOG2,
- art::WhichPowerOf2(art::interpreter::kMterpHandlerSize))
+ASM_DEFINE(NTERP_HANDLER_SIZE,
+ art::interpreter::kNterpHandlerSize)
+ASM_DEFINE(NTERP_HANDLER_SIZE_LOG2,
+ art::WhichPowerOf2(art::interpreter::kNterpHandlerSize))
ASM_DEFINE(OBJECT_ALIGNMENT_MASK,
art::kObjectAlignment - 1)
ASM_DEFINE(OBJECT_ALIGNMENT_MASK_TOGGLED,
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index 4fee6df57d..47c26293fa 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -61,8 +61,6 @@ ASM_DEFINE(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST,
art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
ASM_DEFINE(THREAD_SUSPEND_REQUEST,
art::kSuspendRequest)
-ASM_DEFINE(THREAD_USE_MTERP_OFFSET,
- art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET,