Diffstat (limited to 'runtime/interpreter')
-rw-r--r--  runtime/interpreter/interpreter.cc                 | 137
-rw-r--r--  runtime/interpreter/interpreter_cache-inl.h        | 10
-rw-r--r--  runtime/interpreter/interpreter_cache.cc           | 8
-rw-r--r--  runtime/interpreter/interpreter_cache.h            | 2
-rw-r--r--  runtime/interpreter/interpreter_common.cc          | 155
-rw-r--r--  runtime/interpreter/interpreter_common.h           | 414
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.cc      | 678
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.h       | 41
-rw-r--r--  runtime/interpreter/interpreter_switch_impl-inl.h  | 128
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.h      | 12
-rw-r--r--  runtime/interpreter/interpreter_switch_impl0.cc    | 2
-rw-r--r--  runtime/interpreter/interpreter_switch_impl1.cc    | 2
-rw-r--r--  runtime/interpreter/interpreter_switch_impl2.cc    | 30
-rw-r--r--  runtime/interpreter/interpreter_switch_impl3.cc    | 30
-rw-r--r--  runtime/interpreter/mterp/README.txt               | 23
-rw-r--r--  runtime/interpreter/mterp/arm64ng/array.S          | 3
-rw-r--r--  runtime/interpreter/mterp/arm64ng/main.S           | 36
-rw-r--r--  runtime/interpreter/mterp/arm64ng/object.S         | 7
-rw-r--r--  runtime/interpreter/mterp/arm64ng/other.S          | 2
-rw-r--r--  runtime/interpreter/mterp/armng/array.S            | 3
-rw-r--r--  runtime/interpreter/mterp/armng/main.S             | 36
-rw-r--r--  runtime/interpreter/mterp/armng/object.S           | 9
-rw-r--r--  runtime/interpreter/mterp/armng/other.S            | 2
-rw-r--r--  runtime/interpreter/mterp/nterp.cc                 | 343
-rw-r--r--  runtime/interpreter/mterp/nterp.h                  | 1
-rw-r--r--  runtime/interpreter/mterp/nterp_impl.cc            | 84
-rw-r--r--  runtime/interpreter/mterp/nterp_stub.cc            | 53
-rw-r--r--  runtime/interpreter/mterp/riscv64/arithmetic.S     | 248
-rw-r--r--  runtime/interpreter/mterp/riscv64/array.S          | 57
-rw-r--r--  runtime/interpreter/mterp/riscv64/control_flow.S   | 74
-rw-r--r--  runtime/interpreter/mterp/riscv64/floating_point.S | 128
-rw-r--r--  runtime/interpreter/mterp/riscv64/invoke.S         | 56
-rw-r--r--  runtime/interpreter/mterp/riscv64/main.S           | 132
-rw-r--r--  runtime/interpreter/mterp/riscv64/object.S         | 109
-rw-r--r--  runtime/interpreter/mterp/riscv64/other.S          | 167
-rw-r--r--  runtime/interpreter/mterp/x86_64ng/main.S          | 32
-rw-r--r--  runtime/interpreter/mterp/x86_64ng/object.S        | 4
-rw-r--r--  runtime/interpreter/mterp/x86_64ng/other.S         | 2
-rw-r--r--  runtime/interpreter/mterp/x86ng/main.S             | 36
-rw-r--r--  runtime/interpreter/mterp/x86ng/object.S           | 4
-rw-r--r--  runtime/interpreter/mterp/x86ng/other.S            | 2
-rw-r--r--  runtime/interpreter/shadow_frame.h                 | 45
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc           | 74
-rw-r--r--  runtime/interpreter/unstarted_runtime_list.h       | 3
-rw-r--r--  runtime/interpreter/unstarted_runtime_test.cc      | 112
45 files changed, 1931 insertions, 1605 deletions
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 38c94abf06..3ca531f89b 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -231,30 +231,22 @@ static void InterpreterJni(Thread* self,
}
}
+NO_STACK_PROTECTOR
static JValue ExecuteSwitch(Thread* self,
const CodeItemDataAccessor& accessor,
ShadowFrame& shadow_frame,
JValue result_register,
bool interpret_one_instruction) REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->IsActiveTransaction()) {
- if (shadow_frame.GetMethod()->SkipAccessChecks()) {
- return ExecuteSwitchImpl<false, true>(
- self, accessor, shadow_frame, result_register, interpret_one_instruction);
- } else {
- return ExecuteSwitchImpl<true, true>(
- self, accessor, shadow_frame, result_register, interpret_one_instruction);
- }
+ return ExecuteSwitchImpl<true>(
+ self, accessor, shadow_frame, result_register, interpret_one_instruction);
} else {
- if (shadow_frame.GetMethod()->SkipAccessChecks()) {
- return ExecuteSwitchImpl<false, false>(
- self, accessor, shadow_frame, result_register, interpret_one_instruction);
- } else {
- return ExecuteSwitchImpl<true, false>(
- self, accessor, shadow_frame, result_register, interpret_one_instruction);
- }
+ return ExecuteSwitchImpl<false>(
+ self, accessor, shadow_frame, result_register, interpret_one_instruction);
}
}
+NO_STACK_PROTECTOR
static inline JValue Execute(
Thread* self,
const CodeItemDataAccessor& accessor,
@@ -265,41 +257,22 @@ static inline JValue Execute(
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
+ // We cache the result of NeedsDexPcEvents in the shadow frame so we don't need to call
+ // NeedsDexPcEvents on every instruction, which improves performance. NeedsDexPcEvents is only
+ // updated asynchronously in a SuspendAll scope, and any existing shadow frames are updated with
+ // the new value, so it is safe to cache it here.
+ shadow_frame.SetNotifyDexPcMoveEvents(
+ Runtime::Current()->GetInstrumentation()->NeedsDexPcEvents(shadow_frame.GetMethod(), self));
+
if (LIKELY(!from_deoptimize)) { // Entering the method, but not via deoptimization.
if (kIsDebugBuild) {
CHECK_EQ(shadow_frame.GetDexPC(), 0u);
self->AssertNoPendingException();
}
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
ArtMethod *method = shadow_frame.GetMethod();
- if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
- instrumentation->MethodEnterEvent(self, method);
- if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
- // The caller will retry this invoke or ignore the result. Just return immediately without
- // any value.
- DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- JValue ret = JValue();
- PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
- self, shadow_frame, ret, instrumentation, accessor.InsSize());
- return ret;
- }
- if (UNLIKELY(self->IsExceptionPending())) {
- instrumentation->MethodUnwindEvent(self,
- shadow_frame.GetThisObject(accessor.InsSize()),
- method,
- 0);
- JValue ret = JValue();
- if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
- DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
- self, shadow_frame, ret, instrumentation, accessor.InsSize());
- }
- return ret;
- }
- }
-
- if (!stay_in_interpreter && !self->IsForceInterpreter()) {
+ // If we can continue in the JIT and have JITed code available, execute the JITed code.
+ if (!stay_in_interpreter && !self->IsForceInterpreter() && !shadow_frame.GetForcePopFrame()) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
jit->MethodEntered(self, shadow_frame.GetMethod());
@@ -320,6 +293,40 @@ static inline JValue Execute(
}
}
}
+
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasMethodEntryListeners() || shadow_frame.GetForcePopFrame())) {
+ instrumentation->MethodEnterEvent(self, method);
+ if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+ // The caller will retry this invoke or ignore the result. Just return immediately without
+ // any value.
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ JValue ret = JValue();
+ PerformNonStandardReturn(self,
+ shadow_frame,
+ ret,
+ instrumentation,
+ accessor.InsSize(),
+ /* unlock_monitors= */ false);
+ return ret;
+ }
+ if (UNLIKELY(self->IsExceptionPending())) {
+ instrumentation->MethodUnwindEvent(self,
+ method,
+ 0);
+ JValue ret = JValue();
+ if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ PerformNonStandardReturn(self,
+ shadow_frame,
+ ret,
+ instrumentation,
+ accessor.InsSize(),
+ /* unlock_monitors= */ false);
+ }
+ return ret;
+ }
+ }
}
ArtMethod* method = shadow_frame.GetMethod();
@@ -366,7 +373,7 @@ void EnterInterpreterFromInvoke(Thread* self,
num_ins = accessor.InsSize();
} else if (!method->IsInvokable()) {
self->EndAssertNoThreadSuspension(old_cause);
- method->ThrowInvocationTimeError();
+ method->ThrowInvocationTimeError(receiver);
return;
} else {
DCHECK(method->IsNative()) << method->PrettyMethod();
@@ -377,11 +384,9 @@ void EnterInterpreterFromInvoke(Thread* self,
}
}
// Set up shadow frame with matching number of reference slots to vregs.
- ShadowFrame* last_shadow_frame = self->GetManagedStack()->GetTopShadowFrame();
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, /* dex pc */ 0);
+ CREATE_SHADOW_FRAME(num_regs, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
- self->PushShadowFrame(shadow_frame);
size_t cur_reg = num_regs - num_ins;
if (!method->IsStatic()) {
@@ -413,21 +418,10 @@ void EnterInterpreterFromInvoke(Thread* self,
}
}
self->EndAssertNoThreadSuspension(old_cause);
- // Do this after populating the shadow frame in case EnsureInitialized causes a GC.
- if (method->IsStatic()) {
- ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
- if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
- self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
- CHECK(self->IsExceptionPending());
- self->PopShadowFrame();
- return;
- }
- DCHECK(h_class->IsInitializing());
- }
+ if (!EnsureInitialized(self, shadow_frame)) {
+ return;
}
+ self->PushShadowFrame(shadow_frame);
if (LIKELY(!method->IsNative())) {
JValue r = Execute(self, accessor, *shadow_frame, JValue(), stay_in_interpreter);
if (result != nullptr) {
@@ -476,6 +470,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
const uint32_t dex_pc = shadow_frame->GetDexPC();
uint32_t new_dex_pc = dex_pc;
if (UNLIKELY(self->IsExceptionPending())) {
+ DCHECK(self->GetException() != Thread::GetDeoptimizationException());
// If we deoptimize from the QuickExceptionHandler, we already reported the exception throw
// event to the instrumentation. Skip throw listeners for the first frame. The deopt check
// should happen after the throw listener is called as throw listener can trigger a
@@ -514,7 +509,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
new_dex_pc = dex_pc + instr->SizeInCodeUnits();
} else if (instr->IsInvoke()) {
DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
- if (IsStringInit(instr, shadow_frame->GetMethod())) {
+ if (IsStringInit(*instr, shadow_frame->GetMethod())) {
uint16_t this_obj_vreg = GetReceiverRegisterForStringInit(instr);
// Move the StringFactory.newStringFromChars() result into the register representing
// "this object" when invoking the string constructor in the original dex instruction.
@@ -569,6 +564,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
ret_val->SetJ(value.GetJ());
}
+NO_STACK_PROTECTOR
JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame) {
DCHECK_EQ(self, Thread::Current());
@@ -585,6 +581,7 @@ JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor&
return Execute(self, accessor, *shadow_frame, JValue());
}
+NO_STACK_PROTECTOR
void ArtInterpreterToInterpreterBridge(Thread* self,
const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame,
@@ -596,23 +593,6 @@ void ArtInterpreterToInterpreterBridge(Thread* self,
}
self->PushShadowFrame(shadow_frame);
- ArtMethod* method = shadow_frame->GetMethod();
- // Ensure static methods are initialized.
- const bool is_static = method->IsStatic();
- if (is_static) {
- ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
- if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
- self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
- DCHECK(self->IsExceptionPending());
- self->PopShadowFrame();
- return;
- }
- DCHECK(h_class->IsInitializing());
- }
- }
if (LIKELY(!shadow_frame->GetMethod()->IsNative())) {
result->SetJ(Execute(self, accessor, *shadow_frame, JValue()).GetJ());
@@ -620,6 +600,7 @@ void ArtInterpreterToInterpreterBridge(Thread* self,
// We don't expect to be asked to interpret native code (which is entered via a JNI compiler
// generated stub) except during testing and image writing.
CHECK(!Runtime::Current()->IsStarted());
+ bool is_static = shadow_frame->GetMethod()->IsStatic();
ObjPtr<mirror::Object> receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver.Ptr(), args, result);
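
The new comment in Execute() describes a cache-the-query pattern: an instrumentation check that can only change while all threads are suspended is read once on method entry and stored in the frame, so the per-instruction loop only tests a plain bool. A minimal standalone sketch of that pattern, using illustrative names rather than ART's real types:

struct Frame {
  bool notify_dex_pc_move_events = false;  // cached per-frame copy of the query
};

// Stand-in for Instrumentation::NeedsDexPcEvents(); in ART this only changes
// inside a SuspendAll scope, which also re-stamps existing frames.
static bool NeedsDexPcEvents() { return false; }

static void Execute(Frame& frame) {
  frame.notify_dex_pc_move_events = NeedsDexPcEvents();  // query once on entry
  for (int dex_pc = 0; dex_pc < 3; ++dex_pc) {           // hot interpreter loop
    if (frame.notify_dex_pc_move_events) {
      // report a dex-pc move event here
    }
    // ... interpret one instruction ...
  }
}

int main() {
  Frame frame;
  Execute(frame);
  return 0;
}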
diff --git a/runtime/interpreter/interpreter_cache-inl.h b/runtime/interpreter/interpreter_cache-inl.h
index cea8157d26..804d382877 100644
--- a/runtime/interpreter/interpreter_cache-inl.h
+++ b/runtime/interpreter/interpreter_cache-inl.h
@@ -35,13 +35,9 @@ inline bool InterpreterCache::Get(Thread* self, const void* key, /* out */ size_
inline void InterpreterCache::Set(Thread* self, const void* key, size_t value) {
DCHECK(self->GetInterpreterCache() == this) << "Must be called from owning thread";
-
- // For simplicity, only update the cache if weak ref accesses are enabled. If
- // they are disabled, this means the GC is processing the cache, and is
- // reading it concurrently.
- if (kUseReadBarrier && self->GetWeakRefAccessEnabled()) {
- data_[IndexOf(key)] = Entry{key, value};
- }
+ // Simple store works here as the cache is always read/written by the owning
+ // thread only (or in a stop-the-world pause).
+ data_[IndexOf(key)] = Entry{key, value};
}
} // namespace art
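
The simplified Set() relies on the cache being strictly thread-local: only the owning thread (or a stop-the-world pause) ever touches it, so a plain store needs no read barrier or fence. A rough standalone sketch of such a direct-mapped cache; the hash function and entry layout here are illustrative, not ART's exact code:

#include <array>
#include <cstddef>
#include <cstdint>
#include <utility>

class TinyInterpreterCache {
 public:
  using Entry = std::pair<const void*, size_t>;  // {dex instruction ptr, cached value}

  bool Get(const void* key, size_t* value) const {
    const Entry& entry = data_[IndexOf(key)];
    if (entry.first == key) {
      *value = entry.second;
      return true;
    }
    return false;
  }

  void Set(const void* key, size_t value) {
    // Plain store: only the owning thread (or a stop-the-world pause) writes here.
    data_[IndexOf(key)] = Entry{key, value};
  }

 private:
  static constexpr size_t kSize = 256;  // power of two; the header quotes ~75% hit rate
  static size_t IndexOf(const void* key) {
    return (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
  }
  std::array<Entry, kSize> data_{};
};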
diff --git a/runtime/interpreter/interpreter_cache.cc b/runtime/interpreter/interpreter_cache.cc
index 450edbaa4e..7e7b294c17 100644
--- a/runtime/interpreter/interpreter_cache.cc
+++ b/runtime/interpreter/interpreter_cache.cc
@@ -22,7 +22,13 @@ namespace art {
void InterpreterCache::Clear(Thread* owning_thread) {
DCHECK(owning_thread->GetInterpreterCache() == this);
DCHECK(owning_thread == Thread::Current() || owning_thread->IsSuspended());
- data_.fill(Entry{});
+ // Avoid std::fill (and its variants), as the GC thread could be sweeping the
+ // cache concurrently and these functions may clear an entry only partially.
+ for (Entry& entry : data_) {
+ std::atomic<const void*>* atomic_key_addr =
+ reinterpret_cast<std::atomic<const void*>*>(&entry.first);
+ atomic_key_addr->store(nullptr, std::memory_order_relaxed);
+ }
}
} // namespace art
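
The loop above replaces std::fill because std::fill makes no per-element atomicity guarantee: a concurrent reader could observe a half-cleared entry. Storing just the key with one relaxed atomic store keeps each entry's key either intact or null. A sketch under the same assumptions (the reinterpret_cast to an atomic mirrors what the diff itself does):

#include <array>
#include <atomic>
#include <cstddef>
#include <utility>

using Entry = std::pair<const void*, size_t>;

void ClearKeys(std::array<Entry, 256>& data) {
  for (Entry& entry : data) {
    // One relaxed atomic store per key: a concurrent sweep sees either the old
    // pointer or nullptr, never a torn intermediate value.
    auto* atomic_key = reinterpret_cast<std::atomic<const void*>*>(&entry.first);
    atomic_key->store(nullptr, std::memory_order_relaxed);
  }
}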
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index c57d0233a6..8714bc613c 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -47,7 +47,7 @@ class Thread;
class ALIGNED(16) InterpreterCache {
public:
// Aligned since we load the whole entry in single assembly instruction.
- typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
+ using Entry ALIGNED(2 * sizeof(size_t)) = std::pair<const void*, size_t>;
// 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
// Value of 256 has around 75% cache hit rate.
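
The typedef-to-using change is cosmetic; what matters is the ALIGNED(2 * sizeof(size_t)) attribute, since the assembly stubs fetch key and value with a single paired load. A portable way to express and verify that layout (an illustration, not the actual ART declaration):

#include <cstddef>

struct alignas(2 * sizeof(size_t)) Entry {
  const void* key;
  size_t value;
};

static_assert(sizeof(Entry) == 2 * sizeof(size_t),
              "key and value fill exactly one aligned slot");
static_assert(alignof(Entry) == 2 * sizeof(size_t),
              "alignment lets one paired load fetch key and value together");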
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index c8a87c1d75..ac9980f6d3 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -92,10 +92,6 @@ bool ShouldStayInSwitchInterpreter(ArtMethod* method)
}
const void* code = method->GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickInstrumentationEntryPoint()) {
- code = Runtime::Current()->GetInstrumentation()->GetCodeForInvoke(method);
- }
-
return Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(code);
}
@@ -185,7 +181,6 @@ bool MoveToExceptionHandler(Thread* self,
// Exception is not caught by the current method. We will unwind to the
// caller. Notify any instrumentation listener.
instrumentation->MethodUnwindEvent(self,
- shadow_frame.GetThisObject(),
shadow_frame.GetMethod(),
shadow_frame.GetDexPC());
}
@@ -236,14 +231,16 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args) {
// about ALWAYS_INLINE (-Werror, -Wgcc-compat) in definitions.
//
-template <bool is_range, bool do_assignability_check>
+template <bool is_range>
+NO_STACK_PROTECTOR
static ALWAYS_INLINE bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
JValue* result,
uint16_t number_of_inputs,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t vregC,
+ bool string_init) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool is_range>
ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
@@ -255,6 +252,7 @@ ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
// END DECLARATIONS.
+NO_STACK_PROTECTOR
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
ShadowFrame* shadow_frame,
@@ -262,25 +260,6 @@ void ArtInterpreterToCompiledCodeBridge(Thread* self,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
- // Ensure static methods are initialized.
- if (method->IsStatic()) {
- ObjPtr<mirror::Class> declaringClass = method->GetDeclaringClass();
- if (UNLIKELY(!declaringClass->IsVisiblyInitialized())) {
- self->PushShadowFrame(shadow_frame);
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
- self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
- self->PopShadowFrame();
- DCHECK(self->IsExceptionPending());
- return;
- }
- self->PopShadowFrame();
- DCHECK(h_class->IsInitializing());
- // Reload from shadow frame in case the method moved, this is faster than adding a handle.
- method = shadow_frame->GetMethod();
- }
- }
// Basic checks for the arg_offset. If there's no code item, the arg_offset must be 0. Otherwise,
// check that the arg_offset isn't greater than the number of registers. A stronger check is
// difficult since the frame may contain space for all the registers in the method, or only enough
@@ -1018,11 +997,9 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
// Set-up a shadow frame for invoking the bootstrap method handle.
ShadowFrameAllocaUniquePtr bootstrap_frame =
CREATE_SHADOW_FRAME(call_site_type->NumberOfVRegs(),
- nullptr,
referrer,
shadow_frame.GetDexPC());
- ScopedStackedShadowFramePusher pusher(
- self, bootstrap_frame.get(), StackedShadowFrameType::kShadowFrameUnderConstruction);
+ ScopedStackedShadowFramePusher pusher(self, bootstrap_frame.get());
ShadowFrameSetter setter(bootstrap_frame.get(), 0u);
// The first parameter is a MethodHandles lookup instance.
@@ -1205,23 +1182,15 @@ inline void CopyRegisters(ShadowFrame& caller_frame,
}
}
-template <bool is_range,
- bool do_assignability_check>
+template <bool is_range>
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
JValue* result,
uint16_t number_of_inputs,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- uint32_t vregC) {
- bool string_init = false;
- // Replace calls to String.<init> with equivalent StringFactory call.
- if (UNLIKELY(called_method->GetDeclaringClass()->IsStringClass()
- && called_method->IsConstructor())) {
- called_method = WellKnownClasses::StringInitToStringFactory(called_method);
- string_init = true;
- }
-
+ uint32_t vregC,
+ bool string_init) {
// Compute method information.
CodeItemDataAccessor accessor(called_method->DexInstructionData());
// Number of registers for the callee's call frame.
@@ -1288,16 +1257,15 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Allocate shadow frame on the stack.
const char* old_cause = self->StartAssertNoThreadSuspension("DoCallCommon");
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
+ CREATE_SHADOW_FRAME(num_regs, called_method, /* dex pc */ 0);
ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
// Initialize new shadow frame by copying the registers from the callee shadow frame.
- if (do_assignability_check) {
+ if (!shadow_frame.GetMethod()->SkipAccessChecks()) {
// Slow path.
// We might need to do class loading, which incurs a thread state change to kNative. So
// register the shadow frame as under construction and allow suspension again.
- ScopedStackedShadowFramePusher pusher(
- self, new_shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
+ ScopedStackedShadowFramePusher pusher(self, new_shadow_frame);
self->EndAssertNoThreadSuspension(old_cause);
// ArtMethod here is needed to check type information of the call site against the callee.
@@ -1336,7 +1304,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Handle Object references. 1 virtual register slot.
case 'L': {
ObjPtr<mirror::Object> o = shadow_frame.GetVRegReference(src_reg);
- if (do_assignability_check && o != nullptr) {
+ if (o != nullptr) {
const dex::TypeIndex type_idx = params->GetTypeItem(shorty_pos).type_idx_;
ObjPtr<mirror::Class> arg_type = method->GetDexCache()->GetResolvedType(type_idx);
if (arg_type == nullptr) {
@@ -1410,9 +1378,15 @@ static inline bool DoCallCommon(ArtMethod* called_method,
return !self->IsExceptionPending();
}
-template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result) {
+template<bool is_range>
+NO_STACK_PROTECTOR
+bool DoCall(ArtMethod* called_method,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ bool is_string_init,
+ JValue* result) {
// Argument word count.
const uint16_t number_of_inputs =
(is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
@@ -1428,12 +1402,18 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
inst->GetVarArgs(arg, inst_data);
}
- return DoCallCommon<is_range, do_assignability_check>(
- called_method, self, shadow_frame,
- result, number_of_inputs, arg, vregC);
+ return DoCallCommon<is_range>(
+ called_method,
+ self,
+ shadow_frame,
+ result,
+ number_of_inputs,
+ arg,
+ vregC,
+ is_string_init);
}
-template <bool is_range, bool do_access_check, bool transaction_active>
+template <bool is_range, bool transaction_active>
bool DoFilledNewArray(const Instruction* inst,
const ShadowFrame& shadow_frame,
Thread* self,
@@ -1450,6 +1430,7 @@ bool DoFilledNewArray(const Instruction* inst,
return false;
}
uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ bool do_access_check = !shadow_frame.GetMethod()->SkipAccessChecks();
ObjPtr<mirror::Class> array_class = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
shadow_frame.GetMethod(),
self,
@@ -1554,17 +1535,50 @@ void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count
}
}
+void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(shadow_frame->GetForcePopFrame() || Runtime::Current()->IsTransactionAborted());
+ // Unlock all monitors.
+ if (shadow_frame->GetMethod()->MustCountLocks()) {
+ DCHECK(!shadow_frame->GetMethod()->SkipAccessChecks());
+ // Get the monitors from the shadow-frame monitor-count data.
+ shadow_frame->GetLockCountData().VisitMonitors(
+ [&](mirror::Object** obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Since we don't use the 'obj' pointer after the DoMonitorExit, everything should be
+ // fine WRT suspension.
+ DoMonitorExit(self, shadow_frame, *obj);
+ });
+ } else {
+ std::vector<verifier::MethodVerifier::DexLockInfo> locks;
+ verifier::MethodVerifier::FindLocksAtDexPc(shadow_frame->GetMethod(),
+ shadow_frame->GetDexPC(),
+ &locks,
+ Runtime::Current()->GetTargetSdkVersion());
+ for (const auto& reg : locks) {
+ if (UNLIKELY(reg.dex_registers.empty())) {
+ LOG(ERROR) << "Unable to determine reference locked by "
+ << shadow_frame->GetMethod()->PrettyMethod() << " at pc "
+ << shadow_frame->GetDexPC();
+ } else {
+ DoMonitorExit(
+ self, shadow_frame, shadow_frame->GetVRegReference(*reg.dex_registers.begin()));
+ }
+ }
+ }
+}
+
// Explicit DoCall template function declarations.
-#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
- ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data, \
- JValue* result)
-EXPLICIT_DO_CALL_TEMPLATE_DECL(false, false);
-EXPLICIT_DO_CALL_TEMPLATE_DECL(false, true);
-EXPLICIT_DO_CALL_TEMPLATE_DECL(true, false);
-EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
+#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
+ bool DoCall<_is_range>(ArtMethod* method, \
+ Thread* self, \
+ ShadowFrame& shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ bool string_init, \
+ JValue* result)
+EXPLICIT_DO_CALL_TEMPLATE_DECL(false);
+EXPLICIT_DO_CALL_TEMPLATE_DECL(true);
#undef EXPLICIT_DO_CALL_TEMPLATE_DECL
// Explicit DoInvokePolymorphic template function declarations.
@@ -1578,16 +1592,15 @@ EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(true);
#undef EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL
// Explicit DoFilledNewArray template function declarations.
-#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
+#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _transaction_active) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
- const ShadowFrame& shadow_frame, \
- Thread* self, JValue* result)
-#define EXPLICIT_DO_FILLED_NEW_ARRAY_ALL_TEMPLATE_DECL(_transaction_active) \
- EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(false, false, _transaction_active); \
- EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(false, true, _transaction_active); \
- EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(true, false, _transaction_active); \
- EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(true, true, _transaction_active)
+ bool DoFilledNewArray<_is_range_, _transaction_active>(const Instruction* inst, \
+ const ShadowFrame& shadow_frame, \
+ Thread* self, \
+ JValue* result)
+#define EXPLICIT_DO_FILLED_NEW_ARRAY_ALL_TEMPLATE_DECL(_transaction_active) \
+ EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(false, _transaction_active); \
+ EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(true, _transaction_active)
EXPLICIT_DO_FILLED_NEW_ARRAY_ALL_TEMPLATE_DECL(false);
EXPLICIT_DO_FILLED_NEW_ARRAY_ALL_TEMPLATE_DECL(true);
#undef EXPLICIT_DO_FILLED_NEW_ARRAY_ALL_TEMPLATE_DECL
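
The trimmed EXPLICIT_DO_CALL_TEMPLATE_DECL and EXPLICIT_DO_FILLED_NEW_ARRAY macros keep using the usual idiom for templates defined in a .cc file: explicit instantiation definitions stamped out by a macro, so other translation units can link against the few instantiations that exist. A toy version of the idiom (names invented for illustration):

// do_call_toy.cc: the template body lives in this translation unit only.
template <bool kIsRange>
int DoThing(int x) {
  return kIsRange ? x + 1 : x - 1;
}

// Explicit instantiation definitions: these emit DoThing<false> and
// DoThing<true> here, so callers elsewhere only need a declaration.
#define EXPLICIT_DO_THING_TEMPLATE_DECL(_is_range) \
  template int DoThing<_is_range>(int x)
EXPLICIT_DO_THING_TEMPLATE_DECL(false);
EXPLICIT_DO_THING_TEMPLATE_DECL(true);
#undef EXPLICIT_DO_THING_TEMPLATE_DECL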
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0b91120c58..b8d6817904 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -20,7 +20,6 @@
#include "android-base/macros.h"
#include "instrumentation.h"
#include "interpreter.h"
-#include "interpreter_intrinsics.h"
#include "transaction.h"
#include <math.h>
@@ -71,7 +70,6 @@ namespace interpreter {
void ThrowNullPointerExceptionFromInterpreter()
REQUIRES_SHARED(Locks::mutator_lock_);
-template <bool kMonitorCounting>
static inline void DoMonitorEnter(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_) {
@@ -85,28 +83,29 @@ static inline void DoMonitorEnter(Thread* self, ShadowFrame* frame, ObjPtr<mirro
DCHECK(unlocked);
return;
}
- if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ if (frame->GetMethod()->MustCountLocks()) {
+ DCHECK(!frame->GetMethod()->SkipAccessChecks());
frame->GetLockCountData().AddMonitor(self, h_ref.Get());
}
}
-template <bool kMonitorCounting>
static inline void DoMonitorExit(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_) {
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_ref(hs.NewHandle(ref));
h_ref->MonitorExit(self);
- if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ if (frame->GetMethod()->MustCountLocks()) {
+ DCHECK(!frame->GetMethod()->SkipAccessChecks());
frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
}
}
-template <bool kMonitorCounting>
static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_) {
- if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ if (frame->GetMethod()->MustCountLocks()) {
+ DCHECK(!frame->GetMethod()->SkipAccessChecks());
return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self);
}
return true;
@@ -125,9 +124,14 @@ void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count
// Invokes the given method. This is part of the invocation support and is used by DoInvoke,
// DoFastInvoke and DoInvokeVirtualQuick functions.
// Returns true on success, otherwise throws an exception and returns false.
-template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result);
+template<bool is_range>
+bool DoCall(ArtMethod* called_method,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ bool string_init,
+ JValue* result);
// Called by the switch interpreter to know if we can stay in it.
bool ShouldStayInSwitchInterpreter(ArtMethod* method)
@@ -153,54 +157,16 @@ NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
}
-// NO_INLINE so we won't bloat the interpreter with this very cold lock-release code.
-template <bool kMonitorCounting>
-static NO_INLINE void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(shadow_frame->GetForcePopFrame() ||
- Runtime::Current()->IsTransactionAborted());
- // Unlock all monitors.
- if (kMonitorCounting && shadow_frame->GetMethod()->MustCountLocks()) {
- // Get the monitors from the shadow-frame monitor-count data.
- shadow_frame->GetLockCountData().VisitMonitors(
- [&](mirror::Object** obj) REQUIRES_SHARED(Locks::mutator_lock_) {
- // Since we don't use the 'obj' pointer after the DoMonitorExit everything should be fine
- // WRT suspension.
- DoMonitorExit<kMonitorCounting>(self, shadow_frame, *obj);
- });
- } else {
- std::vector<verifier::MethodVerifier::DexLockInfo> locks;
- verifier::MethodVerifier::FindLocksAtDexPc(shadow_frame->GetMethod(),
- shadow_frame->GetDexPC(),
- &locks,
- Runtime::Current()->GetTargetSdkVersion());
- for (const auto& reg : locks) {
- if (UNLIKELY(reg.dex_registers.empty())) {
- LOG(ERROR) << "Unable to determine reference locked by "
- << shadow_frame->GetMethod()->PrettyMethod() << " at pc "
- << shadow_frame->GetDexPC();
- } else {
- DoMonitorExit<kMonitorCounting>(
- self, shadow_frame, shadow_frame->GetVRegReference(*reg.dex_registers.begin()));
- }
- }
- }
-}
-
-enum class MonitorState {
- kNoMonitorsLocked,
- kCountingMonitors,
- kNormalMonitors,
-};
+COLD_ATTR void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
+ REQUIRES_SHARED(Locks::mutator_lock_);
-template<MonitorState kMonitorState>
static inline ALWAYS_INLINE void PerformNonStandardReturn(
Thread* self,
ShadowFrame& frame,
JValue& result,
const instrumentation::Instrumentation* instrumentation,
- uint16_t num_dex_inst) REQUIRES_SHARED(Locks::mutator_lock_) {
- static constexpr bool kMonitorCounting = (kMonitorState == MonitorState::kCountingMonitors);
+ uint16_t num_dex_inst,
+ bool unlock_monitors = true) REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Object> thiz(frame.GetThisObject(num_dex_inst));
StackHandleScope<1u> hs(self);
if (UNLIKELY(self->IsExceptionPending())) {
@@ -208,10 +174,10 @@ static inline ALWAYS_INLINE void PerformNonStandardReturn(
<< self->GetException()->Dump();
self->ClearException();
}
- if (kMonitorState != MonitorState::kNoMonitorsLocked) {
- UnlockHeldMonitors<kMonitorCounting>(self, &frame);
+ if (unlock_monitors) {
+ UnlockHeldMonitors(self, &frame);
+ DoMonitorCheckOnExit(self, &frame);
}
- DoMonitorCheckOnExit<kMonitorCounting>(self, &frame);
result = JValue();
if (UNLIKELY(NeedsMethodExitEvent(instrumentation))) {
SendMethodExitEvents(self, instrumentation, frame, frame.GetMethod(), result);
@@ -220,7 +186,7 @@ static inline ALWAYS_INLINE void PerformNonStandardReturn(
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp>
+template<InvokeType type, bool is_range>
static ALWAYS_INLINE bool DoInvoke(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
@@ -231,63 +197,20 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
if (UNLIKELY(self->ObserveAsyncException())) {
return false;
}
- const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ const uint32_t vregC = is_range ? inst->VRegC_3rc() : inst->VRegC_35c();
+ ObjPtr<mirror::Object> obj = type == kStatic ? nullptr : shadow_frame.GetVRegReference(vregC);
ArtMethod* sf_method = shadow_frame.GetMethod();
-
- // Try to find the method in small thread-local cache first (only used when
- // nterp is not used as mterp and nterp use the cache in an incompatible way).
- InterpreterCache* tls_cache = self->GetInterpreterCache();
- size_t tls_value;
- ArtMethod* resolved_method;
- if (!IsNterpSupported() && LIKELY(tls_cache->Get(self, inst, &tls_value))) {
- resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
- } else {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- constexpr ClassLinker::ResolveMode resolve_mode =
- do_access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
- : ClassLinker::ResolveMode::kNoChecks;
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, sf_method, type);
- if (UNLIKELY(resolved_method == nullptr)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- }
- if (!IsNterpSupported()) {
- tls_cache->Set(self, inst, reinterpret_cast<size_t>(resolved_method));
- }
- }
-
- // Null pointer check and virtual method resolution.
- ObjPtr<mirror::Object> receiver =
- (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
- ArtMethod* called_method;
- called_method = FindMethodToCall<type, do_access_check>(
- method_idx, resolved_method, &receiver, sf_method, self);
- if (UNLIKELY(called_method == nullptr)) {
- CHECK(self->IsExceptionPending());
+ bool string_init = false;
+ ArtMethod* called_method = FindMethodToCall<type>(
+ self, sf_method, &obj, *inst, /* only_lookup_tls_cache= */ false, &string_init);
+ if (called_method == nullptr) {
+ DCHECK(self->IsExceptionPending());
result->SetJ(0);
return false;
}
- if (UNLIKELY(!called_method->IsInvokable())) {
- called_method->ThrowInvocationTimeError();
- result->SetJ(0);
- return false;
- }
-
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (is_mterp && !is_range && called_method->IsIntrinsic()) {
- if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
- shadow_frame.GetResultRegister())) {
- if (jit != nullptr && sf_method != nullptr) {
- jit->NotifyInterpreterToCompiledCodeTransition(self, sf_method);
- }
- return !self->IsExceptionPending();
- }
- }
- return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
- result);
+ return DoCall<is_range>(
+ called_method, self, shadow_frame, inst, inst_data, string_init, result);
}
static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
@@ -386,24 +309,77 @@ ALWAYS_INLINE static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint3
return field_value;
}
+extern "C" size_t NterpGetStaticField(Thread* self,
+ ArtMethod* caller,
+ const uint16_t* dex_pc_ptr,
+ size_t resolve_field_type);
+
+extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
+ ArtMethod* caller,
+ const uint16_t* dex_pc_ptr,
+ size_t resolve_field_type);
+
+static inline void GetFieldInfo(Thread* self,
+ ArtMethod* caller,
+ const uint16_t* dex_pc_ptr,
+ bool is_static,
+ bool resolve_field_type,
+ ArtField** field,
+ bool* is_volatile,
+ MemberOffset* offset) {
+ size_t tls_value = 0u;
+ if (!self->GetInterpreterCache()->Get(self, dex_pc_ptr, &tls_value)) {
+ if (is_static) {
+ tls_value = NterpGetStaticField(self, caller, dex_pc_ptr, resolve_field_type);
+ } else {
+ tls_value = NterpGetInstanceFieldOffset(self, caller, dex_pc_ptr, resolve_field_type);
+ }
+
+ if (self->IsExceptionPending()) {
+ return;
+ }
+ }
+
+ if (is_static) {
+ DCHECK_NE(tls_value, 0u);
+ *is_volatile = ((tls_value & 1) != 0);
+ *field = reinterpret_cast<ArtField*>(tls_value & ~static_cast<size_t>(1u));
+ *offset = (*field)->GetOffset();
+ } else {
+ *is_volatile = (static_cast<int32_t>(tls_value) < 0);
+ *offset = MemberOffset(std::abs(static_cast<int32_t>(tls_value)));
+ }
+}
+
// Handles iget-XXX and sget-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
+template<FindFieldType find_type,
+ Primitive::Type field_type,
bool transaction_active = false>
-ALWAYS_INLINE bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
+ALWAYS_INLINE bool DoFieldGet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_) {
const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
- const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtMethod* method = shadow_frame.GetMethod();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(
- field_idx, method, self, Primitive::ComponentSize(field_type));
- if (UNLIKELY(f == nullptr)) {
- CHECK(self->IsExceptionPending());
+ bool should_report = Runtime::Current()->GetInstrumentation()->HasFieldReadListeners();
+ ArtField* field = nullptr;
+ MemberOffset offset(0u);
+ bool is_volatile;
+ GetFieldInfo(self,
+ shadow_frame.GetMethod(),
+ reinterpret_cast<const uint16_t*>(inst),
+ is_static,
+ /*resolve_field_type=*/ false,
+ &field,
+ &is_volatile,
+ &offset);
+ if (self->IsExceptionPending()) {
return false;
}
+
ObjPtr<mirror::Object> obj;
if (is_static) {
- obj = f->GetDeclaringClass();
+ obj = field->GetDeclaringClass();
if (transaction_active) {
if (Runtime::Current()->GetTransaction()->ReadConstraint(obj)) {
Runtime::Current()->AbortTransactionAndThrowAbortError(self, "Can't read static fields of "
@@ -413,40 +389,57 @@ ALWAYS_INLINE bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Ins
}
} else {
obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(f, method, true);
- return false;
+ if (should_report || obj == nullptr) {
+ field = ResolveFieldWithAccessChecks(self,
+ Runtime::Current()->GetClassLinker(),
+ inst->VRegC_22c(),
+ shadow_frame.GetMethod(),
+ /* is_static= */ false,
+ /* is_put= */ false,
+ /* resolve_field_type= */ false);
+ if (obj == nullptr) {
+ ThrowNullPointerExceptionForFieldAccess(
+ field, shadow_frame.GetMethod(), /* is_read= */ true);
+ return false;
+ }
+ // Reload in case suspension happened during field resolution.
+ obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
}
}
+ uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
JValue result;
- if (UNLIKELY(!DoFieldGetCommon<field_type>(self, shadow_frame, obj, f, &result))) {
- // Instrumentation threw an error!
- CHECK(self->IsExceptionPending());
- return false;
+ if (should_report) {
+ DCHECK(field != nullptr);
+ if (UNLIKELY(!DoFieldGetCommon<field_type>(self, shadow_frame, obj, field, &result))) {
+ // Instrumentation threw an error!
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
}
- uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+
+#define FIELD_GET(prim, type, jtype, vreg) \
+ case Primitive::kPrim ##prim: \
+ shadow_frame.SetVReg ##vreg(vregA, \
+ should_report ? result.Get ##jtype() \
+ : is_volatile ? obj->GetField ## type ## Volatile(offset) \
+ : obj->GetField ##type(offset)); \
+ break;
+
switch (field_type) {
- case Primitive::kPrimBoolean:
- shadow_frame.SetVReg(vregA, result.GetZ());
- break;
- case Primitive::kPrimByte:
- shadow_frame.SetVReg(vregA, result.GetB());
- break;
- case Primitive::kPrimChar:
- shadow_frame.SetVReg(vregA, result.GetC());
- break;
- case Primitive::kPrimShort:
- shadow_frame.SetVReg(vregA, result.GetS());
- break;
- case Primitive::kPrimInt:
- shadow_frame.SetVReg(vregA, result.GetI());
- break;
- case Primitive::kPrimLong:
- shadow_frame.SetVRegLong(vregA, result.GetJ());
- break;
+ FIELD_GET(Boolean, Boolean, Z, )
+ FIELD_GET(Byte, Byte, B, )
+ FIELD_GET(Char, Char, C, )
+ FIELD_GET(Short, Short, S, )
+ FIELD_GET(Int, 32, I, )
+ FIELD_GET(Long, 64, J, Long)
+#undef FIELD_GET
case Primitive::kPrimNot:
- shadow_frame.SetVRegReference(vregA, result.GetL());
+ shadow_frame.SetVRegReference(
+ vregA,
+ should_report ? result.GetL()
+ : is_volatile ? obj->GetFieldObjectVolatile<mirror::Object>(offset)
+ : obj->GetFieldObject<mirror::Object>(offset));
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
@@ -485,36 +478,57 @@ static inline bool CheckWriteValueConstraint(Thread* self, ObjPtr<mirror::Object
// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
- bool transaction_active>
-ALWAYS_INLINE bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data)
+template<FindFieldType find_type, Primitive::Type field_type, bool transaction_active>
+ALWAYS_INLINE bool DoFieldPut(Thread* self,
+ const ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const bool do_assignability_check = do_access_check;
+ bool should_report = Runtime::Current()->GetInstrumentation()->HasFieldWriteListeners();
bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
- uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtMethod* method = shadow_frame.GetMethod();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(
- field_idx, method, self, Primitive::ComponentSize(field_type));
- if (UNLIKELY(f == nullptr)) {
- CHECK(self->IsExceptionPending());
+ uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+ bool resolve_field_type = (shadow_frame.GetVRegReference(vregA) != nullptr);
+ ArtField* field = nullptr;
+ MemberOffset offset(0u);
+ bool is_volatile;
+ GetFieldInfo(self,
+ shadow_frame.GetMethod(),
+ reinterpret_cast<const uint16_t*>(inst),
+ is_static,
+ resolve_field_type,
+ &field,
+ &is_volatile,
+ &offset);
+ if (self->IsExceptionPending()) {
return false;
}
+
ObjPtr<mirror::Object> obj;
if (is_static) {
- obj = f->GetDeclaringClass();
+ obj = field->GetDeclaringClass();
} else {
obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(f, method, false);
- return false;
+ if (should_report || obj == nullptr) {
+ field = ResolveFieldWithAccessChecks(self,
+ Runtime::Current()->GetClassLinker(),
+ inst->VRegC_22c(),
+ shadow_frame.GetMethod(),
+ /* is_static= */ false,
+ /* is_put= */ true,
+ resolve_field_type);
+ if (UNLIKELY(obj == nullptr)) {
+ ThrowNullPointerExceptionForFieldAccess(
+ field, shadow_frame.GetMethod(), /* is_read= */ false);
+ return false;
+ }
+ // Reload in case suspension happened during field resolution.
+ obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
}
}
if (transaction_active && !CheckWriteConstraint(self, obj)) {
return false;
}
- uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
JValue value = GetFieldValue<field_type>(shadow_frame, vregA);
if (transaction_active &&
@@ -522,12 +536,43 @@ ALWAYS_INLINE bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
!CheckWriteValueConstraint(self, value.GetL())) {
return false;
}
+ if (should_report) {
+ return DoFieldPutCommon<field_type, transaction_active>(self,
+ shadow_frame,
+ obj,
+ field,
+ value);
+ }
+#define FIELD_SET(prim, type, jtype) \
+ case Primitive::kPrim ## prim: \
+ if (is_volatile) { \
+ obj->SetField ## type ## Volatile<transaction_active>(offset, value.Get ## jtype()); \
+ } else { \
+ obj->SetField ## type<transaction_active>(offset, value.Get ## jtype()); \
+ } \
+ break;
+
+ switch (field_type) {
+ FIELD_SET(Boolean, Boolean, Z)
+ FIELD_SET(Byte, Byte, B)
+ FIELD_SET(Char, Char, C)
+ FIELD_SET(Short, Short, S)
+ FIELD_SET(Int, 32, I)
+ FIELD_SET(Long, 64, J)
+ FIELD_SET(Not, Object, L)
+ case Primitive::kPrimVoid: {
+ LOG(FATAL) << "Unreachable " << field_type;
+ break;
+ }
+ }
+#undef FIELD_SET
- return DoFieldPutCommon<field_type, do_assignability_check, transaction_active>(self,
- shadow_frame,
- obj,
- f,
- value);
+ if (transaction_active) {
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return false;
+ }
+ }
+ return true;
}
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
@@ -631,13 +676,16 @@ static inline bool DoLongRemainder(ShadowFrame& shadow_frame,
// Handles filled-new-array and filled-new-array-range instructions.
// Returns true on success, otherwise throws an exception and returns false.
-template <bool is_range, bool do_access_check, bool transaction_active>
-bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
- Thread* self, JValue* result);
+template <bool is_range, bool transaction_active>
+bool DoFilledNewArray(const Instruction* inst,
+ const ShadowFrame& shadow_frame,
+ Thread* self,
+ JValue* result);
// Handles packed-switch instruction.
// Returns the branch offset to the next instruction to execute.
-static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
+static inline int32_t DoPackedSwitch(const Instruction* inst,
+ const ShadowFrame& shadow_frame,
uint16_t inst_data)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
@@ -755,34 +803,6 @@ void ArtInterpreterToCompiledCodeBridge(Thread* self,
uint16_t arg_offset,
JValue* result);
-static inline bool IsStringInit(const DexFile* dex_file, uint32_t method_idx)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
- const char* class_name = dex_file->StringByTypeIdx(method_id.class_idx_);
- const char* method_name = dex_file->GetMethodName(method_id);
- // Instead of calling ResolveMethod() which has suspend point and can trigger
- // GC, look up the method symbolically.
- // Compare method's class name and method name against string init.
- // It's ok since it's not allowed to create your own java/lang/String.
- // TODO: verify that assumption.
- if ((strcmp(class_name, "Ljava/lang/String;") == 0) &&
- (strcmp(method_name, "<init>") == 0)) {
- return true;
- }
- return false;
-}
-
-static inline bool IsStringInit(const Instruction* instr, ArtMethod* caller)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (instr->Opcode() == Instruction::INVOKE_DIRECT ||
- instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
- uint16_t callee_method_idx = (instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) ?
- instr->VRegB_3rc() : instr->VRegB_35c();
- return IsStringInit(caller->GetDexFile(), callee_method_idx);
- }
- return false;
-}
-
// Set string value created from StringFactory.newStringFromXXX() into all aliases of
// StringFactory.newEmptyString().
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
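
GetFieldInfo above decodes two packed encodings of the cached tls_value: for static fields the word is the ArtField pointer with the volatile flag in bit 0, and for instance fields it is the member offset, stored negated when the field is volatile. A self-contained sketch of that encode/decode scheme (the helper names are invented):

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Static field: pointer plus volatile flag in bit 0 (pointers are aligned,
// so bit 0 is free).
size_t EncodeStaticField(void* field, bool is_volatile) {
  return reinterpret_cast<size_t>(field) | (is_volatile ? 1u : 0u);
}
void DecodeStaticField(size_t word, void** field, bool* is_volatile) {
  *is_volatile = (word & 1) != 0;
  *field = reinterpret_cast<void*>(word & ~static_cast<size_t>(1u));
}

// Instance field: signed offset; a negative value marks the field volatile.
size_t EncodeInstanceField(int32_t offset, bool is_volatile) {
  return static_cast<size_t>(static_cast<uint32_t>(is_volatile ? -offset : offset));
}
void DecodeInstanceField(size_t word, int32_t* offset, bool* is_volatile) {
  int32_t value = static_cast<int32_t>(word);
  *is_volatile = value < 0;
  *offset = std::abs(value);
}

int main() {
  int32_t offset;
  bool is_volatile;
  DecodeInstanceField(EncodeInstanceField(24, /*is_volatile=*/true), &offset, &is_volatile);
  assert(offset == 24 && is_volatile);
  return 0;
}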
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
deleted file mode 100644
index c8344bc760..0000000000
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ /dev/null
@@ -1,678 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "interpreter/interpreter_intrinsics.h"
-
-#include "dex/dex_instruction.h"
-#include "intrinsics_enum.h"
-#include "interpreter/interpreter_common.h"
-
-namespace art {
-namespace interpreter {
-
-
-#define BINARY_INTRINSIC(name, op, get1, get2, set) \
-static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- result_register->set(op(shadow_frame->get1, shadow_frame->get2)); \
- return true; \
-}
-
-#define BINARY_II_INTRINSIC(name, op, set) \
- BINARY_INTRINSIC(name, op, GetVReg(arg[0]), GetVReg(arg[1]), set)
-
-#define BINARY_JJ_INTRINSIC(name, op, set) \
- BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVRegLong(arg[2]), set)
-
-#define BINARY_JI_INTRINSIC(name, op, set) \
- BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVReg(arg[2]), set)
-
-#define UNARY_INTRINSIC(name, op, get, set) \
-static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- result_register->set(op(shadow_frame->get(arg[0]))); \
- return true; \
-}
-
-
-// java.lang.Integer.reverse(I)I
-UNARY_INTRINSIC(MterpIntegerReverse, ReverseBits32, GetVReg, SetI);
-
-// java.lang.Integer.reverseBytes(I)I
-UNARY_INTRINSIC(MterpIntegerReverseBytes, BSWAP, GetVReg, SetI);
-
-// java.lang.Integer.bitCount(I)I
-UNARY_INTRINSIC(MterpIntegerBitCount, POPCOUNT, GetVReg, SetI);
-
-// java.lang.Integer.compare(II)I
-BINARY_II_INTRINSIC(MterpIntegerCompare, Compare, SetI);
-
-// java.lang.Integer.highestOneBit(I)I
-UNARY_INTRINSIC(MterpIntegerHighestOneBit, HighestOneBitValue, GetVReg, SetI);
-
-// java.lang.Integer.lowestOneBit(I)I
-UNARY_INTRINSIC(MterpIntegerLowestOneBit, LowestOneBitValue, GetVReg, SetI);
-
-// java.lang.Integer.numberOfLeadingZeros(I)I
-UNARY_INTRINSIC(MterpIntegerNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVReg, SetI);
-
-// java.lang.Integer.numberOfTrailingZeros(I)I
-UNARY_INTRINSIC(MterpIntegerNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVReg, SetI);
-
-// java.lang.Integer.rotateRight(II)I
-BINARY_II_INTRINSIC(MterpIntegerRotateRight, (Rot<int32_t, false>), SetI);
-
-// java.lang.Integer.rotateLeft(II)I
-BINARY_II_INTRINSIC(MterpIntegerRotateLeft, (Rot<int32_t, true>), SetI);
-
-// java.lang.Integer.signum(I)I
-UNARY_INTRINSIC(MterpIntegerSignum, Signum, GetVReg, SetI);
-
-// java.lang.Long.reverse(J)J
-UNARY_INTRINSIC(MterpLongReverse, ReverseBits64, GetVRegLong, SetJ);
-
-// java.lang.Long.reverseBytes(J)J
-UNARY_INTRINSIC(MterpLongReverseBytes, BSWAP, GetVRegLong, SetJ);
-
-// java.lang.Long.bitCount(J)I
-UNARY_INTRINSIC(MterpLongBitCount, POPCOUNT, GetVRegLong, SetI);
-
-// java.lang.Long.compare(JJ)I
-BINARY_JJ_INTRINSIC(MterpLongCompare, Compare, SetI);
-
-// java.lang.Long.highestOneBit(J)J
-UNARY_INTRINSIC(MterpLongHighestOneBit, HighestOneBitValue, GetVRegLong, SetJ);
-
-// java.lang.Long.lowestOneBit(J)J
-UNARY_INTRINSIC(MterpLongLowestOneBit, LowestOneBitValue, GetVRegLong, SetJ);
-
-// java.lang.Long.numberOfLeadingZeros(J)I
-UNARY_INTRINSIC(MterpLongNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVRegLong, SetJ);
-
-// java.lang.Long.numberOfTrailingZeros(J)I
-UNARY_INTRINSIC(MterpLongNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVRegLong, SetJ);
-
-// java.lang.Long.rotateRight(JI)J
-BINARY_JI_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
-
-// java.lang.Long.rotateLeft(JI)J
-BINARY_JI_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
-
-// java.lang.Long.signum(J)I
-UNARY_INTRINSIC(MterpLongSignum, Signum, GetVRegLong, SetI);
-
-// java.lang.Short.reverseBytes(S)S
-UNARY_INTRINSIC(MterpShortReverseBytes, BSWAP, GetVRegShort, SetS);
-
-// java.lang.Math.min(II)I
-BINARY_II_INTRINSIC(MterpMathMinIntInt, std::min, SetI);
-
-// java.lang.Math.min(JJ)J
-BINARY_JJ_INTRINSIC(MterpMathMinLongLong, std::min, SetJ);
-
-// java.lang.Math.max(II)I
-BINARY_II_INTRINSIC(MterpMathMaxIntInt, std::max, SetI);
-
-// java.lang.Math.max(JJ)J
-BINARY_JJ_INTRINSIC(MterpMathMaxLongLong, std::max, SetJ);
-
-// java.lang.Math.abs(I)I
-UNARY_INTRINSIC(MterpMathAbsInt, std::abs, GetVReg, SetI);
-
-// java.lang.Math.abs(J)J
-UNARY_INTRINSIC(MterpMathAbsLong, std::abs, GetVRegLong, SetJ);
-
-// java.lang.Math.abs(F)F
-UNARY_INTRINSIC(MterpMathAbsFloat, 0x7fffffff&, GetVReg, SetI);
-
-// java.lang.Math.abs(D)D
-UNARY_INTRINSIC(MterpMathAbsDouble, INT64_C(0x7fffffffffffffff)&, GetVRegLong, SetJ);
-
-// java.lang.Math.sqrt(D)D
-UNARY_INTRINSIC(MterpMathSqrt, std::sqrt, GetVRegDouble, SetD);
-
-// java.lang.Math.ceil(D)D
-UNARY_INTRINSIC(MterpMathCeil, std::ceil, GetVRegDouble, SetD);
-
-// java.lang.Math.floor(D)D
-UNARY_INTRINSIC(MterpMathFloor, std::floor, GetVRegDouble, SetD);
-
-// java.lang.Math.sin(D)D
-UNARY_INTRINSIC(MterpMathSin, std::sin, GetVRegDouble, SetD);
-
-// java.lang.Math.cos(D)D
-UNARY_INTRINSIC(MterpMathCos, std::cos, GetVRegDouble, SetD);
-
-// java.lang.Math.tan(D)D
-UNARY_INTRINSIC(MterpMathTan, std::tan, GetVRegDouble, SetD);
-
-// java.lang.Math.asin(D)D
-UNARY_INTRINSIC(MterpMathAsin, std::asin, GetVRegDouble, SetD);
-
-// java.lang.Math.acos(D)D
-UNARY_INTRINSIC(MterpMathAcos, std::acos, GetVRegDouble, SetD);
-
-// java.lang.Math.atan(D)D
-UNARY_INTRINSIC(MterpMathAtan, std::atan, GetVRegDouble, SetD);
-
-// java.lang.String.charAt(I)C
-static ALWAYS_INLINE bool MterpStringCharAt(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- int length = str->GetLength();
- int index = shadow_frame->GetVReg(arg[1]);
- uint16_t res;
- if (UNLIKELY(index < 0) || (index >= length)) {
- return false; // Punt and let non-intrinsic version deal with the throw.
- }
- if (str->IsCompressed()) {
- res = str->GetValueCompressed()[index];
- } else {
- res = str->GetValue()[index];
- }
- result_register->SetC(res);
- return true;
-}
-
-// java.lang.String.compareTo(Ljava/lang/String;)I
-static ALWAYS_INLINE bool MterpStringCompareTo(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- ObjPtr<mirror::Object> arg1 = shadow_frame->GetVRegReference(arg[1]);
- if (arg1 == nullptr) {
- return false;
- }
- result_register->SetI(str->CompareTo(arg1->AsString()));
- return true;
-}
-
-#define STRING_INDEXOF_INTRINSIC(name, starting_pos) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
- int ch = shadow_frame->GetVReg(arg[1]); \
- if (ch >= 0x10000) { \
- /* Punt if supplementary char. */ \
- return false; \
- } \
- result_register->SetI(str->FastIndexOf(ch, starting_pos)); \
- return true; \
-}
-
-// java.lang.String.indexOf(I)I
-STRING_INDEXOF_INTRINSIC(StringIndexOf, 0);
-
-// java.lang.String.indexOf(II)I
-STRING_INDEXOF_INTRINSIC(StringIndexOfAfter, shadow_frame->GetVReg(arg[2]));
-
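A note on the "ch >= 0x10000" punt in the macro above: code points in the supplementary planes (U+10000 and up) occupy two UTF-16 code units, so a scan that matches one code unit at a time can never find them, and the call falls back to the non-intrinsic path. A minimal standalone sketch of the boundary (illustrative only, not part of the patch):

    #include <cstdint>

    constexpr bool NeedsSurrogatePair(int32_t code_point) {
      return code_point >= 0x10000;  // supplementary planes take two UTF-16 units
    }
    static_assert(NeedsSurrogatePair(0x1F600), "emoji: surrogate pair, slow path");
    static_assert(!NeedsSurrogatePair(0x0041), "'A': single code unit, fast path");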
-#define SIMPLE_STRING_INTRINSIC(name, operation) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
- result_register->operation; \
- return true; \
-}
-
-// java.lang.String.isEmpty()Z
-SIMPLE_STRING_INTRINSIC(StringIsEmpty, SetZ(str->GetLength() == 0))
-
-// java.lang.String.length()I
-SIMPLE_STRING_INTRINSIC(StringLength, SetI(str->GetLength()))
-
-// java.lang.String.getCharsNoCheck(II[CI)V
-static ALWAYS_INLINE bool MterpStringGetCharsNoCheck(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Start, end & index already checked by caller - won't throw. Destination is uncompressed.
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- int32_t start = shadow_frame->GetVReg(arg[1]);
- int32_t end = shadow_frame->GetVReg(arg[2]);
- int32_t index = shadow_frame->GetVReg(arg[4]);
- ObjPtr<mirror::CharArray> array = shadow_frame->GetVRegReference(arg[3])->AsCharArray();
- uint16_t* dst = array->GetData() + index;
- int32_t len = (end - start);
- if (str->IsCompressed()) {
- const uint8_t* src_8 = str->GetValueCompressed() + start;
- for (int i = 0; i < len; i++) {
- dst[i] = src_8[i];
- }
- } else {
- uint16_t* src_16 = str->GetValue() + start;
- memcpy(dst, src_16, len * sizeof(uint16_t));
- }
- return true;
-}
-
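The compressed branch in the function above is a widening copy, not a plain memcpy: each Latin-1 byte of the compressed string becomes one 16-bit code unit in the destination array. A standalone sketch of the same loop:

    #include <cstddef>
    #include <cstdint>

    void WidenLatin1(const uint8_t* src, uint16_t* dst, size_t len) {
      for (size_t i = 0; i < len; ++i) {
        dst[i] = src[i];  // zero-extend each 8-bit char to a UTF-16 code unit
      }
    }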
-// java.lang.String.equals(Ljava/lang/Object;)Z
-static ALWAYS_INLINE bool MterpStringEquals(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- ObjPtr<mirror::Object> obj = shadow_frame->GetVRegReference(arg[1]);
- bool res = false; // Assume not equal.
- if ((obj != nullptr) && obj->IsString()) {
- ObjPtr<mirror::String> str2 = obj->AsString();
- if (str->GetCount() == str2->GetCount()) {
- // Length & compression status are same. Can use block compare.
- void* bytes1;
- void* bytes2;
- int len = str->GetLength();
- if (str->IsCompressed()) {
- bytes1 = str->GetValueCompressed();
- bytes2 = str2->GetValueCompressed();
- } else {
- len *= sizeof(uint16_t);
- bytes1 = str->GetValue();
- bytes2 = str2->GetValue();
- }
- res = (memcmp(bytes1, bytes2, len) == 0);
- }
- }
- result_register->SetZ(res);
- return true;
-}
-
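The block compare above is sound because mirror::String packs the length and the compression flag into the single count field, so one integer comparison validates both at once. A sketch of that invariant, assuming the flag sits in the low bit (the exact bit layout here is an illustration, not a quote of the runtime headers):

    #include <cstdint>

    constexpr int32_t PackCount(int32_t length, bool compressed) {
      return (length << 1) | (compressed ? 0 : 1);  // assumed flag polarity
    }
    // Equal packed counts imply equal lengths and equal encodings, so the
    // two backing stores can be compared with a single memcmp.
    static_assert(PackCount(3, /*compressed=*/ true) != PackCount(3, /*compressed=*/ false));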
-#define VARHANDLE_FENCE_INTRINSIC(name, std_memory_operation) \
-static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, \
- const Instruction* inst ATTRIBUTE_UNUSED, \
- uint16_t inst_data ATTRIBUTE_UNUSED, \
- JValue* result_register ATTRIBUTE_UNUSED) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- std::atomic_thread_fence(std_memory_operation); \
- return true; \
-}
-
-// The VarHandle fence methods are static (unlike jdk.internal.misc.Unsafe versions).
-// The fences for the LoadLoadFence and StoreStoreFence are stronger
-// than strictly required, but the impact should be marginal.
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleFullFence, std::memory_order_seq_cst)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleAcquireFence, std::memory_order_acquire)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleReleaseFence, std::memory_order_release)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleLoadLoadFence, std::memory_order_acquire)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleStoreStoreFence, std::memory_order_release)
-
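To make the "stronger than strictly required" remark concrete: an acquire fence orders earlier loads against all later memory operations, and a release fence orders all earlier memory operations against later stores, so each is a superset of the pure load-load or store-store ordering those two methods need. Minimal illustration (not ART code):

    #include <atomic>

    void LoadLoadFence()   { std::atomic_thread_fence(std::memory_order_acquire); }
    void StoreStoreFence() { std::atomic_thread_fence(std::memory_order_release); }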
-#define METHOD_HANDLE_INVOKE_INTRINSIC(name) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) { \
- return DoInvokePolymorphic<false>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
- } else { \
- return DoInvokePolymorphic<true>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
- } \
-}
-
-METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvokeExact)
-METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvoke)
-
-#define VAR_HANDLE_ACCESSOR_INTRINSIC(name) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- return Do##name(Thread::Current(), *shadow_frame, inst, inst_data, result); \
-}
-
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchange)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAdd)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAnd)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOr)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXor)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetOpaque)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetVolatile)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetOpaque)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetVolatile)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetPlain)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetRelease)
-
-static ALWAYS_INLINE bool MterpReachabilityFence(ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
- const Instruction* inst ATTRIBUTE_UNUSED,
- uint16_t inst_data ATTRIBUTE_UNUSED,
- JValue* result_register ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Do nothing; its only purpose is to keep the argument reference live
- // at preceding suspend points. That's automatic in the interpreter.
- return true;
-}
-
-// Macro to help keep track of what's left to implement.
-#define UNIMPLEMENTED_CASE(name) \
- case Intrinsics::k##name: \
- res = false; \
- break;
-
-#define INTRINSIC_CASE(name) \
- case Intrinsics::k##name: \
- res = Mterp##name(shadow_frame, inst, inst_data, result_register); \
- break;
-
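For reference, INTRINSIC_CASE(StringLength) expands to:

    case Intrinsics::kStringLength:
      res = MterpStringLength(shadow_frame, inst, inst_data, result_register);
      break;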
-bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
- ArtMethod* const called_method,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Intrinsics intrinsic = static_cast<Intrinsics>(called_method->GetIntrinsic());
- bool res = false; // Assume failure
- switch (intrinsic) {
- UNIMPLEMENTED_CASE(DoubleDoubleToRawLongBits /* (D)J */)
- UNIMPLEMENTED_CASE(DoubleDoubleToLongBits /* (D)J */)
- UNIMPLEMENTED_CASE(DoubleIsInfinite /* (D)Z */)
- UNIMPLEMENTED_CASE(DoubleIsNaN /* (D)Z */)
- UNIMPLEMENTED_CASE(DoubleLongBitsToDouble /* (J)D */)
- UNIMPLEMENTED_CASE(FloatFloatToRawIntBits /* (F)I */)
- UNIMPLEMENTED_CASE(FloatFloatToIntBits /* (F)I */)
- UNIMPLEMENTED_CASE(FloatIsInfinite /* (F)Z */)
- UNIMPLEMENTED_CASE(FloatIsNaN /* (F)Z */)
- UNIMPLEMENTED_CASE(FloatIntBitsToFloat /* (I)F */)
- UNIMPLEMENTED_CASE(IntegerDivideUnsigned /* (II)I */)
- UNIMPLEMENTED_CASE(LongDivideUnsigned /* (JJ)J */)
- INTRINSIC_CASE(IntegerReverse)
- INTRINSIC_CASE(IntegerReverseBytes)
- INTRINSIC_CASE(IntegerBitCount)
- INTRINSIC_CASE(IntegerCompare)
- INTRINSIC_CASE(IntegerHighestOneBit)
- INTRINSIC_CASE(IntegerLowestOneBit)
- INTRINSIC_CASE(IntegerNumberOfLeadingZeros)
- INTRINSIC_CASE(IntegerNumberOfTrailingZeros)
- INTRINSIC_CASE(IntegerRotateRight)
- INTRINSIC_CASE(IntegerRotateLeft)
- INTRINSIC_CASE(IntegerSignum)
- INTRINSIC_CASE(LongReverse)
- INTRINSIC_CASE(LongReverseBytes)
- INTRINSIC_CASE(LongBitCount)
- INTRINSIC_CASE(LongCompare)
- INTRINSIC_CASE(LongHighestOneBit)
- INTRINSIC_CASE(LongLowestOneBit)
- INTRINSIC_CASE(LongNumberOfLeadingZeros)
- INTRINSIC_CASE(LongNumberOfTrailingZeros)
- INTRINSIC_CASE(LongRotateRight)
- INTRINSIC_CASE(LongRotateLeft)
- INTRINSIC_CASE(LongSignum)
- INTRINSIC_CASE(ShortReverseBytes)
- INTRINSIC_CASE(MathAbsDouble)
- INTRINSIC_CASE(MathAbsFloat)
- INTRINSIC_CASE(MathAbsLong)
- INTRINSIC_CASE(MathAbsInt)
- UNIMPLEMENTED_CASE(MathFmaDouble /* (DDD)D */)
- UNIMPLEMENTED_CASE(MathFmaFloat /* (FFF)F */)
- UNIMPLEMENTED_CASE(MathMinDoubleDouble /* (DD)D */)
- UNIMPLEMENTED_CASE(MathMinFloatFloat /* (FF)F */)
- INTRINSIC_CASE(MathMinLongLong)
- INTRINSIC_CASE(MathMinIntInt)
- UNIMPLEMENTED_CASE(MathMaxDoubleDouble /* (DD)D */)
- UNIMPLEMENTED_CASE(MathMaxFloatFloat /* (FF)F */)
- INTRINSIC_CASE(MathMaxLongLong)
- INTRINSIC_CASE(MathMaxIntInt)
- INTRINSIC_CASE(MathCos)
- INTRINSIC_CASE(MathSin)
- INTRINSIC_CASE(MathAcos)
- INTRINSIC_CASE(MathAsin)
- INTRINSIC_CASE(MathAtan)
- UNIMPLEMENTED_CASE(MathAtan2 /* (DD)D */)
- UNIMPLEMENTED_CASE(MathCbrt /* (D)D */)
- UNIMPLEMENTED_CASE(MathCosh /* (D)D */)
- UNIMPLEMENTED_CASE(MathExp /* (D)D */)
- UNIMPLEMENTED_CASE(MathExpm1 /* (D)D */)
- UNIMPLEMENTED_CASE(MathHypot /* (DD)D */)
- UNIMPLEMENTED_CASE(MathLog /* (D)D */)
- UNIMPLEMENTED_CASE(MathLog10 /* (D)D */)
- UNIMPLEMENTED_CASE(MathNextAfter /* (DD)D */)
- UNIMPLEMENTED_CASE(MathPow /* (DD)D */)
- UNIMPLEMENTED_CASE(MathSinh /* (D)D */)
- INTRINSIC_CASE(MathTan)
- UNIMPLEMENTED_CASE(MathTanh /* (D)D */)
- INTRINSIC_CASE(MathSqrt)
- INTRINSIC_CASE(MathCeil)
- INTRINSIC_CASE(MathFloor)
- UNIMPLEMENTED_CASE(MathRint /* (D)D */)
- UNIMPLEMENTED_CASE(MathRoundDouble /* (D)J */)
- UNIMPLEMENTED_CASE(MathRoundFloat /* (F)I */)
- UNIMPLEMENTED_CASE(MathMultiplyHigh /* (JJ)J */)
- UNIMPLEMENTED_CASE(SystemArrayCopyByte /* ([BI[BII)V */)
- UNIMPLEMENTED_CASE(SystemArrayCopyChar /* ([CI[CII)V */)
- UNIMPLEMENTED_CASE(SystemArrayCopyInt /* ([II[III)V */)
- UNIMPLEMENTED_CASE(SystemArrayCopy /* (Ljava/lang/Object;ILjava/lang/Object;II)V */)
- UNIMPLEMENTED_CASE(ThreadCurrentThread /* ()Ljava/lang/Thread; */)
- UNIMPLEMENTED_CASE(MemoryPeekByte /* (J)B */)
- UNIMPLEMENTED_CASE(MemoryPeekIntNative /* (J)I */)
- UNIMPLEMENTED_CASE(MemoryPeekLongNative /* (J)J */)
- UNIMPLEMENTED_CASE(MemoryPeekShortNative /* (J)S */)
- UNIMPLEMENTED_CASE(MemoryPokeByte /* (JB)V */)
- UNIMPLEMENTED_CASE(MemoryPokeIntNative /* (JI)V */)
- UNIMPLEMENTED_CASE(MemoryPokeLongNative /* (JJ)V */)
- UNIMPLEMENTED_CASE(MemoryPokeShortNative /* (JS)V */)
- INTRINSIC_CASE(ReachabilityFence /* (Ljava/lang/Object;)V */)
- INTRINSIC_CASE(StringCharAt)
- INTRINSIC_CASE(StringCompareTo)
- INTRINSIC_CASE(StringEquals)
- INTRINSIC_CASE(StringGetCharsNoCheck)
- INTRINSIC_CASE(StringIndexOf)
- INTRINSIC_CASE(StringIndexOfAfter)
- UNIMPLEMENTED_CASE(StringStringIndexOf /* (Ljava/lang/String;)I */)
- UNIMPLEMENTED_CASE(StringStringIndexOfAfter /* (Ljava/lang/String;I)I */)
- INTRINSIC_CASE(StringIsEmpty)
- INTRINSIC_CASE(StringLength)
- UNIMPLEMENTED_CASE(StringNewStringFromBytes /* ([BIII)Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(StringNewStringFromChars /* (II[C)Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(StringNewStringFromString /* (Ljava/lang/String;)Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(StringBufferAppend /* (Ljava/lang/String;)Ljava/lang/StringBuffer; */)
- UNIMPLEMENTED_CASE(StringBufferLength /* ()I */)
- UNIMPLEMENTED_CASE(StringBufferToString /* ()Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(
- StringBuilderAppendObject /* (Ljava/lang/Object;)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(
- StringBuilderAppendString /* (Ljava/lang/String;)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(
- StringBuilderAppendCharSequence /* (Ljava/lang/CharSequence;)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendCharArray /* ([C)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendBoolean /* (Z)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendChar /* (C)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendInt /* (I)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendLong /* (J)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendFloat /* (F)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendDouble /* (D)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderLength /* ()I */)
- UNIMPLEMENTED_CASE(StringBuilderToString /* ()Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(UnsafeCASInt /* (Ljava/lang/Object;JII)Z */)
- UNIMPLEMENTED_CASE(UnsafeCASLong /* (Ljava/lang/Object;JJJ)Z */)
- UNIMPLEMENTED_CASE(UnsafeCASObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(UnsafeGet /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(UnsafeGetVolatile /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(UnsafeGetObject /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(UnsafeGetObjectVolatile /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(UnsafeGetLong /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(UnsafeGetLongVolatile /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(UnsafePut /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(UnsafePutOrdered /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(UnsafePutVolatile /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(UnsafePutObject /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(UnsafePutObjectOrdered /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(UnsafePutObjectVolatile /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(UnsafePutLong /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(UnsafePutLongOrdered /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(UnsafePutLongVolatile /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(UnsafeGetAndAddInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(UnsafeGetAndAddLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(UnsafeGetAndSetInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(UnsafeGetAndSetLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(UnsafeGetAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(UnsafeLoadFence /* ()V */)
- UNIMPLEMENTED_CASE(UnsafeStoreFence /* ()V */)
- UNIMPLEMENTED_CASE(UnsafeFullFence /* ()V */)
- UNIMPLEMENTED_CASE(JdkUnsafeCASInt /* (Ljava/lang/Object;JII)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCASLong /* (Ljava/lang/Object;JJJ)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCASObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCompareAndSetInt /* (Ljava/lang/Object;JII)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCompareAndSetLong /* (Ljava/lang/Object;JJJ)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCompareAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeGet /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetVolatile /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAcquire /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetObject /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetObjectVolatile /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetObjectAcquire /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetLong /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetLongVolatile /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetLongAcquire /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(JdkUnsafePut /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutOrdered /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutVolatile /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutRelease /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObject /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObjectOrdered /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObjectVolatile /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObjectRelease /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLong /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLongOrdered /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLongVolatile /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLongRelease /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndAddInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndAddLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndSetInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndSetLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeLoadFence /* ()V */)
- UNIMPLEMENTED_CASE(JdkUnsafeStoreFence /* ()V */)
- UNIMPLEMENTED_CASE(JdkUnsafeFullFence /* ()V */)
- UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(ReferenceRefersTo /* (Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
- UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
- UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
- UNIMPLEMENTED_CASE(CRC32UpdateBytes /* (I[BII)I */)
- UNIMPLEMENTED_CASE(CRC32UpdateByteBuffer /* (IJII)I */)
- UNIMPLEMENTED_CASE(FP16Compare /* (SS)I */)
- UNIMPLEMENTED_CASE(FP16ToFloat /* (S)F */)
- UNIMPLEMENTED_CASE(FP16ToHalf /* (F)S */)
- UNIMPLEMENTED_CASE(FP16Floor /* (S)S */)
- UNIMPLEMENTED_CASE(FP16Ceil /* (S)S */)
- UNIMPLEMENTED_CASE(FP16Rint /* (S)S */)
- UNIMPLEMENTED_CASE(FP16Greater /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16GreaterEquals /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16Less /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16LessEquals /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16Min /* (SS)S */)
- UNIMPLEMENTED_CASE(FP16Max /* (SS)S */)
- INTRINSIC_CASE(VarHandleFullFence)
- INTRINSIC_CASE(VarHandleAcquireFence)
- INTRINSIC_CASE(VarHandleReleaseFence)
- INTRINSIC_CASE(VarHandleLoadLoadFence)
- INTRINSIC_CASE(VarHandleStoreStoreFence)
- INTRINSIC_CASE(MethodHandleInvokeExact)
- INTRINSIC_CASE(MethodHandleInvoke)
- INTRINSIC_CASE(VarHandleCompareAndExchange)
- INTRINSIC_CASE(VarHandleCompareAndExchangeAcquire)
- INTRINSIC_CASE(VarHandleCompareAndExchangeRelease)
- INTRINSIC_CASE(VarHandleCompareAndSet)
- INTRINSIC_CASE(VarHandleGet)
- INTRINSIC_CASE(VarHandleGetAcquire)
- INTRINSIC_CASE(VarHandleGetAndAdd)
- INTRINSIC_CASE(VarHandleGetAndAddAcquire)
- INTRINSIC_CASE(VarHandleGetAndAddRelease)
- INTRINSIC_CASE(VarHandleGetAndBitwiseAnd)
- INTRINSIC_CASE(VarHandleGetAndBitwiseAndAcquire)
- INTRINSIC_CASE(VarHandleGetAndBitwiseAndRelease)
- INTRINSIC_CASE(VarHandleGetAndBitwiseOr)
- INTRINSIC_CASE(VarHandleGetAndBitwiseOrAcquire)
- INTRINSIC_CASE(VarHandleGetAndBitwiseOrRelease)
- INTRINSIC_CASE(VarHandleGetAndBitwiseXor)
- INTRINSIC_CASE(VarHandleGetAndBitwiseXorAcquire)
- INTRINSIC_CASE(VarHandleGetAndBitwiseXorRelease)
- INTRINSIC_CASE(VarHandleGetAndSet)
- INTRINSIC_CASE(VarHandleGetAndSetAcquire)
- INTRINSIC_CASE(VarHandleGetAndSetRelease)
- INTRINSIC_CASE(VarHandleGetOpaque)
- INTRINSIC_CASE(VarHandleGetVolatile)
- INTRINSIC_CASE(VarHandleSet)
- INTRINSIC_CASE(VarHandleSetOpaque)
- INTRINSIC_CASE(VarHandleSetRelease)
- INTRINSIC_CASE(VarHandleSetVolatile)
- INTRINSIC_CASE(VarHandleWeakCompareAndSet)
- INTRINSIC_CASE(VarHandleWeakCompareAndSetAcquire)
- INTRINSIC_CASE(VarHandleWeakCompareAndSetPlain)
- INTRINSIC_CASE(VarHandleWeakCompareAndSetRelease)
- case Intrinsics::kNone:
- res = false;
- break;
- // Note: no default case to ensure we catch any newly added intrinsics.
- }
- return res;
-}
-
-} // namespace interpreter
-} // namespace art
diff --git a/runtime/interpreter/interpreter_intrinsics.h b/runtime/interpreter/interpreter_intrinsics.h
deleted file mode 100644
index 2a23002d05..0000000000
--- a/runtime/interpreter/interpreter_intrinsics.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
-#define ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
-
-#include "jvalue.h"
-
-namespace art {
-
-class ArtMethod;
-class Instruction;
-class ShadowFrame;
-
-namespace interpreter {
-
-// Invokes of methods identified as intrinsics are routed here. If there is
-// no interpreter implementation, return false and a normal invoke will proceed.
-bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
- ArtMethod* const called_method,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register);
-
-} // namespace interpreter
-} // namespace art
-
-#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index d95c507698..ddde26d0cf 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -50,7 +50,7 @@ namespace interpreter {
// The function names must match the names from dex_instruction_list.h and have no arguments.
// Return value: The handlers must return false if the instruction throws or returns (exits).
//
-template<bool do_access_check, bool transaction_active, Instruction::Format kFormat>
+template<bool transaction_active, Instruction::Format kFormat>
class InstructionHandler {
public:
#define HANDLER_ATTRIBUTES ALWAYS_INLINE FLATTEN WARN_UNUSED REQUIRES_SHARED(Locks::mutator_lock_)
@@ -64,7 +64,7 @@ class InstructionHandler {
DCHECK(abort_exception != nullptr);
DCHECK(abort_exception->GetClass()->DescriptorEquals(Transaction::kAbortExceptionDescriptor));
Self()->ClearException();
- PerformNonStandardReturn<kMonitorState>(
+ PerformNonStandardReturn(
Self(), shadow_frame_, ctx_->result, Instrumentation(), Accessor().InsSize());
Self()->SetException(abort_exception.Get());
ExitInterpreterLoop();
@@ -76,7 +76,7 @@ class InstructionHandler {
HANDLER_ATTRIBUTES bool CheckForceReturn() {
if (shadow_frame_.GetForcePopFrame()) {
DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
- PerformNonStandardReturn<kMonitorState>(
+ PerformNonStandardReturn(
Self(), shadow_frame_, ctx_->result, Instrumentation(), Accessor().InsSize());
ExitInterpreterLoop();
return false;
@@ -100,7 +100,7 @@ class InstructionHandler {
/* skip_listeners= */ skip_event,
/* skip_throw_listener= */ skip_event)) {
// Structured locking is to be enforced for abnormal termination, too.
- DoMonitorCheckOnExit<do_assignability_check>(Self(), &shadow_frame_);
+ DoMonitorCheckOnExit(Self(), &shadow_frame_);
ctx_->result = JValue(); /* Handled in caller. */
ExitInterpreterLoop();
return false; // Return to caller.
@@ -144,7 +144,7 @@ class InstructionHandler {
if (!CheckForceReturn()) {
return false;
}
- if (UNLIKELY(Instrumentation()->HasDexPcListeners())) {
+ if (UNLIKELY(shadow_frame_.GetNotifyDexPcMoveEvents())) {
uint8_t opcode = inst_->Opcode(inst_data_);
bool is_move_result_object = (opcode == Instruction::MOVE_RESULT_OBJECT);
JValue* save_ref = is_move_result_object ? &ctx_->result_register : nullptr;
@@ -204,7 +204,7 @@ class InstructionHandler {
HANDLER_ATTRIBUTES bool HandleReturn(JValue result) {
Self()->AllowThreadSuspension();
- if (!DoMonitorCheckOnExit<do_assignability_check>(Self(), &shadow_frame_)) {
+ if (!DoMonitorCheckOnExit(Self(), &shadow_frame_)) {
return false;
}
if (UNLIKELY(NeedsMethodExitEvent(Instrumentation()) &&
@@ -341,19 +341,19 @@ class InstructionHandler {
template<FindFieldType find_type, Primitive::Type field_type>
HANDLER_ATTRIBUTES bool HandleGet() {
- return DoFieldGet<find_type, field_type, do_access_check, transaction_active>(
+ return DoFieldGet<find_type, field_type, transaction_active>(
Self(), shadow_frame_, inst_, inst_data_);
}
template<FindFieldType find_type, Primitive::Type field_type>
HANDLER_ATTRIBUTES bool HandlePut() {
- return DoFieldPut<find_type, field_type, do_access_check, transaction_active>(
+ return DoFieldPut<find_type, field_type, transaction_active>(
Self(), shadow_frame_, inst_, inst_data_);
}
template<InvokeType type, bool is_range>
HANDLER_ATTRIBUTES bool HandleInvoke() {
- bool success = DoInvoke<type, is_range, do_access_check, /*is_mterp=*/ false>(
+ bool success = DoInvoke<type, is_range>(
Self(), shadow_frame_, inst_, inst_data_, ResultRegister());
return PossiblyHandlePendingExceptionOnInvoke(!success);
}
@@ -457,12 +457,12 @@ class InstructionHandler {
HANDLER_ATTRIBUTES bool RETURN_OBJECT() {
JValue result;
Self()->AllowThreadSuspension();
- if (!DoMonitorCheckOnExit<do_assignability_check>(Self(), &shadow_frame_)) {
+ if (!DoMonitorCheckOnExit(Self(), &shadow_frame_)) {
return false;
}
const size_t ref_idx = A();
ObjPtr<mirror::Object> obj_result = GetVRegReference(ref_idx);
- if (do_assignability_check && obj_result != nullptr) {
+ if (obj_result != nullptr && UNLIKELY(DoAssignabilityChecks())) {
ObjPtr<mirror::Class> return_type = shadow_frame_.GetMethod()->ResolveReturnType();
// Re-load since it might have moved.
obj_result = GetVRegReference(ref_idx);
@@ -481,22 +481,23 @@ class InstructionHandler {
return false; // Pending exception.
}
}
- StackHandleScope<1> hs(Self());
- MutableHandle<mirror::Object> h_result(hs.NewHandle(obj_result));
result.SetL(obj_result);
- if (UNLIKELY(NeedsMethodExitEvent(Instrumentation()) &&
- !SendMethodExitEvents(Self(),
- Instrumentation(),
- shadow_frame_,
- shadow_frame_.GetMethod(),
- h_result))) {
- DCHECK(Self()->IsExceptionPending());
- // Do not raise exception event if it is caused by other instrumentation event.
- shadow_frame_.SetSkipNextExceptionEvent(true);
- return false; // Pending exception.
+ if (UNLIKELY(NeedsMethodExitEvent(Instrumentation()))) {
+ StackHandleScope<1> hs(Self());
+ MutableHandle<mirror::Object> h_result(hs.NewHandle(obj_result));
+ if (!SendMethodExitEvents(Self(),
+ Instrumentation(),
+ shadow_frame_,
+ shadow_frame_.GetMethod(),
+ h_result)) {
+ DCHECK(Self()->IsExceptionPending());
+ // Do not raise exception event if it is caused by other instrumentation event.
+ shadow_frame_.SetSkipNextExceptionEvent(true);
+ return false; // Pending exception.
+ }
+ // Re-load since it might have moved or been replaced during the MethodExitEvent.
+ result.SetL(h_result.Get());
}
- // Re-load since it might have moved or been replaced during the MethodExitEvent.
- result.SetL(h_result.Get());
ctx_->result = result;
ExitInterpreterLoop();
return false;
@@ -551,11 +552,12 @@ class InstructionHandler {
}
HANDLER_ATTRIBUTES bool CONST_CLASS() {
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
- shadow_frame_.GetMethod(),
- Self(),
- false,
- do_access_check);
+ ObjPtr<mirror::Class> c =
+ ResolveVerifyAndClinit(dex::TypeIndex(B()),
+ shadow_frame_.GetMethod(),
+ Self(),
+ false,
+ !shadow_frame_.GetMethod()->SkipAccessChecks());
if (UNLIKELY(c == nullptr)) {
return false; // Pending exception.
}
@@ -596,7 +598,7 @@ class InstructionHandler {
ThrowNullPointerExceptionFromInterpreter();
return false; // Pending exception.
}
- DoMonitorEnter<do_assignability_check>(Self(), &shadow_frame_, obj);
+ DoMonitorEnter(Self(), &shadow_frame_, obj);
return !Self()->IsExceptionPending();
}
@@ -609,16 +611,17 @@ class InstructionHandler {
ThrowNullPointerExceptionFromInterpreter();
return false; // Pending exception.
}
- DoMonitorExit<do_assignability_check>(Self(), &shadow_frame_, obj);
+ DoMonitorExit(Self(), &shadow_frame_, obj);
return !Self()->IsExceptionPending();
}
HANDLER_ATTRIBUTES bool CHECK_CAST() {
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
- shadow_frame_.GetMethod(),
- Self(),
- false,
- do_access_check);
+ ObjPtr<mirror::Class> c =
+ ResolveVerifyAndClinit(dex::TypeIndex(B()),
+ shadow_frame_.GetMethod(),
+ Self(),
+ false,
+ !shadow_frame_.GetMethod()->SkipAccessChecks());
if (UNLIKELY(c == nullptr)) {
return false; // Pending exception.
}
@@ -631,11 +634,12 @@ class InstructionHandler {
}
HANDLER_ATTRIBUTES bool INSTANCE_OF() {
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(C()),
- shadow_frame_.GetMethod(),
- Self(),
- false,
- do_access_check);
+ ObjPtr<mirror::Class> c =
+ ResolveVerifyAndClinit(dex::TypeIndex(C()),
+ shadow_frame_.GetMethod(),
+ Self(),
+ false,
+ !shadow_frame_.GetMethod()->SkipAccessChecks());
if (UNLIKELY(c == nullptr)) {
return false; // Pending exception.
}
@@ -656,11 +660,12 @@ class InstructionHandler {
HANDLER_ATTRIBUTES bool NEW_INSTANCE() {
ObjPtr<mirror::Object> obj = nullptr;
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
- shadow_frame_.GetMethod(),
- Self(),
- false,
- do_access_check);
+ ObjPtr<mirror::Class> c =
+ ResolveVerifyAndClinit(dex::TypeIndex(B()),
+ shadow_frame_.GetMethod(),
+ Self(),
+ false,
+ !shadow_frame_.GetMethod()->SkipAccessChecks());
if (LIKELY(c != nullptr)) {
// Don't allow finalizable objects to be allocated during a transaction since these can't
// be finalized without a started runtime.
@@ -687,7 +692,7 @@ class InstructionHandler {
HANDLER_ATTRIBUTES bool NEW_ARRAY() {
int32_t length = GetVReg(B());
- ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check>(
+ ObjPtr<mirror::Object> obj = AllocArrayFromCode(
dex::TypeIndex(C()),
length,
shadow_frame_.GetMethod(),
@@ -701,12 +706,12 @@ class InstructionHandler {
}
HANDLER_ATTRIBUTES bool FILLED_NEW_ARRAY() {
- return DoFilledNewArray<false, do_access_check, transaction_active>(
+ return DoFilledNewArray<false, transaction_active>(
inst_, shadow_frame_, Self(), ResultRegister());
}
HANDLER_ATTRIBUTES bool FILLED_NEW_ARRAY_RANGE() {
- return DoFilledNewArray<true, do_access_check, transaction_active>(
+ return DoFilledNewArray<true, transaction_active>(
inst_, shadow_frame_, Self(), ResultRegister());
}
@@ -731,7 +736,7 @@ class InstructionHandler {
ObjPtr<mirror::Object> exception = GetVRegReference(A());
if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException();
- } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
+ } else if (DoAssignabilityChecks() && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
std::string temp;
Self()->ThrowNewExceptionF("Ljava/lang/InternalError;",
@@ -1741,9 +1746,9 @@ class InstructionHandler {
}
private:
- static constexpr bool do_assignability_check = do_access_check;
- static constexpr MonitorState kMonitorState =
- do_assignability_check ? MonitorState::kCountingMonitors : MonitorState::kNormalMonitors;
+ bool DoAssignabilityChecks() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return !shadow_frame_.GetMethod()->SkipAccessChecks();
+ }
ALWAYS_INLINE const CodeItemDataAccessor& Accessor() { return ctx_->accessor; }
ALWAYS_INLINE const uint16_t* Insns() { return ctx_->accessor.Insns(); }
@@ -1815,8 +1820,8 @@ class InstructionHandler {
#endif
#define OPCODE_CASE(OPCODE, OPCODE_NAME, NAME, FORMAT, i, a, e, v) \
-template<bool do_access_check, bool transaction_active> \
-ASAN_NO_INLINE static bool OP_##OPCODE_NAME( \
+template<bool transaction_active> \
+ASAN_NO_INLINE NO_STACK_PROTECTOR static bool OP_##OPCODE_NAME( \
SwitchImplContext* ctx, \
const instrumentation::Instrumentation* instrumentation, \
Thread* self, \
@@ -1826,14 +1831,15 @@ ASAN_NO_INLINE static bool OP_##OPCODE_NAME(
uint16_t inst_data, \
const Instruction*& next, \
bool& exit) REQUIRES_SHARED(Locks::mutator_lock_) { \
- InstructionHandler<do_access_check, transaction_active, Instruction::FORMAT> handler( \
+ InstructionHandler<transaction_active, Instruction::FORMAT> handler( \
ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit); \
return LIKELY(handler.OPCODE_NAME()); \
}
DEX_INSTRUCTION_LIST(OPCODE_CASE)
#undef OPCODE_CASE
-template<bool do_access_check, bool transaction_active>
+template<bool transaction_active>
+NO_STACK_PROTECTOR
void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
Thread* self = ctx->self;
const CodeItemDataAccessor& accessor = ctx->accessor;
@@ -1857,7 +1863,7 @@ void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
uint16_t inst_data = inst->Fetch16(0);
bool exit = false;
bool success; // Moved outside to keep frames small under asan.
- if (InstructionHandler<do_access_check, transaction_active, Instruction::kInvalidFormat>(
+ if (InstructionHandler<transaction_active, Instruction::kInvalidFormat>(
ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit).
Preamble()) {
DCHECK_EQ(self->IsExceptionPending(), inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION);
@@ -1865,7 +1871,7 @@ void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
#define OPCODE_CASE(OPCODE, OPCODE_NAME, NAME, FORMAT, i, a, e, v) \
case OPCODE: { \
next = inst->RelativeAt(Instruction::SizeInCodeUnits(Instruction::FORMAT)); \
- success = OP_##OPCODE_NAME<do_access_check, transaction_active>( \
+ success = OP_##OPCODE_NAME<transaction_active>( \
ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit); \
if (success && LIKELY(!interpret_one_instruction)) { \
continue; \
@@ -1881,7 +1887,7 @@ void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
return; // Return statement or debugger forced exit.
}
if (self->IsExceptionPending()) {
- if (!InstructionHandler<do_access_check, transaction_active, Instruction::kInvalidFormat>(
+ if (!InstructionHandler<transaction_active, Instruction::kInvalidFormat>(
ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit).
HandlePendingException()) {
shadow_frame.SetDexPC(dex::kDexNoIndex);
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index d4dca11c60..3a42c217a9 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -45,7 +45,7 @@ struct SwitchImplContext {
};
// The actual internal implementation of the switch interpreter.
-template<bool do_access_check, bool transaction_active>
+template<bool transaction_active>
void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -55,9 +55,11 @@ extern "C" void ExecuteSwitchImplAsm(SwitchImplContext* ctx, void* impl, const u
REQUIRES_SHARED(Locks::mutator_lock_);
// Wrapper around the switch interpreter which ensures we can unwind through it.
-template<bool do_access_check, bool transaction_active>
-ALWAYS_INLINE JValue ExecuteSwitchImpl(Thread* self, const CodeItemDataAccessor& accessor,
- ShadowFrame& shadow_frame, JValue result_register,
+template<bool transaction_active>
+ALWAYS_INLINE JValue ExecuteSwitchImpl(Thread* self,
+ const CodeItemDataAccessor& accessor,
+ ShadowFrame& shadow_frame,
+ JValue result_register,
bool interpret_one_instruction)
REQUIRES_SHARED(Locks::mutator_lock_) {
SwitchImplContext ctx {
@@ -68,7 +70,7 @@ ALWAYS_INLINE JValue ExecuteSwitchImpl(Thread* self, const CodeItemDataAccessor&
.interpret_one_instruction = interpret_one_instruction,
.result = JValue(),
};
- void* impl = reinterpret_cast<void*>(&ExecuteSwitchImplCpp<do_access_check, transaction_active>);
+ void* impl = reinterpret_cast<void*>(&ExecuteSwitchImplCpp<transaction_active>);
const uint16_t* dex_pc = ctx.accessor.Insns();
ExecuteSwitchImplAsm(&ctx, impl, dex_pc);
return ctx.result;
diff --git a/runtime/interpreter/interpreter_switch_impl0.cc b/runtime/interpreter/interpreter_switch_impl0.cc
index 00159ecd3f..b4e5f5061d 100644
--- a/runtime/interpreter/interpreter_switch_impl0.cc
+++ b/runtime/interpreter/interpreter_switch_impl0.cc
@@ -24,7 +24,7 @@ namespace interpreter {
// Explicit definition of ExecuteSwitchImplCpp.
template HOT_ATTR
-void ExecuteSwitchImplCpp<false, false>(SwitchImplContext* ctx);
+void ExecuteSwitchImplCpp<false>(SwitchImplContext* ctx);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl1.cc b/runtime/interpreter/interpreter_switch_impl1.cc
index 3a86765c68..f8f9fcc81a 100644
--- a/runtime/interpreter/interpreter_switch_impl1.cc
+++ b/runtime/interpreter/interpreter_switch_impl1.cc
@@ -24,7 +24,7 @@ namespace interpreter {
// Explicit definition of ExecuteSwitchImplCpp.
template
-void ExecuteSwitchImplCpp<false, true>(SwitchImplContext* ctx);
+void ExecuteSwitchImplCpp<true>(SwitchImplContext* ctx);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl2.cc b/runtime/interpreter/interpreter_switch_impl2.cc
deleted file mode 100644
index c2739c13ae..0000000000
--- a/runtime/interpreter/interpreter_switch_impl2.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// The interpreter function takes considerable time to compile and link.
-// We compile the explicit definitions separately to speed up the build.
-
-#include "interpreter_switch_impl-inl.h"
-
-namespace art {
-namespace interpreter {
-
-// Explicit definition of ExecuteSwitchImplCpp.
-template HOT_ATTR
-void ExecuteSwitchImplCpp<true, false>(SwitchImplContext* ctx);
-
-} // namespace interpreter
-} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl3.cc b/runtime/interpreter/interpreter_switch_impl3.cc
deleted file mode 100644
index 808e4bc9c5..0000000000
--- a/runtime/interpreter/interpreter_switch_impl3.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// The interpreter function takes considerable time to compile and link.
-// We compile the explicit definitions separately to speed up the build.
-
-#include "interpreter_switch_impl-inl.h"
-
-namespace art {
-namespace interpreter {
-
-// Explicit definition of ExecuteSwitchImplCpp.
-template
-void ExecuteSwitchImplCpp<true, true>(SwitchImplContext* ctx);
-
-} // namespace interpreter
-} // namespace art
diff --git a/runtime/interpreter/mterp/README.txt b/runtime/interpreter/mterp/README.txt
index c5509113f9..9b6c845857 100644
--- a/runtime/interpreter/mterp/README.txt
+++ b/runtime/interpreter/mterp/README.txt
@@ -51,26 +51,13 @@ If a constant in the file becomes out of sync, the VM will log an error
message and abort during startup.
-==== Rebuilding ====
-
-If you change any of the source file fragments, you need to rebuild the
-combined source files in the "out" directory. Make sure the files in
-"out" are editable, then:
-
- $ cd mterp
- $ ./gen_mterp.py
-
-The ultimate goal is to have the build system generate the necessary
-output files without requiring this separate step, but we're not yet
-ready to require Python in the build.
-
==== Interpreter Control ====
-The mterp fast interpreter achieves much of its performance advantage
-over the C++ interpreter through its efficient mechanism of
-transitioning from one Dalvik bytecode to the next. Mterp for ARM targets
-uses a computed-goto mechanism, in which the handler entrypoints are
-located at the base of the handler table + (opcode * 128).
+The nterp fast interpreter achieves much of its performance advantage
+over the C++ "switch" interpreter through its efficient mechanism of
+transitioning from one Dalvik bytecode to the next. Nterp uses a computed-goto
+mechanism, in which the handler entrypoints are located at the base of the
+handler table + (opcode * 128).
In normal operation, the dedicated register rIBASE
(r8 for ARM, edx for x86) holds a mainHandlerTable. If we need to switch
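As a rough C++ illustration of the dispatch style described above (the real nterp handlers are assembly padded to fixed 128-byte slots, so the next handler address is computed as base + opcode * 128 rather than loaded from a table; this sketch uses the GNU label-address extension instead):

    #include <cstddef>
    #include <cstdint>

    // Toy opcode set: 0 = nop, 1 = return-void.
    void Interp(const uint8_t* code) {
      static void* const handlers[] = { &&op_nop, &&op_return_void };
      size_t pc = 0;
      goto *handlers[code[pc]];     // initial dispatch
    op_nop:
      pc += 1;                      // advance past the instruction...
      goto *handlers[code[pc]];     // ...and jump straight to the next handler
    op_return_void:
      return;
    }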
diff --git a/runtime/interpreter/mterp/arm64ng/array.S b/runtime/interpreter/mterp/arm64ng/array.S
index 68636628cd..1689b015ea 100644
--- a/runtime/interpreter/mterp/arm64ng/array.S
+++ b/runtime/interpreter/mterp/arm64ng/array.S
@@ -170,6 +170,7 @@
GET_VREG w1, w1 // w1<- vB (array length)
ldr lr, [xSELF, #THREAD_ALLOC_ARRAY_ENTRYPOINT_OFFSET]
blr lr
+ dmb ishst // need fence for making array's class visible
ubfx w1, wINST, #8, #4 // w1<- A
SET_VREG_OBJECT w0, w1
FETCH_ADVANCE_INST 2
@@ -179,7 +180,7 @@
mov x0, xSELF
ldr x1, [sp, 0]
mov x2, xPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_get_class
b 1b
3:
bl art_quick_read_barrier_mark_reg00
diff --git a/runtime/interpreter/mterp/arm64ng/main.S b/runtime/interpreter/mterp/arm64ng/main.S
index 89de81f5e4..424d060b85 100644
--- a/runtime/interpreter/mterp/arm64ng/main.S
+++ b/runtime/interpreter/mterp/arm64ng/main.S
@@ -238,7 +238,7 @@
.hidden \name
.global \name
.balign 16
- // Padding of 3 * 8 bytes to get 16 bytes alignment of code entry.
+ // Padding of 3 * 4 bytes to get 16 bytes alignment of code entry.
.long 0
.long 0
.long 0
@@ -273,7 +273,9 @@ ENTRY \name
bl \helper
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_OR_DELIVER_PENDING_EXCEPTION
+ ldr xIP0, [xSELF, #THREAD_EXCEPTION_OFFSET] // Get exception field.
+ cbnz xIP0, nterp_deliver_pending_exception
+ ret
END \name
.endm
@@ -1590,6 +1592,29 @@ END \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ // For simplicity, we don't do a read barrier here, but instead rely
+ // on art_quick_resolution_trampoline to always have a suspend point before
+ // calling back here.
+ ldr wip, [x0, #ART_METHOD_DECLARING_CLASS_OFFSET]
+ ldrb wip2, [ip, #MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET]
+ cmp ip2, #MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE
+ b.hs ExecuteNterpImpl
+ cmp ip2, #MIRROR_CLASS_IS_INITIALIZED_VALUE
+ b.lo .Linitializing_check
+ dmb ish
+ b ExecuteNterpImpl
+.Linitializing_check:
+ cmp ip2, #MIRROR_CLASS_IS_INITIALIZING_VALUE
+ b.lo .Lresolution_trampoline
+ ldr wip2, [ip, #MIRROR_CLASS_CLINIT_THREAD_ID_OFFSET]
+ ldr wip, [xSELF, #THREAD_TID_OFFSET]
+ cmp wip, wip2
+ b.eq ExecuteNterpImpl
+.Lresolution_trampoline:
+ b art_quick_resolution_trampoline
+EndExecuteNterpWithClinitImpl:
+
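In C++ terms, the new entry point implements roughly the following decision; the names below are illustrative rather than ART's actual API, and the real code additionally issues a dmb ish fence before running a merely-initialized class so that its reads observe the initializing thread's stores:

    #include <cstdint>

    // Status values ordered as in the assembly comparisons (illustrative).
    enum class Status : uint8_t { kOther, kInitializing, kInitialized, kVisiblyInitialized };

    bool CanExecuteNterp(Status s, uint32_t clinit_tid, uint32_t self_tid) {
      if (s == Status::kVisiblyInitialized) return true;  // b.hs ExecuteNterpImpl
      if (s == Status::kInitialized) return true;         // dmb ish; b ExecuteNterpImpl
      // Only the thread running <clinit> may enter while initializing.
      return s == Status::kInitializing && clinit_tid == self_tid;
    }
    // Anything else falls back to art_quick_resolution_trampoline.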
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
sub x16, sp, #STACK_OVERFLOW_RESERVED_BYTES
@@ -1887,11 +1912,16 @@ NTERP_TRAMPOLINE nterp_get_static_field, NterpGetStaticField
NTERP_TRAMPOLINE nterp_get_instance_field_offset, NterpGetInstanceFieldOffset
NTERP_TRAMPOLINE nterp_filled_new_array, NterpFilledNewArray
NTERP_TRAMPOLINE nterp_filled_new_array_range, NterpFilledNewArrayRange
-NTERP_TRAMPOLINE nterp_get_class_or_allocate_object, NterpGetClassOrAllocateObject
+NTERP_TRAMPOLINE nterp_get_class, NterpGetClass
+NTERP_TRAMPOLINE nterp_allocate_object, NterpAllocateObject
NTERP_TRAMPOLINE nterp_get_method, NterpGetMethod
NTERP_TRAMPOLINE nterp_hot_method, NterpHotMethod
NTERP_TRAMPOLINE nterp_load_object, NterpLoadObject
+ENTRY nterp_deliver_pending_exception
+ DELIVER_PENDING_EXCEPTION
+END nterp_deliver_pending_exception
+
// gen_mterp.py will inline the following definitions
// within [ExecuteNterpImpl, EndExecuteNterpImpl).
%def instruction_end():
diff --git a/runtime/interpreter/mterp/arm64ng/object.S b/runtime/interpreter/mterp/arm64ng/object.S
index df044d9833..b6a6e24031 100644
--- a/runtime/interpreter/mterp/arm64ng/object.S
+++ b/runtime/interpreter/mterp/arm64ng/object.S
@@ -19,7 +19,7 @@
mov x0, xSELF
ldr x1, [sp]
mov x2, xPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_get_class
mov x1, x0
b 1b
@@ -90,7 +90,7 @@
mov x0, xSELF
ldr x1, [sp]
mov x2, xPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_get_class
mov x1, x0
b 1b
@@ -466,6 +466,7 @@
4:
ldr lr, [xSELF, #THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET]
blr lr
+ dmb ishst // need fence for making object's class visible
1:
lsr w1, wINST, #8 // w1 <- A
SET_VREG_OBJECT w0, w1 // fp[A] <- value
@@ -476,7 +477,7 @@
mov x0, xSELF
ldr x1, [sp]
mov x2, xPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_allocate_object
b 1b
3:
bl art_quick_read_barrier_mark_reg00
diff --git a/runtime/interpreter/mterp/arm64ng/other.S b/runtime/interpreter/mterp/arm64ng/other.S
index 1feafd5348..3470ee8da5 100644
--- a/runtime/interpreter/mterp/arm64ng/other.S
+++ b/runtime/interpreter/mterp/arm64ng/other.S
@@ -66,7 +66,7 @@
b 1b
%def op_const_class():
-% op_const_object(jumbo="0", helper="nterp_get_class_or_allocate_object")
+% op_const_object(jumbo="0", helper="nterp_get_class")
%def op_const_method_handle():
% op_const_object(jumbo="0")
diff --git a/runtime/interpreter/mterp/armng/array.S b/runtime/interpreter/mterp/armng/array.S
index 4ab418c479..e685cbf574 100644
--- a/runtime/interpreter/mterp/armng/array.S
+++ b/runtime/interpreter/mterp/armng/array.S
@@ -177,6 +177,7 @@
GET_VREG r1, r1 // r1<- vB (array length)
ldr lr, [rSELF, #THREAD_ALLOC_ARRAY_ENTRYPOINT_OFFSET]
blx lr
+ dmb ishst // need fence for making array's class visible
ubfx r1, rINST, #8, #4 // r1<- A
SET_VREG_OBJECT r0, r1
FETCH_ADVANCE_INST 2
@@ -186,7 +187,7 @@
mov r0, rSELF
ldr r1, [sp]
mov r2, rPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_get_class
b 1b
3:
bl art_quick_read_barrier_mark_reg00
diff --git a/runtime/interpreter/mterp/armng/main.S b/runtime/interpreter/mterp/armng/main.S
index 310a3fd8f1..3647f3e0d3 100644
--- a/runtime/interpreter/mterp/armng/main.S
+++ b/runtime/interpreter/mterp/armng/main.S
@@ -248,7 +248,7 @@
.hidden \name
.global \name
.balign 16
- // Padding of 3 * 8 bytes to get 16 bytes alignment of code entry.
+ // Padding of 3 * 4 bytes to get 16 bytes alignment of code entry.
.long 0
.long 0
.long 0
@@ -284,7 +284,10 @@ ENTRY \name
bl \helper
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_OR_DELIVER_PENDING_EXCEPTION
+ ldr ip, [rSELF, #THREAD_EXCEPTION_OFFSET] @ Get exception field.
+ cmp ip, #0
+ bne nterp_deliver_pending_exception
+ bx lr
END \name
.endm
@@ -1608,6 +1611,28 @@ END \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ // For simplicity, we don't do a read barrier here, but instead rely
+ // on art_quick_resolution_trampoline to always have a suspend point before
+ // calling back here.
+ ldr r4, [r0, ART_METHOD_DECLARING_CLASS_OFFSET]
+ ldrb ip, [r4, MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET]
+ cmp ip, #MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE
+ bcs ExecuteNterpImpl
+ cmp ip, #MIRROR_CLASS_IS_INITIALIZED_VALUE
+ blo .Linitializing_check
+ dmb ish
+ b ExecuteNterpImpl
+.Linitializing_check:
+ cmp ip, #MIRROR_CLASS_IS_INITIALIZING_VALUE
+ blo art_quick_resolution_trampoline
+ ldr r4, [r4, #MIRROR_CLASS_CLINIT_THREAD_ID_OFFSET]
+ ldr ip, [rSELF, #THREAD_TID_OFFSET]
+ cmp r4, ip
+ beq ExecuteNterpImpl
+ b art_quick_resolution_trampoline
+EndExecuteNterpWithClinitImpl:
+
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
sub ip, sp, #STACK_OVERFLOW_RESERVED_BYTES
@@ -1969,11 +1994,16 @@ NTERP_TRAMPOLINE nterp_get_static_field, NterpGetStaticField
NTERP_TRAMPOLINE nterp_get_instance_field_offset, NterpGetInstanceFieldOffset
NTERP_TRAMPOLINE nterp_filled_new_array, NterpFilledNewArray
NTERP_TRAMPOLINE nterp_filled_new_array_range, NterpFilledNewArrayRange
-NTERP_TRAMPOLINE nterp_get_class_or_allocate_object, NterpGetClassOrAllocateObject
+NTERP_TRAMPOLINE nterp_get_class, NterpGetClass
+NTERP_TRAMPOLINE nterp_allocate_object, NterpAllocateObject
NTERP_TRAMPOLINE nterp_get_method, NterpGetMethod
NTERP_TRAMPOLINE nterp_hot_method, NterpHotMethod
NTERP_TRAMPOLINE nterp_load_object, NterpLoadObject
+ENTRY nterp_deliver_pending_exception
+ DELIVER_PENDING_EXCEPTION
+END nterp_deliver_pending_exception
+
// gen_mterp.py will inline the following definitions
// within [ExecuteNterpImpl, EndExecuteNterpImpl).
%def instruction_end():
diff --git a/runtime/interpreter/mterp/armng/object.S b/runtime/interpreter/mterp/armng/object.S
index 7deffaf995..cde8cf94bd 100644
--- a/runtime/interpreter/mterp/armng/object.S
+++ b/runtime/interpreter/mterp/armng/object.S
@@ -20,7 +20,7 @@
mov r0, rSELF
ldr r1, [sp]
mov r2, rPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_get_class
mov r1, r0
b 1b
@@ -91,7 +91,7 @@
mov r0, rSELF
ldr r1, [sp]
mov r2, rPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_get_class
mov r1, r0
b 1b
@@ -513,8 +513,9 @@
4:
ldr lr, [rSELF, #THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET]
blx lr
+ dmb ishst // need fence for making object's class visible
1:
- lsr r1, rINST, #8 // r1 <- A
+ lsr r1, rINST, #8 // r1 <- A
SET_VREG_OBJECT r0, r1 // fp[A] <- value
FETCH_ADVANCE_INST 2
GET_INST_OPCODE ip
@@ -523,7 +524,7 @@
mov r0, rSELF
ldr r1, [sp]
mov r2, rPC
- bl nterp_get_class_or_allocate_object
+ bl nterp_allocate_object
b 1b
3:
bl art_quick_read_barrier_mark_reg00
diff --git a/runtime/interpreter/mterp/armng/other.S b/runtime/interpreter/mterp/armng/other.S
index 3376808156..7dfed62e9b 100644
--- a/runtime/interpreter/mterp/armng/other.S
+++ b/runtime/interpreter/mterp/armng/other.S
@@ -67,7 +67,7 @@
b 1b
%def op_const_class():
-% op_const_object(jumbo="0", helper="nterp_get_class_or_allocate_object")
+% op_const_object(jumbo="0", helper="nterp_get_class")
%def op_const_method_handle():
% op_const_object(jumbo="0")
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index d70a846b7c..81e80ed885 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -26,7 +26,6 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_cache-inl.h"
#include "interpreter/interpreter_common.h"
-#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"
@@ -34,53 +33,6 @@
namespace art {
namespace interpreter {
-bool IsNterpSupported() {
- return !kPoisonHeapReferences && kUseReadBarrier;
-}
-
-bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
- Runtime* runtime = Runtime::Current();
- instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
- // If the runtime is interpreter only, we currently don't use nterp as some
- // parts of the runtime (like instrumentation) make assumption on an
- // interpreter-only runtime to always be in a switch-like interpreter.
- return IsNterpSupported() &&
- !instr->InterpretOnly() &&
- !runtime->IsAotCompiler() &&
- !runtime->GetInstrumentation()->NeedsSlowInterpreterForListeners() &&
- // An async exception has been thrown. We need to go to the switch interpreter. nterp doesn't
- // know how to deal with these so we could end up never dealing with it if we are in an
- // infinite loop.
- !runtime->AreAsyncExceptionsThrown() &&
- (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
-}
-
-// The entrypoint for nterp, which ArtMethods can directly point to.
-extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
-
-const void* GetNterpEntryPoint() {
- return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
-}
-
-/*
- * Verify some constants used by the nterp interpreter.
- */
-void CheckNterpAsmConstants() {
- /*
- * If we're using computed goto instruction transitions, make sure
- * none of the handlers overflows the byte limit. This won't tell
- * which one did, but if any one is too big the total size will
- * overflow.
- */
- const int width = kNterpHandlerSize;
- ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
- reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
- if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
- LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
- << "(did an instruction handler exceed " << width << " bytes?)";
- }
-}
-
inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
// The hotness we will add to a method when we perform a
// field/method/class/string lookup.
@@ -89,13 +41,12 @@ inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock
}
template<typename T>
-inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
- DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
+inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T value) {
self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}
template<typename T>
-inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T* value) {
+inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T* value) {
UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}
@@ -244,76 +195,57 @@ extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_
return dex_file->GetShorty(proto_idx);
}
-FLATTEN
-extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- UpdateHotness(caller);
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- InvokeType invoke_type = kStatic;
- uint16_t method_index = 0;
- switch (inst->Opcode()) {
- case Instruction::INVOKE_DIRECT: {
- method_index = inst->VRegB_35c();
- invoke_type = kDirect;
- break;
- }
-
- case Instruction::INVOKE_INTERFACE: {
- method_index = inst->VRegB_35c();
- invoke_type = kInterface;
- break;
- }
-
- case Instruction::INVOKE_STATIC: {
- method_index = inst->VRegB_35c();
- invoke_type = kStatic;
- break;
- }
+static constexpr uint8_t kInvalidInvokeType = 255u;
+static_assert(static_cast<uint8_t>(kMaxInvokeType) < kInvalidInvokeType);
+
+static constexpr uint8_t GetOpcodeInvokeType(uint8_t opcode) {
+ switch (opcode) {
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ return static_cast<uint8_t>(kDirect);
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ return static_cast<uint8_t>(kInterface);
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE:
+ return static_cast<uint8_t>(kStatic);
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_SUPER_RANGE:
+ return static_cast<uint8_t>(kSuper);
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ return static_cast<uint8_t>(kVirtual);
- case Instruction::INVOKE_SUPER: {
- method_index = inst->VRegB_35c();
- invoke_type = kSuper;
- break;
- }
- case Instruction::INVOKE_VIRTUAL: {
- method_index = inst->VRegB_35c();
- invoke_type = kVirtual;
- break;
- }
-
- case Instruction::INVOKE_DIRECT_RANGE: {
- method_index = inst->VRegB_3rc();
- invoke_type = kDirect;
- break;
- }
-
- case Instruction::INVOKE_INTERFACE_RANGE: {
- method_index = inst->VRegB_3rc();
- invoke_type = kInterface;
- break;
- }
+ default:
+ return kInvalidInvokeType;
+ }
+}
- case Instruction::INVOKE_STATIC_RANGE: {
- method_index = inst->VRegB_3rc();
- invoke_type = kStatic;
- break;
- }
+static constexpr std::array<uint8_t, 256u> GenerateOpcodeInvokeTypes() {
+ std::array<uint8_t, 256u> opcode_invoke_types{};
+ for (size_t opcode = 0u; opcode != opcode_invoke_types.size(); ++opcode) {
+ opcode_invoke_types[opcode] = GetOpcodeInvokeType(opcode);
+ }
+ return opcode_invoke_types;
+}
- case Instruction::INVOKE_SUPER_RANGE: {
- method_index = inst->VRegB_3rc();
- invoke_type = kSuper;
- break;
- }
+static constexpr std::array<uint8_t, 256u> kOpcodeInvokeTypes = GenerateOpcodeInvokeTypes();
- case Instruction::INVOKE_VIRTUAL_RANGE: {
- method_index = inst->VRegB_3rc();
- invoke_type = kVirtual;
- break;
- }
+FLATTEN
+extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ UpdateHotness(caller);
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ Instruction::Code opcode = inst->Opcode();
+ DCHECK(IsUint<8>(static_cast<std::underlying_type_t<Instruction::Code>>(opcode)));
+ uint8_t raw_invoke_type = kOpcodeInvokeTypes[opcode];
+ DCHECK_LE(raw_invoke_type, kMaxInvokeType);
+ InvokeType invoke_type = static_cast<InvokeType>(raw_invoke_type);
- default:
- LOG(FATAL) << "Unknown instruction " << inst->Opcode();
- }
+ // In release mode, this is just a simple load.
+ // In debug mode, this checks that we're using the correct instruction format.
+ uint16_t method_index =
+ (opcode >= Instruction::INVOKE_VIRTUAL_RANGE) ? inst->VRegB_3rc() : inst->VRegB_35c();
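
The rewrite replaces the per-opcode switch with a 256-entry table computed at
compile time, so the hot path is one indexed byte load plus one comparison to
pick the operand format. The same pattern in isolation (a sketch with stand-in
constants replacing ART's opcode and invoke-type enums; requires C++17):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the real opcode and invoke-type values (hypothetical).
    constexpr uint8_t kOpInvokeStatic = 0x71;
    constexpr uint8_t kOpInvokeStaticRange = 0x77;
    constexpr uint8_t kStaticType = 0;
    constexpr uint8_t kInvalid = 255;

    constexpr uint8_t Classify(uint8_t opcode) {
      switch (opcode) {
        case kOpInvokeStatic:
        case kOpInvokeStaticRange:
          return kStaticType;
        default:
          return kInvalid;
      }
    }

    constexpr std::array<uint8_t, 256> BuildTable() {
      std::array<uint8_t, 256> table{};
      for (size_t op = 0; op != table.size(); ++op) {
        table[op] = Classify(static_cast<uint8_t>(op));
      }
      return table;
    }

    // Evaluated at compile time and placed in read-only data;
    // each lookup compiles down to a single byte load.
    constexpr std::array<uint8_t, 256> kTable = BuildTable();

    int main() {
      std::printf("invoke-static -> %u\n",
                  static_cast<unsigned>(kTable[kOpInvokeStatic]));  // prints 0
    }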
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
ArtMethod* resolved_method = caller->SkipAccessChecks()
@@ -355,9 +287,7 @@ extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_
}
UpdateCache(self, dex_pc_ptr, result);
return result;
- } else if (resolved_method->GetDeclaringClass()->IsStringClass()
- && !resolved_method->IsStatic()
- && resolved_method->IsConstructor()) {
+ } else if (resolved_method->IsStringConstructor()) {
CHECK_NE(invoke_type, kSuper);
resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
// Or the result with 1 to notify to nterp this is a string init method. We
@@ -374,71 +304,23 @@ extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_
}
}
-FLATTEN
-static ArtField* ResolveFieldWithAccessChecks(Thread* self,
- ClassLinker* class_linker,
- uint16_t field_index,
- ArtMethod* caller,
- bool is_static,
- bool is_put,
- size_t resolve_field_type) // Resolve if not zero
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (caller->SkipAccessChecks()) {
- return class_linker->ResolveField(field_index, caller, is_static);
- }
-
- caller = caller->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(caller->GetDexCache()));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(caller->GetClassLoader()));
-
- ArtField* resolved_field = class_linker->ResolveFieldJLS(field_index,
- h_dex_cache,
- h_class_loader);
- if (resolved_field == nullptr) {
- return nullptr;
- }
-
- ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
- if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
- ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, caller);
- return nullptr;
- }
- ObjPtr<mirror::Class> referring_class = caller->GetDeclaringClass();
- if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
- resolved_field,
- caller->GetDexCache(),
- field_index))) {
- return nullptr;
- }
- if (UNLIKELY(is_put && resolved_field->IsFinal() && (fields_class != referring_class))) {
- ThrowIllegalAccessErrorFinalField(caller, resolved_field);
- return nullptr;
- }
- if (resolve_field_type != 0u && resolved_field->ResolveType() == nullptr) {
- DCHECK(self->IsExceptionPending());
- return nullptr;
- }
- return resolved_field;
-}
-
extern "C" size_t NterpGetStaticField(Thread* self,
ArtMethod* caller,
- uint16_t* dex_pc_ptr,
+ const uint16_t* dex_pc_ptr,
size_t resolve_field_type) // Resolve if not zero
REQUIRES_SHARED(Locks::mutator_lock_) {
UpdateHotness(caller);
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t field_index = inst->VRegB_21c();
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Instruction::Code opcode = inst->Opcode();
ArtField* resolved_field = ResolveFieldWithAccessChecks(
self,
class_linker,
field_index,
caller,
- /* is_static */ true,
- /* is_put */ IsInstructionSPut(inst->Opcode()),
+ /*is_static=*/ true,
+ /*is_put=*/ IsInstructionSPut(opcode),
resolve_field_type);
if (resolved_field == nullptr) {
@@ -461,27 +343,39 @@ extern "C" size_t NterpGetStaticField(Thread* self,
// check for it.
return reinterpret_cast<size_t>(resolved_field) | 1;
} else {
- UpdateCache(self, dex_pc_ptr, resolved_field);
+ // For sput-object, try to resolve the field type even if we were not requested to.
+ // Only if the field type is successfully resolved can we update the cache. If we
+ // fail to resolve the type, we clear the exception to keep interpreter
+ // semantics of not throwing when null is stored.
+ if (opcode == Instruction::SPUT_OBJECT &&
+ resolve_field_type == 0 &&
+ resolved_field->ResolveType() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ } else {
+ UpdateCache(self, dex_pc_ptr, resolved_field);
+ }
return reinterpret_cast<size_t>(resolved_field);
}
}
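
Returning resolved_field | 1 works because ArtField objects are at least
2-byte aligned, leaving bit 0 free to tell the assembly caller that the
declaring class still needs initialization. The low-bit tagging idiom on its
own (a sketch; Field here is a stand-in for ArtField):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Field { int offset; };  // stand-in for ArtField

    // Tag bit 0 to mean "slow path: class not visibly initialized". Valid
    // only because Field objects are at least 2-byte aligned, so bit 0 of
    // a real pointer is always zero.
    inline size_t TagNeedsInit(Field* f) {
      return reinterpret_cast<size_t>(f) | 1u;
    }

    inline bool NeedsInit(size_t tagged) { return (tagged & 1u) != 0u; }

    inline Field* Untag(size_t tagged) {
      return reinterpret_cast<Field*>(tagged & ~static_cast<size_t>(1));
    }

    int main() {
      alignas(4) static Field f{42};
      size_t tagged = TagNeedsInit(&f);
      assert(NeedsInit(tagged));
      assert(Untag(tagged)->offset == 42);
      return 0;
    }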
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
ArtMethod* caller,
- uint16_t* dex_pc_ptr,
+ const uint16_t* dex_pc_ptr,
size_t resolve_field_type) // Resolve if not zero
REQUIRES_SHARED(Locks::mutator_lock_) {
UpdateHotness(caller);
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t field_index = inst->VRegC_22c();
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Instruction::Code opcode = inst->Opcode();
ArtField* resolved_field = ResolveFieldWithAccessChecks(
self,
class_linker,
field_index,
caller,
- /* is_static */ false,
- /* is_put */ IsInstructionIPut(inst->Opcode()),
+ /*is_static=*/ false,
+ /*is_put=*/ IsInstructionIPut(opcode),
resolve_field_type);
if (resolved_field == nullptr) {
DCHECK(self->IsExceptionPending());
@@ -492,65 +386,84 @@ extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
// of volatile.
return -resolved_field->GetOffset().Uint32Value();
}
- UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
+ // For iput-object, try to resolve the field type even if we were not requested to.
+ // Only if the field type is successfully resolved can we update the cache. If we
+ // fail to resolve the type, we clear the exception to keep interpreter
+ // semantics of not throwing when null is stored.
+ if (opcode == Instruction::IPUT_OBJECT &&
+ resolve_field_type == 0 &&
+ resolved_field->ResolveType() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ } else {
+ UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
+ }
return resolved_field->GetOffset().Uint32Value();
}
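
The instance-field helper likewise folds two results into one 32-bit return
value: for a volatile field it returns the negated offset, so the assembly
fast path can branch on the sign to choose acquire/release accesses. The
encoding in isolation (a sketch; real offsets come from ArtField::GetOffset()):

    #include <cstdint>
    #include <cstdio>

    // Encode "is volatile" in the sign. Field offsets are small positive
    // values, so a negated offset can never collide with a real one.
    inline int32_t EncodeOffset(uint32_t offset, bool is_volatile) {
      return is_volatile ? -static_cast<int32_t>(offset)
                         : static_cast<int32_t>(offset);
    }

    int main() {
      int32_t encoded = EncodeOffset(16, /*is_volatile=*/ true);
      bool is_volatile = encoded < 0;
      uint32_t offset = static_cast<uint32_t>(is_volatile ? -encoded : encoded);
      std::printf("volatile=%d offset=%u\n", is_volatile, offset);
      // prints: volatile=1 offset=16
    }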
-extern "C" mirror::Object* NterpGetClassOrAllocateObject(Thread* self,
- ArtMethod* caller,
- uint16_t* dex_pc_ptr)
+extern "C" mirror::Object* NterpGetClass(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
UpdateHotness(caller);
const Instruction* inst = Instruction::At(dex_pc_ptr);
- dex::TypeIndex index;
- switch (inst->Opcode()) {
- case Instruction::NEW_INSTANCE:
- index = dex::TypeIndex(inst->VRegB_21c());
- break;
- case Instruction::CHECK_CAST:
- index = dex::TypeIndex(inst->VRegB_21c());
- break;
- case Instruction::INSTANCE_OF:
- index = dex::TypeIndex(inst->VRegC_22c());
- break;
- case Instruction::CONST_CLASS:
- index = dex::TypeIndex(inst->VRegB_21c());
- break;
- case Instruction::NEW_ARRAY:
- index = dex::TypeIndex(inst->VRegC_22c());
- break;
- default:
- LOG(FATAL) << "Unreachable";
+ Instruction::Code opcode = inst->Opcode();
+ DCHECK(opcode == Instruction::CHECK_CAST ||
+ opcode == Instruction::INSTANCE_OF ||
+ opcode == Instruction::CONST_CLASS ||
+ opcode == Instruction::NEW_ARRAY);
+
+ // In release mode, this is just a simple load.
+ // In debug mode, this checks that we're using the correct instruction format.
+ dex::TypeIndex index = dex::TypeIndex(
+ (opcode == Instruction::CHECK_CAST || opcode == Instruction::CONST_CLASS)
+ ? inst->VRegB_21c()
+ : inst->VRegC_22c());
+
+ ObjPtr<mirror::Class> c =
+ ResolveVerifyAndClinit(index,
+ caller,
+ self,
+ /* can_run_clinit= */ false,
+ /* verify_access= */ !caller->SkipAccessChecks());
+ if (UNLIKELY(c == nullptr)) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
}
+
+ UpdateCache(self, dex_pc_ptr, c.Ptr());
+ return c.Ptr();
+}
+
+extern "C" mirror::Object* NterpAllocateObject(Thread* self,
+ ArtMethod* caller,
+ uint16_t* dex_pc_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ UpdateHotness(caller);
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ DCHECK_EQ(inst->Opcode(), Instruction::NEW_INSTANCE);
+ dex::TypeIndex index = dex::TypeIndex(inst->VRegB_21c());
ObjPtr<mirror::Class> c =
ResolveVerifyAndClinit(index,
caller,
self,
/* can_run_clinit= */ false,
/* verify_access= */ !caller->SkipAccessChecks());
- if (c == nullptr) {
+ if (UNLIKELY(c == nullptr)) {
DCHECK(self->IsExceptionPending());
return nullptr;
}
- if (inst->Opcode() == Instruction::NEW_INSTANCE) {
- gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- if (UNLIKELY(c->IsStringClass())) {
- // We don't cache the class for strings as we need to special case their
- // allocation.
- return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
- } else {
- if (!c->IsFinalizable() && c->IsInstantiable()) {
- // Cache non-finalizable classes for next calls.
- UpdateCache(self, dex_pc_ptr, c.Ptr());
- }
- return AllocObjectFromCode(c, self, allocator_type).Ptr();
- }
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ if (UNLIKELY(c->IsStringClass())) {
+ // We don't cache the class for strings as we need to special case their
+ // allocation.
+ return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
} else {
- // For all other cases, cache the class.
- UpdateCache(self, dex_pc_ptr, c.Ptr());
+ if (!c->IsFinalizable() && c->IsInstantiable()) {
+ // Cache non-finalizable classes for next calls.
+ UpdateCache(self, dex_pc_ptr, c.Ptr());
+ }
+ return AllocObjectFromCode(c, self, allocator_type).Ptr();
}
- return c.Ptr();
}
extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
diff --git a/runtime/interpreter/mterp/nterp.h b/runtime/interpreter/mterp/nterp.h
index 1590b280e9..4d5af393bc 100644
--- a/runtime/interpreter/mterp/nterp.h
+++ b/runtime/interpreter/mterp/nterp.h
@@ -32,6 +32,7 @@ void CheckNterpAsmConstants();
bool IsNterpSupported();
bool CanRuntimeUseNterp();
const void* GetNterpEntryPoint();
+const void* GetNterpWithClinitEntryPoint();
constexpr uint16_t kNterpHotnessValue = 0;
diff --git a/runtime/interpreter/mterp/nterp_impl.cc b/runtime/interpreter/mterp/nterp_impl.cc
new file mode 100644
index 0000000000..f2a9855de1
--- /dev/null
+++ b/runtime/interpreter/mterp/nterp_impl.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arch/instruction_set.h"
+#include "interpreter/interpreter_common.h"
+#include "nterp.h"
+
+/*
+ * Definitions for targets that support nterp.
+ */
+
+namespace art {
+
+namespace interpreter {
+
+bool IsNterpSupported() {
+ return !kPoisonHeapReferences && kReserveMarkingRegister &&
+ kRuntimeISA != InstructionSet::kRiscv64;
+}
+
+bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+  // If the runtime is interpreter only, we currently don't use nterp, as some
+  // parts of the runtime (like instrumentation) assume that an
+  // interpreter-only runtime always runs a switch-like interpreter.
+ return IsNterpSupported() && !runtime->IsJavaDebuggable() && !instr->EntryExitStubsInstalled() &&
+ !instr->InterpretOnly() && !runtime->IsAotCompiler() &&
+ !instr->NeedsSlowInterpreterForListeners() &&
+ // An async exception has been thrown. We need to go to the switch interpreter. nterp
+ // doesn't know how to deal with these so we could end up never dealing with it if we are
+ // in an infinite loop.
+ !runtime->AreAsyncExceptionsThrown() &&
+ (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
+}
+
+// The entrypoint for nterp, which ArtMethods can directly point to.
+extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+
+const void* GetNterpEntryPoint() {
+ return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
+}
+
+// Another entrypoint, which does a clinit check at entry.
+extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+
+const void* GetNterpWithClinitEntryPoint() {
+ return reinterpret_cast<const void*>(interpreter::ExecuteNterpWithClinitImpl);
+}
+
+/*
+ * Verify some constants used by the nterp interpreter.
+ */
+void CheckNterpAsmConstants() {
+ /*
+ * If we're using computed goto instruction transitions, make sure
+ * none of the handlers overflows the byte limit. This won't tell
+ * which one did, but if any one is too big the total size will
+ * overflow.
+ */
+ const int width = kNterpHandlerSize;
+ ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
+ reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
+ if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
+    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
+               << " (did an instruction handler exceed " << width << " bytes?)";
+ }
+}
+
+} // namespace interpreter
+} // namespace art
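
CheckNterpAsmConstants guards the computed-goto layout: handlers are emitted
at a fixed stride, dispatch reads the handler at start + opcode * width, and
the only cheap global check is that the whole table spans exactly
kNumPackedOpcodes * width bytes. The invariant as a small self-contained check
(constants are illustrative, not the real ISA-specific values):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative values; the real ones are art::kNumPackedOpcodes and
      // the ISA-specific kNterpHandlerSize.
      const int kNumOpcodes = 256;
      const int kHandlerWidth = 128;

      // Fake table bounds standing in for artNterpAsmInstructionStart/End.
      uintptr_t start = 0x1000;
      uintptr_t end = start + static_cast<uintptr_t>(kNumOpcodes) * kHandlerWidth;

      // Computed-goto dispatch jumps to start + opcode * width, so every
      // handler must occupy exactly `width` bytes.
      ptrdiff_t size = static_cast<ptrdiff_t>(end - start);
      if (size == 0 || size != kNumOpcodes * kHandlerWidth) {
        std::fprintf(stderr, "unexpected asm interp size %td\n", size);
        return 1;
      }
      uintptr_t handler = start + 0x1a * kHandlerWidth;  // 0x1a is const-string
      std::printf("table OK, opcode 0x1a handler at %#zx\n",
                  static_cast<size_t>(handler));
    }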
diff --git a/runtime/interpreter/mterp/nterp_stub.cc b/runtime/interpreter/mterp/nterp_stub.cc
deleted file mode 100644
index 95d11c28a3..0000000000
--- a/runtime/interpreter/mterp/nterp_stub.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/enums.h"
-#include "base/locks.h"
-
-/*
- * Stub definitions for targets without nterp implementations.
- */
-
-namespace art {
-
-class ArtMethod;
-
-namespace interpreter {
-
-bool IsNterpSupported() {
- return false;
-}
-
-bool CanRuntimeUseNterp() {
- return false;
-}
-
-const void* GetNterpEntryPoint() {
- return nullptr;
-}
-
-void CheckNterpAsmConstants() {
-}
-
-extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_) {
- UNIMPLEMENTED(FATAL);
-}
-
-extern "C" void* artNterpAsmInstructionStart[] = { nullptr };
-extern "C" void* artNterpAsmInstructionEnd[] = { nullptr };
-
-} // namespace interpreter
-} // namespace art
diff --git a/runtime/interpreter/mterp/riscv64/arithmetic.S b/runtime/interpreter/mterp/riscv64/arithmetic.S
new file mode 100644
index 0000000000..30cb9035e9
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/arithmetic.S
@@ -0,0 +1,248 @@
+%def binop(preinstr="", result="r0", chkzero="0", instr=""):
+ unimp
+
+%def binop2addr(preinstr="", result="r0", chkzero="0", instr=""):
+ unimp
+
+%def binopLit16(result="r0", chkzero="0", instr=""):
+ unimp
+
+%def binopLit8(extract="unimp", result="r0", chkzero="0", instr=""):
+ unimp
+
+%def binopWide(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
+ unimp
+
+%def binopWide2addr(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
+ unimp
+
+%def unop(preinstr="", instr=""):
+ unimp
+
+%def unopNarrower(preinstr="", instr=""):
+ unimp
+
+%def unopWide(preinstr="", instr=""):
+ unimp
+
+%def unopWider(preinstr="", instr=""):
+ unimp
+
+%def op_add_int():
+ unimp
+
+%def op_add_int_2addr():
+ unimp
+
+%def op_add_int_lit16():
+ unimp
+
+%def op_add_int_lit8():
+ unimp
+
+%def op_add_long():
+ unimp
+
+%def op_add_long_2addr():
+ unimp
+
+%def op_and_int():
+ unimp
+
+%def op_and_int_2addr():
+ unimp
+
+%def op_and_int_lit16():
+ unimp
+
+%def op_and_int_lit8():
+ unimp
+
+%def op_and_long():
+ unimp
+
+%def op_and_long_2addr():
+ unimp
+
+%def op_cmp_long():
+ unimp
+
+%def op_div_int():
+ unimp
+
+%def op_div_int_2addr():
+ unimp
+
+%def op_div_int_lit16():
+ unimp
+
+%def op_div_int_lit8():
+ unimp
+
+%def op_div_long():
+ unimp
+
+%def op_div_long_2addr():
+ unimp
+
+%def op_int_to_byte():
+ unimp
+
+%def op_int_to_char():
+ unimp
+
+%def op_int_to_long():
+ unimp
+
+%def op_int_to_short():
+ unimp
+
+%def op_long_to_int():
+ unimp
+
+%def op_mul_int():
+ unimp
+
+%def op_mul_int_2addr():
+ unimp
+
+%def op_mul_int_lit16():
+ unimp
+
+%def op_mul_int_lit8():
+ unimp
+
+%def op_mul_long():
+ unimp
+
+%def op_mul_long_2addr():
+ unimp
+
+%def op_neg_int():
+ unimp
+
+%def op_neg_long():
+ unimp
+
+%def op_not_int():
+ unimp
+
+%def op_not_long():
+ unimp
+
+%def op_or_int():
+ unimp
+
+%def op_or_int_2addr():
+ unimp
+
+%def op_or_int_lit16():
+ unimp
+
+%def op_or_int_lit8():
+ unimp
+
+%def op_or_long():
+ unimp
+
+%def op_or_long_2addr():
+ unimp
+
+%def op_rem_int():
+ unimp
+
+%def op_rem_int_2addr():
+ unimp
+
+%def op_rem_int_lit16():
+ unimp
+
+%def op_rem_int_lit8():
+ unimp
+
+%def op_rem_long():
+ unimp
+
+%def op_rem_long_2addr():
+ unimp
+
+%def op_rsub_int():
+ unimp
+
+%def op_rsub_int_lit8():
+ unimp
+
+%def op_shl_int():
+ unimp
+
+%def op_shl_int_2addr():
+ unimp
+
+%def op_shl_int_lit8():
+ unimp
+
+%def op_shl_long():
+ unimp
+
+%def op_shl_long_2addr():
+ unimp
+
+%def op_shr_int():
+ unimp
+
+%def op_shr_int_2addr():
+ unimp
+
+%def op_shr_int_lit8():
+ unimp
+
+%def op_shr_long():
+ unimp
+
+%def op_shr_long_2addr():
+ unimp
+
+%def op_sub_int():
+ unimp
+
+%def op_sub_int_2addr():
+ unimp
+
+%def op_sub_long():
+ unimp
+
+%def op_sub_long_2addr():
+ unimp
+
+%def op_ushr_int():
+ unimp
+
+%def op_ushr_int_2addr():
+ unimp
+
+%def op_ushr_int_lit8():
+ unimp
+
+%def op_ushr_long():
+ unimp
+
+%def op_ushr_long_2addr():
+ unimp
+
+%def op_xor_int():
+ unimp
+
+%def op_xor_int_2addr():
+ unimp
+
+%def op_xor_int_lit16():
+ unimp
+
+%def op_xor_int_lit8():
+ unimp
+
+%def op_xor_long():
+ unimp
+
+%def op_xor_long_2addr():
+ unimp
diff --git a/runtime/interpreter/mterp/riscv64/array.S b/runtime/interpreter/mterp/riscv64/array.S
new file mode 100644
index 0000000000..e58f3844af
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/array.S
@@ -0,0 +1,57 @@
+%def op_aget(load="unimp", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0", is_object="0"):
+ unimp
+
+%def op_aget_boolean():
+% op_aget(load="unimp", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aget_byte():
+% op_aget(load="unimp", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aget_char():
+% op_aget(load="unimp", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aget_object():
+% op_aget(load="unimp", shift="2", data_offset="MIRROR_OBJECT_ARRAY_DATA_OFFSET", wide="0", is_object="1")
+
+%def op_aget_short():
+% op_aget(load="unimp", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aget_wide():
+% op_aget(load="unimp", shift="3", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1", is_object="0")
+
+%def op_aput(store="unimp", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0", is_object="0"):
+ unimp
+
+%def op_aput_boolean():
+% op_aput(store="unimp", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aput_byte():
+% op_aput(store="unimp", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aput_char():
+% op_aput(store="unimp", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aput_short():
+% op_aput(store="unimp", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET", wide="0", is_object="0")
+
+%def op_aput_wide():
+% op_aput(store="unimp", shift="3", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1", is_object="0")
+
+%def op_aput_object():
+% op_aput(store="unimp", shift="2", data_offset="MIRROR_OBJECT_ARRAY_DATA_OFFSET", wide="0", is_object="1")
+
+%def op_array_length():
+ unimp
+
+%def op_fill_array_data():
+ unimp
+
+%def op_filled_new_array(helper="nterp_filled_new_array"):
+ unimp
+
+%def op_filled_new_array_range():
+% op_filled_new_array(helper="nterp_filled_new_array_range")
+
+%def op_new_array():
+ unimp
+
diff --git a/runtime/interpreter/mterp/riscv64/control_flow.S b/runtime/interpreter/mterp/riscv64/control_flow.S
new file mode 100644
index 0000000000..6e263f3f69
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/control_flow.S
@@ -0,0 +1,74 @@
+%def bincmp(condition=""):
+ unimp
+
+%def zcmp(condition=""):
+ unimp
+
+%def op_goto():
+ unimp
+
+%def op_goto_16():
+ unimp
+
+%def op_goto_32():
+ unimp
+
+%def op_if_eq():
+% bincmp(condition="eq")
+
+%def op_if_eqz():
+% zcmp(condition="eq")
+
+%def op_if_ge():
+% bincmp(condition="ge")
+
+%def op_if_gez():
+% zcmp(condition="ge")
+
+%def op_if_gt():
+% bincmp(condition="gt")
+
+%def op_if_gtz():
+% zcmp(condition="gt")
+
+%def op_if_le():
+% bincmp(condition="le")
+
+%def op_if_lez():
+% zcmp(condition="le")
+
+%def op_if_lt():
+% bincmp(condition="lt")
+
+%def op_if_ltz():
+% zcmp(condition="lt")
+
+%def op_if_ne():
+% bincmp(condition="ne")
+
+%def op_if_nez():
+% zcmp(condition="ne")
+
+%def op_packed_switch(func="NterpDoPackedSwitch"):
+ unimp
+
+%def op_sparse_switch():
+% op_packed_switch(func="NterpDoSparseSwitch")
+
+/*
+ * Return a 32-bit value.
+ */
+%def op_return(is_object="0", is_void="0", is_wide="0"):
+ unimp
+
+%def op_return_object():
+% op_return(is_object="1", is_void="0", is_wide="0")
+
+%def op_return_void():
+% op_return(is_object="0", is_void="1", is_wide="0")
+
+%def op_return_wide():
+% op_return(is_object="0", is_void="0", is_wide="1")
+
+%def op_throw():
+ unimp
diff --git a/runtime/interpreter/mterp/riscv64/floating_point.S b/runtime/interpreter/mterp/riscv64/floating_point.S
new file mode 100644
index 0000000000..cd6c82af6a
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/floating_point.S
@@ -0,0 +1,128 @@
+%def fbinop(instr=""):
+ unimp
+
+%def fbinop2addr(instr=""):
+ unimp
+
+%def fbinopWide(instr=""):
+ unimp
+
+%def fbinopWide2addr(instr=""):
+ unimp
+
+%def funop(instr=""):
+ unimp
+
+%def funopNarrower(instr=""):
+ unimp
+
+%def funopWider(instr=""):
+ unimp
+
+%def op_add_double():
+ unimp
+
+%def op_add_double_2addr():
+ unimp
+
+%def op_add_float():
+ unimp
+
+%def op_add_float_2addr():
+ unimp
+
+%def op_cmpg_double():
+ unimp
+
+%def op_cmpg_float():
+ unimp
+
+%def op_cmpl_double():
+ unimp
+
+%def op_cmpl_float():
+ unimp
+
+%def op_div_double():
+ unimp
+
+%def op_div_double_2addr():
+ unimp
+
+%def op_div_float():
+ unimp
+
+%def op_div_float_2addr():
+ unimp
+
+%def op_double_to_float():
+ unimp
+
+%def op_double_to_int():
+ unimp
+
+%def op_double_to_long():
+ unimp
+
+%def op_float_to_double():
+ unimp
+
+%def op_float_to_int():
+ unimp
+
+%def op_float_to_long():
+ unimp
+
+%def op_int_to_double():
+ unimp
+
+%def op_int_to_float():
+ unimp
+
+%def op_long_to_double():
+ unimp
+
+%def op_long_to_float():
+ unimp
+
+%def op_mul_double():
+ unimp
+
+%def op_mul_double_2addr():
+ unimp
+
+%def op_mul_float():
+ unimp
+
+%def op_mul_float_2addr():
+ unimp
+
+%def op_neg_double():
+ unimp
+
+%def op_neg_float():
+ unimp
+
+%def op_rem_double():
+ unimp
+
+%def op_rem_double_2addr():
+ unimp
+
+%def op_rem_float():
+ unimp
+
+%def op_rem_float_2addr():
+ unimp
+
+%def op_sub_double():
+ unimp
+
+%def op_sub_double_2addr():
+ unimp
+
+%def op_sub_float():
+ unimp
+
+%def op_sub_float_2addr():
+ unimp
diff --git a/runtime/interpreter/mterp/riscv64/invoke.S b/runtime/interpreter/mterp/riscv64/invoke.S
new file mode 100644
index 0000000000..8d3b5220d4
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/invoke.S
@@ -0,0 +1,56 @@
+%def op_invoke_custom():
+ unimp
+
+%def op_invoke_custom_range():
+ unimp
+
+%def invoke_direct_or_super(helper="", range="", is_super=""):
+ unimp
+
+%def op_invoke_direct():
+ unimp
+
+%def op_invoke_direct_range():
+ unimp
+
+%def op_invoke_super():
+ unimp
+
+%def op_invoke_super_range():
+ unimp
+
+%def op_invoke_polymorphic():
+ unimp
+
+%def op_invoke_polymorphic_range():
+ unimp
+
+%def invoke_interface(range=""):
+ unimp
+
+%def op_invoke_interface_slow_path():
+ unimp
+
+%def op_invoke_interface():
+ unimp
+
+%def op_invoke_interface_range():
+ unimp
+
+%def invoke_static(helper=""):
+ unimp
+
+%def op_invoke_static():
+ unimp
+
+%def op_invoke_static_range():
+ unimp
+
+%def invoke_virtual(helper="", range=""):
+ unimp
+
+%def op_invoke_virtual():
+ unimp
+
+%def op_invoke_virtual_range():
+ unimp
diff --git a/runtime/interpreter/mterp/riscv64/main.S b/runtime/interpreter/mterp/riscv64/main.S
new file mode 100644
index 0000000000..b2ca460825
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/main.S
@@ -0,0 +1,132 @@
+%def header():
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "arch/riscv64/asm_support_riscv64.S"
+
+// An assembly entry that has an OatQuickMethodHeader prefix.
+.macro OAT_ENTRY name, end
+ .type \name, @function
+ .hidden \name
+ .global \name
+ .balign 16
+  // Padding of 3 * 4 bytes to get 16-byte alignment of the code entry.
+ .4byte 0, 0, 0
+ // OatQuickMethodHeader `data_` field. Note that the top two bits must be clear.
+ .4byte (\end - \name)
+\name:
+.endm
+
+.macro SIZE name
+ .size \name, .-\name
+.endm
+
+// Similar to ENTRY but without the CFI directives.
+.macro NAME_START name
+ .type \name, @function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ /* XXX Cache alignment for function entry */
+ .balign 16
+\name:
+.endm
+
+.macro NAME_END name
+ SIZE \name
+.endm
+
+%def entry():
+/*
+ * ArtMethod entry point.
+ *
+ * On entry:
+ * XXX ArtMethod* callee
+ * rest method parameters
+ */
+
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ // For simplicity, we don't do a read barrier here, but instead rely
+ // on art_quick_resolution_trampoline to always have a suspend point before
+ // calling back here.
+ unimp
+EndExecuteNterpWithClinitImpl:
+
+OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
+ .cfi_startproc
+ unimp
+
+%def fetch_from_thread_cache(dest_reg, miss_label):
+
+%def footer():
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+
+// Enclose all code below in a symbol (which gets printed in backtraces).
+NAME_START nterp_helper
+// This is the logical end of ExecuteNterpImpl, where the frame info applies.
+// EndExecuteNterpImpl includes the methods below as we want the runtime to
+// see them as part of the Nterp PCs.
+.cfi_endproc
+NAME_END nterp_helper
+
+// This is the end of PCs contained by the OatQuickMethodHeader created for the interpreter
+// entry point.
+ .type EndExecuteNterpImpl, @function
+ .hidden EndExecuteNterpImpl
+ .global EndExecuteNterpImpl
+EndExecuteNterpImpl:
+
+// gen_mterp.py will inline the following definitions
+// within [ExecuteNterpImpl, EndExecuteNterpImpl).
+%def instruction_start():
+ .type artNterpAsmInstructionStart, @function
+ .hidden artNterpAsmInstructionStart
+ .global artNterpAsmInstructionStart
+artNterpAsmInstructionStart = .L_op_nop
+ .text
+
+%def instruction_end():
+ .type artNterpAsmInstructionEnd, @function
+ .hidden artNterpAsmInstructionEnd
+ .global artNterpAsmInstructionEnd
+artNterpAsmInstructionEnd:
+ unimp
+
+%def opcode_pre():
+% pass
+%def opcode_name_prefix():
+% return "nterp_"
+%def opcode_start():
+ NAME_START nterp_${opcode}
+%def opcode_end():
+ NAME_END nterp_${opcode}
+ unimp
+%def opcode_slow_path_start(name):
+ NAME_START ${name}
+%def opcode_slow_path_end(name):
+ NAME_END ${name}
diff --git a/runtime/interpreter/mterp/riscv64/object.S b/runtime/interpreter/mterp/riscv64/object.S
new file mode 100644
index 0000000000..449df1e07f
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/object.S
@@ -0,0 +1,109 @@
+%def op_check_cast():
+ unimp
+
+%def op_check_cast_slow_path():
+ unimp
+
+%def op_instance_of():
+ unimp
+
+%def op_instance_of_slow_path():
+ unimp
+
+%def op_iget_boolean():
+% op_iget(load="ldrb", wide="0", is_object="0")
+
+%def op_iget_byte():
+% op_iget(load="ldrsb", wide="0", is_object="0")
+
+%def op_iget_char():
+% op_iget(load="ldrh", wide="0", is_object="0")
+
+%def op_iget_short():
+% op_iget(load="ldrsh", wide="0", is_object="0")
+
+%def op_iget(load="ldr", wide="0", is_object="0"):
+ unimp
+
+%def op_iget_slow_path(load, wide, is_object):
+
+%def op_iget_wide():
+% op_iget(load="ldr", wide="1", is_object="0")
+
+%def op_iget_object():
+% op_iget(load="ldr", wide="0", is_object="1")
+
+%def op_iput_boolean():
+% op_iput(store="strb", wide="0", is_object="0")
+
+%def op_iput_byte():
+% op_iput(store="strb", wide="0", is_object="0")
+
+%def op_iput_char():
+% op_iput(store="strh", wide="0", is_object="0")
+
+%def op_iput_short():
+% op_iput(store="strh", wide="0", is_object="0")
+
+%def op_iput(store="str", wide="0", is_object="0"):
+ unimp
+
+%def op_iput_slow_path(store, wide, is_object):
+ unimp
+
+%def op_iput_wide():
+% op_iput(store="str", wide="1", is_object="0")
+
+%def op_iput_object():
+% op_iput(store="str", wide="0", is_object="1")
+
+%def op_sget_boolean():
+% op_sget(load="ldrb", wide="0", is_object="0")
+
+%def op_sget_byte():
+% op_sget(load="ldrsb", wide="0", is_object="0")
+
+%def op_sget_char():
+% op_sget(load="ldrh", wide="0", is_object="0")
+
+%def op_sget_short():
+% op_sget(load="ldrsh", wide="0", is_object="0")
+
+%def op_sget(load="ldr", wide="0", is_object="0"):
+ unimp
+
+%def op_sget_slow_path(load="ldr", wide="0", is_object="0"):
+ unimp
+
+%def op_sget_wide():
+% op_sget(load="ldr", wide="1", is_object="0")
+
+%def op_sget_object():
+% op_sget(load="ldr", wide="0", is_object="1")
+
+%def op_sput_boolean():
+% op_sput(store="strb", wide="0", is_object="0")
+
+%def op_sput_byte():
+% op_sput(store="strb", wide="0", is_object="0")
+
+%def op_sput_char():
+% op_sput(store="strh", wide="0", is_object="0")
+
+%def op_sput_short():
+% op_sput(store="strh", wide="0", is_object="0")
+
+%def op_sput(store="str", wide="0", is_object="0"):
+ unimp
+
+%def op_sput_slow_path(store, wide, is_object):
+ unimp
+
+%def op_sput_wide():
+% op_sput(store="str", wide="1", is_object="0")
+
+%def op_sput_object():
+% op_sput(store="str", wide="0", is_object="1")
+
+%def op_new_instance():
+ unimp
diff --git a/runtime/interpreter/mterp/riscv64/other.S b/runtime/interpreter/mterp/riscv64/other.S
new file mode 100644
index 0000000000..0e7ba95bab
--- /dev/null
+++ b/runtime/interpreter/mterp/riscv64/other.S
@@ -0,0 +1,167 @@
+%def unused():
+ ebreak
+%def op_const():
+ unimp
+
+%def op_const_16():
+ unimp
+%def op_const_4():
+ unimp
+%def op_const_high16():
+ unimp
+%def op_const_object(jumbo="0", helper="nterp_load_object"):
+ unimp
+%def op_const_class():
+ unimp
+%def op_const_method_handle():
+ unimp
+%def op_const_method_type():
+ unimp
+%def op_const_string():
+ unimp
+%def op_const_string_jumbo():
+ unimp
+%def op_const_wide():
+ unimp
+%def op_const_wide_16():
+ unimp
+%def op_const_wide_32():
+ unimp
+%def op_const_wide_high16():
+ unimp
+%def op_monitor_enter():
+ unimp
+%def op_monitor_exit():
+ unimp
+%def op_move(is_object="0"):
+ unimp
+%def op_move_16(is_object="0"):
+ unimp
+%def op_move_exception():
+ unimp
+%def op_move_from16(is_object="0"):
+ unimp
+%def op_move_object():
+ unimp
+%def op_move_object_16():
+ unimp
+%def op_move_object_from16():
+ unimp
+%def op_move_result(is_object="0"):
+ unimp
+%def op_move_result_object():
+ unimp
+%def op_move_result_wide():
+ unimp
+%def op_move_wide():
+ unimp
+%def op_move_wide_16():
+ unimp
+%def op_move_wide_from16():
+ unimp
+
+%def op_nop():
+ unimp
+
+%def op_unused_3e():
+% unused()
+
+%def op_unused_3f():
+% unused()
+
+%def op_unused_40():
+% unused()
+
+%def op_unused_41():
+% unused()
+
+%def op_unused_42():
+% unused()
+
+%def op_unused_43():
+% unused()
+
+%def op_unused_73():
+% unused()
+
+%def op_unused_79():
+% unused()
+
+%def op_unused_7a():
+% unused()
+
+%def op_unused_e3():
+% unused()
+
+%def op_unused_e4():
+% unused()
+
+%def op_unused_e5():
+% unused()
+
+%def op_unused_e6():
+% unused()
+
+%def op_unused_e7():
+% unused()
+
+%def op_unused_e8():
+% unused()
+
+%def op_unused_e9():
+% unused()
+
+%def op_unused_ea():
+% unused()
+
+%def op_unused_eb():
+% unused()
+
+%def op_unused_ec():
+% unused()
+
+%def op_unused_ed():
+% unused()
+
+%def op_unused_ee():
+% unused()
+
+%def op_unused_ef():
+% unused()
+
+%def op_unused_f0():
+% unused()
+
+%def op_unused_f1():
+% unused()
+
+%def op_unused_f2():
+% unused()
+
+%def op_unused_f3():
+% unused()
+
+%def op_unused_f4():
+% unused()
+
+%def op_unused_f5():
+% unused()
+
+%def op_unused_f6():
+% unused()
+
+%def op_unused_f7():
+% unused()
+
+%def op_unused_f8():
+% unused()
+
+%def op_unused_f9():
+% unused()
+
+%def op_unused_fc():
+% unused()
+
+%def op_unused_fd():
+% unused()
+
diff --git a/runtime/interpreter/mterp/x86_64ng/main.S b/runtime/interpreter/mterp/x86_64ng/main.S
index bd191c09ec..9ad7efa95b 100644
--- a/runtime/interpreter/mterp/x86_64ng/main.S
+++ b/runtime/interpreter/mterp/x86_64ng/main.S
@@ -208,7 +208,7 @@
ASM_HIDDEN SYMBOL(\name)
.global SYMBOL(\name)
.balign 16
- // Padding of 3 * 8 bytes to get 16 bytes alignment of code entry.
+ // Padding of 3 * 4 bytes to get 16-byte alignment of the code entry.
.long 0
.long 0
.long 0
@@ -237,7 +237,9 @@ DEFINE_FUNCTION \name
SETUP_SAVE_REFS_ONLY_FRAME
call \helper
RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_OR_DELIVER_PENDING_EXCEPTION
+ cmpq LITERAL(0), %gs:THREAD_EXCEPTION_OFFSET
+ jne nterp_deliver_pending_exception
+ ret
END_FUNCTION \name
.endm
@@ -1694,6 +1696,21 @@ END_FUNCTION \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ // For simplicity, we don't do a read barrier here, but instead rely
+ // on art_quick_resolution_trampoline to always have a suspend point before
+ // calling back here.
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%rdi), %r10d
+ cmpb $$(MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE), MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET(%r10d)
+ jae ExecuteNterpImpl
+ cmpb $$(MIRROR_CLASS_IS_INITIALIZING_VALUE), MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET(%r10d)
+ jb art_quick_resolution_trampoline
+ movl MIRROR_CLASS_CLINIT_THREAD_ID_OFFSET(%r10d), %r10d
+ cmpl %r10d, rSELF:THREAD_TID_OFFSET
+ je ExecuteNterpImpl
+ jmp art_quick_resolution_trampoline
+EndExecuteNterpWithClinitImpl:
+
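
In C++ terms, the entry point above makes a three-way decision on the
declaring class's status; a hedged rendering of the same logic (the enum
values are illustrative, and ART actually packs the status into the high bits
of a 32-bit field in mirror::Class):

    #include <cstdint>

    // Illustrative ordering only; the real art::ClassStatus has more states.
    enum class ClassStatus : uint8_t {
      kResolved = 0,
      kInitializing = 1,
      kInitialized = 2,
      kVisiblyInitialized = 3,
    };

    // Mirrors the assembly: enter nterp directly if the class is visibly
    // initialized, or if this thread is the one currently running <clinit>
    // (a reentrant call); otherwise take art_quick_resolution_trampoline,
    // which performs the initialization and has a suspend point.
    bool CanEnterNterpDirectly(ClassStatus status,
                               uint32_t clinit_thread_id,
                               uint32_t self_thread_id) {
      if (status >= ClassStatus::kVisiblyInitialized) {
        return true;
      }
      if (status < ClassStatus::kInitializing) {
        return false;  // not even initializing: trampoline must initialize
      }
      return clinit_thread_id == self_thread_id;
    }

    int main() {
      // Class initializing on another thread (ids 1 vs 2): slow path.
      return CanEnterNterpDirectly(ClassStatus::kInitializing, 1u, 2u) ? 1 : 0;
    }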
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
.cfi_def_cfa rsp, 8
@@ -1923,7 +1940,7 @@ NterpNewInstance:
movq rSELF:THREAD_SELF_OFFSET, %rdi
movq 0(%rsp), %rsi
movq rPC, %rdx
- call nterp_get_class_or_allocate_object
+ call nterp_allocate_object
jmp 1b
3:
// 07 is %rdi
@@ -1949,7 +1966,7 @@ NterpNewArray:
movq rSELF:THREAD_SELF_OFFSET, %rdi
movq 0(%rsp), %rsi
movq rPC, %rdx
- call nterp_get_class_or_allocate_object
+ call nterp_get_class
movq %rax, %rdi
jmp 1b
3:
@@ -2295,11 +2312,16 @@ NTERP_TRAMPOLINE nterp_get_static_field, NterpGetStaticField
NTERP_TRAMPOLINE nterp_get_instance_field_offset, NterpGetInstanceFieldOffset
NTERP_TRAMPOLINE nterp_filled_new_array, NterpFilledNewArray
NTERP_TRAMPOLINE nterp_filled_new_array_range, NterpFilledNewArrayRange
-NTERP_TRAMPOLINE nterp_get_class_or_allocate_object, NterpGetClassOrAllocateObject
+NTERP_TRAMPOLINE nterp_get_class, NterpGetClass
+NTERP_TRAMPOLINE nterp_allocate_object, NterpAllocateObject
NTERP_TRAMPOLINE nterp_get_method, NterpGetMethod
NTERP_TRAMPOLINE nterp_hot_method, NterpHotMethod
NTERP_TRAMPOLINE nterp_load_object, NterpLoadObject
+DEFINE_FUNCTION nterp_deliver_pending_exception
+ DELIVER_PENDING_EXCEPTION
+END_FUNCTION nterp_deliver_pending_exception
+
// gen_mterp.py will inline the following definitions
// within [ExecuteNterpImpl, EndExecuteNterpImpl).
%def instruction_end():
diff --git a/runtime/interpreter/mterp/x86_64ng/object.S b/runtime/interpreter/mterp/x86_64ng/object.S
index 140ea754e7..21a6e6710d 100644
--- a/runtime/interpreter/mterp/x86_64ng/object.S
+++ b/runtime/interpreter/mterp/x86_64ng/object.S
@@ -16,7 +16,7 @@
movq rSELF:THREAD_SELF_OFFSET, %rdi
movq 0(%rsp), %rsi
movq rPC, %rdx
- call nterp_get_class_or_allocate_object
+ call nterp_get_class
movq %rax, %rsi
jmp 1b
@@ -149,7 +149,7 @@
movq rSELF:THREAD_SELF_OFFSET, %rdi
movq 0(%rsp), %rsi
movq rPC, %rdx
- call nterp_get_class_or_allocate_object
+ call nterp_get_class
movq %rax, %rsi
jmp .L${opcode}_start
diff --git a/runtime/interpreter/mterp/x86_64ng/other.S b/runtime/interpreter/mterp/x86_64ng/other.S
index a72ee58e4c..f7890862e8 100644
--- a/runtime/interpreter/mterp/x86_64ng/other.S
+++ b/runtime/interpreter/mterp/x86_64ng/other.S
@@ -53,7 +53,7 @@
jmp 1b
%def op_const_class():
-% op_const_object(jumbo="0", helper="nterp_get_class_or_allocate_object")
+% op_const_object(jumbo="0", helper="nterp_get_class")
%def op_const_method_handle():
% op_const_object(jumbo="0")
diff --git a/runtime/interpreter/mterp/x86ng/main.S b/runtime/interpreter/mterp/x86ng/main.S
index db8519b8f7..5b0edd4817 100644
--- a/runtime/interpreter/mterp/x86ng/main.S
+++ b/runtime/interpreter/mterp/x86ng/main.S
@@ -275,7 +275,9 @@ DEFINE_FUNCTION \name
RESTORE_IBASE
FETCH_INST_CLEAR_OPCODE
RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_OR_DELIVER_PENDING_EXCEPTION
+ cmpl LITERAL(0), %fs:THREAD_EXCEPTION_OFFSET
+ jne nterp_deliver_pending_exception
+ ret
END_FUNCTION \name
.endm
@@ -1757,6 +1759,27 @@ END_FUNCTION \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ push %esi
+ // For simplicity, we don't do a read barrier here, but instead rely
+ // on art_quick_resolution_trampoline to always have a suspend point before
+ // calling back here.
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%eax), %esi
+ cmpb $$(MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE), MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET(%esi)
+ jae .Lcontinue_execute_nterp
+ cmpb $$(MIRROR_CLASS_IS_INITIALIZING_VALUE), MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET(%esi)
+ jb .Linvoke_trampoline
+ movl MIRROR_CLASS_CLINIT_THREAD_ID_OFFSET(%esi), %esi
+ cmpl %esi, rSELF:THREAD_TID_OFFSET
+ je .Lcontinue_execute_nterp
+.Linvoke_trampoline:
+ pop %esi
+ jmp art_quick_resolution_trampoline
+.Lcontinue_execute_nterp:
+ pop %esi
+ jmp ExecuteNterpImpl
+EndExecuteNterpWithClinitImpl:
+
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
.cfi_def_cfa esp, 4
@@ -1980,7 +2003,7 @@ NterpNewInstance:
movl rSELF:THREAD_SELF_OFFSET, ARG0
movl 0(%esp), ARG1
movl rPC, ARG2
- call nterp_get_class_or_allocate_object
+ call nterp_allocate_object
jmp 1b
3:
// 00 is %eax
@@ -2008,7 +2031,7 @@ NterpNewArray:
movl rSELF:THREAD_SELF_OFFSET, ARG0
movl 0(%esp), ARG1
movl rPC, ARG2
- call nterp_get_class_or_allocate_object
+ call nterp_get_class
jmp 1b
3:
// 00 is %eax
@@ -2339,11 +2362,16 @@ NTERP_TRAMPOLINE nterp_get_static_field, NterpGetStaticField
NTERP_TRAMPOLINE nterp_get_instance_field_offset, NterpGetInstanceFieldOffset
NTERP_TRAMPOLINE nterp_filled_new_array, NterpFilledNewArray
NTERP_TRAMPOLINE nterp_filled_new_array_range, NterpFilledNewArrayRange
-NTERP_TRAMPOLINE nterp_get_class_or_allocate_object, NterpGetClassOrAllocateObject
+NTERP_TRAMPOLINE nterp_get_class, NterpGetClass
+NTERP_TRAMPOLINE nterp_allocate_object, NterpAllocateObject
NTERP_TRAMPOLINE nterp_get_method, NterpGetMethod
NTERP_TRAMPOLINE nterp_hot_method, NterpHotMethod
NTERP_TRAMPOLINE nterp_load_object, NterpLoadObject
+DEFINE_FUNCTION nterp_deliver_pending_exception
+ DELIVER_PENDING_EXCEPTION
+END_FUNCTION nterp_deliver_pending_exception
+
// gen_mterp.py will inline the following definitions
// within [ExecuteNterpImpl, EndExecuteNterpImpl).
%def instruction_end():
diff --git a/runtime/interpreter/mterp/x86ng/object.S b/runtime/interpreter/mterp/x86ng/object.S
index 1d11e10021..39091cec3a 100644
--- a/runtime/interpreter/mterp/x86ng/object.S
+++ b/runtime/interpreter/mterp/x86ng/object.S
@@ -16,7 +16,7 @@
movl rSELF:THREAD_SELF_OFFSET, ARG0
movl 0(%esp), ARG1
movl rPC, ARG2
- call nterp_get_class_or_allocate_object
+ call nterp_get_class
movl %eax, %ecx
jmp 1b
@@ -58,7 +58,7 @@
movl rSELF:THREAD_SELF_OFFSET, ARG0
movl 0(%esp), ARG1
movl rPC, ARG2
- call nterp_get_class_or_allocate_object
+ call nterp_get_class
movl %eax, %ecx
jmp 1b
diff --git a/runtime/interpreter/mterp/x86ng/other.S b/runtime/interpreter/mterp/x86ng/other.S
index 4cf982c1c1..6dd1ce3553 100644
--- a/runtime/interpreter/mterp/x86ng/other.S
+++ b/runtime/interpreter/mterp/x86ng/other.S
@@ -53,7 +53,7 @@
jmp 1b
%def op_const_class():
-% op_const_object(jumbo="0", helper="nterp_get_class_or_allocate_object")
+% op_const_object(jumbo="0", helper="nterp_get_class")
%def op_const_method_handle():
% op_const_object(jumbo="0")
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 8cb2b33a07..7ca2423856 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -54,7 +54,7 @@ class ShadowFrame {
// We have been requested to notify when this frame gets popped.
kNotifyFramePop = 1 << 0,
// We have been asked to pop this frame off the stack as soon as possible.
- kForcePopFrame = 1 << 1,
+ kForcePopFrame = 1 << 1,
// We have been asked to re-execute the last instruction.
kForceRetryInst = 1 << 2,
// Mark that we expect the next frame to retry the last instruction (used by instrumentation and
@@ -62,6 +62,9 @@ class ShadowFrame {
kSkipMethodExitEvents = 1 << 3,
// Used to suppress exception events caused by other instrumentation events.
kSkipNextExceptionEvent = 1 << 4,
+ // Used to specify if DexPCMoveEvents have to be reported. These events will
+ // only be reported if the method has a breakpoint set.
+ kNotifyDexPcMoveEvents = 1 << 5,
};
public:
@@ -72,10 +75,11 @@ class ShadowFrame {
}
// Create ShadowFrame in heap for deoptimization.
- static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
- ArtMethod* method, uint32_t dex_pc) {
+ static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs,
+ ArtMethod* method,
+ uint32_t dex_pc) {
uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
- return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory);
+ return CreateShadowFrameImpl(num_vregs, method, dex_pc, memory);
}
// Delete a ShadowFrame allocated on the heap for deoptimization.
@@ -87,12 +91,11 @@ class ShadowFrame {
// Create a shadow frame in a fresh alloca. This needs to be in the context of the caller.
// Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro.
-#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({ \
+#define CREATE_SHADOW_FRAME(num_vregs, method, dex_pc) ({ \
size_t frame_size = ShadowFrame::ComputeSize(num_vregs); \
void* alloca_mem = alloca(frame_size); \
ShadowFrameAllocaUniquePtr( \
- ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc), \
- (alloca_mem))); \
+ ShadowFrame::CreateShadowFrameImpl((num_vregs), (method), (dex_pc), (alloca_mem))); \
})
~ShadowFrame() {}
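
The macro form is essential here: alloca() memory is released when the
function that called alloca() returns, so a helper function would free the
frame before the interpreter could use it. Only macro expansion puts the
alloca() in the caller's own frame. The pitfall in miniature (alloca.h as on
glibc; some platforms use malloc.h):

    #include <alloca.h>
    #include <cstdio>

    // Must be a macro: the buffer lives until the *enclosing* function
    // returns. Wrapping alloca() in a function would free it immediately.
    #define STACK_BUFFER(n) (static_cast<char*>(alloca(n)))

    void UseBuffer() {
      char* buf = STACK_BUFFER(64);  // valid until UseBuffer() returns
      std::snprintf(buf, 64, "stack-allocated");
      std::puts(buf);
    }

    int main() {
      UseBuffer();
    }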
@@ -132,9 +135,14 @@ class ShadowFrame {
void SetLink(ShadowFrame* frame) {
DCHECK_NE(this, frame);
+ DCHECK_EQ(link_, nullptr);
link_ = frame;
}
+ void ClearLink() {
+ link_ = nullptr;
+ }
+
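
With the link no longer passed at construction, the new DCHECK enforces a
strict discipline: a frame is linked at most once and must be explicitly
unlinked before reuse. The invariant with a stand-in type:

    #include <cassert>
    #include <cstddef>

    // Stand-in illustrating the invariant the DCHECK above enforces.
    struct Frame {
      Frame* link = nullptr;
      void SetLink(Frame* f) {
        assert(link == nullptr);  // mirrors DCHECK_EQ(link_, nullptr)
        link = f;
      }
      void ClearLink() { link = nullptr; }
    };

    int main() {
      Frame caller, callee;
      callee.SetLink(&caller);  // push onto the interpreter stack
      callee.ClearLink();       // must unlink before any later SetLink
      callee.SetLink(&caller);  // legal again after ClearLink
    }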
int32_t GetVReg(size_t i) const {
DCHECK_LT(i, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
@@ -169,14 +177,14 @@ class ShadowFrame {
int64_t GetVRegLong(size_t i) const {
DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
- typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
+ using unaligned_int64 __attribute__((aligned(4))) = const int64_t;
return *reinterpret_cast<unaligned_int64*>(vreg);
}
double GetVRegDouble(size_t i) const {
DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
- typedef const double unaligned_double __attribute__ ((aligned (4)));
+ using unaligned_double __attribute__((aligned(4))) = const double;
return *reinterpret_cast<unaligned_double*>(vreg);
}
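
vregs_ is an array of 4-byte slots, so a 64-bit value read across two slots
may be only 4-byte aligned. The aligned(4) type attribute (a GCC/Clang
extension) lowers the type's assumed alignment so the compiler emits loads
that tolerate it; the diff merely modernizes the typedef spelling to a using
alias. The idiom on its own:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Read a 64-bit value from two adjacent 32-bit register slots. The
    // aligned(4) attribute tells the compiler the pointee may be only
    // 4-byte aligned, so it must not assume natural 8-byte alignment.
    int64_t ReadLong(const uint32_t* vregs, size_t i) {
      using unaligned_int64 __attribute__((aligned(4))) = const int64_t;
      return *reinterpret_cast<unaligned_int64*>(&vregs[i]);
    }

    int main() {
      uint32_t vregs[4] = {0x89abcdefu, 0x01234567u, 0u, 0u};
      // On little-endian targets this prints 123456789abcdef.
      std::printf("%llx\n",
                  static_cast<unsigned long long>(ReadLong(vregs, 0)));
    }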
@@ -221,7 +229,7 @@ class ShadowFrame {
void SetVRegLong(size_t i, int64_t val) {
DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
- typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
+ using unaligned_int64 __attribute__((aligned(4))) = int64_t;
*reinterpret_cast<unaligned_int64*>(vreg) = val;
// This is needed for moving collectors since these can update the vreg references if they
// happen to agree with references in the reference array.
@@ -232,7 +240,7 @@ class ShadowFrame {
void SetVRegDouble(size_t i, double val) {
DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
- typedef double unaligned_double __attribute__ ((aligned (4)));
+ using unaligned_double __attribute__((aligned(4))) = double;
*reinterpret_cast<unaligned_double*>(vreg) = val;
// This is needed for moving collectors since these can update the vreg references if they
// happen to agree with references in the reference array.
@@ -314,11 +322,10 @@ class ShadowFrame {
// Create ShadowFrame for interpreter using provided memory.
static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
- ShadowFrame* link,
ArtMethod* method,
uint32_t dex_pc,
void* memory) {
- return new (memory) ShadowFrame(num_vregs, link, method, dex_pc);
+ return new (memory) ShadowFrame(num_vregs, method, dex_pc);
}
const uint16_t* GetDexPCPtr() {
@@ -373,6 +380,14 @@ class ShadowFrame {
UpdateFrameFlag(enable, FrameFlags::kSkipNextExceptionEvent);
}
+ bool GetNotifyDexPcMoveEvents() const {
+ return GetFrameFlag(FrameFlags::kNotifyDexPcMoveEvents);
+ }
+
+ void SetNotifyDexPcMoveEvents(bool enable) {
+ UpdateFrameFlag(enable, FrameFlags::kNotifyDexPcMoveEvents);
+ }
+
void CheckConsistentVRegs() const {
if (kIsDebugBuild) {
// A shadow frame visible to GC requires the following rule: for a given vreg,
@@ -385,8 +400,8 @@ class ShadowFrame {
}
private:
- ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method, uint32_t dex_pc)
- : link_(link),
+ ShadowFrame(uint32_t num_vregs, ArtMethod* method, uint32_t dex_pc)
+ : link_(nullptr),
method_(method),
result_register_(nullptr),
dex_pc_ptr_(nullptr),
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 62051ee9db..32ed4300b7 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -47,6 +47,7 @@
#include "mirror/array-alloc-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class-alloc-inl.h"
+#include "mirror/class.h"
#include "mirror/executable-inl.h"
#include "mirror/field.h"
#include "mirror/method.h"
@@ -61,7 +62,7 @@
#include "thread-inl.h"
#include "transaction.h"
#include "unstarted_runtime_list.h"
-#include "well_known_classes.h"
+#include "well_known_classes-inl.h"
namespace art {
namespace interpreter {
@@ -138,6 +139,10 @@ static void UnstartedRuntimeFindClass(Thread* self,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ObjPtr<mirror::Class> found = class_linker->FindClass(self, descriptor.c_str(), class_loader);
+ if (found != nullptr && !found->CheckIsVisibleWithTargetSdk(self)) {
+ CHECK(self->IsExceptionPending());
+ return;
+ }
if (found != nullptr && initialize_class) {
StackHandleScope<1> hs(self);
HandleWrapperObjPtr<mirror::Class> h_class = hs.NewHandleWrapper(&found);
@@ -231,8 +236,7 @@ void UnstartedRuntime::UnstartedClassForNameCommon(Thread* self,
class_loader = nullptr;
}
- ScopedObjectAccessUnchecked soa(self);
- if (class_loader != nullptr && !ClassLinker::IsBootClassLoader(soa, class_loader)) {
+ if (class_loader != nullptr && !ClassLinker::IsBootClassLoader(class_loader)) {
AbortTransactionOrFail(self,
"Only the boot classloader is supported: %s",
mirror::Object::PrettyTypeOf(class_loader).c_str());
@@ -659,8 +663,7 @@ void UnstartedRuntime::UnstartedClassLoaderGetResourceAsStream(
StackHandleScope<1> hs(self);
Handle<mirror::Class> this_classloader_class(hs.NewHandle(this_obj->GetClass()));
- if (self->DecodeJObject(WellKnownClasses::java_lang_BootClassLoader) !=
- this_classloader_class.Get()) {
+ if (WellKnownClasses::java_lang_BootClassLoader != this_classloader_class.Get()) {
AbortTransactionOrFail(self,
"Unsupported classloader type %s for getResourceAsStream",
mirror::Class::PrettyClass(this_classloader_class.Get()).c_str());
@@ -1113,18 +1116,14 @@ void UnstartedRuntime::UnstartedThreadCurrentThread(
// thread as unstarted to the ThreadGroup. A faked-up main thread peer is good enough for
// these purposes.
Runtime::Current()->InitThreadGroups(self);
- jobject main_peer =
- self->CreateCompileTimePeer(self->GetJniEnv(),
- "main",
- false,
- Runtime::Current()->GetMainThreadGroup());
+ ObjPtr<mirror::Object> main_peer = self->CreateCompileTimePeer(
+ "main", /*as_daemon=*/ false, Runtime::Current()->GetMainThreadGroup());
if (main_peer == nullptr) {
AbortTransactionOrFail(self, "Failed allocating peer");
return;
}
- result->SetL(self->DecodeJObject(main_peer));
- self->GetJniEnv()->DeleteLocalRef(main_peer);
+ result->SetL(main_peer);
} else {
AbortTransactionOrFail(self,
"Thread.currentThread() does not support %s",
@@ -1277,7 +1276,7 @@ static void UnstartedMemoryPeekArray(
if (offset < 0 || offset + count > array->GetLength()) {
std::string error_msg(StringPrintf("Array out of bounds in peekArray: %d/%d vs %d",
offset, count, array->GetLength()));
- Runtime::Current()->AbortTransactionAndThrowAbortError(self, error_msg.c_str());
+ Runtime::Current()->AbortTransactionAndThrowAbortError(self, error_msg);
return;
}
@@ -1367,6 +1366,22 @@ void UnstartedRuntime::UnstartedStringDoReplace(
}
// This allows creating the new style of String objects during compilation.
+void UnstartedRuntime::UnstartedStringFactoryNewStringFromBytes(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ jint high = shadow_frame->GetVReg(arg_offset + 1);
+ jint offset = shadow_frame->GetVReg(arg_offset + 2);
+ jint byte_count = shadow_frame->GetVReg(arg_offset + 3);
+ DCHECK_GE(byte_count, 0);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ByteArray> h_byte_array(
+ hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsByteArray()));
+ Runtime* runtime = Runtime::Current();
+ gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
+ result->SetL(
+ mirror::String::AllocFromByteArray(self, byte_count, h_byte_array, offset, high, allocator));
+}
+
+// This allows creating the new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringFactoryNewStringFromChars(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
jint offset = shadow_frame->GetVReg(arg_offset);
@@ -1557,7 +1572,7 @@ void UnstartedRuntime::UnstartedJdkUnsafeCompareAndSwapObject(
mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 5);
// Must use non transactional mode.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
mirror::HeapReference<mirror::Object>* field_addr =
@@ -1921,6 +1936,30 @@ void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self,
result->SetI(receiver->AsString()->CompareTo(rhs->AsString()));
}
+void UnstartedRuntime::UnstartedJNIStringFillBytesLatin1(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver, uint32_t* args, JValue* ATTRIBUTE_UNUSED) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_receiver(hs.NewHandle(
+ reinterpret_cast<mirror::String*>(receiver)->AsString()));
+ Handle<mirror::ByteArray> h_buffer(hs.NewHandle(
+ reinterpret_cast<mirror::ByteArray*>(args[0])->AsByteArray()));
+ int32_t index = static_cast<int32_t>(args[1]);
+ h_receiver->FillBytesLatin1(h_buffer, index);
+}
+
+void UnstartedRuntime::UnstartedJNIStringFillBytesUTF16(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver, uint32_t* args, JValue* ATTRIBUTE_UNUSED) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_receiver(hs.NewHandle(
+ reinterpret_cast<mirror::String*>(receiver)->AsString()));
+ Handle<mirror::ByteArray> h_buffer(hs.NewHandle(
+ reinterpret_cast<mirror::ByteArray*>(args[0])->AsByteArray()));
+ int32_t index = static_cast<int32_t>(args[1]);
+ h_receiver->FillBytesUTF16(h_buffer, index);
+}
+
void UnstartedRuntime::UnstartedJNIStringIntern(
Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
@@ -2163,6 +2202,7 @@ using JNIHandler = void(*)(Thread* self,
uint32_t* args,
JValue* result);
+// NOLINTNEXTLINE
#define ONE_PLUS(ShortNameIgnored, DescriptorIgnored, NameIgnored, SignatureIgnored) 1 +
static constexpr size_t kInvokeHandlersSize = UNSTARTED_RUNTIME_DIRECT_LIST(ONE_PLUS) 0;
static constexpr size_t kJniHandlersSize = UNSTARTED_RUNTIME_JNI_LIST(ONE_PLUS) 0;
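
ONE_PLUS is the classic X-macro counting trick: each list entry expands to
"1 +", and the trailing 0 closes the sum, yielding a compile-time entry count
without maintaining a separate constant. Reduced to essentials:

    #include <cstddef>
    #include <cstdio>

    // X-macro list: each entry invokes V(...) with that entry's fields.
    #define FRUIT_LIST(V) \
      V(Apple)            \
      V(Banana)           \
      V(Cherry)

    // Each entry becomes "1 +"; the trailing 0 terminates the expression,
    // so the whole expansion is the constant 1 + 1 + 1 + 0.
    #define ONE_PLUS(NameIgnored) 1 +
    constexpr size_t kFruitCount = FRUIT_LIST(ONE_PLUS) 0;
    #undef ONE_PLUS

    int main() {
      std::printf("%zu\n", kFruitCount);  // prints 3
    }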
@@ -2262,6 +2302,9 @@ void UnstartedRuntime::Invoke(Thread* self, const CodeItemDataAccessor& accessor
const auto& iter = invoke_handlers_.find(shadow_frame->GetMethod());
if (iter != invoke_handlers_.end()) {
+    // Note: when we special-case the method, we do not ensure initialization.
+    // This has been the behavior since this feature was first implemented.
+
// Clear out the result in case it's not zeroed out.
result->SetL(nullptr);
@@ -2272,6 +2315,9 @@ void UnstartedRuntime::Invoke(Thread* self, const CodeItemDataAccessor& accessor
self->PopShadowFrame();
} else {
+ if (!EnsureInitialized(self, shadow_frame)) {
+ return;
+ }
// Not special, continue with regular interpreter execution.
ArtInterpreterToInterpreterBridge(self, accessor, shadow_frame, result);
}
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 5f8add0c73..dd2028a0ed 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -67,6 +67,7 @@
V(StringGetCharsNoCheck, "Ljava/lang/String;", "getCharsNoCheck", "(II[CI)V") \
V(StringCharAt, "Ljava/lang/String;", "charAt", "(I)C") \
V(StringDoReplace, "Ljava/lang/String;", "doReplace", "(CC)Ljava/lang/String;") \
+ V(StringFactoryNewStringFromBytes, "Ljava/lang/StringFactory;", "newStringFromBytes", "([BIII)Ljava/lang/String;") \
V(StringFactoryNewStringFromChars, "Ljava/lang/StringFactory;", "newStringFromChars", "(II[C)Ljava/lang/String;") \
V(StringFactoryNewStringFromString, "Ljava/lang/StringFactory;", "newStringFromString", "(Ljava/lang/String;)Ljava/lang/String;") \
V(StringFastSubstring, "Ljava/lang/String;", "fastSubstring", "(II)Ljava/lang/String;") \
@@ -105,6 +106,8 @@
V(ObjectInternalClone, "Ljava/lang/Object;", "internalClone", "()Ljava/lang/Object;") \
V(ObjectNotifyAll, "Ljava/lang/Object;", "notifyAll", "()V") \
V(StringCompareTo, "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I") \
+ V(StringFillBytesLatin1, "Ljava/lang/String;", "fillBytesLatin1", "([BI)V") \
+ V(StringFillBytesUTF16, "Ljava/lang/String;", "fillBytesUTF16", "([BI)V") \
V(StringIntern, "Ljava/lang/String;", "intern", "()Ljava/lang/String;") \
V(ArrayCreateMultiArray, "Ljava/lang/reflect/Array;", "createMultiArray", "(Ljava/lang/Class;[I)Ljava/lang/Object;") \
V(ArrayCreateObjectArray, "Ljava/lang/reflect/Array;", "createObjectArray", "(Ljava/lang/Class;I)Ljava/lang/Object;") \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 75a692e48d..3227ef7d76 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -62,11 +62,6 @@ using UniqueDeoptShadowFramePtr = std::unique_ptr<ShadowFrame, DeoptShadowFrameD
class UnstartedRuntimeTest : public CommonRuntimeTest {
protected:
- void SetUp() override {
- CommonRuntimeTest::SetUp();
- InitializeIntrinsics();
- }
-
// Re-expose all UnstartedRuntime implementations so we don't need to declare a million
// test friends.
@@ -96,11 +91,10 @@ class UnstartedRuntimeTest : public CommonRuntimeTest {
#undef UNSTARTED_JNI
UniqueDeoptShadowFramePtr CreateShadowFrame(uint32_t num_vregs,
- ShadowFrame* link,
ArtMethod* method,
uint32_t dex_pc) {
return UniqueDeoptShadowFramePtr(
- ShadowFrame::CreateDeoptimizedFrame(num_vregs, link, method, dex_pc));
+ ShadowFrame::CreateDeoptimizedFrame(num_vregs, method, dex_pc));
}
// Helpers for ArrayCopy.
@@ -237,7 +231,7 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekByte) {
const uint8_t* base_ptr = base_array;
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
for (int32_t i = 0; i < kBaseLen; ++i) {
tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
@@ -257,7 +251,7 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekShort) {
const uint8_t* base_ptr = base_array;
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
int32_t adjusted_length = kBaseLen - sizeof(int16_t);
for (int32_t i = 0; i < adjusted_length; ++i) {
@@ -280,7 +274,7 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekInt) {
const uint8_t* base_ptr = base_array;
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
int32_t adjusted_length = kBaseLen - sizeof(int32_t);
for (int32_t i = 0; i < adjusted_length; ++i) {
@@ -303,7 +297,7 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekLong) {
const uint8_t* base_ptr = base_array;
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
int32_t adjusted_length = kBaseLen - sizeof(int64_t);
for (int32_t i = 0; i < adjusted_length; ++i) {
@@ -333,7 +327,7 @@ TEST_F(UnstartedRuntimeTest, StringGetCharsNoCheck) {
uint16_t buf[kBaseLen];
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
for (int32_t start_index = 0; start_index < kBaseLen; ++start_index) {
for (int32_t count = 0; count <= kBaseLen; ++count) {
@@ -385,7 +379,7 @@ TEST_F(UnstartedRuntimeTest, StringCharAt) {
ObjPtr<mirror::String> test_string = mirror::String::AllocFromModifiedUtf8(self, base_string);
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
for (int32_t i = 0; i < base_len; ++i) {
tmp->SetVRegReference(0, test_string);
@@ -410,7 +404,7 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, method, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, method, 0);
const char* base_string = "hello_world";
StackHandleScope<2> hs(self);
Handle<mirror::String> string_arg =
@@ -420,12 +414,14 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
shadow_frame->SetVRegReference(0, reference_empty_string.Get());
shadow_frame->SetVRegReference(1, string_arg.Get());
- interpreter::DoCall<false, false>(method,
- self,
- *shadow_frame,
- Instruction::At(inst_data),
- inst_data[0],
- &result);
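+ // String.<init> is executed through its StringFactory counterpart;
+ // DoCall now takes string_init as an ordinary argument instead of a
+ // second template parameter.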
+ ArtMethod* factory = WellKnownClasses::StringInitToStringFactory(method);
+ interpreter::DoCall<false>(factory,
+ self,
+ *shadow_frame,
+ Instruction::At(inst_data),
+ inst_data[0],
+ /* string_init= */ true,
+ &result);
ObjPtr<mirror::String> string_result = down_cast<mirror::String*>(result.GetL());
EXPECT_EQ(string_arg->GetLength(), string_result->GetLength());
@@ -453,7 +449,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTestExceptions) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
// Note: none of these tests is GC-safe. Assume no GC runs here, given the few objects we
// allocate.
@@ -485,7 +481,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
JValue result;
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
StackHandleScope<1> hs_object(self);
Handle<mirror::Class> object_class(hs_object.NewHandle(GetClassRoot<mirror::Object>()));
@@ -588,7 +584,7 @@ TEST_F(UnstartedRuntimeTest, IntegerParseIntTest) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
// Test string. Should be valid, and lie between LONG_MIN and LONG_MAX (for all
// suffixes).
@@ -634,7 +630,7 @@ TEST_F(UnstartedRuntimeTest, LongParseLongTest) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
// Test string. Should be valid, and lie between LONG_MIN and LONG_MAX (for all
// suffixes).
@@ -679,7 +675,7 @@ TEST_F(UnstartedRuntimeTest, Ceil) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
constexpr double nan = std::numeric_limits<double>::quiet_NaN();
constexpr double inf = std::numeric_limits<double>::infinity();
@@ -706,7 +702,7 @@ TEST_F(UnstartedRuntimeTest, Floor) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
constexpr double nan = std::numeric_limits<double>::quiet_NaN();
constexpr double inf = std::numeric_limits<double>::infinity();
@@ -733,7 +729,7 @@ TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
std::locale c_locale("C");
@@ -828,7 +824,7 @@ TEST_F(UnstartedRuntimeTest, Sin) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
// Test an important value, PI/6. That's the one we see in practice.
constexpr uint64_t lvalue = UINT64_C(0x3fe0c152382d7365);
@@ -845,7 +841,7 @@ TEST_F(UnstartedRuntimeTest, Cos) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
// Test an important value, PI/6. That's the one we see in practice.
constexpr uint64_t lvalue = UINT64_C(0x3fe0c152382d7365);
@@ -862,7 +858,7 @@ TEST_F(UnstartedRuntimeTest, Pow) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
// Test an important pair.
constexpr uint64_t lvalue1 = UINT64_C(0x4079000000000000);
@@ -883,7 +879,7 @@ TEST_F(UnstartedRuntimeTest, IsAnonymousClass) {
ScopedObjectAccess soa(self);
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, 0);
ObjPtr<mirror::Class> class_klass = GetClassRoot<mirror::Class>();
shadow_frame->SetVRegReference(0, class_klass);
@@ -906,7 +902,7 @@ TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
ScopedObjectAccess soa(self);
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, 0);
jobject class_loader = LoadDex("Nested");
StackHandleScope<4> hs(self);
@@ -938,7 +934,7 @@ TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
ScopedObjectAccess soa(self);
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, 0);
StackHandleScope<1> hs(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -960,14 +956,14 @@ TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
ASSERT_TRUE(caller_method != nullptr);
ASSERT_TRUE(caller_method->IsDirect());
ASSERT_TRUE(caller_method->GetDeclaringClass() == floating_decimal.Get());
- UniqueDeoptShadowFramePtr caller_frame = CreateShadowFrame(10, nullptr, caller_method, 0);
+ UniqueDeoptShadowFramePtr caller_frame = CreateShadowFrame(10, caller_method, 0);
shadow_frame->SetLink(caller_frame.get());
UnstartedThreadLocalGet(self, shadow_frame.get(), &result, 0);
EXPECT_TRUE(result.GetL() != nullptr);
EXPECT_FALSE(self->IsExceptionPending());
- shadow_frame->SetLink(nullptr);
+ shadow_frame->ClearLink();
}
// Negative test.
@@ -978,7 +974,7 @@ TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
ObjPtr<mirror::Class> class_class = GetClassRoot<mirror::Class>();
ArtMethod* caller_method =
&*class_class->GetDeclaredMethods(class_linker->GetImagePointerSize()).begin();
- UniqueDeoptShadowFramePtr caller_frame = CreateShadowFrame(10, nullptr, caller_method, 0);
+ UniqueDeoptShadowFramePtr caller_frame = CreateShadowFrame(10, caller_method, 0);
shadow_frame->SetLink(caller_frame.get());
EnterTransactionMode();
@@ -988,7 +984,7 @@ TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
ASSERT_TRUE(self->IsExceptionPending());
self->ClearException();
- shadow_frame->SetLink(nullptr);
+ shadow_frame->ClearLink();
}
}
@@ -1016,15 +1012,16 @@ TEST_F(UnstartedRuntimeTest, FloatConversion) {
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, method, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, method, 0);
shadow_frame->SetVRegDouble(0, 1.23);
- interpreter::DoCall<false, false>(method,
- self,
- *shadow_frame,
- Instruction::At(inst_data),
- inst_data[0],
- &result);
+ interpreter::DoCall<false>(method,
+ self,
+ *shadow_frame,
+ Instruction::At(inst_data),
+ inst_data[0],
+ /* string_init= */ false,
+ &result);
ObjPtr<mirror::String> string_result = down_cast<mirror::String*>(result.GetL());
ASSERT_TRUE(string_result != nullptr);
@@ -1037,7 +1034,7 @@ TEST_F(UnstartedRuntimeTest, ThreadCurrentThread) {
ScopedObjectAccess soa(self);
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, 0);
StackHandleScope<1> hs(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -1114,13 +1111,15 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
}
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, 0);
for (const char* name : kTestCases) {
ObjPtr<mirror::String> name_string = mirror::String::AllocFromModifiedUtf8(self, name);
CHECK(name_string != nullptr);
if (in_transaction) {
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::String> h(hs.NewHandleWrapper(&name_string));
EnterTransactionMode();
}
CHECK(!self->IsExceptionPending());
@@ -1168,18 +1167,19 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
CHECK(boot_cp_init != nullptr);
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, boot_cp_init, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, boot_cp_init, 0);
shadow_frame->SetVRegReference(0, boot_cp.Get());
// Create instruction data for invoke-direct {v0} of a method with a fake index.
uint16_t inst_data[3] = { 0x1070, 0x0000, 0x0010 };
- interpreter::DoCall<false, false>(boot_cp_init,
- self,
- *shadow_frame,
- Instruction::At(inst_data),
- inst_data[0],
- &result);
+ interpreter::DoCall<false>(boot_cp_init,
+ self,
+ *shadow_frame,
+ Instruction::At(inst_data),
+ inst_data[0],
+ /* string_init= */ false,
+ &result);
CHECK(!self->IsExceptionPending());
}
@@ -1287,7 +1287,7 @@ TEST_F(UnstartedRuntimeTest, ClassGetSignatureAnnotation) {
ASSERT_TRUE(class_linker->EnsureInitialized(self, list_class, true, true));
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, 0);
shadow_frame->SetVRegReference(0, list_class.Get());
UnstartedClassGetSignatureAnnotation(self, shadow_frame.get(), &result, 0);
@@ -1339,7 +1339,7 @@ TEST_F(UnstartedRuntimeTest, ConstructorNewInstance0) {
// OK, we're ready now.
JValue result;
- UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, 0);
shadow_frame->SetVRegReference(0, cons.Get());
shadow_frame->SetVRegReference(1, args.Get());
UnstartedConstructorNewInstance0(self, shadow_frame.get(), &result, 0);
@@ -1360,7 +1360,7 @@ TEST_F(UnstartedRuntimeTest, ConstructorNewInstance0) {
TEST_F(UnstartedRuntimeTest, IdentityHashCode) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, 0);
JValue result;
UnstartedSystemIdentityHashCode(self, tmp.get(), &result, 0);