Revert^2 "Remove Global deopt requirement for several jvmti events"
This reverts commit ad344b6a14feba90a06a205760e9bc766c56cab0.
Jit-on-first-use could cause the runtime to skip sending FramePop
events for exceptions in some circumstances, because the thrown
exception longjmps over the watched frames. Preventing this requires
forcing the JIT off from the first instant a thread can receive
frame-pop events, which is unfortunate.
Reason for revert: Fixed issue causing 1925 to fail on
jit-on-first-use and redefine-stress
Test: ./test.py --host
Test: ./art/test/testrunner/run_build_test_target.py -j50 art-jit-on-first-use
Change-Id: I6ec2a799db1041262a055be10e1af1faece6f2fc
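
For context: the mechanism this relands is a per-thread force-interpreter
refcount instead of a process-wide deopt. The new Thread field also shifts
thread-local offsets, which appears to be why the expected Thumb disassembly
below changes. A minimal sketch of the counting scheme, simplified from the
runtime/thread.h hunk at the end of this patch (lock annotations and the
tls32_ packing elided):

  struct ForceInterpreterCounter {  // simplified from art::Thread
    // Each outstanding reason to stay interpreted (a watched frame, a
    // thread-filtered jvmti event, ...) bumps the count; jitted code may
    // only be entered while the count is zero. In ART the field lives in
    // tls32_ and writes are guarded by thread_list_lock_.
    uint32_t force_interpreter_count = 0;

    uint32_t Increment() { return ++force_interpreter_count; }
    uint32_t Decrement() { return --force_interpreter_count; }
    bool IsForceInterpreter() const { return force_interpreter_count != 0; }
  };
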
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 0d279ed..842716f 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@
" f0: f1bc 0f00 cmp.w ip, #0\n",
" f4: bf18 it ne\n",
" f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
- " fa: f8d9 c094 ldr.w ip, [r9, #148] ; 0x94\n",
+ " fa: f8d9 c09c ldr.w ip, [r9, #156] ; 0x9c\n",
" fe: f1bc 0f00 cmp.w ip, #0\n",
" 102: d171 bne.n 1e8 <VixlJniHelpers+0x1e8>\n",
" 104: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
@@ -153,7 +153,7 @@
" 21c: f8d9 8034 ldr.w r8, [r9, #52] ; 0x34\n",
" 220: 4770 bx lr\n",
" 222: 4660 mov r0, ip\n",
- " 224: f8d9 c2dc ldr.w ip, [r9, #732] ; 0x2dc\n",
+ " 224: f8d9 c2e4 ldr.w ip, [r9, #740] ; 0x2e4\n",
" 228: 47e0 blx ip\n",
nullptr
};
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 7a2b638..39e49d7 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -1053,22 +1053,9 @@
jthread event_thread,
...) {
ENSURE_VALID_ENV(env);
- art::Thread* art_thread = nullptr;
- if (event_thread != nullptr) {
- // TODO The locking around this call is less then what we really want.
- art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
- jvmtiError err = ERR(INTERNAL);
- if (!ThreadUtil::GetAliveNativeThread(event_thread, soa, &art_thread, &err)) {
- return err;
- } else if (art_thread->IsStillStarting()) {
- return ERR(THREAD_NOT_ALIVE);
- }
- }
-
ArtJvmTiEnv* art_env = ArtJvmTiEnv::AsArtJvmTiEnv(env);
return gEventHandler->SetEvent(art_env,
- art_thread,
+ event_thread,
GetArtJvmtiEvent(art_env, event_type),
mode);
}
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index d456d83..ee77b7b 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -49,6 +49,7 @@
#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_phase.h"
@@ -356,6 +357,47 @@
kDeoptManagerInstrumentationKey);
}
+jvmtiError DeoptManager::AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa,
+                                                    jthread jtarget) {
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
+ art::Thread* target = nullptr;
+ jvmtiError err = OK;
+ if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
+ return err;
+ }
+ // We don't need additional locking here because we hold the thread_list_lock_.
+ target->SetForceInterpreterCount(target->ForceInterpreterCount() + 1);
+ if (target->ForceInterpreterCount() == 1) {
+ struct DeoptClosure : public art::Closure {
+ public:
+ explicit DeoptClosure(DeoptManager* man) : man_(man) {}
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ man_->DeoptimizeThread(self);
+ }
+
+ private:
+ DeoptManager* man_;
+ };
+ DeoptClosure c(this);
+ target->RequestSynchronousCheckpoint(&c);
+ } else {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
+ }
+ return OK;
+}
+
+jvmtiError DeoptManager::RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa,
+                                                       jthread jtarget) {
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Thread* target = nullptr;
+ jvmtiError err = OK;
+ if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
+ return err;
+ }
+ // We don't need additional locking here because we hold the thread_list_lock_.
+ DCHECK_GT(target->ForceInterpreterCount(), 0u);
+ target->DecrementForceInterpreterCount();
+ return OK;
+}
void DeoptManager::RemoveDeoptimizationRequester() {
art::Thread* self = art::Thread::Current();
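
Note: DeoptClosure above uses ART's synchronous checkpoint idiom. The caller
holds thread_list_lock_ and hands a Closure to
Thread::RequestSynchronousCheckpoint, which releases the lock on every path;
that is why AddDeoptimizeThreadMethods only unlocks explicitly on the branches
that never run the checkpoint. A hedged sketch of the same idiom with a
hypothetical closure (illustration only, not part of this patch):

  // Hypothetical closure following the same pattern as DeoptClosure.
  struct LogTidClosure : public art::Closure {
    void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
      // `self` is the target thread; the closure may run on its own stack, or
      // on the requester's stack while the target is suspended.
      LOG(INFO) << "Checkpoint ran for tid " << self->GetTid();
    }
  };

  // Caller must hold thread_list_lock_; RequestSynchronousCheckpoint releases it.
  art::Locks::thread_list_lock_->ExclusiveLock(art::Thread::Current());
  LogTidClosure closure;
  target->RequestSynchronousCheckpoint(&closure);
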
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index 856f3f4..4c4a774 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -38,8 +38,11 @@
#include "base/mutex.h"
#include "runtime_callbacks.h"
+#include <jvmti.h>
+
namespace art {
class ArtMethod;
+class ScopedObjectAccessUnchecked;
namespace mirror {
class Class;
} // namespace mirror
@@ -98,6 +101,14 @@
REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
REQUIRES_SHARED(art::Locks::mutator_lock_);
+ jvmtiError AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread thread)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ jvmtiError RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread thread)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
void DeoptimizeThread(art::Thread* target) REQUIRES_SHARED(art::Locks::mutator_lock_);
void DeoptimizeAllThreads() REQUIRES_SHARED(art::Locks::mutator_lock_);
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 22c622a..b41edc5 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -794,6 +794,12 @@
void WatchedFramePop(art::Thread* self, const art::ShadowFrame& frame)
REQUIRES_SHARED(art::Locks::mutator_lock_) override {
art::JNIEnvExt* jnienv = self->GetJniEnv();
+ // Remove the force-interpreter reference added when this frame was watched (NotifyFramePop).
+ {
+ art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ CHECK_GT(self->ForceInterpreterCount(), 0u);
+ self->DecrementForceInterpreterCount();
+ }
jboolean is_exception_pending = self->IsExceptionPending();
RunEventCallback<ArtJvmtiEvent::kFramePop>(
event_handler_,
@@ -965,45 +971,78 @@
}
}
-static bool EventNeedsFullDeopt(ArtJvmtiEvent event) {
+enum class DeoptRequirement {
+ // Limited/no deopt required.
+ kLimited,
+ // A single thread must be put into interpret only.
+ kThread,
+ // All methods and all threads deopted.
+ kFull,
+};
+
+static DeoptRequirement GetDeoptRequirement(ArtJvmtiEvent event, jthread thread) {
switch (event) {
case ArtJvmtiEvent::kBreakpoint:
case ArtJvmtiEvent::kException:
- return false;
- // TODO We should support more of these or at least do something to make them discriminate by
- // thread.
+ return DeoptRequirement::kLimited;
+ // TODO MethodEntry requires full deopt due to inconsistencies between the interpreter and the
+ // trampoline in how exceptions are handled.
case ArtJvmtiEvent::kMethodEntry:
case ArtJvmtiEvent::kExceptionCatch:
+ return DeoptRequirement::kFull;
case ArtJvmtiEvent::kMethodExit:
case ArtJvmtiEvent::kFieldModification:
case ArtJvmtiEvent::kFieldAccess:
case ArtJvmtiEvent::kSingleStep:
case ArtJvmtiEvent::kFramePop:
- return true;
+ return thread == nullptr ? DeoptRequirement::kFull : DeoptRequirement::kThread;
default:
LOG(FATAL) << "Unexpected event type!";
UNREACHABLE();
}
}
-void EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener,
- ArtJvmtiEvent event,
- bool enable) {
- bool needs_full_deopt = EventNeedsFullDeopt(event);
+jvmtiError EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener,
+ ArtJvmtiEvent event,
+ jthread thread,
+ bool enable) {
+ DeoptRequirement deopt_req = GetDeoptRequirement(event, thread);
// Make sure we can deopt.
{
art::ScopedObjectAccess soa(art::Thread::Current());
DeoptManager* deopt_manager = DeoptManager::Get();
+ jvmtiError err = OK;
if (enable) {
deopt_manager->AddDeoptimizationRequester();
- if (needs_full_deopt) {
- deopt_manager->AddDeoptimizeAllMethods();
+ switch (deopt_req) {
+ case DeoptRequirement::kFull:
+ deopt_manager->AddDeoptimizeAllMethods();
+ break;
+ case DeoptRequirement::kThread:
+ err = deopt_manager->AddDeoptimizeThreadMethods(soa, thread);
+ break;
+ default:
+ break;
+ }
+ if (err != OK) {
+ deopt_manager->RemoveDeoptimizationRequester();
+ return err;
}
} else {
- if (needs_full_deopt) {
- deopt_manager->RemoveDeoptimizeAllMethods();
+ switch (deopt_req) {
+ case DeoptRequirement::kFull:
+ deopt_manager->RemoveDeoptimizeAllMethods();
+ break;
+ case DeoptRequirement::kThread:
+ err = deopt_manager->RemoveDeoptimizeThreadMethods(soa, thread);
+ break;
+ default:
+ break;
}
deopt_manager->RemoveDeoptimizationRequester();
+ if (err != OK) {
+ return err;
+ }
}
}
@@ -1019,7 +1058,7 @@
if (IsEventEnabledAnywhere(other)) {
// The event needs to be kept around/is already enabled by the other jvmti event that uses the
// same instrumentation event.
- return;
+ return OK;
}
}
art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
@@ -1030,6 +1069,7 @@
} else {
instr->RemoveListener(listener, new_events);
}
+ return OK;
}
// Makes sure that all compiled methods are AsyncDeoptimizable so we can deoptimize (and force to
@@ -1085,41 +1125,42 @@
return false;
}
-void EventHandler::SetupFramePopTraceListener(bool enable) {
+jvmtiError EventHandler::SetupFramePopTraceListener(jthread thread, bool enable) {
if (enable) {
frame_pop_enabled = true;
- SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
+ return SetupTraceListener(
+ method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, thread, enable);
} else {
// Remove the listener if we have no outstanding frames.
{
art::ReaderMutexLock mu(art::Thread::Current(), envs_lock_);
for (ArtJvmTiEnv* env : envs) {
art::ReaderMutexLock event_mu(art::Thread::Current(), env->event_info_mutex_);
if (!env->notify_frames.empty()) {
// Leaving FramePop listener since there are unsent FramePop events.
- return;
+ return OK;
}
}
frame_pop_enabled = false;
}
- SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
+ return SetupTraceListener(
+ method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, thread, enable);
}
}
// Handle special work for the given event type, if necessary.
-void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
+jvmtiError EventHandler::HandleEventType(ArtJvmtiEvent event, jthread thread, bool enable) {
switch (event) {
case ArtJvmtiEvent::kDdmPublishChunk:
SetupDdmTracking(ddm_listener_.get(), enable);
- return;
+ return OK;
case ArtJvmtiEvent::kVmObjectAlloc:
SetupObjectAllocationTracking(alloc_listener_.get(), enable);
- return;
-
+ return OK;
case ArtJvmtiEvent::kGarbageCollectionStart:
case ArtJvmtiEvent::kGarbageCollectionFinish:
SetupGcPauseTracking(gc_pause_listener_.get(), event, enable);
- return;
+ return OK;
// FramePop can never be fully disabled once it has been turned on: turning it off while pop
// events are still outstanding would either leave dangling pointers or miss events.
case ArtJvmtiEvent::kFramePop:
@@ -1127,8 +1168,7 @@
// The frame-pop event was kept enabled by pending events so we don't need to do anything.
break;
} else {
- SetupFramePopTraceListener(enable);
- break;
+ return SetupFramePopTraceListener(thread, enable);
}
case ArtJvmtiEvent::kMethodEntry:
case ArtJvmtiEvent::kMethodExit:
@@ -1138,8 +1178,7 @@
case ArtJvmtiEvent::kExceptionCatch:
case ArtJvmtiEvent::kBreakpoint:
case ArtJvmtiEvent::kSingleStep:
- SetupTraceListener(method_trace_listener_.get(), event, enable);
- return;
+ return SetupTraceListener(method_trace_listener_.get(), event, thread, enable);
case ArtJvmtiEvent::kMonitorContendedEnter:
case ArtJvmtiEvent::kMonitorContendedEntered:
case ArtJvmtiEvent::kMonitorWait:
@@ -1147,10 +1186,11 @@
if (!OtherMonitorEventsEnabledAnywhere(event)) {
SetupMonitorListener(monitor_listener_.get(), park_listener_.get(), enable);
}
- return;
+ return OK;
default:
break;
}
+ return OK;
}
// Checks to see if the env has the capabilities associated with the given event.
@@ -1212,21 +1252,9 @@
}
jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
- art::Thread* thread,
+ jthread thread,
ArtJvmtiEvent event,
jvmtiEventMode mode) {
- if (thread != nullptr) {
- art::ThreadState state = thread->GetState();
- if (state == art::ThreadState::kStarting ||
- state == art::ThreadState::kTerminated ||
- thread->IsStillStarting()) {
- return ERR(THREAD_NOT_ALIVE);
- }
- if (!IsThreadControllable(event)) {
- return ERR(ILLEGAL_ARGUMENT);
- }
- }
-
if (mode != JVMTI_ENABLE && mode != JVMTI_DISABLE) {
return ERR(ILLEGAL_ARGUMENT);
}
@@ -1239,6 +1267,28 @@
return ERR(MUST_POSSESS_CAPABILITY);
}
+ art::Thread* art_thread = nullptr;
+ if (thread != nullptr) {
+ if (!IsThreadControllable(event)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ jvmtiError err = ERR(INTERNAL);
+ if (!ThreadUtil::GetAliveNativeThread(thread, soa, &art_thread, &err)) {
+ return err;
+ } else if (art_thread->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ art::ThreadState state = art_thread->GetState();
+ if (state == art::ThreadState::kStarting || state == art::ThreadState::kTerminated) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ }
+
+ // TODO We use art_thread simply as a globally unique identifier here. It is not safe to
+ // actually use it without holding the thread_list_lock_.
+
bool old_state;
bool new_state;
@@ -1249,13 +1299,14 @@
art::WriterMutexLock mu_env_info(self, env->event_info_mutex_);
old_state = global_mask.Test(event);
if (mode == JVMTI_ENABLE) {
- env->event_masks.EnableEvent(env, thread, event);
+ env->event_masks.EnableEvent(env, art_thread, event);
global_mask.Set(event);
new_state = true;
} else {
DCHECK_EQ(mode, JVMTI_DISABLE);
- env->event_masks.DisableEvent(env, thread, event);
+ // TODO Replace art_thread with a uintptr_t or something to indicate we cannot read from it.
+ env->event_masks.DisableEvent(env, art_thread, event);
RecalculateGlobalEventMaskLocked(event);
new_state = global_mask.Test(event);
}
@@ -1263,7 +1314,7 @@
// Handle any special work required for the event type.
if (new_state != old_state) {
- HandleEventType(event, mode == JVMTI_ENABLE);
+ return HandleEventType(event, thread, mode == JVMTI_ENABLE);
}
return ERR(NONE);
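
From an agent's point of view the payoff is that a thread-filtered request
through the standard JVMTI entry point no longer deoptimizes the whole
process for the events GetDeoptRequirement maps to kThread. A sketch using
only stock JVMTI calls (assumes the can_generate_single_step_events
capability was already added; error handling elided):

  #include <jvmti.h>

  // Enabling single-stepping for one thread now only charges that thread's
  // force-interpreter count (DeoptRequirement::kThread). Passing nullptr as
  // the thread still requires full deoptimization (kFull).
  jvmtiError EnableSingleStep(jvmtiEnv* jvmti, jthread thread) {
    return jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                           JVMTI_EVENT_SINGLE_STEP,
                                           thread);
  }
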
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index abb15cc..e48772c 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -198,7 +198,7 @@
}
jvmtiError SetEvent(ArtJvmTiEnv* env,
- art::Thread* thread,
+ jthread thread,
ArtJvmtiEvent event,
jvmtiEventMode mode)
REQUIRES(!envs_lock_);
@@ -246,10 +246,13 @@
REQUIRES(!envs_lock_);
private:
- void SetupTraceListener(JvmtiMethodTraceListener* listener, ArtJvmtiEvent event, bool enable);
+ jvmtiError SetupTraceListener(JvmtiMethodTraceListener* listener,
+ ArtJvmtiEvent event,
+ jthread thread,
+ bool enable);
// Specifically handle the FramePop event, which might not always be possible to turn off.
- void SetupFramePopTraceListener(bool enable);
+ jvmtiError SetupFramePopTraceListener(jthread thread, bool enable);
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
@@ -309,7 +312,7 @@
jclass klass) const
REQUIRES(!envs_lock_);
- void HandleEventType(ArtJvmtiEvent event, bool enable);
+ jvmtiError HandleEventType(ArtJvmtiEvent event, jthread thread, bool enable);
void HandleLocalAccessCapabilityAdded();
void HandleBreakpointEventsChanged(bool enable);
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 385ac45..5e934df 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -1061,8 +1061,14 @@
art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
{
art::WriterMutexLock lk(self, tienv->event_info_mutex_);
- // Mark shadow frame as needs_notify_pop_
- shadow_frame->SetNotifyPop(true);
+ if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
+ // Ensure we won't miss exceptions being thrown if we get jit-compiled. We only do this for
+ // the first NotifyFramePop.
+ target->IncrementForceInterpreterCount();
+
+ // Mark shadow frame as needs_notify_pop_
+ shadow_frame->SetNotifyPop(true);
+ }
tienv->notify_frames.insert(shadow_frame);
}
// Make sure we will go to the interpreter and use the shadow frames.
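
The guard above means only the first NotifyFramePop on a given frame charges
a force-interpreter reference, and WatchedFramePop in events.cc releases it
when the event is sent. On the agent side this is the ordinary JVMTI flow
(stock API; assumes the can_generate_frame_pop_events capability):

  // Watch the target thread's current frame (depth 0) for a FramePop event.
  // The first watch on a frame now pins the thread to the interpreter so an
  // exception longjmp-ing past the frame cannot skip the event.
  jvmtiError WatchCurrentFrame(jvmtiEnv* jvmti, jthread thread) {
    return jvmti->NotifyFramePop(thread, /*depth=*/0);
  }
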
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index c7e41be..0f25ec4 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -322,7 +322,9 @@
// If the runtime is not yet started or it is required by the debugger, then perform the
// invocation by the interpreter, explicitly forcing interpretation over JIT to prevent
// cycling around the various JIT/Interpreter methods that handle method invocation.
- if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
+ if (UNLIKELY(!runtime->IsStarted() ||
+ (self->IsForceInterpreter() && !IsNative() && !IsProxyMethod() && IsInvokable()) ||
+ Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
if (IsStatic()) {
art::interpreter::EnterInterpreterFromInvoke(
self, this, nullptr, args, result, /*stay_in_interpreter=*/ true);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 89c6a0b..de9d9ca 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3287,7 +3287,8 @@
return true;
}
- if (Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
+ if (Thread::Current()->IsForceInterpreter() ||
+ Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
// Force the use of interpreter when it is required by the debugger.
return true;
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 6deb509..77cd145 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -800,8 +800,9 @@
uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
// If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
// should be done and it knows the real return pc.
- if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+ if (UNLIKELY(
+ caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
+ (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) {
if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
<< caller->PrettyMethod();
@@ -1453,8 +1454,10 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
linker->EnsureInitialized(soa.Self(), called_class, true, true);
+ bool force_interpreter = self->IsForceInterpreter() && !called->IsNative();
if (LIKELY(called_class->IsInitialized())) {
- if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+ if (UNLIKELY(force_interpreter ||
+ Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
// If we are single-stepping or the called method is deoptimized (by a
// breakpoint, for example), then we have to execute the called method
// with the interpreter.
@@ -1473,7 +1476,8 @@
code = called->GetEntryPointFromQuickCompiledCode();
}
} else if (called_class->IsInitializing()) {
- if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+ if (UNLIKELY(force_interpreter ||
+ Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
// If we are single-stepping or the called method is deoptimized (by a
// breakpoint, for example), then we have to execute the called method
// with the interpreter.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 36f7b3d..88d6fee 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1491,6 +1491,7 @@
visitor.WalkStack(true);
bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
+ self->IsForceInterpreter() ||
Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
if (is_ref) {
// Restore the return value if it's a reference since it might have moved.
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index aa11562..25d48c2 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -289,7 +289,7 @@
}
}
- if (!stay_in_interpreter) {
+ if (!stay_in_interpreter && !self->IsForceInterpreter()) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
jit->MethodEntered(self, shadow_frame.GetMethod());
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8890a30..526413b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3571,7 +3571,7 @@
}
force_deopt = force_frame_pop || force_retry_instr;
}
- if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt) {
+ if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt || IsForceInterpreter()) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
diff --git a/runtime/thread.h b/runtime/thread.h
index ec276b5..085bd98 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1203,6 +1203,26 @@
tls32_.is_transitioning_to_runnable = value;
}
+ uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
+ return --tls32_.force_interpreter_count;
+ }
+
+ uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
+ return ++tls32_.force_interpreter_count;
+ }
+
+ void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
+ tls32_.force_interpreter_count = value;
+ }
+
+ uint32_t ForceInterpreterCount() const {
+ return tls32_.force_interpreter_count;
+ }
+
+ bool IsForceInterpreter() const {
+ return tls32_.force_interpreter_count != 0;
+ }
+
void PushVerifier(verifier::MethodVerifier* verifier);
void PopVerifier(verifier::MethodVerifier* verifier);
@@ -1465,7 +1485,7 @@
thread_exit_check_count(0), handling_signal_(false),
is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
- disable_thread_flip_count(0), user_code_suspend_count(0) {
+ disable_thread_flip_count(0), user_code_suspend_count(0), force_interpreter_count(0) {
}
union StateAndFlags state_and_flags;
@@ -1549,6 +1569,10 @@
// told that AssertHeld should be good enough.
int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
+ // Count of how many times this thread has been forced into the interpreter. If this is not 0
+ // the thread must remain in interpreted code as much as possible.
+ uint32_t force_interpreter_count;
+
// True if everything is in the ideal state for fast interpretation.
// False if we need to switch to the C++ interpreter to handle special cases.
std::atomic<bool32_t> use_mterp;
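
Finally, a hypothetical gtest-style sketch of the refcount invariants the new
Thread members are meant to maintain (illustration only, not a test added by
this patch; assumes an attached runtime thread and ART's usual test fixtures):

  TEST(ThreadForceInterpreter, CountsNestAndBalance) {
    art::Thread* self = art::Thread::Current();
    art::MutexLock mu(self, *art::Locks::thread_list_lock_);
    EXPECT_FALSE(self->IsForceInterpreter());
    self->IncrementForceInterpreterCount();  // e.g. first watched frame
    self->IncrementForceInterpreterCount();  // e.g. a thread-filtered event
    EXPECT_TRUE(self->IsForceInterpreter());
    self->DecrementForceInterpreterCount();
    EXPECT_TRUE(self->IsForceInterpreter());  // one holder still outstanding
    self->DecrementForceInterpreterCount();
    EXPECT_FALSE(self->IsForceInterpreter());
  }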