Revert "Revert^2 "Remove Global deopt requirement for several jvmti events""
This reverts commit a75e5892fba6d0208f33e9ab8e19e732daf94996.
Reason for revert: Fails jdwp tests.
Change-Id: Ic883f4f0108b07f1609c6c6a9565eae5903bfd2c
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 842716f..0d279ed 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@
" f0: f1bc 0f00 cmp.w ip, #0\n",
" f4: bf18 it ne\n",
" f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
- " fa: f8d9 c09c ldr.w ip, [r9, #156] ; 0x9c\n",
+ " fa: f8d9 c094 ldr.w ip, [r9, #148] ; 0x94\n",
" fe: f1bc 0f00 cmp.w ip, #0\n",
" 102: d171 bne.n 1e8 <VixlJniHelpers+0x1e8>\n",
" 104: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
@@ -153,7 +153,7 @@
" 21c: f8d9 8034 ldr.w r8, [r9, #52] ; 0x34\n",
" 220: 4770 bx lr\n",
" 222: 4660 mov r0, ip\n",
- " 224: f8d9 c2e4 ldr.w ip, [r9, #740] ; 0x2e4\n",
+ " 224: f8d9 c2dc ldr.w ip, [r9, #732] ; 0x2dc\n",
" 228: 47e0 blx ip\n",
nullptr
};
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 39e49d7..7a2b638 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -1053,9 +1053,22 @@
jthread event_thread,
...) {
ENSURE_VALID_ENV(env);
+ art::Thread* art_thread = nullptr;
+ if (event_thread != nullptr) {
+ // TODO The locking around this call is less than what we really want.
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ jvmtiError err = ERR(INTERNAL);
+ if (!ThreadUtil::GetAliveNativeThread(event_thread, soa, &art_thread, &err)) {
+ return err;
+ } else if (art_thread->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ }
+
ArtJvmTiEnv* art_env = ArtJvmTiEnv::AsArtJvmTiEnv(env);
return gEventHandler->SetEvent(art_env,
- event_thread,
+ art_thread,
GetArtJvmtiEvent(art_env, event_type),
mode);
}
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index ee77b7b..d456d83 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -49,7 +49,6 @@
#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
-#include "scoped_thread_state_change.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_phase.h"
@@ -357,47 +356,6 @@
kDeoptManagerInstrumentationKey);
}
-jvmtiError DeoptManager::AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread jtarget) {
- art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
- art::Thread* target = nullptr;
- jvmtiError err = OK;
- if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
- art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
- return err;
- }
- // We don't need additional locking here because we hold the Thread_list_lock_.
- target->SetForceInterpreterCount(target->ForceInterpreterCount() + 1);
- if (target->ForceInterpreterCount() == 1) {
- struct DeoptClosure : public art::Closure {
- public:
- explicit DeoptClosure(DeoptManager* man) : man_(man) {}
- void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- man_->DeoptimizeThread(self);
- }
-
- private:
- DeoptManager* man_;
- };
- DeoptClosure c(this);
- target->RequestSynchronousCheckpoint(&c);
- } else {
- art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
- }
- return OK;
-}
-
-jvmtiError DeoptManager::RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread jtarget) {
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
- art::Thread* target = nullptr;
- jvmtiError err = OK;
- if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
- return err;
- }
- // We don't need additional locking here because we hold the Thread_list_lock_.
- DCHECK_GT(target->ForceInterpreterCount(), 0u);
- target->DecrementForceInterpreterCount();
- return OK;
-}
void DeoptManager::RemoveDeoptimizationRequester() {
art::Thread* self = art::Thread::Current();
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index 4c4a774..856f3f4 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -38,11 +38,8 @@
#include "base/mutex.h"
#include "runtime_callbacks.h"
-#include <jvmti.h>
-
namespace art {
class ArtMethod;
-class ScopedObjectAccessUnchecked;
namespace mirror {
class Class;
} // namespace mirror
@@ -101,14 +98,6 @@
REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- jvmtiError AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread thread)
- REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- jvmtiError RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa, jthread thread)
- REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
void DeoptimizeThread(art::Thread* target) REQUIRES_SHARED(art::Locks::mutator_lock_);
void DeoptimizeAllThreads() REQUIRES_SHARED(art::Locks::mutator_lock_);
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index b41edc5..22c622a 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -794,12 +794,6 @@
void WatchedFramePop(art::Thread* self, const art::ShadowFrame& frame)
REQUIRES_SHARED(art::Locks::mutator_lock_) override {
art::JNIEnvExt* jnienv = self->GetJniEnv();
- // Remove the force-interpreter added by the WatchFrame.
- {
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
- CHECK_GT(self->ForceInterpreterCount(), 0u);
- self->DecrementForceInterpreterCount();
- }
jboolean is_exception_pending = self->IsExceptionPending();
RunEventCallback<ArtJvmtiEvent::kFramePop>(
event_handler_,
@@ -971,78 +965,45 @@
}
}
-enum class DeoptRequirement {
- // Limited/no deopt required.
- kLimited,
- // A single thread must be put into interpret only.
- kThread,
- // All methods and all threads deopted.
- kFull,
-};
-
-static DeoptRequirement GetDeoptRequirement(ArtJvmtiEvent event, jthread thread) {
+static bool EventNeedsFullDeopt(ArtJvmtiEvent event) {
switch (event) {
case ArtJvmtiEvent::kBreakpoint:
case ArtJvmtiEvent::kException:
- return DeoptRequirement::kLimited;
- // TODO MethodEntry is needed due to inconsistencies between the interpreter and the trampoline
- // in how to handle exceptions.
+ return false;
+ // TODO We should support more of these or at least do something to make them discriminate by
+ // thread.
case ArtJvmtiEvent::kMethodEntry:
case ArtJvmtiEvent::kExceptionCatch:
- return DeoptRequirement::kFull;
case ArtJvmtiEvent::kMethodExit:
case ArtJvmtiEvent::kFieldModification:
case ArtJvmtiEvent::kFieldAccess:
case ArtJvmtiEvent::kSingleStep:
case ArtJvmtiEvent::kFramePop:
- return thread == nullptr ? DeoptRequirement::kFull : DeoptRequirement::kThread;
+ return true;
default:
LOG(FATAL) << "Unexpected event type!";
UNREACHABLE();
}
}
-jvmtiError EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener,
- ArtJvmtiEvent event,
- jthread thread,
- bool enable) {
- DeoptRequirement deopt_req = GetDeoptRequirement(event, thread);
+void EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener,
+ ArtJvmtiEvent event,
+ bool enable) {
+ bool needs_full_deopt = EventNeedsFullDeopt(event);
// Make sure we can deopt.
{
art::ScopedObjectAccess soa(art::Thread::Current());
DeoptManager* deopt_manager = DeoptManager::Get();
- jvmtiError err = OK;
if (enable) {
deopt_manager->AddDeoptimizationRequester();
- switch (deopt_req) {
- case DeoptRequirement::kFull:
- deopt_manager->AddDeoptimizeAllMethods();
- break;
- case DeoptRequirement::kThread:
- err = deopt_manager->AddDeoptimizeThreadMethods(soa, thread);
- break;
- default:
- break;
- }
- if (err != OK) {
- deopt_manager->RemoveDeoptimizationRequester();
- return err;
+ if (needs_full_deopt) {
+ deopt_manager->AddDeoptimizeAllMethods();
}
} else {
- switch (deopt_req) {
- case DeoptRequirement::kFull:
- deopt_manager->RemoveDeoptimizeAllMethods();
- break;
- case DeoptRequirement::kThread:
- err = deopt_manager->RemoveDeoptimizeThreadMethods(soa, thread);
- break;
- default:
- break;
+ if (needs_full_deopt) {
+ deopt_manager->RemoveDeoptimizeAllMethods();
}
deopt_manager->RemoveDeoptimizationRequester();
- if (err != OK) {
- return err;
- }
}
}
@@ -1058,7 +1019,7 @@
if (IsEventEnabledAnywhere(other)) {
// The event needs to be kept around/is already enabled by the other jvmti event that uses the
// same instrumentation event.
- return OK;
+ return;
}
}
art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
@@ -1069,7 +1030,6 @@
} else {
instr->RemoveListener(listener, new_events);
}
- return OK;
}
// Makes sure that all compiled methods are AsyncDeoptimizable so we can deoptimize (and force to
@@ -1125,42 +1085,41 @@
return false;
}
-jvmtiError EventHandler::SetupFramePopTraceListener(jthread thread, bool enable) {
+void EventHandler::SetupFramePopTraceListener(bool enable) {
if (enable) {
frame_pop_enabled = true;
- return SetupTraceListener(
- method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, thread, enable);
+ SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
} else {
// remove the listener if we have no outstanding frames.
{
art::ReaderMutexLock mu(art::Thread::Current(), envs_lock_);
- for (ArtJvmTiEnv *env : envs) {
+ for (ArtJvmTiEnv* env : envs) {
art::ReaderMutexLock event_mu(art::Thread::Current(), env->event_info_mutex_);
if (!env->notify_frames.empty()) {
// Leaving FramePop listener since there are unsent FramePop events.
- return OK;
+ return;
}
}
frame_pop_enabled = false;
}
- return SetupTraceListener(
- method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, thread, enable);
+ SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
}
}
// Handle special work for the given event type, if necessary.
-jvmtiError EventHandler::HandleEventType(ArtJvmtiEvent event, jthread thread, bool enable) {
+void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
switch (event) {
case ArtJvmtiEvent::kDdmPublishChunk:
SetupDdmTracking(ddm_listener_.get(), enable);
- return OK;
+ return;
case ArtJvmtiEvent::kVmObjectAlloc:
SetupObjectAllocationTracking(alloc_listener_.get(), enable);
- return OK;
+ return;
+
case ArtJvmtiEvent::kGarbageCollectionStart:
case ArtJvmtiEvent::kGarbageCollectionFinish:
SetupGcPauseTracking(gc_pause_listener_.get(), event, enable);
- return OK;
+ return;
// FramePop can never be disabled once it's been turned on if it was turned off with outstanding
// pop-events since we would either need to deal with dangling pointers or have missed events.
case ArtJvmtiEvent::kFramePop:
@@ -1168,7 +1127,8 @@
// The frame-pop event was held on by pending events so we don't need to do anything.
break;
} else {
- return SetupFramePopTraceListener(thread, enable);
+ SetupFramePopTraceListener(enable);
+ break;
}
case ArtJvmtiEvent::kMethodEntry:
case ArtJvmtiEvent::kMethodExit:
@@ -1178,7 +1138,8 @@
case ArtJvmtiEvent::kExceptionCatch:
case ArtJvmtiEvent::kBreakpoint:
case ArtJvmtiEvent::kSingleStep:
- return SetupTraceListener(method_trace_listener_.get(), event, thread, enable);
+ SetupTraceListener(method_trace_listener_.get(), event, enable);
+ return;
case ArtJvmtiEvent::kMonitorContendedEnter:
case ArtJvmtiEvent::kMonitorContendedEntered:
case ArtJvmtiEvent::kMonitorWait:
@@ -1186,11 +1147,10 @@
if (!OtherMonitorEventsEnabledAnywhere(event)) {
SetupMonitorListener(monitor_listener_.get(), park_listener_.get(), enable);
}
- return OK;
+ return;
default:
break;
}
- return OK;
}
// Checks to see if the env has the capabilities associated with the given event.
@@ -1252,9 +1212,21 @@
}
jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
- jthread thread,
+ art::Thread* thread,
ArtJvmtiEvent event,
jvmtiEventMode mode) {
+ if (thread != nullptr) {
+ art::ThreadState state = thread->GetState();
+ if (state == art::ThreadState::kStarting ||
+ state == art::ThreadState::kTerminated ||
+ thread->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ if (!IsThreadControllable(event)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ }
+
if (mode != JVMTI_ENABLE && mode != JVMTI_DISABLE) {
return ERR(ILLEGAL_ARGUMENT);
}
@@ -1267,28 +1239,6 @@
return ERR(MUST_POSSESS_CAPABILITY);
}
- art::Thread* art_thread = nullptr;
- if (thread != nullptr) {
- if (!IsThreadControllable(event)) {
- return ERR(ILLEGAL_ARGUMENT);
- }
- art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
- jvmtiError err = ERR(INTERNAL);
- if (!ThreadUtil::GetAliveNativeThread(thread, soa, &art_thread, &err)) {
- return err;
- } else if (art_thread->IsStillStarting()) {
- return ERR(THREAD_NOT_ALIVE);
- }
- art::ThreadState state = art_thread->GetState();
- if (state == art::ThreadState::kStarting || state == art::ThreadState::kTerminated) {
- return ERR(THREAD_NOT_ALIVE);
- }
- }
-
- // TODO We use art_thread simply as a global unique identifier here. It is not safe to actually
- // use it without holding the thread_list_lock_.
-
bool old_state;
bool new_state;
@@ -1299,14 +1249,13 @@
art::WriterMutexLock mu_env_info(self, env->event_info_mutex_);
old_state = global_mask.Test(event);
if (mode == JVMTI_ENABLE) {
- env->event_masks.EnableEvent(env, art_thread, event);
+ env->event_masks.EnableEvent(env, thread, event);
global_mask.Set(event);
new_state = true;
} else {
DCHECK_EQ(mode, JVMTI_DISABLE);
- // TODO Replace art_thread with a uintptr_t or something to indicate we cannot read from it.
- env->event_masks.DisableEvent(env, art_thread, event);
+ env->event_masks.DisableEvent(env, thread, event);
RecalculateGlobalEventMaskLocked(event);
new_state = global_mask.Test(event);
}
@@ -1314,7 +1263,7 @@
// Handle any special work required for the event type.
if (new_state != old_state) {
- return HandleEventType(event, thread, mode == JVMTI_ENABLE);
+ HandleEventType(event, mode == JVMTI_ENABLE);
}
return ERR(NONE);
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index e48772c..abb15cc 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -198,7 +198,7 @@
}
jvmtiError SetEvent(ArtJvmTiEnv* env,
- jthread thread,
+ art::Thread* thread,
ArtJvmtiEvent event,
jvmtiEventMode mode)
REQUIRES(!envs_lock_);
@@ -246,13 +246,10 @@
REQUIRES(!envs_lock_);
private:
- jvmtiError SetupTraceListener(JvmtiMethodTraceListener* listener,
- ArtJvmtiEvent event,
- jthread thread,
- bool enable);
+ void SetupTraceListener(JvmtiMethodTraceListener* listener, ArtJvmtiEvent event, bool enable);
// Specifically handle the FramePop event which it might not always be possible to turn off.
- jvmtiError SetupFramePopTraceListener(jthread thread, bool enable);
+ void SetupFramePopTraceListener(bool enable);
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
@@ -312,7 +309,7 @@
jclass klass) const
REQUIRES(!envs_lock_);
- jvmtiError HandleEventType(ArtJvmtiEvent event, jthread thread, bool enable);
+ void HandleEventType(ArtJvmtiEvent event, bool enable);
void HandleLocalAccessCapabilityAdded();
void HandleBreakpointEventsChanged(bool enable);
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 5e934df..385ac45 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -1061,14 +1061,8 @@
art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
{
art::WriterMutexLock lk(self, tienv->event_info_mutex_);
- if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
- // Ensure we won't miss exceptions being thrown if we get jit-compiled. We only do this for
- // the first NotifyPopFrame.
- target->IncrementForceInterpreterCount();
-
- // Mark shadow frame as needs_notify_pop_
- shadow_frame->SetNotifyPop(true);
- }
+ // Mark shadow frame as needs_notify_pop_
+ shadow_frame->SetNotifyPop(true);
tienv->notify_frames.insert(shadow_frame);
}
// Make sure can we will go to the interpreter and use the shadow frames.
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 0f25ec4..c7e41be 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -322,9 +322,7 @@
// If the runtime is not yet started or it is required by the debugger, then perform the
// Invocation by the interpreter, explicitly forcing interpretation over JIT to prevent
// cycling around the various JIT/Interpreter methods that handle method invocation.
- if (UNLIKELY(!runtime->IsStarted() ||
- (self->IsForceInterpreter() && !IsNative() && !IsProxyMethod() && IsInvokable()) ||
- Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
+ if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
if (IsStatic()) {
art::interpreter::EnterInterpreterFromInvoke(
self, this, nullptr, args, result, /*stay_in_interpreter=*/ true);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index de9d9ca..89c6a0b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3287,8 +3287,7 @@
return true;
}
- if (Thread::Current()->IsForceInterpreter() ||
- Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
+ if (Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
// Force the use of interpreter when it is required by the debugger.
return true;
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 77cd145..6deb509 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -800,9 +800,8 @@
uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
// If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
// should be done and it knows the real return pc.
- if (UNLIKELY(
- caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) {
+ if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
+ Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
<< caller->PrettyMethod();
@@ -1454,10 +1453,8 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
linker->EnsureInitialized(soa.Self(), called_class, true, true);
- bool force_interpreter = self->IsForceInterpreter() && !called->IsNative();
if (LIKELY(called_class->IsInitialized())) {
- if (UNLIKELY(force_interpreter ||
- Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+ if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
// If we are single-stepping or the called method is deoptimized (by a
// breakpoint, for example), then we have to execute the called method
// with the interpreter.
@@ -1476,8 +1473,7 @@
code = called->GetEntryPointFromQuickCompiledCode();
}
} else if (called_class->IsInitializing()) {
- if (UNLIKELY(force_interpreter ||
- Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+ if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
// If we are single-stepping or the called method is deoptimized (by a
// breakpoint, for example), then we have to execute the called method
// with the interpreter.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 88d6fee..36f7b3d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1491,7 +1491,6 @@
visitor.WalkStack(true);
bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
- self->IsForceInterpreter() ||
Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
if (is_ref) {
// Restore the return value if it's a reference since it might have moved.
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 25d48c2..aa11562 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -289,7 +289,7 @@
}
}
- if (!stay_in_interpreter && !self->IsForceInterpreter()) {
+ if (!stay_in_interpreter) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
jit->MethodEntered(self, shadow_frame.GetMethod());
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 526413b..8890a30 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3571,7 +3571,7 @@
}
force_deopt = force_frame_pop || force_retry_instr;
}
- if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt || IsForceInterpreter()) {
+ if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 085bd98..ec276b5 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1203,26 +1203,6 @@
tls32_.is_transitioning_to_runnable = value;
}
- uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
- return --tls32_.force_interpreter_count;
- }
-
- uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
- return ++tls32_.force_interpreter_count;
- }
-
- void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
- tls32_.force_interpreter_count = value;
- }
-
- uint32_t ForceInterpreterCount() const {
- return tls32_.force_interpreter_count;
- }
-
- bool IsForceInterpreter() const {
- return tls32_.force_interpreter_count != 0;
- }
-
void PushVerifier(verifier::MethodVerifier* verifier);
void PopVerifier(verifier::MethodVerifier* verifier);
@@ -1485,7 +1465,7 @@
thread_exit_check_count(0), handling_signal_(false),
is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
- disable_thread_flip_count(0), user_code_suspend_count(0), force_interpreter_count(0) {
+ disable_thread_flip_count(0), user_code_suspend_count(0) {
}
union StateAndFlags state_and_flags;
@@ -1569,10 +1549,6 @@
// told that AssertHeld should be good enough.
int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
- // Count of how many times this thread has been forced to interpreter. If this is not 0 the
- // thread must remain in interpreted code as much as possible.
- uint32_t force_interpreter_count;
-
// True if everything is in the ideal state for fast interpretation.
// False if we need to switch to the C++ interpreter to handle special cases.
std::atomic<bool32_t> use_mterp;