Revert^2 "Suspend the runtime when switching interpreters."
This reverts commit 9fd68f6795eab7085986f80b1c4d00dc45a68485.

Reland: DoAndMaybeSwitchInterpreter now flips the per-thread use_mterp
flags inside ScopedSuspendAll, falling back to just thread_list_lock_
when the runtime is shutting down or the mutator lock is already held
exclusively. With the mid-update window gone, the debug-only
retry-with-lock checks become plain DCHECKs, and Thread::SetAsyncException
keeps the exception in a handle across Runtime::SetAsyncExceptionsThrown,
which may now suspend all threads.
Test: art/test.py -b --host --64
Test: art/tools/run-libcore-tests.sh '--mode=host' '--variant=X64'
Test: art/tools/run-libjdwp-tests.sh '--mode=host' '--variant=X64'
Change-Id: I9d2faeb8b88ce7cf42915890c6089c725907e6dd
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index aa11562..5dab2fa 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -254,13 +254,6 @@
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());

- // Check that we are using the right interpreter.
- if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
- // The flag might be currently being updated on all threads. Retry with lock.
- MutexLock tll_mu(self, *Locks::thread_list_lock_);
- DCHECK_EQ(self->UseMterp(), CanUseMterp());
- }
-
if (LIKELY(!from_deoptimize)) { // Entering the method, but not via deoptimization.
if (kIsDebugBuild) {
CHECK_EQ(shadow_frame.GetDexPC(), 0u);
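
The deleted block tolerated a benign race: use_mterp is flipped on each
thread one at a time under thread_list_lock_, so a thread reading its own
flag without that lock could transiently disagree with CanUseMterp(). A
self-contained sketch of that pattern (a simplified model, not ART code):

    // The updater flips per-thread flags one at a time under a lock; an
    // unlocked reader can observe its own flag disagreeing with the global
    // predicate mid-update, so the deleted DCHECK retried under the lock.
    #include <atomic>
    #include <mutex>
    #include <vector>

    struct Thread { std::atomic<bool> use_mterp{false}; };

    std::mutex thread_list_lock;                // ~Locks::thread_list_lock_
    std::atomic<bool> global_predicate{false};  // ~interpreter::CanUseMterp()
    std::vector<Thread*> thread_list;

    void UpdateAllThreads(bool value) {
      std::lock_guard<std::mutex> lock(thread_list_lock);
      global_predicate.store(value);
      for (Thread* t : thread_list) {
        t->use_mterp.store(value);              // readers may run between stores
      }
    }

    bool FlagIsConsistent(Thread* self) {
      if (self->use_mterp.load() == global_predicate.load()) {
        return true;
      }
      // Possibly mid-update: re-check with the lock held, as the old code did.
      std::lock_guard<std::mutex> lock(thread_list_lock);
      return self->use_mterp.load() == global_predicate.load();
    }

With this change the flags only move while every mutator thread is suspended
(see the runtime-inl.h hunk below), so running code can no longer observe the
mid-update state and the locked retry is unnecessary.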
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6366035..b3b0ced 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -140,8 +140,10 @@
uint16_t inst_data,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_EQ(self->UseMterp(), CanUseMterp());
// Make sure to check for async exceptions before anything else.
- if (is_mterp && self->UseMterp()) {
+ if (is_mterp) {
+ DCHECK(self->UseMterp());
DCHECK(!self->ObserveAsyncException());
} else if (UNLIKELY(self->ObserveAsyncException())) {
return false;
@@ -219,7 +221,7 @@
// If the bit is not set, we explicitly recheck all the conditions.
// If any of the conditions get falsified, it is important to clear the bit.
bool use_fast_path = false;
- if (is_mterp && self->UseMterp()) {
+ if (is_mterp) {
use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke();
if (!use_fast_path) {
use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method);
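
Because the switch now happens while all threads are suspended, the template
argument is_mterp alone implies self->UseMterp(): the thread could only have
entered through the mterp entry point if its flag was set, and the flag cannot
change under it, so the runtime re-test becomes a debug assertion. A
self-contained sketch of the shape of this helper (simplified from the real
signature):

    #include <cassert>

    struct Thread { bool use_mterp = false; };
    bool can_use_mterp = false;  // stable while any thread runs, per this change

    template <bool is_mterp>
    bool DoInvoke(Thread* self) {
      assert(self->use_mterp == can_use_mterp);  // race-free after this change
      if (is_mterp) {                // compile-time constant: branch folds away
        assert(self->use_mterp);     // implied by entering via mterp
        // ... fast interpreter-to-interpreter invoke ...
        return true;
      }
      // ... generic invoke path ...
      return true;
    }

    // The two instantiations the interpreters call:
    bool InvokeFromMterp(Thread* self) { return DoInvoke<true>(self); }
    bool InvokeFromSwitchInterp(Thread* self) { return DoInvoke<false>(self); }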
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 912c444..fd1430a 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -546,12 +546,7 @@
extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Check that we are using the right interpreter.
- if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
- // The flag might be currently being updated on all threads. Retry with lock.
- MutexLock tll_mu(self, *Locks::thread_list_lock_);
- DCHECK_EQ(self->UseMterp(), CanUseMterp());
- }
+ DCHECK(self->UseMterp());
DCHECK(!Runtime::Current()->IsActiveTransaction());
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t inst_data = inst->Fetch16(0);
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 2ffaf98..c7731f4 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -28,6 +28,7 @@
#include "gc_root-inl.h"
#include "interpreter/mterp/mterp.h"
#include "obj_ptr-inl.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
namespace art {
@@ -90,12 +91,23 @@
}

template<typename Action>
-void Runtime::DoAndMaybeSwitchInterpreter(Action lamda) {
- MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_);
- lamda();
- Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
- thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
- }, nullptr);
+void Runtime::DoAndMaybeSwitchInterpreter(Action lambda) {
+ Thread* self = Thread::Current();
+ if (Runtime::Current()->IsShuttingDown(self) || Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ MutexLock tll_mu(self, *Locks::thread_list_lock_);
+ lambda();
+ Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+ thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+ }, nullptr);
+ } else {
+ ScopedThreadStateChange tsc(self, kSuspended);
+ ScopedSuspendAll ssa(__FUNCTION__);
+ MutexLock tll_mu(self, *Locks::thread_list_lock_);
+ lambda();
+ Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+ thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+ }, nullptr);
+ }
}

}  // namespace art
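
The two branches cover the cases where suspending is impossible or redundant:
during shutdown the suspend-all machinery may no longer be safe to use, and a
caller that holds the mutator lock exclusively has already stopped every
mutator. On the common path, the thread first moves itself to kSuspended
(releasing its shared hold on the mutator lock) so that ScopedSuspendAll can
acquire the lock exclusively; the flags then flip while no other thread runs,
and each thread observes the change only at a suspend point. The guarantee can
be modeled with a reader-writer lock (a minimal analogue, not ART code):

    #include <atomic>
    #include <shared_mutex>
    #include <vector>

    struct Thread { std::atomic<bool> use_mterp{false}; };

    // shared = a mutator is running; exclusive = "all threads suspended".
    std::shared_mutex mutator_lock;
    std::vector<Thread*> thread_list;

    void SwitchInterpreter(bool use_mterp) {
      std::unique_lock<std::shared_mutex> suspend_all(mutator_lock);  // ~ScopedSuspendAll
      for (Thread* t : thread_list) {
        t->use_mterp.store(use_mterp, std::memory_order_relaxed);
      }
    }  // Mutators resume; every flag is already consistent.

    void InterpretSomeBytecode(Thread* self) {
      std::shared_lock<std::shared_mutex> running(mutator_lock);  // mutator running
      if (self->use_mterp.load(std::memory_order_relaxed)) {
        // mterp fast path
      } else {
        // switch/C++ interpreter
      }
    }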
diff --git a/runtime/runtime.h b/runtime/runtime.h
index ace0eea..85a047e 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -683,7 +683,7 @@
// It ensures that two calls do not interfere with each other and
// it makes it possible to DCHECK that thread local flag is correct.
template<typename Action>
- static void DoAndMaybeSwitchInterpreter(Action lamda);
+ static void DoAndMaybeSwitchInterpreter(Action lambda);

// Returns the build fingerprint, if set. Otherwise an empty string is returned.
std::string GetFingerprint() {
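
Callers are expected to mutate the state that interpreter::CanUseMterp() reads
inside the lambda, so the predicate and every thread's cached tls32_.use_mterp
change together while nothing else runs. A sketch of such a caller, with the
member name assumed for illustration:

    // Hypothetical flag setter (member name assumed): the mutation and the
    // per-thread flag refresh happen under the same suspend-all region.
    void Runtime::SetAsyncExceptionsThrown() {
      DoAndMaybeSwitchInterpreter([=]() { async_exceptions_thrown_ = true; });
    }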
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8890a30..581fb4c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4179,7 +4179,11 @@

void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
CHECK(new_exception != nullptr);
- Runtime::Current()->SetAsyncExceptionsThrown();
+ {
+ StackHandleScope<1> hs(Thread::Current());
+ auto h_exception = hs.NewHandleWrapper(&new_exception);
+ Runtime::Current()->SetAsyncExceptionsThrown();
+ }
if (kIsDebugBuild) {
// Make sure we are in a checkpoint.
MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
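
Runtime::SetAsyncExceptionsThrown can now suspend all threads, and suspension
is a point where a moving garbage collector may relocate objects; a raw ObjPtr
held across it would go stale. NewHandleWrapper registers the local variable
as a GC root and writes the (possibly moved) address back into new_exception
when the scope ends. A rough, self-contained sketch of the idea behind such a
wrapper (not ART's implementation):

    #include <algorithm>
    #include <vector>

    struct Object;

    // Stand-in for ART's handle scopes: root slots that an (imagined) moving
    // collector rewrites whenever it relocates the objects they point to.
    std::vector<Object**> gc_root_slots;

    class HandleWrapper {
     public:
      explicit HandleWrapper(Object** slot) : slot_(slot) {
        gc_root_slots.push_back(slot_);  // publish the local as a root
      }
      ~HandleWrapper() {                 // *slot_ was rewritten if the object moved
        auto it = std::find(gc_root_slots.begin(), gc_root_slots.end(), slot_);
        if (it != gc_root_slots.end()) {
          gc_root_slots.erase(it);
        }
      }
     private:
      Object** slot_;
    };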