diff options
-rw-r--r--  runtime/interpreter/interpreter.cc        |  7
-rw-r--r--  runtime/interpreter/interpreter_common.h  |  6
-rw-r--r--  runtime/interpreter/mterp/mterp.cc        |  7
-rw-r--r--  runtime/runtime-inl.h                     | 24
-rw-r--r--  runtime/runtime.h                         |  2
-rw-r--r--  runtime/thread.cc                         |  6
6 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index d68bf95593..d004d642d0 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -253,6 +253,13 @@ static inline JValue Execute( DCHECK(!shadow_frame.GetMethod()->IsAbstract()); DCHECK(!shadow_frame.GetMethod()->IsNative()); + // Check that we are using the right interpreter. + if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) { + // The flag might be currently being updated on all threads. Retry with lock. + MutexLock tll_mu(self, *Locks::thread_list_lock_); + DCHECK_EQ(self->UseMterp(), CanUseMterp()); + } + if (LIKELY(!from_deoptimize)) { // Entering the method, but not via deoptimization. if (kIsDebugBuild) { CHECK_EQ(shadow_frame.GetDexPC(), 0u); diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 62f5d911bc..a633a63873 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -140,10 +140,8 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self, uint16_t inst_data, JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK_EQ(self->UseMterp(), CanUseMterp()); // Make sure to check for async exceptions before anything else. - if (is_mterp) { - DCHECK(self->UseMterp()); + if (is_mterp && self->UseMterp()) { DCHECK(!self->ObserveAsyncException()); } else if (UNLIKELY(self->ObserveAsyncException())) { return false; @@ -221,7 +219,7 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self, // If the bit is not set, we explicitly recheck all the conditions. // If any of the conditions get falsified, it is important to clear the bit. 
bool use_fast_path = false; - if (is_mterp) { + if (is_mterp && self->UseMterp()) { use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke(); if (!use_fast_path) { use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method); diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc index fd1430aa70..912c44463f 100644 --- a/runtime/interpreter/mterp/mterp.cc +++ b/runtime/interpreter/mterp/mterp.cc @@ -546,7 +546,12 @@ DEFINE_RUNTIME_DEBUG_FLAG(MterpCheckHelper, kSlowMode); extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr) REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK(self->UseMterp()); + // Check that we are using the right interpreter. + if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) { + // The flag might be currently being updated on all threads. Retry with lock. + MutexLock tll_mu(self, *Locks::thread_list_lock_); + DCHECK_EQ(self->UseMterp(), CanUseMterp()); + } DCHECK(!Runtime::Current()->IsActiveTransaction()); const Instruction* inst = Instruction::At(dex_pc_ptr); uint16_t inst_data = inst->Fetch16(0); diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h index c7731f44d2..2ffaf98103 100644 --- a/runtime/runtime-inl.h +++ b/runtime/runtime-inl.h @@ -28,7 +28,6 @@ #include "gc_root-inl.h" #include "interpreter/mterp/mterp.h" #include "obj_ptr-inl.h" -#include "scoped_thread_state_change-inl.h" #include "thread_list.h" namespace art { @@ -91,23 +90,12 @@ inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type) } template<typename Action> -void Runtime::DoAndMaybeSwitchInterpreter(Action lambda) { - Thread* self = Thread::Current(); - if (Runtime::Current()->IsShuttingDown(self) || Locks::mutator_lock_->IsExclusiveHeld(self)) { - MutexLock tll_mu(self, *Locks::thread_list_lock_); - lambda(); - Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) { - thread->tls32_.use_mterp.store(interpreter::CanUseMterp()); - }, 
nullptr); - } else { - ScopedThreadStateChange tsc(self, kSuspended); - ScopedSuspendAll ssa(__FUNCTION__); - MutexLock tll_mu(self, *Locks::thread_list_lock_); - lambda(); - Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) { - thread->tls32_.use_mterp.store(interpreter::CanUseMterp()); - }, nullptr); - } +void Runtime::DoAndMaybeSwitchInterpreter(Action lamda) { + MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_); + lamda(); + Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) { + thread->tls32_.use_mterp.store(interpreter::CanUseMterp()); + }, nullptr); } } // namespace art diff --git a/runtime/runtime.h b/runtime/runtime.h index bd85cf63bd..b76a658b49 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -669,7 +669,7 @@ class Runtime { // It ensures that two calls do not interfere with each other and // it makes it possible to DCHECK that thread local flag is correct. template<typename Action> - static void DoAndMaybeSwitchInterpreter(Action lambda); + static void DoAndMaybeSwitchInterpreter(Action lamda); // Returns the build fingerprint, if set. Otherwise an empty string is returned. std::string GetFingerprint() { diff --git a/runtime/thread.cc b/runtime/thread.cc index 26bfa44225..6c41ae42b7 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -4156,11 +4156,7 @@ void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) { CHECK(new_exception != nullptr); - { - StackHandleScope<1> hs(Thread::Current()); - auto h_exception = hs.NewHandleWrapper(&new_exception); - Runtime::Current()->SetAsyncExceptionsThrown(); - } + Runtime::Current()->SetAsyncExceptionsThrown(); if (kIsDebugBuild) { // Make sure we are in a checkpoint. MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); |