-rw-r--r--  openjdkjvmti/deopt_manager.cc  | 142
-rw-r--r--  openjdkjvmti/deopt_manager.h   |  37
-rw-r--r--  openjdkjvmti/events.cc         |  30
-rw-r--r--  openjdkjvmti/ti_thread.cc      | 136
-rw-r--r--  runtime/jit/jit.cc             |   9
-rw-r--r--  runtime/jit/jit_code_cache.cc  |  15
-rw-r--r--  runtime/jit/jit_code_cache.h   |   2
-rw-r--r--  runtime/suspend_reason.h       |   2
-rw-r--r--  runtime/thread.cc              |  42
-rw-r--r--  runtime/thread.h               |  20
-rw-r--r--  runtime/thread_list.cc         |   2
-rw-r--r--  runtime/thread_pool.cc         |   5
12 files changed, 356 insertions(+), 86 deletions(-)
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index 2f24d7ea3d..b444cc789f 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -40,7 +40,9 @@
#include "dex/dex_file_annotations.h"
#include "dex/modifiers.h"
#include "events-inl.h"
+#include "intrinsics_list.h"
#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jni/jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
@@ -84,11 +86,13 @@ DeoptManager::DeoptManager()
deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
performing_deoptimization_(false),
global_deopt_count_(0),
+ global_interpreter_deopt_count_(0),
deopter_count_(0),
breakpoint_status_lock_("JVMTI_BreakpointStatusLock",
static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1)),
inspection_callback_(this),
- set_local_variable_called_(false) { }
+ set_local_variable_called_(false),
+ already_disabled_intrinsics_(false) { }
void DeoptManager::Setup() {
art::ScopedThreadStateChange stsc(art::Thread::Current(),
@@ -159,18 +163,18 @@ bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
return elem != breakpoint_status_.end() && elem->second != 0;
}
-void DeoptManager::RemoveDeoptimizeAllMethods() {
+void DeoptManager::RemoveDeoptimizeAllMethods(FullDeoptRequirement req) {
art::Thread* self = art::Thread::Current();
art::ScopedThreadSuspension sts(self, art::kSuspended);
deoptimization_status_lock_.ExclusiveLock(self);
- RemoveDeoptimizeAllMethodsLocked(self);
+ RemoveDeoptimizeAllMethodsLocked(self, req);
}
-void DeoptManager::AddDeoptimizeAllMethods() {
+void DeoptManager::AddDeoptimizeAllMethods(FullDeoptRequirement req) {
art::Thread* self = art::Thread::Current();
art::ScopedThreadSuspension sts(self, art::kSuspended);
deoptimization_status_lock_.ExclusiveLock(self);
- AddDeoptimizeAllMethodsLocked(self);
+ AddDeoptimizeAllMethodsLocked(self, req);
}
void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
@@ -207,7 +211,7 @@ void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
deoptimization_status_lock_.ExclusiveUnlock(self);
return;
} else if (is_default) {
- AddDeoptimizeAllMethodsLocked(self);
+ AddDeoptimizeAllMethodsLocked(self, FullDeoptRequirement::kInterpreter);
} else {
PerformLimitedDeoptimization(self, method);
}
@@ -244,7 +248,7 @@ void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) {
return;
} else if (is_last_breakpoint) {
if (UNLIKELY(is_default)) {
- RemoveDeoptimizeAllMethodsLocked(self);
+ RemoveDeoptimizeAllMethodsLocked(self, FullDeoptRequirement::kInterpreter);
} else {
PerformLimitedUndeoptimization(self, method);
}
@@ -272,13 +276,22 @@ class ScopedDeoptimizationContext : public art::ValueObject {
RELEASE(deopt->deoptimization_status_lock_)
ACQUIRE(art::Locks::mutator_lock_)
ACQUIRE(art::Roles::uninterruptible_)
- : self_(self), deopt_(deopt), uninterruptible_cause_(nullptr) {
+ : self_(self),
+ deopt_(deopt),
+ uninterruptible_cause_(nullptr),
+ jit_(art::Runtime::Current()->GetJit()) {
deopt_->WaitForDeoptimizationToFinishLocked(self_);
DCHECK(!deopt->performing_deoptimization_)
<< "Already performing deoptimization on another thread!";
// Use performing_deoptimization_ to keep track of the lock.
deopt_->performing_deoptimization_ = true;
deopt_->deoptimization_status_lock_.Unlock(self_);
+ // Stop the jit. We might need to disable all intrinsics, which requires the jit to be
+ // disabled, and this is the only place we can do that. Since this isn't expected to be
+ // entered often it should be fine to always stop it.
+ if (jit_ != nullptr) {
+ jit_->Stop();
+ }
art::Runtime::Current()->GetThreadList()->SuspendAll("JVMTI Deoptimizing methods",
/*long_suspend*/ false);
uninterruptible_cause_ = self_->StartAssertNoThreadSuspension("JVMTI deoptimizing methods");
@@ -291,6 +304,10 @@ class ScopedDeoptimizationContext : public art::ValueObject {
self_->EndAssertNoThreadSuspension(uninterruptible_cause_);
// Release the mutator lock.
art::Runtime::Current()->GetThreadList()->ResumeAll();
+ // Let the jit start again.
+ if (jit_ != nullptr) {
+ jit_->Start();
+ }
// Let other threads know it's fine to proceed.
art::MutexLock lk(self_, deopt_->deoptimization_status_lock_);
deopt_->performing_deoptimization_ = false;
@@ -301,22 +318,44 @@ class ScopedDeoptimizationContext : public art::ValueObject {
art::Thread* self_;
DeoptManager* deopt_;
const char* uninterruptible_cause_;
+ art::jit::Jit* jit_;
};
-void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) {
+void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req) {
+ DCHECK_GE(global_deopt_count_, global_interpreter_deopt_count_);
global_deopt_count_++;
+ if (req == FullDeoptRequirement::kInterpreter) {
+ global_interpreter_deopt_count_++;
+ }
if (global_deopt_count_ == 1) {
- PerformGlobalDeoptimization(self);
+ PerformGlobalDeoptimization(self,
+ /*needs_interpreter*/ global_interpreter_deopt_count_ > 0,
+ /*disable_intrinsics*/ global_interpreter_deopt_count_ == 0);
+ } else if (req == FullDeoptRequirement::kInterpreter && global_interpreter_deopt_count_ == 1) {
+ // First kInterpreter request.
+ PerformGlobalDeoptimization(self,
+ /*needs_interpreter*/true,
+ /*disable_intrinsics*/false);
} else {
WaitForDeoptimizationToFinish(self);
}
}
-void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) {
+void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req) {
DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!";
+ DCHECK_GE(global_deopt_count_, global_interpreter_deopt_count_);
global_deopt_count_--;
+ if (req == FullDeoptRequirement::kInterpreter) {
+ global_interpreter_deopt_count_--;
+ }
if (global_deopt_count_ == 0) {
- PerformGlobalUndeoptimization(self);
+ PerformGlobalUndeoptimization(self,
+ /*still_needs_stubs*/ false,
+ /*disable_intrinsics*/ false);
+ } else if (req == FullDeoptRequirement::kInterpreter && global_interpreter_deopt_count_ == 0) {
+ PerformGlobalUndeoptimization(self,
+ /*still_needs_stubs*/ global_deopt_count_ > 0,
+ /*disable_intrinsics*/ global_deopt_count_ > 0);
} else {
WaitForDeoptimizationToFinish(self);
}
@@ -332,18 +371,85 @@ void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMet
art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method);
}
-void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) {
+void DeoptManager::PerformGlobalDeoptimization(art::Thread* self,
+ bool needs_interpreter,
+ bool disable_intrinsics) {
ScopedDeoptimizationContext sdc(self, this);
- art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything(
- kDeoptManagerInstrumentationKey);
+ art::Runtime::Current()->GetInstrumentation()->EnableMethodTracing(
+ kDeoptManagerInstrumentationKey, needs_interpreter);
+ MaybeDisableIntrinsics(disable_intrinsics);
}
-void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) {
+void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self,
+ bool still_needs_stubs,
+ bool disable_intrinsics) {
ScopedDeoptimizationContext sdc(self, this);
- art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything(
- kDeoptManagerInstrumentationKey);
+ if (still_needs_stubs) {
+ art::Runtime::Current()->GetInstrumentation()->EnableMethodTracing(
+ kDeoptManagerInstrumentationKey, /*needs_interpreter*/false);
+ MaybeDisableIntrinsics(disable_intrinsics);
+ } else {
+ art::Runtime::Current()->GetInstrumentation()->DisableMethodTracing(
+ kDeoptManagerInstrumentationKey);
+ // We shouldn't care about intrinsics if we don't need tracing anymore.
+ DCHECK(!disable_intrinsics);
+ }
+}
+
+static void DisableSingleIntrinsic(const char* class_name,
+ const char* method_name,
+ const char* signature)
+ REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
+ // Since these intrinsics are all loaded during runtime startup this cannot fail and will not
+ // suspend.
+ art::Thread* self = art::Thread::Current();
+ art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
+ art::ObjPtr<art::mirror::Class> cls = class_linker->FindSystemClass(self, class_name);
+
+ if (cls == nullptr) {
+ LOG(FATAL) << "Could not find class of intrinsic "
+ << class_name << "->" << method_name << signature;
+ }
+
+ art::ArtMethod* method = cls->FindClassMethod(method_name, signature, art::kRuntimePointerSize);
+ if (method == nullptr || method->GetDeclaringClass() != cls) {
+ LOG(FATAL) << "Could not find method of intrinsic "
+ << class_name << "->" << method_name << signature;
+ }
+
+ if (LIKELY(method->IsIntrinsic())) {
+ method->SetNotIntrinsic();
+ } else {
+ LOG(WARNING) << method->PrettyMethod() << " was already marked as non-intrinsic!";
+ }
}
+void DeoptManager::MaybeDisableIntrinsics(bool do_disable) {
+ if (!do_disable || already_disabled_intrinsics_) {
+ // Don't toggle intrinsics on and off. It would lead to too much purging of the jit and would
+ // require us to keep around the intrinsic status of all methods.
+ return;
+ }
+ already_disabled_intrinsics_ = true;
+ // First just mark all intrinsic methods as no longer intrinsics.
+#define DISABLE_INTRINSIC(_1, _2, _3, _4, _5, decl_class_name, meth_name, meth_desc) \
+ DisableSingleIntrinsic(decl_class_name, meth_name, meth_desc);
+ INTRINSICS_LIST(DISABLE_INTRINSIC);
+#undef DISABLE_INTRINSIC
+ // Next tell the jit to throw away all of its code (since there might be intrinsic code in it).
+ // TODO It would be nice to be more selective.
+ art::jit::Jit* jit = art::Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->GetCodeCache()->ClearAllCompiledDexCode();
+ }
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::thread_list_lock_);
+ // Now make all threads go to interpreter.
+ art::Runtime::Current()->GetThreadList()->ForEach(
+ [](art::Thread* thr, void* ctx) REQUIRES(art::Locks::mutator_lock_) {
+ reinterpret_cast<DeoptManager*>(ctx)->DeoptimizeThread(thr);
+ },
+ this);
+}
void DeoptManager::RemoveDeoptimizationRequester() {
art::Thread* self = art::Thread::Current();
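
The two counters above form a small state machine: global_deopt_count_ tracks every deopt-everything request, while global_interpreter_deopt_count_ tracks the subset that truly needs the switch interpreter. Stub-only tracing additionally has to disable intrinsics, since intrinsified call sites bypass the instrumentation stubs. Below is a minimal standalone sketch of just that bookkeeping; the types are toys, and Transition()/Undo() merely log where the real code calls into the instrumentation.

// deopt_counts.cc - standalone sketch; compile with: g++ -std=c++17 deopt_counts.cc
#include <cassert>
#include <cstdint>
#include <iostream>

enum class FullDeoptRequirement { kStubs, kInterpreter };

class DeoptCounts {
 public:
  void Add(FullDeoptRequirement req) {
    assert(global_deopt_count_ >= global_interpreter_deopt_count_);
    global_deopt_count_++;
    if (req == FullDeoptRequirement::kInterpreter) {
      global_interpreter_deopt_count_++;
    }
    if (global_deopt_count_ == 1) {
      // First request of any kind. Stub-only tracing must disable intrinsics
      // because intrinsified calls never reach the entry/exit stubs.
      Transition(/*needs_interpreter=*/global_interpreter_deopt_count_ > 0,
                 /*disable_intrinsics=*/global_interpreter_deopt_count_ == 0);
    } else if (req == FullDeoptRequirement::kInterpreter &&
               global_interpreter_deopt_count_ == 1) {
      // Stubs were already installed; upgrade everything to the interpreter.
      Transition(/*needs_interpreter=*/true, /*disable_intrinsics=*/false);
    }
  }

  void Remove(FullDeoptRequirement req) {
    assert(global_deopt_count_ > 0);
    global_deopt_count_--;
    if (req == FullDeoptRequirement::kInterpreter) {
      global_interpreter_deopt_count_--;
    }
    if (global_deopt_count_ == 0) {
      Undo();  // Last user gone: stop method tracing entirely.
    } else if (req == FullDeoptRequirement::kInterpreter &&
               global_interpreter_deopt_count_ == 0) {
      // Interpreter users gone but stub users remain: fall back to stubs.
      Transition(/*needs_interpreter=*/false, /*disable_intrinsics=*/true);
    }
  }

 private:
  void Transition(bool needs_interpreter, bool disable_intrinsics) {
    std::cout << "interpreter=" << needs_interpreter
              << " disable_intrinsics=" << disable_intrinsics << "\n";
  }
  void Undo() { std::cout << "undeoptimize everything\n"; }

  uint32_t global_deopt_count_ = 0;
  uint32_t global_interpreter_deopt_count_ = 0;
};

int main() {
  DeoptCounts c;
  c.Add(FullDeoptRequirement::kStubs);           // interpreter=0 disable_intrinsics=1
  c.Add(FullDeoptRequirement::kInterpreter);     // interpreter=1 disable_intrinsics=0
  c.Remove(FullDeoptRequirement::kInterpreter);  // interpreter=0 disable_intrinsics=1
  c.Remove(FullDeoptRequirement::kStubs);        // undeoptimize everything
}
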
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index 6e991dee3d..1a13c0ff15 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -51,6 +51,11 @@ class Class;
namespace openjdkjvmti {
+enum class FullDeoptRequirement {
+ kStubs,
+ kInterpreter,
+};
+
class DeoptManager;
struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback {
@@ -94,11 +99,11 @@ class DeoptManager {
REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- void AddDeoptimizeAllMethods()
+ void AddDeoptimizeAllMethods(FullDeoptRequirement requirement)
REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- void RemoveDeoptimizeAllMethods()
+ void RemoveDeoptimizeAllMethods(FullDeoptRequirement requirement)
REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -132,19 +137,23 @@ class DeoptManager {
void WaitForDeoptimizationToFinishLocked(art::Thread* self)
REQUIRES(deoptimization_status_lock_, !art::Locks::mutator_lock_);
- void AddDeoptimizeAllMethodsLocked(art::Thread* self)
+ void AddDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req)
RELEASE(deoptimization_status_lock_)
REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
- void RemoveDeoptimizeAllMethodsLocked(art::Thread* self)
+ void RemoveDeoptimizeAllMethodsLocked(art::Thread* self, FullDeoptRequirement req)
RELEASE(deoptimization_status_lock_)
REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
- void PerformGlobalDeoptimization(art::Thread* self)
+ void PerformGlobalDeoptimization(art::Thread* self,
+ bool needs_interpreter,
+ bool disable_intrinsics)
RELEASE(deoptimization_status_lock_)
REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
- void PerformGlobalUndeoptimization(art::Thread* self)
+ void PerformGlobalUndeoptimization(art::Thread* self,
+ bool still_needs_stubs,
+ bool disable_intrinsics)
RELEASE(deoptimization_status_lock_)
REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
@@ -156,15 +165,25 @@ class DeoptManager {
RELEASE(deoptimization_status_lock_)
REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+ // Disables intrinsics and purges the jit code cache if needed.
+ void MaybeDisableIntrinsics(bool do_disable)
+ REQUIRES(art::Locks::mutator_lock_,
+ !deoptimization_status_lock_,
+ art::Roles::uninterruptible_);
+
static constexpr const char* kDeoptManagerInstrumentationKey = "JVMTI_DeoptManager";
art::Mutex deoptimization_status_lock_ ACQUIRED_BEFORE(art::Locks::classlinker_classes_lock_);
art::ConditionVariable deoptimization_condition_ GUARDED_BY(deoptimization_status_lock_);
bool performing_deoptimization_ GUARDED_BY(deoptimization_status_lock_);
- // Number of times we have gotten requests to deopt everything.
+ // Number of times we have gotten requests to deopt everything, both those requiring and those
+ // not requiring the interpreter.
uint32_t global_deopt_count_ GUARDED_BY(deoptimization_status_lock_);
+ // Number of deopt-everything requests that require interpreter.
+ uint32_t global_interpreter_deopt_count_ GUARDED_BY(deoptimization_status_lock_);
+
// Number of users of deoptimization there currently are.
uint32_t deopter_count_ GUARDED_BY(deoptimization_status_lock_);
@@ -182,6 +201,10 @@ class DeoptManager {
// OSR after this.
std::atomic<bool> set_local_variable_called_;
+ // Whether we have already disabled intrinsics. Since doing this throws out all JIT code we
+ // will only ever do it once and never undo it.
+ bool already_disabled_intrinsics_ GUARDED_BY(art::Locks::mutator_lock_);
+
// Helper for setting up/tearing-down for deoptimization.
friend class ScopedDeoptimizationContext;
};
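
The DISABLE_INTRINSIC expansion in deopt_manager.cc uses the X-macro pattern: intrinsics_list.h defines INTRINSICS_LIST(V) as one V(...) invocation per intrinsic, and the caller supplies the macro to apply to each entry. A toy illustration of the pattern (the list and its fields are invented for the example, not ART's real intrinsics):

// x_macro.cc - toy X-macro list; compile with: g++ -std=c++17 x_macro.cc
#include <iostream>

// Each entry: V(Name, ClassDescriptor, MethodName, Signature)
#define TOY_INTRINSICS_LIST(V)                          \
  V(MathAbsInt, "Ljava/lang/Math;", "abs", "(I)I")      \
  V(StringLength, "Ljava/lang/String;", "length", "()I")

int main() {
  // Expand the list once, printing each entry; the real code instead expands
  // it into one DisableSingleIntrinsic(...) call per intrinsic.
#define PRINT_INTRINSIC(name, klass, method, sig) \
  std::cout << #name << ": " << klass << "->" << method << sig << "\n";
  TOY_INTRINSICS_LIST(PRINT_INTRINSIC)
#undef PRINT_INTRINSIC
}
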
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index f71a5dc72d..10a4923935 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -888,16 +888,29 @@ static bool EventNeedsFullDeopt(ArtJvmtiEvent event) {
case ArtJvmtiEvent::kBreakpoint:
case ArtJvmtiEvent::kException:
return false;
- // TODO We should support more of these or at least do something to make them discriminate by
- // thread.
+ default:
+ return true;
+ }
+}
+
+static FullDeoptRequirement GetFullDeoptRequirement(ArtJvmtiEvent event) {
+ switch (event) {
+ // TODO We should support more of these as Limited or at least do something to make them
+ // discriminate by thread.
case ArtJvmtiEvent::kMethodEntry:
- case ArtJvmtiEvent::kExceptionCatch:
case ArtJvmtiEvent::kMethodExit:
+ // We only need MethodEntered and MethodExited for these so we can use Stubs. We will need to
+ // disable intrinsics.
+ // TODO Offer a version of this without disabling intrinsics.
+ return FullDeoptRequirement::kStubs;
+ case ArtJvmtiEvent::kExceptionCatch:
case ArtJvmtiEvent::kFieldModification:
case ArtJvmtiEvent::kFieldAccess:
case ArtJvmtiEvent::kSingleStep:
+ // NB If we ever make this runnable using stubs or some other method we will need to be careful
+ // that it doesn't require disabling intrinsics.
case ArtJvmtiEvent::kFramePop:
- return true;
+ return FullDeoptRequirement::kInterpreter;
default:
LOG(FATAL) << "Unexpected event type!";
UNREACHABLE();
@@ -907,19 +920,18 @@ static bool EventNeedsFullDeopt(ArtJvmtiEvent event) {
void EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener,
ArtJvmtiEvent event,
bool enable) {
- bool needs_full_deopt = EventNeedsFullDeopt(event);
// Make sure we can deopt.
{
art::ScopedObjectAccess soa(art::Thread::Current());
DeoptManager* deopt_manager = DeoptManager::Get();
if (enable) {
deopt_manager->AddDeoptimizationRequester();
- if (needs_full_deopt) {
- deopt_manager->AddDeoptimizeAllMethods();
+ if (EventNeedsFullDeopt(event)) {
+ deopt_manager->AddDeoptimizeAllMethods(GetFullDeoptRequirement(event));
}
} else {
- if (needs_full_deopt) {
- deopt_manager->RemoveDeoptimizeAllMethods();
+ if (EventNeedsFullDeopt(event)) {
+ deopt_manager->RemoveDeoptimizeAllMethods(GetFullDeoptRequirement(event));
}
deopt_manager->RemoveDeoptimizationRequester();
}
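
Nothing changes for agents at the JVMTI API level; the deopt requirement is derived from which events get enabled. A minimal agent sketch that enables MethodEntry, which after this change maps to FullDeoptRequirement::kStubs instead of forcing the interpreter (the callback body is illustrative):

// agent.cc - build as a shared library and load with -agentpath.
#include <jvmti.h>

static void JNICALL OnMethodEntry(jvmtiEnv* jvmti, JNIEnv* jni, jthread thread,
                                  jmethodID method) {
  // Illustrative no-op; a real agent would inspect 'method' here.
}

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = nullptr;
  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_2) != JNI_OK) {
    return JNI_ERR;
  }
  jvmtiCapabilities caps = {};
  caps.can_generate_method_entry_events = 1;
  if (jvmti->AddCapabilities(&caps) != JVMTI_ERROR_NONE) {
    return JNI_ERR;
  }
  jvmtiEventCallbacks callbacks = {};
  callbacks.MethodEntry = &OnMethodEntry;
  jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
  // Enabling only MethodEntry/MethodExit now requests stub-based tracing
  // (kStubs) rather than a full switch to the interpreter.
  jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_METHOD_ENTRY, nullptr);
  return JNI_OK;
}
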
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 369eb7641e..949b566860 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -818,6 +818,37 @@ jvmtiError ThreadUtil::RunAgentThread(jvmtiEnv* jvmti_env,
return ERR(NONE);
}
+class ScopedSuspendByPeer {
+ public:
+ explicit ScopedSuspendByPeer(jthread jtarget)
+ : thread_list_(art::Runtime::Current()->GetThreadList()),
+ timeout_(false),
+ target_(thread_list_->SuspendThreadByPeer(jtarget,
+ /* suspend_thread */ true,
+ art::SuspendReason::kInternal,
+ &timeout_)) { }
+ ~ScopedSuspendByPeer() {
+ if (target_ != nullptr) {
+ if (!thread_list_->Resume(target_, art::SuspendReason::kInternal)) {
+ LOG(ERROR) << "Failed to resume " << target_ << "!";
+ }
+ }
+ }
+
+ art::Thread* GetTargetThread() const {
+ return target_;
+ }
+
+ bool TimedOut() const {
+ return timeout_;
+ }
+
+ private:
+ art::ThreadList* thread_list_;
+ bool timeout_;
+ art::Thread* target_;
+};
+
jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
jthread target_jthread) {
// Loop since we need to bail out and try again if we would end up getting suspended while holding
@@ -845,29 +876,27 @@ jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
if (!GetAliveNativeThread(target_jthread, soa, &target, &err)) {
return err;
}
- art::ThreadState state = target->GetState();
- if (state == art::ThreadState::kStarting || target->IsStillStarting()) {
- return ERR(THREAD_NOT_ALIVE);
- } else {
- art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
- if (target->GetUserCodeSuspendCount() != 0) {
- return ERR(THREAD_SUSPENDED);
- }
- }
}
- bool timeout = true;
- art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
- target_jthread,
- /* request_suspension */ true,
- art::SuspendReason::kForUserCode,
- &timeout);
- if (ret_target == nullptr && !timeout) {
+ // Get the actual thread in a suspended state so we can change the user-code suspend count.
+ ScopedSuspendByPeer ssbp(target_jthread);
+ if (ssbp.GetTargetThread() == nullptr && !ssbp.TimedOut()) {
// TODO It would be good to get more information about why exactly the thread failed to
// suspend.
return ERR(INTERNAL);
- } else if (!timeout) {
- // we didn't time out and got a result.
- return OK;
+ } else if (!ssbp.TimedOut()) {
+ art::ThreadState state = ssbp.GetTargetThread()->GetState();
+ if (state == art::ThreadState::kStarting || ssbp.GetTargetThread()->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ // We didn't time out and got a result. Suspend the thread for user-code and return. It's
+ // already suspended internally so we don't need to do anything but increment the count.
+ art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() != 0) {
+ return ERR(THREAD_SUSPENDED);
+ }
+ bool res = ssbp.GetTargetThread()->ModifySuspendCount(
+ self, +1, nullptr, art::SuspendReason::kForUserCode);
+ return res ? OK : ERR(INTERNAL);
}
// We timed out. Just go around and try again.
} while (true);
@@ -876,6 +905,17 @@ jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
jvmtiError ThreadUtil::SuspendSelf(art::Thread* self) {
CHECK(self == art::Thread::Current());
+ if (!self->CanBeSuspendedByUserCode()) {
+ // TODO This is really undesirable. As far as I can tell this can only come about because of
+ // class-loads in the jit-threads (through either VMObjectAlloc or the ClassLoad/ClassPrepare
+ // events that we send). It's unlikely that anyone would be suspending themselves there since
+ // it's almost guaranteed to cause a deadlock but it is technically allowed. Ideally we'd want
+ // to put a CHECK here (or in the event-dispatch code) that we are only in this situation when
+ // sending the GC callbacks but the jit causing events means we cannot do this.
+ LOG(WARNING) << "Attempt to self-suspend on a thread without suspension enabled. Thread is "
+ << *self;
+ return ERR(INTERNAL);
+ }
{
art::MutexLock mu(self, *art::Locks::user_code_suspension_lock_);
art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
@@ -923,7 +963,6 @@ jvmtiError ThreadUtil::ResumeThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NULL_POINTER);
}
art::Thread* self = art::Thread::Current();
- art::Thread* target;
// Retry until we know we won't get suspended by user code while resuming something.
do {
SuspendCheck(self);
@@ -934,36 +973,37 @@ jvmtiError ThreadUtil::ResumeThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
continue;
}
// From now on we know we cannot get suspended by user-code.
- {
- // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
- // have the 'suspend_lock' locked here.
- art::ScopedObjectAccess soa(self);
- art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
- jvmtiError err = ERR(INTERNAL);
- if (!GetAliveNativeThread(thread, soa, &target, &err)) {
- return err;
- } else if (target == self) {
- // We would have paused until we aren't suspended anymore due to the ScopedObjectAccess so
- // we can just return THREAD_NOT_SUSPENDED. Unfortunately we cannot do any real DCHECKs
- // about current state since it's all concurrent.
- return ERR(THREAD_NOT_SUSPENDED);
- }
- // The JVMTI spec requires us to return THREAD_NOT_SUSPENDED if it is alive but we really
- // cannot tell why resume failed.
- {
- art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
- if (target->GetUserCodeSuspendCount() == 0) {
- return ERR(THREAD_NOT_SUSPENDED);
- }
- }
+ // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+ // have the 'suspend_lock' locked here.
+ art::ScopedObjectAccess soa(self);
+ if (thread == nullptr) {
+ // The thread is the current thread.
+ return ERR(THREAD_NOT_SUSPENDED);
+ } else if (!soa.Env()->IsInstanceOf(thread, art::WellKnownClasses::java_lang_Thread)) {
+ // Not a thread object.
+ return ERR(INVALID_THREAD);
+ } else if (self->GetPeer() == soa.Decode<art::mirror::Object>(thread)) {
+ // The thread is the current thread.
+ return ERR(THREAD_NOT_SUSPENDED);
+ }
+ ScopedSuspendByPeer ssbp(thread);
+ if (ssbp.TimedOut()) {
+ // Unknown error. Couldn't suspend thread!
+ return ERR(INTERNAL);
+ } else if (ssbp.GetTargetThread() == nullptr) {
+ // Thread must not be alive.
+ return ERR(THREAD_NOT_ALIVE);
}
- // It is okay that we don't have a thread_list_lock here since we know that the thread cannot
- // die since it is currently held suspended by a SuspendReason::kForUserCode suspend.
- DCHECK(target != self);
- if (!art::Runtime::Current()->GetThreadList()->Resume(target,
- art::SuspendReason::kForUserCode)) {
+ // We didn't time out and got a result. Check that the thread is suspended by user-code,
+ // unsuspend it and return. It's already suspended internally so we don't need to do anything
+ // but decrement the count.
+ art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() == 0) {
+ return ERR(THREAD_NOT_SUSPENDED);
+ } else if (!ssbp.GetTargetThread()->ModifySuspendCount(
+ self, -1, nullptr, art::SuspendReason::kForUserCode)) {
// TODO Give a better error.
- // This is most likely THREAD_NOT_SUSPENDED but we cannot really be sure.
+ // This should not really be possible and is probably some race.
return ERR(INTERNAL);
} else {
return OK;
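
Both rewritten paths follow the same recipe: pin the target with a kInternal suspension via ScopedSuspendByPeer, adjust the kForUserCode portion of the count under thread_suspend_count_lock_, then let the destructor drop the internal pin. A toy model of that layered-count ledger (plain ints stand in for art::Thread's state; all locking is elided):

// suspend_ledger.cc - toy model; compile with: g++ -std=c++17 suspend_ledger.cc
#include <cassert>
#include <iostream>

struct ToyThread {
  int suspend_count = 0;            // thread may run iff this is 0
  int user_code_suspend_count = 0;  // the portion attributed to user code
};

// Models JVMTI SuspendThread: pin with an internal suspension, attribute one
// suspension to user code, then drop the internal pin. Net effect: the thread
// stays suspended, held only by the user-code count.
void JvmtiSuspend(ToyThread& t) {
  t.suspend_count++;  // ScopedSuspendByPeer ctor (kInternal)
  t.suspend_count++;  // ModifySuspendCount(+1, kForUserCode)
  t.user_code_suspend_count++;
  t.suspend_count--;  // ScopedSuspendByPeer dtor (Resume, kInternal)
}

// Models JVMTI ResumeThread with the same pin-adjust-unpin shape.
void JvmtiResume(ToyThread& t) {
  t.suspend_count++;  // ScopedSuspendByPeer ctor (kInternal)
  assert(t.user_code_suspend_count > 0);  // else THREAD_NOT_SUSPENDED
  t.suspend_count--;  // ModifySuspendCount(-1, kForUserCode)
  t.user_code_suspend_count--;
  t.suspend_count--;  // ScopedSuspendByPeer dtor (Resume, kInternal)
}

int main() {
  ToyThread t;
  JvmtiSuspend(t);
  std::cout << "suspended: count=" << t.suspend_count << "\n";  // 1
  JvmtiResume(t);
  std::cout << "resumed: count=" << t.suspend_count << "\n";    // 0
}
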
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ed449b5433..86f06064ad 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -768,13 +768,20 @@ void Jit::WaitForCompilationToFinish(Thread* self) {
void Jit::Stop() {
Thread* self = Thread::Current();
// TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
+ // During shutdown and startup the thread-pool can be null.
+ if (GetThreadPool() == nullptr) {
+ return;
+ }
WaitForCompilationToFinish(self);
GetThreadPool()->StopWorkers(self);
WaitForCompilationToFinish(self);
}
void Jit::Start() {
- GetThreadPool()->StartWorkers(Thread::Current());
+ // During shutdown and startup the thread-pool can be null.
+ if (GetThreadPool() != nullptr) {
+ GetThreadPool()->StartWorkers(Thread::Current());
+ }
}
ScopedJitSuspend::ScopedJitSuspend() {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index eeb35156b5..70a717154b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -362,6 +362,21 @@ const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
return nullptr;
}
+void JitCodeCache::ClearAllCompiledDexCode() {
+ MutexLock mu(Thread::Current(), lock_);
+ // Get rid of OSR code waiting to be put on a thread.
+ osr_code_map_.clear();
+
+ // We don't clear out or even touch method_code_map_ since that is what we use to map from
+ // currently-running code back to the method it came from. Getting rid of it would break
+ // jit-gc, stack-walking and signal handling. Since we never look through it in the other
+ // direction (from method -> code) everything is fine.
+
+ for (ProfilingInfo* p : profiling_infos_) {
+ p->SetSavedEntryPoint(nullptr);
+ }
+}
+
const void* JitCodeCache::FindCompiledCodeForInstrumentation(ArtMethod* method) {
// If jit-gc is still on we use the SavedEntryPoint field for doing that and so cannot use it to
// find the instrumentation entrypoint.
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 49a19a18f1..ee6111a430 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -215,6 +215,8 @@ class JitCodeCache {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void ClearAllCompiledDexCode() REQUIRES(!lock_, Locks::mutator_lock_);
+
void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
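
The REQUIRES(!lock_, Locks::mutator_lock_) annotation on ClearAllCompiledDexCode is Clang thread-safety analysis: callers must hold the mutator lock and must not hold lock_. A self-contained sketch of how such annotations are wired up (simplified, not ART's base/mutex.h; build with clang++ -Wthread-safety, and note that negative requirements like !lock_ additionally need -Wthread-safety-negative):

// tsa_demo.cc - compile with: clang++ -std=c++17 -Wthread-safety tsa_demo.cc
#include <mutex>

#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }
 private:
  std::mutex mu_;
};

class Cache {
 public:
  // Caller must already hold lock_, as with the REQUIRES clauses above.
  void ClearLocked() REQUIRES(lock_) { entries_ = 0; }

  void Clear() {
    lock_.Lock();
    ClearLocked();    // OK: lock_ is held here.
    lock_.Unlock();
    // ClearLocked(); // -Wthread-safety would warn: requires holding lock_.
  }

 private:
  Mutex lock_;
  int entries_ GUARDED_BY(lock_) = 0;
};

int main() {
  Cache c;
  c.Clear();
}
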
diff --git a/runtime/suspend_reason.h b/runtime/suspend_reason.h
index 289a1a4fb3..4e75a4feec 100644
--- a/runtime/suspend_reason.h
+++ b/runtime/suspend_reason.h
@@ -22,6 +22,8 @@
namespace art {
// The various reasons that we might be suspending a thread.
+// TODO Once kForDebugger is removed along with the old debugger we should make kForUserCode
+// just a basic bookkeeping count instead of linking it so directly with internal suspends.
enum class SuspendReason {
// Suspending for internal reasons (e.g. GC, stack trace, etc.).
// TODO Split this into more descriptive sections.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index cd6c834475..18dc0e8c45 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1228,6 +1228,34 @@ static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREA
LOG(FATAL) << ss.str();
}
+void Thread::SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code) {
+ CHECK_EQ(this, Thread::Current()) << "This function may only be called on the current thread. "
+ << *Thread::Current() << " tried to modify the suspendability "
+ << "of " << *this;
+ // NB This checks the new value! This ensures that we can only set can_be_suspended_by_user_code
+ // to false if !CanCallIntoJava().
+ DCHECK(!CanCallIntoJava() || can_be_suspended_by_user_code)
+ << "Threads able to call into java may not be marked as unsuspendable!";
+ if (can_be_suspended_by_user_code == CanBeSuspendedByUserCode()) {
+ // Don't need to do anything if nothing is changing.
+ return;
+ }
+ art::MutexLock mu(this, *Locks::user_code_suspension_lock_);
+ art::MutexLock thread_list_mu(this, *Locks::thread_suspend_count_lock_);
+
+ // We want to add the user-code suspend count back into the real suspend count if we are newly
+ // allowing user-code suspends and remove it if we are disabling them.
+ int adj = can_be_suspended_by_user_code ? GetUserCodeSuspendCount() : -GetUserCodeSuspendCount();
+ // Adjust the global suspend count appropriately. Use kInternal to not change the ForUserCode
+ // count.
+ if (adj != 0) {
+ bool suspend = ModifySuspendCountInternal(this, adj, nullptr, SuspendReason::kInternal);
+ CHECK(suspend) << this << " was unable to modify its own suspend count!";
+ }
+ // Mark thread as accepting user-code suspensions.
+ can_be_suspended_by_user_code_ = can_be_suspended_by_user_code;
+}
+
bool Thread::ModifySuspendCountInternal(Thread* self,
int delta,
AtomicInteger* suspend_barrier,
@@ -1249,6 +1277,17 @@ bool Thread::ModifySuspendCountInternal(Thread* self,
LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
return false;
}
+ DCHECK(this == self || this->IsSuspended())
+ << "Only self kForUserCode suspension on an unsuspended thread is allowed: " << this;
+ if (UNLIKELY(!CanBeSuspendedByUserCode())) {
+ VLOG(threads) << this << " is being requested to suspend for user code but that is disabled; "
+ << "the thread will not actually go to sleep.";
+ // Keeping user_code_suspend_count up to date is still useful, but we don't need to do
+ // anything else since we aren't going to 'really' suspend. Just adjust the
+ // user_code_suspend_count and return.
+ tls32_.user_code_suspend_count += delta;
+ return true;
+ }
}
if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
UnsafeLogFatalForSuspendCount(self, this);
@@ -2109,7 +2148,8 @@ void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject t
Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
- can_call_into_java_(true) {
+ can_call_into_java_(true),
+ can_be_suspended_by_user_code_(true) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
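
The subtle part of SetCanBeSuspendedByUserCode is the count fix-up: user-code suspensions contribute to the real suspend count only while suspension is allowed, so toggling the flag must fold that contribution out of, or back into, the total. A toy model of just that arithmetic (not ART's types; locking elided):

// toggle_suspendable.cc - toy model; compile with: g++ -std=c++17 toggle_suspendable.cc
#include <iostream>

struct ToyThread {
  bool can_be_suspended_by_user_code = true;
  int suspend_count = 0;            // thread may run iff this is 0
  int user_code_suspend_count = 0;  // user-code attributed suspensions
};

void SetCanBeSuspendedByUserCode(ToyThread& t, bool allow) {
  if (allow == t.can_be_suspended_by_user_code) return;
  // When disabling, pending user-code suspensions stop contributing to the
  // real count (they remain only as bookkeeping); when re-enabling, fold the
  // accumulated bookkeeping count back in.
  int adj = allow ? t.user_code_suspend_count : -t.user_code_suspend_count;
  t.suspend_count += adj;
  t.can_be_suspended_by_user_code = allow;
}

int main() {
  ToyThread t;
  t.suspend_count = 2;  // two pending user-code suspensions
  t.user_code_suspend_count = 2;
  SetCanBeSuspendedByUserCode(t, false);
  std::cout << t.suspend_count << "\n";  // 0: thread may run again
  SetCanBeSuspendedByUserCode(t, true);
  std::cout << t.suspend_count << "\n";  // 2: suspensions take effect again
}
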
diff --git a/runtime/thread.h b/runtime/thread.h
index edc429b7b9..d169a62198 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -989,6 +989,17 @@ class Thread {
--tls32_.disable_thread_flip_count;
}
+ // Returns true if the thread is subject to user-code suspension.
+ bool CanBeSuspendedByUserCode() const {
+ return can_be_suspended_by_user_code_;
+ }
+
+ // Sets CanBeSuspendedByUserCode and adjusts the suspend-count as needed. It is **absolutely
+ // required** that this only ever be called on the Thread::Current() thread.
+ void SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code)
+ REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::user_code_suspension_lock_);
+
// Returns true if the thread is allowed to call into java.
bool CanCallIntoJava() const {
return can_call_into_java_;
@@ -1552,8 +1563,9 @@ class Thread {
// critical section enter.
uint32_t disable_thread_flip_count;
- // How much of 'suspend_count_' is by request of user code, used to distinguish threads
- // suspended by the runtime from those suspended by user code.
+ // If CanBeSuspendedByUserCode, how much of 'suspend_count_' is by request of user code, used to
+ // distinguish threads suspended by the runtime from those suspended by user code. Otherwise
+ // this is just a count of how many user-code suspends have been attempted (but were ignored).
// This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
// told that AssertHeld should be good enough.
int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
@@ -1772,6 +1784,10 @@ class Thread {
// By default this is true.
bool can_call_into_java_;
+ // True if the thread is subject to user-code suspension. By default this is true. This can only
+ // be false for threads where '!can_call_into_java_'.
+ bool can_be_suspended_by_user_code_;
+
friend class Dbg; // For SetStateUnsafe.
friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Runtime; // For CreatePeer.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index b2be549996..ba333f6dd9 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -902,6 +902,8 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer,
bool request_suspension,
SuspendReason reason,
bool* timed_out) {
+ CHECK_NE(reason, SuspendReason::kForUserCode) << "Cannot suspend for user-code by peer. Must be "
+ << "done directly on the thread.";
const uint64_t start_time = NanoTime();
useconds_t sleep_us = kThreadSuspendInitialSleepUs;
*timed_out = false;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index bec1150807..26ca19054d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -100,8 +100,13 @@ void* ThreadPoolWorker::Callback(void* arg) {
worker->thread_ = Thread::Current();
// Thread pool workers cannot call into java.
worker->thread_->SetCanCallIntoJava(false);
+ // Thread pool workers should not be getting paused by user-code.
+ worker->thread_->SetCanBeSuspendedByUserCode(false);
// Do work until it's time to shut down.
worker->Run();
+ // Thread pool worker is finished. We want to allow suspension during shutdown.
+ worker->thread_->SetCanBeSuspendedByUserCode(true);
+ // Thread shuts down.
runtime->DetachCurrentThread();
return nullptr;
}
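
The worker flips the flag off before Run() and back on by hand afterwards. A hypothetical RAII guard (ScopedDisableUserCodeSuspension is invented here, not part of this patch) would keep the pair balanced even if Run() ever gained early exits:

// scoped_flag.cc - sketch of an RAII alternative to the manual set/restore
// pair in ThreadPoolWorker::Callback; ToyThread stands in for art::Thread.
#include <iostream>

struct ToyThread {
  bool can_be_suspended_by_user_code = true;
};

class ScopedDisableUserCodeSuspension {
 public:
  explicit ScopedDisableUserCodeSuspension(ToyThread* t)
      : thread_(t), old_(t->can_be_suspended_by_user_code) {
    thread_->can_be_suspended_by_user_code = false;
  }
  ~ScopedDisableUserCodeSuspension() {
    thread_->can_be_suspended_by_user_code = old_;  // restored on every path
  }

 private:
  ToyThread* thread_;
  bool old_;
};

int main() {
  ToyThread worker;
  {
    ScopedDisableUserCodeSuspension guard(&worker);
    std::cout << worker.can_be_suspended_by_user_code << "\n";  // 0 while working
  }
  std::cout << worker.can_be_suspended_by_user_code << "\n";    // 1 after the scope
}
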