-rw-r--r--  runtime/base/mutex.cc          |   8
-rw-r--r--  runtime/base/mutex.h           |  12
-rw-r--r--  runtime/cha.cc                 |  74
-rw-r--r--  runtime/jit/jit_code_cache.cc  | 109
-rw-r--r--  runtime/jit/jit_code_cache.h   |   6
5 files changed, 118 insertions(+), 91 deletions(-)
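
Taken together, the change moves kCHALock below kJitCodeCacheLock, so that cha_lock_ is only ever acquired after the JIT code cache's lock_, and restructures the two call paths to match. A minimal standalone sketch of the resulting discipline (plain std::mutex, hypothetical function names; not ART code):

    #include <mutex>

    std::mutex jit_code_cache_lock;  // stand-in for JitCodeCache::lock_
    std::mutex cha_lock;             // stand-in for Locks::cha_lock_

    // Code commit: take the JIT code cache lock first, then the CHA lock.
    void CommitCode() {
      std::lock_guard<std::mutex> jit(jit_code_cache_lock);
      std::lock_guard<std::mutex> cha(cha_lock);
      // ... verify single-implementation assumptions, publish the entry point ...
    }

    // Class linking: collect work under the CHA lock, release it, then take the JIT lock.
    void InvalidateAfterClassLinking() {
      {
        std::lock_guard<std::mutex> cha(cha_lock);
        // ... gather the (method, header) pairs to invalidate ...
      }
      std::lock_guard<std::mutex> jit(jit_code_cache_lock);
      // ... invalidate the collected headers ...
    }

With a single global order, neither path can end up waiting on a lock the other holds.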
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 7b888b18d9..044c4c2f78 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1142,10 +1142,6 @@ void Locks::Init() {
DCHECK(subtype_check_lock_ == nullptr);
subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
- DCHECK(cha_lock_ == nullptr);
- cha_lock_ = new Mutex("CHA lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
DCHECK(classlinker_classes_lock_ == nullptr);
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
@@ -1226,6 +1222,10 @@ void Locks::Init() {
DCHECK(custom_tls_lock_ == nullptr);
custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+ DCHECK(cha_lock_ == nullptr);
+ cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
DCHECK(native_debug_interface_lock_ == nullptr);
native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
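
Locks::Init() creates the mutexes in decreasing lock-level order, so moving the cha_lock_ construction from the kClassLinkerClassesLock neighborhood down past custom_tls_lock_ is what assigns it its new, much lower level. A simplified sketch of that discipline (hypothetical helper and level values; the real code uses the UPDATE_CURRENT_LOCK_LEVEL macro):

    #include <cassert>
    #include <cstdint>
    #include <string>

    // The running level as Init() proceeds; it must strictly decrease, so the
    // creation order in Init() doubles as the documented acquisition order.
    uint8_t current_lock_level = 0xFF;

    struct LeveledMutex {
      std::string name;
      uint8_t level;
    };

    LeveledMutex MakeMutex(const std::string& name, uint8_t new_level) {
      assert(new_level < current_lock_level);  // what UPDATE_CURRENT_LOCK_LEVEL enforces
      current_lock_level = new_level;
      return LeveledMutex{name, new_level};
    }

    // Created later => lower level => must be acquired later:
    LeveledMutex jit_lock = MakeMutex("JIT code cache lock", 30);
    LeveledMutex cha_lock = MakeMutex("CHA lock", 29);  // now below the JIT lock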
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index af2e7b2763..fba209a0b6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -72,6 +72,7 @@ enum LockLevel : uint8_t {
kJdwpSocketLock,
kRegionSpaceRegionLock,
kMarkSweepMarkStackLock,
+ kCHALock,
kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
@@ -109,7 +110,6 @@ enum LockLevel : uint8_t {
kMonitorPoolLock,
kClassLinkerClassesLock, // TODO rename.
kDexToDexCompilerLock,
- kCHALock,
kSubtypeCheckLock,
kBreakpointLock,
kMonitorLock,
@@ -661,14 +661,11 @@ class Locks {
// TODO: improve name, perhaps instrumentation_update_lock_.
static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
- // Guards Class Hierarchy Analysis (CHA).
- static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
// Guard the update of the SubtypeCheck data stores in each Class::status_ field.
// This lock is used in SubtypeCheck methods which are the interface for
// any SubtypeCheck-mutating methods.
// In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
- static Mutex* subtype_check_lock_ ACQUIRED_AFTER(cha_lock_);
+ static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
@@ -745,11 +742,14 @@ class Locks {
// GetThreadLocalStorage.
static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+ // Guards Class Hierarchy Analysis (CHA).
+ static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
// When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
// actually only encodes the mutex being below jni_function_table_lock_ although having
// kGenericBottomLock level is lower than this.
- #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::custom_tls_lock_)
+ #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
// Have an exclusive aborting thread.
static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
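
The ordering is also encoded statically: ACQUIRED_AFTER maps to a Clang thread-safety annotation, so -Wthread-safety can flag code that contradicts the declared order at compile time. A minimal sketch of how such a macro is commonly defined and used (simplified and assumed; ART's actual macro definitions live elsewhere in base/):

    #if defined(__clang__)
    #define CAPABILITY(x)       __attribute__((capability(x)))
    #define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
    #else
    #define CAPABILITY(x)
    #define ACQUIRED_AFTER(...)
    #endif

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock();
      void Unlock();
    };

    Mutex custom_tls_lock;
    // Mirrors the new declaration above: when both locks are held, the CHA lock
    // must be acquired after custom_tls_lock, never before it.
    Mutex cha_lock ACQUIRED_AFTER(custom_tls_lock);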
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ccbe066ed6..ce84e8ce2e 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -636,38 +636,54 @@ void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
// We do this under cha_lock_. Committing code also grabs this lock to
// make sure the code is only committed when all single-implementation
// assumptions are still true.
- MutexLock cha_mu(self, *Locks::cha_lock_);
- // Invalidate compiled methods that assume some virtual calls have only
- // single implementations.
- for (ArtMethod* invalidated : invalidated_single_impl_methods) {
- if (!invalidated->HasSingleImplementation()) {
- // It might have been invalidated already when other class linking is
- // going on.
- continue;
- }
- invalidated->SetHasSingleImplementation(false);
- if (invalidated->IsAbstract()) {
- // Clear the single implementation method.
- invalidated->SetSingleImplementation(nullptr, image_pointer_size);
- }
+ std::vector<std::pair<ArtMethod*, OatQuickMethodHeader*>> headers;
+ {
+ MutexLock cha_mu(self, *Locks::cha_lock_);
+ // Invalidate compiled methods that assume some virtual calls have only
+ // single implementations.
+ for (ArtMethod* invalidated : invalidated_single_impl_methods) {
+ if (!invalidated->HasSingleImplementation()) {
+ // It might have been invalidated already when other class linking is
+ // going on.
+ continue;
+ }
+ invalidated->SetHasSingleImplementation(false);
+ if (invalidated->IsAbstract()) {
+ // Clear the single implementation method.
+ invalidated->SetSingleImplementation(nullptr, image_pointer_size);
+ }
- if (runtime->IsAotCompiler()) {
- // No need to invalidate any compiled code as the AotCompiler doesn't
- // run any code.
- continue;
- }
+ if (runtime->IsAotCompiler()) {
+ // No need to invalidate any compiled code as the AotCompiler doesn't
+ // run any code.
+ continue;
+ }
- // Invalidate all dependents.
- for (const auto& dependent : GetDependents(invalidated)) {
- ArtMethod* method = dependent.first;;
- OatQuickMethodHeader* method_header = dependent.second;
- VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
- DCHECK(runtime->UseJitCompilation());
- runtime->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
- method, method_header);
- dependent_method_headers.insert(method_header);
+ // Invalidate all dependents.
+ for (const auto& dependent : GetDependents(invalidated)) {
+      ArtMethod* method = dependent.first;
+ OatQuickMethodHeader* method_header = dependent.second;
+ VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
+ DCHECK(runtime->UseJitCompilation());
+        // We need to call JitCodeCache::InvalidateCompiledCodeFor, but we cannot do it here
+        // while holding cha_lock_ without breaking the lock ordering. We don't want to
+        // re-order the locks, since that would make code-commit racy.
+ headers.push_back({method, method_header});
+ dependent_method_headers.insert(method_header);
+ }
+ RemoveAllDependenciesFor(invalidated);
+ }
+ }
+  // Since we are still loading the class that invalidated the code, it is fine to do this
+  // after removing the dependency. Any racing calls will still resolve to the old version
+  // (the new one is not loaded yet), which remains correct. We deoptimize right after this
+  // to make sure everything picks up the new state.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ for (const auto& pair : headers) {
+ code_cache->InvalidateCompiledCodeFor(pair.first, pair.second);
}
- RemoveAllDependenciesFor(invalidated);
}
}
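
The rewritten InvalidateSingleImplementationMethods thus follows a collect-then-act shape: gather every dependent (method, header) pair and drop the dependency records under cha_lock_, then perform the invalidations only after the lock is released. A standalone sketch of that shape (hypothetical stand-in types; the real code uses ArtMethod* and OatQuickMethodHeader*):

    #include <mutex>
    #include <utility>
    #include <vector>

    struct Method {};        // stand-in for ArtMethod
    struct MethodHeader {};  // stand-in for OatQuickMethodHeader

    std::mutex cha_mutex;  // stand-in for Locks::cha_lock_

    // Stand-in for JitCodeCache::InvalidateCompiledCodeFor; takes the JIT lock internally.
    void InvalidateCompiledCodeFor(Method* method, MethodHeader* header);

    void InvalidateSingleImplementationMethods() {
      std::vector<std::pair<Method*, MethodHeader*>> headers;
      {
        std::lock_guard<std::mutex> guard(cha_mutex);
        // ... clear the single-implementation flags, collect each dependent
        // (method, header) pair into `headers`, and remove the dependency
        // records, all while still under the CHA lock ...
      }
      // Only after releasing the CHA lock do we touch the JIT code cache,
      // whose lock must be acquired first under the new ordering.
      for (const auto& pair : headers) {
        InvalidateCompiledCodeFor(pair.first, pair.second);
      }
    }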
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b92affa26e..a8692a0702 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -608,17 +608,17 @@ void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
void JitCodeCache::FreeAllMethodHeaders(
const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
- {
- MutexLock mu(Thread::Current(), *Locks::cha_lock_);
- Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
- ->RemoveDependentsWithMethodHeaders(method_headers);
- }
-
// We need to remove entries in method_headers from CHA dependencies
// first since once we do FreeCode() below, the memory can be reused
// so it's possible for the same method_header to start representing
// different compiled code.
MutexLock mu(Thread::Current(), lock_);
+ {
+ MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
+ Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
+ ->RemoveDependentsWithMethodHeaders(method_headers);
+ }
+
ScopedCodeCacheWrite scc(this);
for (const OatQuickMethodHeader* method_header : method_headers) {
FreeCodeAndData(method_header->GetCode());
@@ -742,6 +742,18 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}
+void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
+ while (collection_in_progress_) {
+ lock_.Unlock(self);
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ }
+ lock_.Lock(self);
+ }
+}
+
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
@@ -755,6 +767,13 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
const ArenaSet<ArtMethod*>&
cha_single_implementation_list) {
DCHECK(!method->IsNative() || !osr);
+
+ if (!method->IsNative()) {
+    // We need to do this before grabbing the lock_ because it may need to access the string
+    // InternTable. Native methods do not have roots.
+ DCheckRootsAreValid(roots);
+ }
+
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -763,44 +782,45 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
uint8_t* memory = nullptr;
+ MutexLock mu(self, lock_);
+  // We need to make sure that no JIT garbage collection is in progress, and wait for any
+  // ongoing one to finish.
+ WaitForPotentialCollectionToCompleteRunnable(self);
{
- ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, lock_);
- WaitForPotentialCollectionToComplete(self);
- {
- ScopedCodeCacheWrite scc(this);
- memory = AllocateCode(total_size);
- if (memory == nullptr) {
- return nullptr;
- }
- code_ptr = memory + header_size;
-
- std::copy(code, code + code_size, code_ptr);
- method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- new (method_header) OatQuickMethodHeader(
- (stack_map != nullptr) ? code_ptr - stack_map : 0u,
- code_size);
- // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
- // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
- // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
- // 6P) stop being supported or their kernels are fixed.
- //
- // For reference, this behavior is caused by this commit:
- // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
- FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
- reinterpret_cast<char*>(code_ptr + code_size));
- DCHECK(!Runtime::Current()->IsAotCompiler());
- if (has_should_deoptimize_flag) {
- method_header->SetHasShouldDeoptimizeFlag();
- }
+ ScopedCodeCacheWrite scc(this);
+ memory = AllocateCode(total_size);
+ if (memory == nullptr) {
+ return nullptr;
+ }
+ code_ptr = memory + header_size;
+
+ std::copy(code, code + code_size, code_ptr);
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ new (method_header) OatQuickMethodHeader(
+ (stack_map != nullptr) ? code_ptr - stack_map : 0u,
+ code_size);
+ // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+ // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+ // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+ // 6P) stop being supported or their kernels are fixed.
+ //
+ // For reference, this behavior is caused by this commit:
+ // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+ FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+ reinterpret_cast<char*>(code_ptr + code_size));
+ DCHECK(!Runtime::Current()->IsAotCompiler());
+ if (has_should_deoptimize_flag) {
+ method_header->SetHasShouldDeoptimizeFlag();
}
number_of_compilations_++;
}
// We need to update the entry point in the runnable state for the instrumentation.
{
- // Need cha_lock_ for checking all single-implementation flags and register
- // dependencies.
+    // The following also needs to be guarded by cha_lock_. Otherwise it would be possible
+    // for the compiled code to be invalidated by concurrent class linking while below we
+    // still publish it as valid for the method. cha_lock_ is needed for checking all the
+    // single-implementation flags and for registering the dependencies.
MutexLock cha_mu(self, *Locks::cha_lock_);
bool single_impl_still_valid = true;
for (ArtMethod* single_impl : cha_single_implementation_list) {
@@ -826,16 +846,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
single_impl, method, method_header);
}
- if (!method->IsNative()) {
- // We need to do this before grabbing the lock_ because it needs to be able to see the string
- // InternTable. Native methods do not have roots.
- DCheckRootsAreValid(roots);
- }
-
- // The following needs to be guarded by cha_lock_ also. Otherwise it's
- // possible that the compiled code is considered invalidated by some class linking,
- // but below we still make the compiled code valid for the method.
- MutexLock mu(self, lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
DCHECK(it != jni_stubs_map_.end())
@@ -867,11 +877,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
method, method_header->GetEntryPoint());
}
}
- if (collection_in_progress_) {
- // We need to update the live bitmap if there is a GC to ensure it sees this new
- // code.
- GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
- }
VLOG(jit)
<< "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
<< ArtMethod::PrettyMethod(method) << "@" << method
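
The new WaitForPotentialCollectionToCompleteRunnable helper has to block without holding the cache lock while in a runnable state, so it repeatedly drops the lock, waits in a suspended state, and re-checks the flag after retaking the lock. A rough equivalent using standard primitives (hypothetical names; ART uses its own Mutex, ConditionVariable, and ScopedThreadSuspension):

    #include <condition_variable>
    #include <mutex>

    std::mutex cache_lock;                  // stand-in for JitCodeCache::lock_
    std::condition_variable collection_cv;  // stand-in for the cache's condition variable
    bool collection_in_progress = false;

    // Pre: caller holds `lock` (on cache_lock) in the "runnable" state.
    // Post: caller still holds `lock` and no collection is in progress.
    void WaitForCollectionRunnable(std::unique_lock<std::mutex>& lock) {
      while (collection_in_progress) {
        lock.unlock();  // drop the cache lock before leaving the runnable state
        {
          // In ART this scope is a ScopedThreadSuspension: the thread leaves
          // the runnable state so a garbage collection is never blocked on it.
          std::unique_lock<std::mutex> inner(cache_lock);
          collection_cv.wait(inner, [] { return !collection_in_progress; });
        }
        lock.lock();  // back to "runnable": retake the lock, then re-check the flag
      }
    }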
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 29f9c9cf43..632b45bb00 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -314,6 +314,12 @@ class JitCodeCache {
REQUIRES(lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+  // If a collection is in progress, wait for it to finish. Must be called with the mutator
+  // lock held. Prefer WaitForPotentialCollectionToComplete when possible; this variant
+  // releases and then re-acquires the mutator lock while waiting.
+ void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
+ REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
bool WaitForPotentialCollectionToComplete(Thread* self)
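
The REQUIRES / REQUIRES_SHARED annotations on the new declaration make that contract checkable: callers must already hold lock_ exclusively and the mutator lock at least shared. A simplified sketch of what such annotations expand to (assumed macro definitions; Roles::uninterruptible_ is a negative capability and is omitted here):

    #if defined(__clang__)
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #else
    #define CAPABILITY(x)
    #define REQUIRES(...)
    #define REQUIRES_SHARED(...)
    #endif

    class CAPABILITY("mutex") Mutex {};
    extern Mutex jit_cache_lock;  // stand-in for JitCodeCache::lock_
    extern Mutex mutator_lock;    // stand-in for Locks::mutator_lock_

    // Callers must hold jit_cache_lock exclusively and mutator_lock at least
    // shared; -Wthread-safety rejects call sites that cannot prove this.
    void WaitForPotentialCollectionToCompleteRunnable()
        REQUIRES(jit_cache_lock) REQUIRES_SHARED(mutator_lock);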