author Nicolas Geoffray <ngeoffray@google.com> 2024-07-05 09:31:37 +0000
committer Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com> 2024-07-05 09:31:37 +0000
commit 4cc09a1621364268f457d4e7ee95145c19e77da7 (patch)
tree 4730e9b9cea6067e2b0157d486ef4da728afaa92
parent 316f9620e3ecb7a6c15361fa74ade07542552189 (diff)
parent ba209d65dd6080cf9708bb7145f7a17d71aa9966 (diff)
Introduce a new lock for JIT data structures accessed by mutators. am: ba209d65dd
Original change: https://android-review.googlesource.com/c/platform/art/+/3133678

Change-Id: Ic208c69e7d7ceb17247f92fcc53f4233fcf8cdca
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  runtime/base/locks.cc            7
-rw-r--r--  runtime/base/locks.h             7
-rw-r--r--  runtime/jit/jit_code_cache.cc  372
-rw-r--r--  runtime/jit/jit_code_cache.h    35
4 files changed, 223 insertions, 198 deletions
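
The core of the change: mutator threads only need to read the JIT maps on hot paths (method lookups, stack walks), so they now take the shared side of a new ReaderWriterMutex, while the JIT and GC threads take the exclusive side to update the maps. A minimal self-contained sketch of that pattern, with std::shared_mutex standing in for ART's ReaderWriterMutex (the class and names below are illustrative, not ART code):

#include <map>
#include <mutex>
#include <shared_mutex>

class CodeMap {
 public:
  // Hot mutator path: many readers may hold the lock concurrently.
  const void* Lookup(const void* pc) const {
    std::shared_lock<std::shared_mutex> lock(mutex_);
    auto it = map_.find(pc);
    return it == map_.end() ? nullptr : it->second;
  }

  // JIT/GC path: exclusive, blocks all readers while the map changes.
  void Insert(const void* pc, const void* code) {
    std::unique_lock<std::shared_mutex> lock(mutex_);
    map_[pc] = code;
  }

 private:
  mutable std::shared_mutex mutex_;
  std::map<const void*, const void*> map_;
};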
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
index 011d46e6b4..2c6b96892b 100644
--- a/runtime/base/locks.cc
+++ b/runtime/base/locks.cc
@@ -64,6 +64,7 @@ Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
Mutex* Locks::cha_lock_ = nullptr;
Mutex* Locks::jit_lock_ = nullptr;
+ReaderWriterMutex* Locks::jit_mutator_lock_ = nullptr;
Mutex* Locks::subtype_check_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
ConditionVariable* Locks::thread_exit_cond_ = nullptr;
@@ -151,6 +152,7 @@ void Locks::Init() {
DCHECK(profiler_lock_ != nullptr);
DCHECK(cha_lock_ != nullptr);
DCHECK(jit_lock_ != nullptr);
+ DCHECK(jit_mutator_lock_ != nullptr);
DCHECK(subtype_check_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
@@ -316,7 +318,10 @@ void Locks::Init() {
DCHECK(jit_lock_ == nullptr);
jit_lock_ = new Mutex("Jit code cache", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+ UPDATE_CURRENT_LOCK_LEVEL(kJitCodeCacheMutatorAndCHALock);
+ DCHECK(jit_mutator_lock_ == nullptr);
+ jit_mutator_lock_ = new ReaderWriterMutex("Jit code cache for mutator", current_lock_level);
+
DCHECK(cha_lock_ == nullptr);
cha_lock_ = new Mutex("CHA lock", current_lock_level);
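
Note that jit_mutator_lock_ is created one level below jit_lock_ and shares its level with cha_lock_, hence the combined name kJitCodeCacheMutatorAndCHALock. ART enforces this hierarchy at runtime: a thread may only acquire a lock at a level strictly below the lowest level it already holds. A toy sketch of that discipline, not ART's implementation, assuming non-recursive locks released in LIFO order:

#include <cassert>
#include <mutex>

class LeveledMutex {
 public:
  explicit LeveledMutex(int level) : level_(level) {}

  void Lock() {
    // Only allow acquiring a lock strictly below the lowest level this
    // thread already holds; this rules out ordering cycles by construction.
    assert(level_ < lowest_held_level_ && "lock order violation");
    mu_.lock();
    saved_level_ = lowest_held_level_;
    lowest_held_level_ = level_;
  }

  void Unlock() {
    lowest_held_level_ = saved_level_;  // assumes LIFO lock/unlock order
    mu_.unlock();
  }

 private:
  const int level_;
  int saved_level_ = 0;
  std::mutex mu_;
  static thread_local int lowest_held_level_;
};

thread_local int LeveledMutex::lowest_held_level_ = 1'000'000;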
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index d6a59f8b83..8dcad7a091 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -67,7 +67,7 @@ enum LockLevel : uint8_t {
kMarkSweepMarkStackLock,
// Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
kThreadWaitLock,
- kCHALock,
+ kJitCodeCacheMutatorAndCHALock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
@@ -336,9 +336,12 @@ class EXPORT Locks {
// GetThreadLocalStorage.
static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
- // Guard access to any JIT data structure.
+ // Guard access to JIT data structures mostly used by the JIT thread.
static Mutex* jit_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+ // Guard access to any JIT data structure that mutators can also access.
+ static ReaderWriterMutex* jit_mutator_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
// Guards Class Hierarchy Analysis (CHA).
static Mutex* cha_lock_ ACQUIRED_AFTER(jit_lock_);
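
GUARDED_BY, REQUIRES_SHARED and ACQUIRED_AFTER are Clang thread-safety-analysis macros, so building with -Wthread-safety statically checks that every access to an annotated field holds the correct side of the correct lock. An illustrative sketch using the underlying attributes directly (the toy RwMutex below is an assumption for the example, not ART's type):

#include <map>

class __attribute__((capability("mutex"))) RwMutex {
 public:
  void WriterLock() __attribute__((acquire_capability())) {}
  void WriterUnlock() __attribute__((release_capability())) {}
  void ReaderLock() __attribute__((acquire_shared_capability())) {}
  void ReaderUnlock() __attribute__((release_shared_capability())) {}
};

class Cache {
 public:
  // Read path: the reader side suffices, so mutators can run in parallel.
  const void* Get(const void* pc) __attribute__((requires_shared_capability(mu_))) {
    auto it = map_.find(pc);
    return it == map_.end() ? nullptr : it->second;
  }

  // Write path: demands the exclusive side.
  void Put(const void* pc, const void* code) __attribute__((requires_capability(mu_))) {
    map_[pc] = code;
  }

 private:
  RwMutex mu_;
  std::map<const void*, const void*> map_ __attribute__((guarded_by(mu_)));
};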
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f6c1bf7d67..477cf23e31 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -296,7 +296,7 @@ bool JitCodeCache::ContainsPc(const void* ptr) const {
bool JitCodeCache::ContainsMethod(ArtMethod* method) {
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end() &&
@@ -321,7 +321,7 @@ const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
DCHECK(method->IsNative());
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end()) {
JniStubData& data = it->second;
@@ -340,7 +340,7 @@ const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* metho
if (method->GetDeclaringClass<kWithoutReadBarrier>()->IsBootStrapClassLoaded()) {
code_ptr = zygote_map_.GetCodeFor(method);
} else {
- MutexLock mu(self, *Locks::jit_lock_);
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
auto it = saved_compiled_methods_map_.find(method);
if (it != saved_compiled_methods_map_.end()) {
code_ptr = it->second;
@@ -405,61 +405,64 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
- for (const auto& entry : method_code_map_) {
- uint32_t number_of_roots = 0;
- const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
- uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
- ? private_region_.GetWritableDataAddress(root_table)
- : shared_region_.GetWritableDataAddress(root_table);
- GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
- for (uint32_t i = 0; i < number_of_roots; ++i) {
- // This does not need a read barrier because this is called by GC.
- mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
- if (object == nullptr || object == Runtime::GetWeakClassSentinel()) {
- // entry got deleted in a previous sweep.
- } else if (object->IsString<kDefaultVerifyFlags>()) {
- mirror::Object* new_object = visitor->IsMarked(object);
- // We know the string is marked because it's a strongly-interned string that
- // is always alive.
- // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
- // out of the weak access/creation pause. b/32167580
- DCHECK_NE(new_object, nullptr) << "old-string:" << object;
- if (new_object != object) {
- roots[i] = GcRoot<mirror::Object>(new_object);
- }
- } else if (object->IsClass<kDefaultVerifyFlags>()) {
- mirror::Object* new_klass = visitor->IsMarked(object);
- if (new_klass == nullptr) {
- roots[i] = GcRoot<mirror::Object>(Runtime::GetWeakClassSentinel());
- } else if (new_klass != object) {
- roots[i] = GcRoot<mirror::Object>(new_klass);
- }
- } else {
- mirror::Object* new_method_type = visitor->IsMarked(object);
- if (kIsDebugBuild) {
- if (new_method_type != nullptr) {
- // SweepSystemWeaks() is happening in the compaction pause. At that point
- // IsMarked(object) returns the moved address, but the content is not there yet.
- if (!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
- ObjPtr<mirror::Class> method_type_class =
- WellKnownClasses::java_lang_invoke_MethodType.Get<kWithoutReadBarrier>();
-
- CHECK_EQ((new_method_type->GetClass<kVerifyNone, kWithoutReadBarrier>()),
- method_type_class.Ptr());
+ {
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
+ for (const auto& entry : method_code_map_) {
+ uint32_t number_of_roots = 0;
+ const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
+ uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
+ ? private_region_.GetWritableDataAddress(root_table)
+ : shared_region_.GetWritableDataAddress(root_table);
+ GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
+ for (uint32_t i = 0; i < number_of_roots; ++i) {
+ // This does not need a read barrier because this is called by GC.
+ mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
+ if (object == nullptr || object == Runtime::GetWeakClassSentinel()) {
+ // entry got deleted in a previous sweep.
+ } else if (object->IsString<kDefaultVerifyFlags>()) {
+ mirror::Object* new_object = visitor->IsMarked(object);
+ // We know the string is marked because it's a strongly-interned string that
+ // is always alive.
+ // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
+ // out of the weak access/creation pause. b/32167580
+ DCHECK_NE(new_object, nullptr) << "old-string:" << object;
+ if (new_object != object) {
+ roots[i] = GcRoot<mirror::Object>(new_object);
+ }
+ } else if (object->IsClass<kDefaultVerifyFlags>()) {
+ mirror::Object* new_klass = visitor->IsMarked(object);
+ if (new_klass == nullptr) {
+ roots[i] = GcRoot<mirror::Object>(Runtime::GetWeakClassSentinel());
+ } else if (new_klass != object) {
+ roots[i] = GcRoot<mirror::Object>(new_klass);
+ }
+ } else {
+ mirror::Object* new_method_type = visitor->IsMarked(object);
+ if (kIsDebugBuild) {
+ if (new_method_type != nullptr) {
+ // SweepSystemWeaks() is happening in the compaction pause. At that point
+ // IsMarked(object) returns the moved address, but the content is not there yet.
+ if (!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
+ ObjPtr<mirror::Class> method_type_class =
+ WellKnownClasses::java_lang_invoke_MethodType.Get<kWithoutReadBarrier>();
+
+ CHECK_EQ((new_method_type->GetClass<kVerifyNone, kWithoutReadBarrier>()),
+ method_type_class.Ptr());
+ }
}
}
- }
- if (new_method_type == nullptr) {
- roots[i] = nullptr;
- } else if (new_method_type != object) {
- // References are updated in VisitRootTables. Reaching this means that ArtMethod is no
- // longer reachable.
- roots[i] = GcRoot<mirror::Object>(new_method_type);
+ if (new_method_type == nullptr) {
+ roots[i] = nullptr;
+ } else if (new_method_type != object) {
+ // References are updated in VisitRootTables. Reaching this means that ArtMethod is no
+ // longer reachable.
+ roots[i] = GcRoot<mirror::Object>(new_method_type);
+ }
}
}
}
}
+ MutexLock mu(self, *Locks::jit_lock_);
// Walk over inline caches to clear entries containing unloaded classes.
for (const auto& [_, info] : profiling_infos_) {
InlineCache* caches = info->GetInlineCaches();
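
For orientation, the sweep above follows the standard weak-root protocol: the GC supplies an IsMarked visitor that returns nullptr for dead objects and the possibly-moved address for live ones; the cache updates moved roots in place and tombstones dead ones so a later sweep can tell "cleared" from "never set". A simplified sketch with illustrative types, not ART's:

struct Object;

struct IsMarkedVisitor {
  // Returns nullptr if the object is dead, else its (possibly moved) address.
  virtual Object* IsMarked(Object* obj) = 0;
  virtual ~IsMarkedVisitor() = default;
};

void SweepRoots(Object** roots, unsigned count, Object* sentinel, IsMarkedVisitor* visitor) {
  for (unsigned i = 0; i < count; ++i) {
    Object* old_obj = roots[i];
    if (old_obj == nullptr || old_obj == sentinel) {
      continue;                // already swept in a previous pass
    }
    Object* fresh = visitor->IsMarked(old_obj);
    if (fresh == nullptr) {
      roots[i] = sentinel;     // object died: leave a tombstone
    } else if (fresh != old_obj) {
      roots[i] = fresh;        // object moved: update the root in place
    }
  }
}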
@@ -519,6 +522,7 @@ void JitCodeCache::FreeAllMethodHeaders(
if (kIsDebugBuild && !Runtime::Current()->IsZygote()) {
std::map<const void*, ArtMethod*> compiled_methods;
std::set<const void*> debug_info;
+ ReaderMutexLock mu2(Thread::Current(), *Locks::jit_mutator_lock_);
VisitAllMethods([&](const void* addr, ArtMethod* method) {
if (!IsInZygoteExecSpace(addr)) {
CHECK(addr != nullptr && method != nullptr);
@@ -549,10 +553,11 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
// the CHA dependency map just once with an unordered_set.
std::unordered_set<OatQuickMethodHeader*> method_headers;
MutexLock mu(self, *Locks::jit_lock_);
- // We do not check if a code cache GC is in progress, as this method comes
- // with the classlinker_classes_lock_ held, and suspending ourselves could
- // lead to a deadlock.
{
+ WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
+ // We do not check if a code cache GC is in progress, as this method comes
+ // with the classlinker_classes_lock_ held, and suspending ourselves could
+ // lead to a deadlock.
for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
it->second.RemoveMethodsIn(alloc);
if (it->second.GetMethods().empty()) {
@@ -570,13 +575,6 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
++it;
}
}
- for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
- if (alloc.ContainsUnsafe(*it)) {
- it = processed_zombie_jni_code_.erase(it);
- } else {
- ++it;
- }
- }
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
if (alloc.ContainsUnsafe(it->second)) {
method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
@@ -589,17 +587,26 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
++it;
}
}
+ for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
+ DCHECK(!ContainsElement(zombie_code_, it->second));
+ if (alloc.ContainsUnsafe(it->first)) {
+ // Note that the code has already been pushed to method_headers in the loop
+ // above and is going to be removed in FreeCode() below.
+ it = osr_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
}
- for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
- DCHECK(!ContainsElement(zombie_code_, it->second));
- if (alloc.ContainsUnsafe(it->first)) {
- // Note that the code has already been pushed to method_headers in the loop
- // above and is going to be removed in FreeCode() below.
- it = osr_code_map_.erase(it);
+
+ for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
+ if (alloc.ContainsUnsafe(*it)) {
+ it = processed_zombie_jni_code_.erase(it);
} else {
++it;
}
}
+
for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
ProfilingInfo* info = it->second;
if (alloc.ContainsUnsafe(info->GetMethod())) {
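
Every pruning loop in RemoveMethodsIn() uses the same erase-while-iterating idiom: std::map::erase returns the iterator past the removed element, so traversal stays valid whether or not an entry is dropped. Sketched generically:

#include <iterator>
#include <map>

// Remove every entry of an ordered map that satisfies `pred`, in place.
template <typename Map, typename Pred>
void ErasePairsIf(Map& m, Pred pred) {
  for (auto it = m.begin(); it != m.end();) {
    if (pred(*it)) {
      it = m.erase(it);  // erase() hands back the next valid iterator
    } else {
      ++it;
    }
  }
}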
@@ -765,6 +772,7 @@ bool JitCodeCache::Commit(Thread* self,
if (UNLIKELY(method->IsNative())) {
ScopedDebugDisallowReadBarriers sddrb(self);
+ WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
auto it = jni_stubs_map_.find(JniStubKey(method));
DCHECK(it != jni_stubs_map_.end())
<< "Entry inserted in NotifyCompilationOf() should be alive.";
@@ -779,6 +787,7 @@ bool JitCodeCache::Commit(Thread* self,
zygote_map_.Put(code_ptr, method);
} else {
ScopedDebugDisallowReadBarriers sddrb(self);
+ WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
method_code_map_.Put(code_ptr, method);
// Searching for MethodType-s in roots. They need to be treated as strongly reachable while
@@ -802,6 +811,7 @@ bool JitCodeCache::Commit(Thread* self,
}
if (compilation_kind == CompilationKind::kOsr) {
ScopedDebugDisallowReadBarriers sddrb(self);
+ WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
osr_code_map_.Put(method, code_ptr);
} else if (method->StillNeedsClinitCheck()) {
ScopedDebugDisallowReadBarriers sddrb(self);
@@ -811,6 +821,7 @@ bool JitCodeCache::Commit(Thread* self,
// The shared region can easily be queried. For the private region, we
// use a side map.
if (!IsSharedRegion(*region)) {
+ WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
saved_compiled_methods_map_.Put(method, code_ptr);
}
} else {
@@ -850,7 +861,6 @@ bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
- bool osr = osr_code_map_.find(method) != osr_code_map_.end();
bool in_cache = RemoveMethodLocked(method, release_memory);
if (!in_cache) {
@@ -858,11 +868,6 @@ bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
}
Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
- VLOG(jit)
- << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
- << ArtMethod::PrettyMethod(method) << "@" << method
- << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
- << " dcache_size=" << PrettySize(DataCacheSizeLocked());
return true;
}
@@ -876,6 +881,7 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
bool in_cache = false;
ScopedCodeCacheWrite ccw(private_region_);
+ WriterMutexLock mu(Thread::Current(), *Locks::jit_mutator_lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
@@ -933,7 +939,7 @@ void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
if (old_method->IsNative()) {
// Update methods in jni_stubs_map_.
for (auto& entry : jni_stubs_map_) {
@@ -942,25 +948,25 @@ void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_meth
}
return;
}
+
// Update method_code_map_ to point to the new method.
for (auto& it : method_code_map_) {
if (it.second == old_method) {
it.second = new_method;
}
}
-
- auto node = method_code_map_reversed_.extract(old_method);
- if (!node.empty()) {
- node.key() = new_method;
- method_code_map_reversed_.insert(std::move(node));
- }
-
// Update osr_code_map_ to point to the new method.
auto code_map = osr_code_map_.find(old_method);
if (code_map != osr_code_map_.end()) {
osr_code_map_.Put(new_method, code_map->second);
osr_code_map_.erase(old_method);
}
+
+ auto node = method_code_map_reversed_.extract(old_method);
+ if (!node.empty()) {
+ node.key() = new_method;
+ method_code_map_reversed_.insert(std::move(node));
+ }
}
void JitCodeCache::TransitionToDebuggable() {
@@ -969,16 +975,17 @@ void JitCodeCache::TransitionToDebuggable() {
// ClassLinker::UpdateEntryPointsClassVisitor.
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- {
- MutexLock mu(self, *Locks::jit_lock_);
- if (kIsDebugBuild) {
- // TODO: Check `jni_stubs_map_`?
- for (const auto& entry : method_code_map_) {
- ArtMethod* method = entry.second;
- DCHECK(!method->IsPreCompiled());
- DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
- }
+ if (kIsDebugBuild) {
+ // TODO: Check `jni_stubs_map_`?
+ ReaderMutexLock mu2(self, *Locks::jit_mutator_lock_);
+ for (const auto& entry : method_code_map_) {
+ ArtMethod* method = entry.second;
+ DCHECK(!method->IsPreCompiled());
+ DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
}
+ }
+ {
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
// Not strictly necessary, but this map is useless now.
saved_compiled_methods_map_.clear();
}
@@ -1160,29 +1167,32 @@ void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
} else {
OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
method_headers.insert(header);
-      auto method_it = method_code_map_.find(header->GetCode());
-      if (method_it != method_code_map_.end()) {
-        ArtMethod* method = method_it->second;
-        auto code_ptrs_it = method_code_map_reversed_.find(method);
-        if (code_ptrs_it != method_code_map_reversed_.end()) {
-          std::vector<const void*>& code_ptrs = code_ptrs_it->second;
-          RemoveElement(code_ptrs, code_ptr);
-
-          if (code_ptrs.empty()) {
-            method_code_map_reversed_.erase(code_ptrs_it);
-          }
-        }
-      }
-      method_code_map_.erase(header->GetCode());
+      {
+        WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
+        auto method_it = method_code_map_.find(header->GetCode());
+        if (method_it != method_code_map_.end()) {
+          ArtMethod* method = method_it->second;
+          auto code_ptrs_it = method_code_map_reversed_.find(method);
+          if (code_ptrs_it != method_code_map_reversed_.end()) {
+            std::vector<const void*>& code_ptrs = code_ptrs_it->second;
+            RemoveElement(code_ptrs, code_ptr);
+            if (code_ptrs.empty()) {
+              method_code_map_reversed_.erase(code_ptrs_it);
+            }
+          }
+        }
+        method_code_map_.erase(header->GetCode());
+      }
VLOG(jit) << "JIT removed " << *it;
it = processed_zombie_code_.erase(it);
}
}
for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
+ WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
ArtMethod* method = *it;
auto stub = jni_stubs_map_.find(JniStubKey(method));
if (stub == jni_stubs_map_.end()) {
@@ -1216,51 +1226,52 @@ void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
FreeAllMethodHeaders(method_headers);
}
+class JitGcTask final : public Task {
+ public:
+ JitGcTask() {}
+
+ void Run(Thread* self) override {
+ Runtime::Current()->GetJit()->GetCodeCache()->DoCollection(self);
+ }
+
+ void Finalize() override {
+ delete this;
+ }
+};
+
void JitCodeCache::AddZombieCode(ArtMethod* method, const void* entry_point) {
CHECK(ContainsPc(entry_point));
CHECK(method->IsNative() || (method->GetEntryPointFromQuickCompiledCode() != entry_point));
const void* code_ptr = OatQuickMethodHeader::FromEntryPoint(entry_point)->GetCode();
if (!IsInZygoteExecSpace(code_ptr)) {
Thread* self = Thread::Current();
- if (Locks::jit_lock_->IsExclusiveHeld(self)) {
+ if (Locks::jit_mutator_lock_->IsExclusiveHeld(self)) {
AddZombieCodeInternal(method, code_ptr);
} else {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
AddZombieCodeInternal(method, code_ptr);
}
}
}
-class JitGcTask final : public Task {
- public:
- JitGcTask() {}
-
- void Run(Thread* self) override {
- Runtime::Current()->GetJit()->GetCodeCache()->DoCollection(self);
- }
-
- void Finalize() override {
- delete this;
- }
-};
-
void JitCodeCache::AddZombieCodeInternal(ArtMethod* method, const void* code_ptr) {
if (method->IsNative()) {
- CHECK(jni_stubs_map_.find(JniStubKey(method)) != jni_stubs_map_.end());
zombie_jni_code_.insert(method);
} else {
CHECK(!ContainsElement(zombie_code_, code_ptr));
zombie_code_.insert(code_ptr);
}
+
// Arbitrary threshold of number of zombie code before doing a GC.
static constexpr size_t kNumberOfZombieCodeThreshold = kIsDebugBuild ? 1 : 1000;
size_t number_of_code_to_delete =
zombie_code_.size() + zombie_jni_code_.size() + osr_code_map_.size();
if (number_of_code_to_delete >= kNumberOfZombieCodeThreshold) {
JitThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
- if (pool != nullptr && !gc_task_scheduled_) {
- gc_task_scheduled_ = true;
+ if (pool != nullptr && !std::atomic_exchange_explicit(&gc_task_scheduled_,
+ true,
+ std::memory_order_relaxed)) {
pool->AddTask(Thread::Current(), new JitGcTask());
}
}
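
Since gc_task_scheduled_ is no longer protected by jit_lock_ here, it becomes a std::atomic<bool> claimed with exchange(): the returned previous value tells the caller whether it was the one to flip the flag, so at most one GC task is ever queued. A minimal sketch of the idiom (the pool call is elided):

#include <atomic>

std::atomic<bool> gc_task_scheduled{false};

// Only the caller that flips false -> true schedules the task; everyone
// else sees `true` come back from exchange() and does nothing. The GC
// task stores `false` again once the collection completes.
void MaybeScheduleGc() {
  if (!gc_task_scheduled.exchange(true, std::memory_order_relaxed)) {
    // pool->AddTask(Thread::Current(), new JitGcTask());  // as in the diff
  }
}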
@@ -1321,16 +1332,19 @@ void JitCodeCache::DoCollection(Thread* self) {
reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
reinterpret_cast<uintptr_t>(
private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
- processed_zombie_code_.insert(zombie_code_.begin(), zombie_code_.end());
- zombie_code_.clear();
- processed_zombie_jni_code_.insert(zombie_jni_code_.begin(), zombie_jni_code_.end());
- zombie_jni_code_.clear();
- // Empty osr method map, as osr compiled code will be deleted (except the ones
- // on thread stacks).
- for (auto it = osr_code_map_.begin(); it != osr_code_map_.end(); ++it) {
- processed_zombie_code_.insert(it->second);
+ {
+ WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
+ processed_zombie_code_.insert(zombie_code_.begin(), zombie_code_.end());
+ zombie_code_.clear();
+ processed_zombie_jni_code_.insert(zombie_jni_code_.begin(), zombie_jni_code_.end());
+ zombie_jni_code_.clear();
+ // Empty osr method map, as osr compiled code will be deleted (except the ones
+ // on thread stacks).
+ for (auto it = osr_code_map_.begin(); it != osr_code_map_.end(); ++it) {
+ processed_zombie_code_.insert(it->second);
+ }
+ osr_code_map_.clear();
}
- osr_code_map_.clear();
}
TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
{
@@ -1345,20 +1359,16 @@ void JitCodeCache::DoCollection(Thread* self) {
RemoveUnmarkedCode(self);
}
+ gc_task_scheduled_ = false;
MutexLock mu(self, *Locks::jit_lock_);
live_bitmap_.reset(nullptr);
- NotifyCollectionDone(self);
+ collection_in_progress_ = false;
+ lock_cond_.Broadcast(self);
}
Runtime::Current()->GetJit()->AddTimingLogger(logger);
}
-void JitCodeCache::NotifyCollectionDone(Thread* self) {
- collection_in_progress_ = false;
- gc_task_scheduled_ = false;
- lock_cond_.Broadcast(self);
-}
-
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
const void* pc_ptr = reinterpret_cast<const void*>(pc);
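
With NotifyCollectionDone() inlined, the end of DoCollection() is the classic monitor hand-off: clear collection_in_progress_ under jit_lock_, then broadcast so that WaitForPotentialCollectionToComplete() wakes up. A self-contained sketch with standard types standing in for ART's Mutex and ConditionVariable:

#include <condition_variable>
#include <mutex>

std::mutex jit_lock;
std::condition_variable lock_cond;
bool collection_in_progress = false;

// Waiters sleep until the collector clears the flag and broadcasts.
void WaitForCollectionToComplete() {
  std::unique_lock<std::mutex> lk(jit_lock);
  lock_cond.wait(lk, [] { return !collection_in_progress; });
}

// What the inlined tail of DoCollection() now does: clear, then wake all.
void FinishCollection() {
  {
    std::lock_guard<std::mutex> lk(jit_lock);
    collection_in_progress = false;
  }
  lock_cond.notify_all();  // ART's Broadcast()
}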
@@ -1373,10 +1383,10 @@ OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod*
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
OatQuickMethodHeader* method_header = nullptr;
ArtMethod* found_method = nullptr; // Only for DCHECK(), not for JNI stubs.
if (method != nullptr && UNLIKELY(method->IsNative())) {
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it == jni_stubs_map_.end()) {
return nullptr;
@@ -1400,19 +1410,23 @@ OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod*
return OatQuickMethodHeader::FromCodePointer(code_ptr);
}
}
- auto it = method_code_map_.lower_bound(pc_ptr);
- if ((it == method_code_map_.end() || it->first != pc_ptr) &&
- it != method_code_map_.begin()) {
- --it;
- }
- if (it != method_code_map_.end()) {
- const void* code_ptr = it->first;
- if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
- method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- found_method = it->second;
+ {
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
+ auto it = method_code_map_.lower_bound(pc_ptr);
+ if ((it == method_code_map_.end() || it->first != pc_ptr) &&
+ it != method_code_map_.begin()) {
+ --it;
+ }
+ if (it != method_code_map_.end()) {
+ const void* code_ptr = it->first;
+ if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ found_method = it->second;
+ }
}
}
if (method_header == nullptr && method == nullptr) {
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
// Scan all compiled JNI stubs as well. This slow search is used only
// for checks in debug build, for release builds the `method` is not null.
for (auto&& entry : jni_stubs_map_) {
@@ -1440,7 +1454,7 @@ OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod*
OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
auto it = osr_code_map_.find(method);
if (it == osr_code_map_.end()) {
return nullptr;
@@ -1524,6 +1538,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
{
MutexLock mu(self, *Locks::jit_lock_);
profiling_infos = profiling_infos_;
+ ReaderMutexLock mu2(self, *Locks::jit_mutator_lock_);
for (const auto& entry : method_code_map_) {
copies.push_back(entry.second);
}
@@ -1618,7 +1633,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
return osr_code_map_.find(method) != osr_code_map_.end();
}
@@ -1659,15 +1674,15 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
ScopedDebugDisallowReadBarriers sddrb(self);
if (compilation_kind == CompilationKind::kOsr) {
- MutexLock mu(self, *Locks::jit_lock_);
+ ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
if (osr_code_map_.find(method) != osr_code_map_.end()) {
return false;
}
}
if (UNLIKELY(method->IsNative())) {
- MutexLock mu(self, *Locks::jit_lock_);
JniStubKey key(method);
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
auto it = jni_stubs_map_.find(key);
bool new_compilation = false;
if (it == jni_stubs_map_.end()) {
@@ -1721,8 +1736,8 @@ void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
DCHECK_EQ(Thread::Current(), self);
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
auto it = jni_stubs_map_.find(JniStubKey(method));
DCHECK(it != jni_stubs_map_.end());
JniStubData* data = &it->second;
@@ -1737,33 +1752,38 @@ void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
void JitCodeCache::InvalidateAllCompiledCode() {
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- art::MutexLock mu(self, *Locks::jit_lock_);
VLOG(jit) << "Invalidating all compiled code";
Runtime* runtime = Runtime::Current();
ClassLinker* linker = runtime->GetClassLinker();
instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
- // Change entry points of native methods back to the GenericJNI entrypoint.
- for (const auto& entry : jni_stubs_map_) {
- const JniStubData& data = entry.second;
- if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
- continue;
- }
- const OatQuickMethodHeader* method_header =
- OatQuickMethodHeader::FromCodePointer(data.GetCode());
- for (ArtMethod* method : data.GetMethods()) {
- if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
- instr->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
+ {
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
+ // Change entry points of native methods back to the GenericJNI entrypoint.
+ for (const auto& entry : jni_stubs_map_) {
+ const JniStubData& data = entry.second;
+ if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
+ continue;
+ }
+ const OatQuickMethodHeader* method_header =
+ OatQuickMethodHeader::FromCodePointer(data.GetCode());
+ for (ArtMethod* method : data.GetMethods()) {
+ if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
+ instr->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
+ }
}
}
- }
- for (const auto& entry : method_code_map_) {
- ArtMethod* meth = entry.second;
- if (UNLIKELY(meth->IsObsolete())) {
- linker->SetEntryPointsForObsoleteMethod(meth);
- } else {
- instr->InitializeMethodsCode(meth, /*aot_code=*/ nullptr);
+
+ for (const auto& entry : method_code_map_) {
+ ArtMethod* meth = entry.second;
+ if (UNLIKELY(meth->IsObsolete())) {
+ linker->SetEntryPointsForObsoleteMethod(meth);
+ } else {
+ instr->InitializeMethodsCode(meth, /*aot_code=*/ nullptr);
+ }
}
+ osr_code_map_.clear();
+ saved_compiled_methods_map_.clear();
}
for (const auto& entry : zygote_map_) {
@@ -1775,9 +1795,6 @@ void JitCodeCache::InvalidateAllCompiledCode() {
}
instr->InitializeMethodsCode(entry.method, /*aot_code=*/nullptr);
}
-
- saved_compiled_methods_map_.clear();
- osr_code_map_.clear();
}
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
@@ -1793,7 +1810,7 @@ void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
} else {
Thread* self = Thread::Current();
ScopedDebugDisallowReadBarriers sddrb(self);
- MutexLock mu(self, *Locks::jit_lock_);
+ WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
auto it = osr_code_map_.find(method);
if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
// Remove the OSR method, to avoid using it again.
@@ -1824,6 +1841,7 @@ void JitCodeCache::Dump(std::ostream& os) {
<< shared_region_.GetUsedMemoryForData() / KB << "KB / "
<< shared_region_.GetResidentMemoryForData() / KB << "KB\n";
}
+ ReaderMutexLock mu2(Thread::Current(), *Locks::jit_mutator_lock_);
os << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
<< "Current JIT capacity: " << PrettySize(GetCurrentRegion()->GetCurrentCapacity()) << "\n"
<< "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
@@ -1839,7 +1857,7 @@ void JitCodeCache::Dump(std::ostream& os) {
}
void JitCodeCache::DumpAllCompiledMethods(std::ostream& os) {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::jit_mutator_lock_);
for (const auto& [code_ptr, meth] : method_code_map_) { // Includes OSR methods.
OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
os << meth->PrettyMethod() << "@" << std::hex
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8dc13cd14c..2d2f841360 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -437,7 +437,7 @@ class JitCodeCache {
JitCodeCache();
void AddZombieCodeInternal(ArtMethod* method, const void* code_ptr)
- REQUIRES(Locks::jit_lock_)
+ REQUIRES(Locks::jit_mutator_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
@@ -452,9 +452,6 @@ class JitCodeCache {
bool WaitForPotentialCollectionToComplete(Thread* self)
REQUIRES(Locks::jit_lock_) REQUIRES_SHARED(!Locks::mutator_lock_);
- // Notify all waiting threads that a collection is done.
- void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
-
// Remove CHA dependents and underlying allocations for entries in `method_headers`.
void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
REQUIRES(Locks::jit_lock_)
@@ -468,7 +465,7 @@ class JitCodeCache {
// Call given callback for every compiled method in the code cache.
void VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb)
- REQUIRES(Locks::jit_lock_);
+ REQUIRES_SHARED(Locks::jit_mutator_lock_);
// Free code and data allocations for `code_ptr`.
void FreeCodeAndData(const void* code_ptr)
@@ -552,31 +549,29 @@ class JitCodeCache {
// before the declaring class memory is freed.
// Holds compiled code associated with the shorty for a JNI stub.
- SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);
+ SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_mutator_lock_);
// Holds compiled code associated to the ArtMethod.
- SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);
+ SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_mutator_lock_);
// Subset of `method_code_map_`, but keyed by `ArtMethod*`. Used to treat certain
// objects (like `MethodType`-s) as strongly reachable from the corresponding ArtMethod.
SafeMap<ArtMethod*, std::vector<const void*>> method_code_map_reversed_
- GUARDED_BY(Locks::jit_lock_);
+ GUARDED_BY(Locks::jit_mutator_lock_);
// Holds compiled code associated to the ArtMethod. Used when pre-jitting
// methods whose entrypoints have the resolution stub.
- SafeMap<ArtMethod*, const void*> saved_compiled_methods_map_ GUARDED_BY(Locks::jit_lock_);
+ SafeMap<ArtMethod*, const void*> saved_compiled_methods_map_ GUARDED_BY(Locks::jit_mutator_lock_);
// Holds osr compiled code associated to the ArtMethod.
- SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);
-
- // ProfilingInfo objects we have allocated.
- SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
+ SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_mutator_lock_);
// Zombie code and JNI methods to consider for collection.
- std::set<const void*> zombie_code_ GUARDED_BY(Locks::jit_lock_);
- std::set<ArtMethod*> zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
+ std::set<const void*> zombie_code_ GUARDED_BY(Locks::jit_mutator_lock_);
+ std::set<ArtMethod*> zombie_jni_code_ GUARDED_BY(Locks::jit_mutator_lock_);
- std::set<const void*> processed_zombie_code_ GUARDED_BY(Locks::jit_lock_);
- std::set<ArtMethod*> processed_zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
+ // ProfilingInfo objects we have allocated. Mutators don't need to access
+ // these so this can be guarded by the JIT lock.
+ SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
// Methods that the zygote has compiled and can be shared across processes
// forked from the zygote.
@@ -591,7 +586,7 @@ class JitCodeCache {
bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
// Whether a GC task is already scheduled.
- bool gc_task_scheduled_ GUARDED_BY(Locks::jit_lock_);
+ std::atomic<bool> gc_task_scheduled_;
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
@@ -599,6 +594,10 @@ class JitCodeCache {
// Whether we can do garbage collection. Not 'const' as tests may override this.
bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);
+ // Zombie code being processed by the GC.
+ std::set<const void*> processed_zombie_code_ GUARDED_BY(Locks::jit_lock_);
+ std::set<ArtMethod*> processed_zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
+
// ---------------- JIT statistics -------------------------------------- //
// Number of baseline compilations done throughout the lifetime of the JIT.