path: root/runtime/jit/jit_code_cache.cc
author Nicolas Geoffray <ngeoffray@google.com> 2024-04-17 15:19:55 +0100
committer Nicolas Geoffray <ngeoffray@google.com> 2024-04-18 10:25:36 +0000
commit 16567ac3d6925c0b84c819f2e475c2cadbde5dc3 (patch)
tree 75e41b7b4da978a857400410a34a06d2271322b2 /runtime/jit/jit_code_cache.cc
parent 36aba6488cd335f651161be607ac0af240faddc8 (diff)
Reland "Revamp JIT GC."
This reverts commit de8e6689cb3f829b73e921d843cfa38bbfe996bf.

Reason for revert:
- Adjust test for new behavior.
- Enter MarkCompiledCodeOnThreadStacks with the mutator lock, as expected
  by the checkpoint.

Change-Id: Ic9fca3e5fd04da62081701f4deccbc68bee56c2f
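The mutator-lock point above corresponds to the DoCollection() hunk later in
this diff: the thread checkpoint now runs under ScopedObjectAccess, which holds
the mutator lock in shared mode, as the checkpoint mechanism expects. Excerpt
from the patch:

    {
      ScopedObjectAccess soa(self);
      // Run a checkpoint on all threads to mark the JIT compiled code they are running.
      MarkCompiledCodeOnThreadStacks(self);

      // Remove zombie code which hasn't been marked.
      RemoveUnmarkedCode(self);
    }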
Diffstat (limited to 'runtime/jit/jit_code_cache.cc')
-rw-r--r--  runtime/jit/jit_code_cache.cc  329
1 file changed, 175 insertions(+), 154 deletions(-)
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7ea9efb0f9..01637008c1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -342,6 +342,8 @@ const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* metho
auto it = saved_compiled_methods_map_.find(method);
if (it != saved_compiled_methods_map_.end()) {
code_ptr = it->second;
+ // Now that we're using the saved entrypoint, remove it from the saved map.
+ saved_compiled_methods_map_.erase(it);
}
}
if (code_ptr != nullptr) {
@@ -489,33 +491,40 @@ void JitCodeCache::FreeAllMethodHeaders(
->RemoveDependentsWithMethodHeaders(method_headers);
}
- ScopedCodeCacheWrite scc(private_region_);
- for (const OatQuickMethodHeader* method_header : method_headers) {
- FreeCodeAndData(method_header->GetCode());
- }
+ {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ ScopedCodeCacheWrite scc(private_region_);
+ for (const OatQuickMethodHeader* method_header : method_headers) {
+ FreeCodeAndData(method_header->GetCode());
+ }
- // We have potentially removed a lot of debug info. Do maintenance pass to save space.
- RepackNativeDebugInfoForJit();
+ // We have potentially removed a lot of debug info. Do maintenance pass to save space.
+ RepackNativeDebugInfoForJit();
+ }
// Check that the set of compiled methods exactly matches native debug information.
// Does not check zygote methods since they can change concurrently.
if (kIsDebugBuild && !Runtime::Current()->IsZygote()) {
std::map<const void*, ArtMethod*> compiled_methods;
- VisitAllMethods([&](const void* addr, ArtMethod* method) {
- if (!IsInZygoteExecSpace(addr)) {
- CHECK(addr != nullptr && method != nullptr);
- compiled_methods.emplace(addr, method);
- }
- });
std::set<const void*> debug_info;
- ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
- addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA)); // Thumb-bit.
- CHECK(debug_info.emplace(addr).second) << "Duplicate debug info: " << addr << " " << name;
- CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
- });
+ {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ VisitAllMethods([&](const void* addr, ArtMethod* method) {
+ if (!IsInZygoteExecSpace(addr)) {
+ CHECK(addr != nullptr && method != nullptr);
+ compiled_methods.emplace(addr, method);
+ }
+ });
+ ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
+ addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA)); // Thumb-bit.
+ bool res = debug_info.emplace(addr).second;
+ CHECK(res) << "Duplicate debug info: " << addr << " " << name;
+ CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
+ });
+ }
if (!debug_info.empty()) { // If debug-info generation is enabled.
for (auto it : compiled_methods) {
- CHECK_EQ(debug_info.count(it.first), 1u) << "No debug info: " << it.second->PrettyMethod();
+ CHECK_EQ(debug_info.count(it.first), 1u) << "Missing debug info";
}
CHECK_EQ(compiled_methods.size(), debug_info.size());
}
@@ -546,17 +555,26 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
++it;
}
}
+ for (auto it = zombie_jni_code_.begin(); it != zombie_jni_code_.end();) {
+ if (alloc.ContainsUnsafe(*it)) {
+ it = zombie_jni_code_.erase(it);
+ } else {
+ ++it;
+ }
+ }
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
if (alloc.ContainsUnsafe(it->second)) {
method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
+ zombie_code_.erase(it->first);
it = method_code_map_.erase(it);
} else {
++it;
}
}
}
for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
+ DCHECK(!ContainsElement(zombie_code_, it->second));
if (alloc.ContainsUnsafe(it->first)) {
// Note that the code has already been pushed to method_headers in the loop
// above and is going to be removed in FreeCode() below.
@@ -574,8 +592,8 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
++it;
}
}
- FreeAllMethodHeaders(method_headers);
}
+ FreeAllMethodHeaders(method_headers);
}
bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
@@ -641,18 +659,6 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm)
method->UpdateCounter(/* new_samples= */ 1);
}
-void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
- while (collection_in_progress_) {
- Locks::jit_lock_->Unlock(self);
- {
- ScopedThreadSuspension sts(self, ThreadState::kSuspended);
- MutexLock mu(self, *Locks::jit_lock_);
- WaitForPotentialCollectionToComplete(self);
- }
- Locks::jit_lock_->Lock(self);
- }
-}
-
bool JitCodeCache::Commit(Thread* self,
JitMemoryRegion* region,
ArtMethod* method,
@@ -680,9 +686,6 @@ bool JitCodeCache::Commit(Thread* self,
OatQuickMethodHeader* method_header = nullptr;
{
MutexLock mu(self, *Locks::jit_lock_);
- // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
- // finish.
- WaitForPotentialCollectionToCompleteRunnable(self);
const uint8_t* code_ptr = region->CommitCode(reserved_code, code, stack_map_data);
if (code_ptr == nullptr) {
return false;
@@ -782,11 +785,6 @@ bool JitCodeCache::Commit(Thread* self,
method, method_header->GetEntryPoint());
}
}
- if (collection_in_progress_) {
- // We need to update the live bitmap if there is a GC to ensure it sees this new
- // code.
- GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
- }
VLOG(jit)
<< "JIT added (kind=" << compilation_kind << ") "
<< ArtMethod::PrettyMethod(method) << "@" << method
@@ -855,6 +853,7 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
FreeCodeAndData(it->second.GetCode());
}
jni_stubs_map_.erase(it);
+ zombie_jni_code_.erase(method);
} else {
it->first.UpdateShorty(it->second.GetMethods().front());
}
@@ -985,7 +984,6 @@ bool JitCodeCache::Reserve(Thread* self,
{
ScopedThreadSuspension sts(self, ThreadState::kSuspended);
MutexLock mu(self, *Locks::jit_lock_);
- WaitForPotentialCollectionToComplete(self);
ScopedCodeCacheWrite ccw(*region);
code = region->AllocateCode(code_size);
data = region->AllocateData(data_size);
@@ -1002,8 +1000,8 @@ bool JitCodeCache::Reserve(Thread* self,
<< PrettySize(data_size);
return false;
}
- // Run a code cache collection and try again.
- GarbageCollectCache(self);
+ // Increase the capacity and try again.
+ IncreaseCodeCacheCapacity(self);
}
*reserved_code = ArrayRef<const uint8_t>(code, code_size);
@@ -1081,11 +1079,6 @@ class MarkCodeClosure final : public Closure {
Barrier* const barrier_;
};
-void JitCodeCache::NotifyCollectionDone(Thread* self) {
- collection_in_progress_ = false;
- lock_cond_.Broadcast(self);
-}
-
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
Barrier barrier(0);
size_t threads_running_checkpoint = 0;
@@ -1103,86 +1096,117 @@ bool JitCodeCache::IsAtMaxCapacity() const {
return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
}
-void JitCodeCache::GarbageCollectCache(Thread* self) {
- ScopedTrace trace(__FUNCTION__);
- // Wait for an existing collection, or let everyone know we are starting one.
- {
- ScopedThreadSuspension sts(self, ThreadState::kSuspended);
- MutexLock mu(self, *Locks::jit_lock_);
- if (!garbage_collect_code_) {
- private_region_.IncreaseCodeCacheCapacity();
- return;
- } else if (WaitForPotentialCollectionToComplete(self)) {
- return;
- } else {
- number_of_collections_++;
- live_bitmap_.reset(CodeCacheBitmap::Create(
- "code-cache-bitmap",
- reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
- reinterpret_cast<uintptr_t>(
- private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
- collection_in_progress_ = true;
- }
- }
-
- TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
- {
- TimingLogger::ScopedTiming st("Code cache collection", &logger);
-
- VLOG(jit) << "Do code cache collection, code="
- << PrettySize(CodeCacheSize())
- << ", data=" << PrettySize(DataCacheSize());
-
- DoCollection(self);
-
- VLOG(jit) << "After code cache collection, code="
- << PrettySize(CodeCacheSize())
- << ", data=" << PrettySize(DataCacheSize());
-
- {
- MutexLock mu(self, *Locks::jit_lock_);
- private_region_.IncreaseCodeCacheCapacity();
- live_bitmap_.reset(nullptr);
- NotifyCollectionDone(self);
- }
- }
- Runtime::Current()->GetJit()->AddTimingLogger(logger);
+void JitCodeCache::IncreaseCodeCacheCapacity(Thread* self) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ private_region_.IncreaseCodeCacheCapacity();
}
void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
ScopedTrace trace(__FUNCTION__);
- ScopedDebugDisallowReadBarriers sddrb(self);
std::unordered_set<OatQuickMethodHeader*> method_headers;
+ ScopedDebugDisallowReadBarriers sddrb(self);
{
MutexLock mu(self, *Locks::jit_lock_);
- // Iterate over all compiled code and remove entries that are not marked.
- for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
- JniStubData* data = &it->second;
- if (IsInZygoteExecSpace(data->GetCode()) ||
- !data->IsCompiled() ||
- GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
- ++it;
- } else {
- method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
- for (ArtMethod* method : data->GetMethods()) {
- VLOG(jit) << "JIT removed (JNI) " << method->PrettyMethod() << ": " << data->GetCode();
- }
- it = jni_stubs_map_.erase(it);
- }
- }
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
+ // Iterate over all zombie code and remove entries that are not marked.
+ for (auto it = zombie_code_.begin(); it != zombie_code_.end();) {
+ const void* code_ptr = *it;
uintptr_t allocation = FromCodeToAllocation(code_ptr);
- if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
+ DCHECK(!IsInZygoteExecSpace(code_ptr));
+ if (GetLiveBitmap()->Test(allocation)) {
++it;
} else {
OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
method_headers.insert(header);
- VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
- it = method_code_map_.erase(it);
+ VLOG(jit) << "JIT removed " << *it;
+ it = zombie_code_.erase(it);
+ method_code_map_.erase(header->GetCode());
+ }
+ }
+ for (auto it = zombie_jni_code_.begin(); it != zombie_jni_code_.end();) {
+ ArtMethod* method = *it;
+ auto stub = jni_stubs_map_.find(JniStubKey(method));
+ CHECK(stub != jni_stubs_map_.end());
+ JniStubData& data = stub->second;
+ CHECK(data.IsCompiled());
+ CHECK(ContainsElement(data.GetMethods(), method));
+ uintptr_t allocation = FromCodeToAllocation(data.GetCode());
+ if (method->GetEntryPointFromQuickCompiledCode() ==
+ OatQuickMethodHeader::FromCodePointer(data.GetCode())->GetEntryPoint()) {
+ // The stub got reused for this method, remove ourselves from the zombie
+ // list.
+ it = zombie_jni_code_.erase(it);
+ } else if (!GetLiveBitmap()->Test(allocation)) {
+ if (data.GetMethods().empty()) {
+ OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
+ method_headers.insert(header);
+ CHECK(ContainsPc(header));
+ VLOG(jit) << "JIT removed native code of" << method->PrettyMethod();
+ jni_stubs_map_.erase(stub);
+ } else if (ContainsElement(data.GetMethods(), method)) {
+ if (data.GetMethods().size() == 1) {
+ OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
+ method_headers.insert(header);
+ CHECK(ContainsPc(header));
+ VLOG(jit) << "JIT removed native code of" << method->PrettyMethod();
+ jni_stubs_map_.erase(stub);
+ } else {
+ stub->second.RemoveMethod(method);
+ stub->first.UpdateShorty(stub->second.GetMethods().front());
+ }
+ }
+ it = zombie_jni_code_.erase(it);
+ } else {
+ ++it;
}
}
- FreeAllMethodHeaders(method_headers);
+ }
+ FreeAllMethodHeaders(method_headers);
+}
+
+void JitCodeCache::AddZombieCode(ArtMethod* method, const void* entry_point) {
+ CHECK(method->IsNative() || (method->GetEntryPointFromQuickCompiledCode() != entry_point));
+ const void* code_ptr = OatQuickMethodHeader::FromEntryPoint(entry_point)->GetCode();
+ if (!IsInZygoteExecSpace(code_ptr)) {
+ Thread* self = Thread::Current();
+ if (Locks::jit_lock_->IsExclusiveHeld(self)) {
+ AddZombieCodeInternal(method, code_ptr);
+ } else {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ AddZombieCodeInternal(method, code_ptr);
+ }
+ }
+}
+
+
+class JitGcTask final : public Task {
+ public:
+ JitGcTask() {}
+
+ void Run(Thread* self) override {
+ Runtime::Current()->GetJit()->GetCodeCache()->DoCollection(self);
+ }
+
+ void Finalize() override {
+ delete this;
+ }
+};
+
+void JitCodeCache::AddZombieCodeInternal(ArtMethod* method, const void* code_ptr) {
+ if (method->IsNative()) {
+ zombie_jni_code_.insert(method);
+ } else {
+ zombie_code_.insert(code_ptr);
+ }
+ // Arbitrary threshold of number of zombie code before doing a GC.
+ static constexpr size_t kNumberOfZombieCodeThreshold = kIsDebugBuild ? 1 : 1000;
+ size_t number_of_code_to_delete =
+ zombie_code_.size() + zombie_jni_code_.size() + osr_code_map_.size();
+ if (number_of_code_to_delete >= kNumberOfZombieCodeThreshold) {
+ JitThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
+ if (pool != nullptr && !gc_task_scheduled_) {
+ gc_task_scheduled_ = true;
+ pool->AddTask(Thread::Current(), new JitGcTask());
+ }
}
}
@@ -1237,48 +1261,50 @@ void JitCodeCache::DoCollection(Thread* self) {
{
ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
- // Mark compiled code that are entrypoints of ArtMethods. Compiled code that is not
- // an entry point is either:
- // - an osr compiled code, that will be removed if not in a thread call stack.
- // - discarded compiled code, that will be removed if not in a thread call stack.
- for (const auto& entry : jni_stubs_map_) {
- const JniStubData& data = entry.second;
- const void* code_ptr = data.GetCode();
- if (IsInZygoteExecSpace(code_ptr)) {
- continue;
- }
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- for (ArtMethod* method : data.GetMethods()) {
- if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
- GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
- break;
- }
- }
- }
- for (const auto& it : method_code_map_) {
- ArtMethod* method = it.second;
- const void* code_ptr = it.first;
- if (IsInZygoteExecSpace(code_ptr)) {
- continue;
- }
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
- GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
- }
+ if (!garbage_collect_code_) {
+ return;
+ } else if (WaitForPotentialCollectionToComplete(self)) {
+ return;
}
-
+ collection_in_progress_ = true;
+ number_of_collections_++;
+ live_bitmap_.reset(CodeCacheBitmap::Create(
+ "code-cache-bitmap",
+ reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
+ reinterpret_cast<uintptr_t>(
+ private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
// Empty osr method map, as osr compiled code will be deleted (except the ones
// on thread stacks).
+ for (auto it = osr_code_map_.begin(); it != osr_code_map_.end(); ++it) {
+ zombie_code_.insert(it->second);
+ }
osr_code_map_.clear();
}
+ TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
+ {
+ TimingLogger::ScopedTiming st("Code cache collection", &logger);
+
+ {
+ ScopedObjectAccess soa(self);
+ // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+ MarkCompiledCodeOnThreadStacks(self);
+
+ // Remove zombie code which hasn't been marked.
+ RemoveUnmarkedCode(self);
+ }
+
+ MutexLock mu(self, *Locks::jit_lock_);
+ live_bitmap_.reset(nullptr);
+ NotifyCollectionDone(self);
+ }
- // Run a checkpoint on all threads to mark the JIT compiled code they are running.
- MarkCompiledCodeOnThreadStacks(self);
+ Runtime::Current()->GetJit()->AddTimingLogger(logger);
+}
- // At this point, mutator threads are still running, and entrypoints of methods can
- // change. We do know they cannot change to a code cache entry that is not marked,
- // therefore we can safely remove those entries.
- RemoveUnmarkedCode(self);
+void JitCodeCache::NotifyCollectionDone(Thread* self) {
+ collection_in_progress_ = false;
+ gc_task_scheduled_ = false;
+ lock_cond_.Broadcast(self);
}
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
@@ -1382,7 +1408,7 @@ ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
}
if (info == nullptr) {
- GarbageCollectCache(self);
+ IncreaseCodeCacheCapacity(self);
MutexLock mu(self, *Locks::jit_lock_);
info = AddProfilingInfoInternal(self, method, inline_cache_entries, branch_cache_entries);
}
@@ -1610,11 +1636,6 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
// as well change them back as this stub shall not be collected anyway and this
// can avoid a few expensive GenericJNI calls.
data->UpdateEntryPoints(entrypoint);
- if (collection_in_progress_) {
- if (!IsInZygoteExecSpace(data->GetCode())) {
- GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
- }
- }
}
return new_compilation;
} else {
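
Taken together, the patch replaces the old synchronous GarbageCollectCache()
flow with a deferred one: discarded entrypoints are recorded via
AddZombieCode(), and once zombie_code_, zombie_jni_code_ and osr_code_map_
together reach kNumberOfZombieCodeThreshold, a JitGcTask is posted to the JIT
thread pool to run DoCollection(). The sketch below is an illustrative analogue
of that trigger pattern in plain standard C++; the names (ZombieSet, Add, Drain,
schedule_cleanup) are hypothetical and are not ART APIs.

    // Plain C++ analogue of the zombie-code trigger: dead entries accumulate in
    // a set and, once a threshold is crossed, a single cleanup task is scheduled
    // instead of collecting inline on the hot path.
    #include <cstddef>
    #include <functional>
    #include <mutex>
    #include <unordered_set>
    #include <utility>

    class ZombieSet {
     public:
      ZombieSet(size_t threshold, std::function<void()> schedule_cleanup)
          : threshold_(threshold), schedule_cleanup_(std::move(schedule_cleanup)) {}

      // Record a dead entry; schedule at most one pending cleanup once the
      // threshold is reached.
      void Add(const void* entry) {
        std::lock_guard<std::mutex> lock(mutex_);
        zombies_.insert(entry);
        if (zombies_.size() >= threshold_ && !cleanup_scheduled_) {
          cleanup_scheduled_ = true;
          schedule_cleanup_();  // e.g. post a task to a worker pool.
        }
      }

      // Called by the cleanup task: take the current zombies and allow a future
      // cleanup to be scheduled again.
      std::unordered_set<const void*> Drain() {
        std::lock_guard<std::mutex> lock(mutex_);
        cleanup_scheduled_ = false;
        return std::exchange(zombies_, {});
      }

     private:
      std::mutex mutex_;
      std::unordered_set<const void*> zombies_;
      bool cleanup_scheduled_ = false;
      const size_t threshold_;
      const std::function<void()> schedule_cleanup_;
    };

In the patch itself this role is played by zombie_code_/zombie_jni_code_, the
gc_task_scheduled_ flag, and the JitGcTask queued on the JIT thread pool; the
actual reclamation additionally runs the thread-stack checkpoint so that code
still referenced by a running frame is kept.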