Support garbage collection of JITted code.
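
The JIT code cache now tracks its compiled code in a map from code
pointer to ArtMethod and can collect code that is no longer used.
A collection resets the entry points of all JIT compiled methods to
the interpreter bridge, runs a checkpoint on all threads to mark the
code still referenced by a stack frame in a live bitmap, then frees
the unmarked code and clears the hotness counters of the affected
methods. CommitCode and ReserveData trigger a collection and retry
once when an allocation fails. The ClassLinker now notifies the code
cache when a class loader's LinearAlloc is deleted so that the code
compiled for its methods can be removed.
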
Change-Id: I9afc544460ae4fb31149644b6196ac7f5182c784
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f5befdf..a10d7af 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -390,21 +390,70 @@
}
Runtime* runtime = Runtime::Current();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
- DCHECK(code != nullptr);
+ const void* existing_entry_point = GetEntryPointFromQuickCompiledCode();
+ DCHECK(existing_entry_point != nullptr);
+ ClassLinker* class_linker = runtime->GetClassLinker();
- if (runtime->GetClassLinker()->IsQuickGenericJniStub(code)) {
+ if (class_linker->IsQuickGenericJniStub(existing_entry_point)) {
// The generic JNI does not have any method header.
return nullptr;
}
- code = EntryPointToCodePointer(code);
- OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
- reinterpret_cast<uintptr_t>(code) - sizeof(OatQuickMethodHeader));
+ // Check whether the current entry point contains this pc.
+ if (!class_linker->IsQuickResolutionStub(existing_entry_point) &&
+ !class_linker->IsQuickToInterpreterBridge(existing_entry_point)) {
+ OatQuickMethodHeader* method_header =
+ OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
- // TODO(ngeoffray): validate the pc. Note that unit tests can give unrelated pcs (for
- // example arch_test).
- UNUSED(pc);
+ if (method_header->Contains(pc)) {
+ return method_header;
+ }
+ }
+
+ // Check whether the pc is in the JIT code cache.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ OatQuickMethodHeader* method_header = code_cache->LookupMethodHeader(pc, this);
+ if (method_header != nullptr) {
+ DCHECK(method_header->Contains(pc));
+ return method_header;
+ } else {
+ DCHECK(!code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) << std::hex << pc;
+ }
+ }
+
+ // The code has to be in an oat file.
+ bool found;
+ OatFile::OatMethod oat_method = class_linker->FindOatMethodFor(this, &found);
+ if (!found) {
+ // Only for unit tests.
+ // TODO(ngeoffray): Update these tests to pass the right pc?
+ return OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+ }
+ const void* oat_entry_point = oat_method.GetQuickCode();
+ if (oat_entry_point == nullptr || class_linker->IsQuickGenericJniStub(oat_entry_point)) {
+ DCHECK(IsNative());
+ return nullptr;
+ }
+
+ OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromEntryPoint(oat_entry_point);
+ if (pc == 0) {
+ // This is a downcall; it can only happen for a native method.
+ DCHECK(IsNative());
+ return method_header;
+ }
+
+ if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ // If we're instrumenting, just return the compiled OAT code.
+ // TODO(ngeoffray): Avoid this call path.
+ return method_header;
+ }
+
+ DCHECK(method_header->Contains(pc))
+ << PrettyMethod(this)
+ << std::hex << pc << " " << oat_entry_point
+ << " " << (uintptr_t)(method_header->code_ + method_header->code_size_);
return method_header;
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 9f1495c..bb9804e 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -433,6 +433,10 @@
return ++hotness_count_;
}
+ void ClearCounter() {
+ hotness_count_ = 0;
+ }
+
const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the method header for the compiled code containing 'pc'. Note that runtime
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 81622e1..d6d6448 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1174,15 +1174,26 @@
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
Thread* const self = Thread::Current();
- JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
for (const ClassLoaderData& data : class_loaders_) {
- vm->DeleteWeakGlobalRef(self, data.weak_root);
- delete data.allocator;
- delete data.class_table;
+ DeleteClassLoader(self, data);
}
class_loaders_.clear();
}
+void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data) {
+ Runtime* const runtime = Runtime::Current();
+ JavaVMExt* const vm = runtime->GetJavaVM();
+ vm->DeleteWeakGlobalRef(self, data.weak_root);
+ if (runtime->GetJit() != nullptr) {
+ jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
+ if (code_cache != nullptr) {
+ code_cache->RemoveMethodsIn(self, *data.allocator);
+ }
+ }
+ delete data.allocator;
+ delete data.class_table;
+}
+
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
return down_cast<mirror::PointerArray*>(image_pointer_size_ == 8u ?
static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length)) :
@@ -1833,13 +1844,6 @@
return code;
}
}
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- auto* code = jit->GetCodeCache()->GetCodeFor(method);
- if (code != nullptr) {
- return code;
- }
- }
if (method->IsNative()) {
// No code and native? Use generic trampoline.
return GetQuickGenericJniStub();
@@ -1856,13 +1860,6 @@
if (found) {
return oat_method.GetQuickCode();
}
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- auto* code = jit->GetCodeCache()->GetCodeFor(method);
- if (code != nullptr) {
- return code;
- }
- }
return nullptr;
}
@@ -6387,7 +6384,6 @@
void ClassLinker::CleanupClassLoaders() {
Thread* const self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
const ClassLoaderData& data = *it;
// Need to use DecodeJObject so that we get null for cleared JNI weak globals.
@@ -6395,10 +6391,7 @@
if (class_loader != nullptr) {
++it;
} else {
- // Weak reference was cleared, delete the data associated with this class loader.
- delete data.class_table;
- delete data.allocator;
- vm->DeleteWeakGlobalRef(self, data.weak_root);
+ DeleteClassLoader(self, data);
it = class_loaders_.erase(it);
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a2d38ac..392efd2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -551,6 +551,10 @@
LinearAlloc* allocator;
};
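+ // Release the resources associated with 'data': its JNI weak global root, any
+ // JIT compiled code for methods in its LinearAlloc, and its allocator and
+ // class table.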
+ static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 4de8a8e..b1d4d35 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -93,7 +93,7 @@
fake_code_.begin(), fake_code_.end());
// NOTE: Don't align the code (it will not be executed) but check that the Thumb2
- // adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer().
+ // adjustment will be a NOP, see EntryPointToCodePointer().
CHECK_ALIGNED(mapping_table_offset, 2);
const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index fdded02..380cb8e 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -18,6 +18,7 @@
#include "base/bit_utils.h"
#include "card_table.h"
+#include "jit/jit_code_cache.h"
#include "mem_map.h"
namespace art {
@@ -91,6 +92,7 @@
}
template class MemoryRangeBitmap<CardTable::kCardSize>;
+template class MemoryRangeBitmap<jit::kJitCodeAlignment>;
} // namespace accounting
} // namespace gc
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ed64d7e..4db37e6 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -97,16 +97,6 @@
static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
SHARED_REQUIRES(Locks::mutator_lock_) {
- Runtime* const runtime = Runtime::Current();
- jit::Jit* jit = runtime->GetJit();
- if (jit != nullptr) {
- const void* old_code_ptr = method->GetEntryPointFromQuickCompiledCode();
- jit::JitCodeCache* code_cache = jit->GetCodeCache();
- if (code_cache->ContainsCodePtr(old_code_ptr)) {
- // Save the old compiled code since we need it to implement ClassLinker::GetQuickOatCodeFor.
- code_cache->SaveCompiledCode(method, old_code_ptr);
- }
- }
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 0607493..5afd28e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -49,7 +49,7 @@
void Jit::DumpInfo(std::ostream& os) {
os << "Code cache size=" << PrettySize(code_cache_->CodeCacheSize())
<< " data cache size=" << PrettySize(code_cache_->DataCacheSize())
- << " num methods=" << code_cache_->NumMethods()
+ << " number of compiled code=" << code_cache_->NumberOfCompiledCode()
<< "\n";
cumulative_timings_.Dump(os);
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4187358..2d0a2a5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,8 +19,12 @@
#include <sstream>
#include "art_method-inl.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "gc/accounting/bitmap-inl.h"
+#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
+#include "thread_list.h"
namespace art {
namespace jit {
@@ -74,14 +78,10 @@
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
: lock_("Jit code cache", kJitCodeCacheLock),
+ lock_cond_("Jit code cache variable", lock_),
+ collection_in_progress_(false),
code_map_(code_map),
- data_map_(data_map),
- num_methods_(0) {
-
- VLOG(jit) << "Created jit code cache: data size="
- << PrettySize(data_map_->Size())
- << ", code size="
- << PrettySize(code_map_->Size());
+ data_map_(data_map) {
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
@@ -96,13 +96,22 @@
CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
+
+ live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
+ reinterpret_cast<uintptr_t>(code_map_->Begin()),
+ reinterpret_cast<uintptr_t>(code_map_->End())));
+
+ if (live_bitmap_.get() == nullptr) {
+ PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
+ }
+
+ VLOG(jit) << "Created jit code cache: data size="
+ << PrettySize(data_map_->Size())
+ << ", code size="
+ << PrettySize(code_map_->Size());
}
-bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
- return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
-}
-
-bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
+bool JitCodeCache::ContainsPc(const void* ptr) const {
return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
@@ -121,6 +130,7 @@
};
uint8_t* JitCodeCache::CommitCode(Thread* self,
+ ArtMethod* method,
const uint8_t* mapping_table,
const uint8_t* vmap_table,
const uint8_t* gc_map,
@@ -129,6 +139,93 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size) {
+ uint8_t* result = CommitCodeInternal(self,
+ method,
+ mapping_table,
+ vmap_table,
+ gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code,
+ code_size);
+ if (result == nullptr) {
+ // The allocation failed: run a code cache collection and retry once.
+ GarbageCollectCache(self);
+ result = CommitCodeInternal(self,
+ method,
+ mapping_table,
+ vmap_table,
+ gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code,
+ code_size);
+ }
+ return result;
+}
+
+bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
+ bool in_collection = false;
+ while (collection_in_progress_) {
+ in_collection = true;
+ lock_cond_.Wait(self);
+ }
+ return in_collection;
+}
+
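+// Compiled code is preceded by its OatQuickMethodHeader: the mspace allocation
+// starts 'RoundUp(sizeof(OatQuickMethodHeader), alignment)' bytes before the code.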
+static uintptr_t FromCodeToAllocation(const void* code) {
+ size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
+}
+
+void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ const uint8_t* data = method_header->GetNativeGcMap();
+ if (data != nullptr) {
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ data = method_header->GetMappingTable();
+ if (data != nullptr) {
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ // Use the offset directly to avoid the sanity check that the method was
+ // compiled with the optimizing compiler.
+ // TODO(ngeoffray): Clean up.
+ if (method_header->vmap_table_offset_ != 0) {
+ data = method_header->code_ - method_header->vmap_table_offset_;
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
+}
+
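+// Remove and free all compiled code whose ArtMethod is allocated in 'alloc'.
+// Called by the ClassLinker when the corresponding class loader is deleted.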
+void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
+ MutexLock mu(self, lock_);
+ // We do not check if a code cache GC is in progress, as this method is called
+ // with the classlinker_classes_lock_ held, and suspending ourselves could
+ // lead to a deadlock.
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ if (alloc.ContainsUnsafe(it->second)) {
+ FreeCode(it->first, it->second);
+ it = method_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
+ ArtMethod* method,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size) {
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -137,7 +234,9 @@
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
+ ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
{
ScopedCodeCacheWrite scc(code_map_.get());
uint8_t* result = reinterpret_cast<uint8_t*>(
@@ -149,7 +248,7 @@
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);
std::copy(code, code + code_size, code_ptr);
- method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
new (method_header) OatQuickMethodHeader(
(mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
(vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
@@ -162,8 +261,12 @@
__builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
-
- ++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
+ method_code_map_.Put(code_ptr, method);
+ // We checked earlier that no collection was in progress. If one were, setting
+ // the entry point of a method would be unsafe, as the collection could delete
+ // the code it points to.
+ DCHECK(!collection_in_progress_);
+ method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
return reinterpret_cast<uint8_t*>(method_header);
}
@@ -181,10 +284,32 @@
return bytes_allocated;
}
+size_t JitCodeCache::NumberOfCompiledCode() {
+ MutexLock mu(Thread::Current(), lock_);
+ return method_code_map_.size();
+}
+
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
size = RoundUp(size, sizeof(void*));
- MutexLock mu(self, lock_);
- return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ uint8_t* result = nullptr;
+
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ }
+
+ if (result == nullptr) {
+ // The allocation failed: run a code cache collection and retry once.
+ GarbageCollectCache(self);
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ }
+
+ return result;
}
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
@@ -196,29 +321,143 @@
return result;
}
-const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
- const void* code = method->GetEntryPointFromQuickCompiledCode();
- if (ContainsCodePtr(code)) {
- return code;
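+// Stack visitor that marks, in the code cache's live bitmap, the JIT compiled
+// code referenced by the visited frames.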
+class MarkCodeVisitor FINAL : public StackVisitor {
+ public:
+ MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
+ : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+ code_cache_(code_cache_in),
+ bitmap_(code_cache_->GetLiveBitmap()) {}
+
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return true;
+ }
+ const void* code = method_header->GetCode();
+ if (code_cache_->ContainsPc(code)) {
+ // Use the atomic set version, as multiple threads are executing this code.
+ bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
+ }
+ return true;
}
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return it->second;
+
+ private:
+ JitCodeCache* const code_cache_;
+ CodeCacheBitmap* const bitmap_;
+};
+
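+// Checkpoint closure that walks a thread's stack with MarkCodeVisitor.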
+class MarkCodeClosure FINAL : public Closure {
+ public:
+ MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
+ : code_cache_(code_cache), barrier_(barrier) {}
+
+ void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(thread == Thread::Current() || thread->IsSuspended());
+ MarkCodeVisitor visitor(thread, code_cache_);
+ visitor.WalkStack();
+ if (thread->GetState() == kRunnable) {
+ barrier_->Pass(Thread::Current());
+ }
}
- return nullptr;
+
+ private:
+ JitCodeCache* const code_cache_;
+ Barrier* const barrier_;
+};
+
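+// Collect the code cache:
+// 1) Set the entry points of all JIT compiled methods back to the interpreter bridge.
+// 2) Run a checkpoint on all threads to mark, in the live bitmap, the code still
+//    referenced by a stack frame.
+// 3) Free the unmarked code and restore the entry points of the methods whose code
+//    was marked.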
+void JitCodeCache::GarbageCollectCache(Thread* self) {
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "Clearing code cache, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
+
+ size_t map_size = 0;
+ ScopedThreadSuspension sts(self, kSuspended);
+
+ // Walk over all compiled methods and set the entry points of these
+ // methods to interpreter.
+ {
+ MutexLock mu(self, lock_);
+ if (WaitForPotentialCollectionToComplete(self)) {
+ return;
+ }
+ collection_in_progress_ = true;
+ map_size = method_code_map_.size();
+ for (auto& it : method_code_map_) {
+ it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
+ }
+
+ // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+ {
+ Barrier barrier(0);
+ MarkCodeClosure closure(this, &barrier);
+ size_t threads_running_checkpoint =
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ if (threads_running_checkpoint != 0) {
+ barrier.Increment(self, threads_running_checkpoint);
+ }
+ }
+
+ // Free unused compiled code, and restore the entry point of used compiled code.
+ {
+ MutexLock mu(self, lock_);
+ DCHECK_EQ(map_size, method_code_map_.size());
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (GetLiveBitmap()->Test(allocation)) {
+ method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+ ++it;
+ } else {
+ method->ClearCounter();
+ DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ }
+ }
+ GetLiveBitmap()->Bitmap::Clear();
+ collection_in_progress_ = false;
+ lock_cond_.Broadcast(self);
+ }
+
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "After clearing code cache, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
}
-void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) {
- DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
- DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
- << old_code_ptr;
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return;
+
+OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
+ static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+ if (kRuntimeISA == kArm) {
+ // On Thumb-2, the pc is offset by one (its low bit marks the Thumb state).
+ --pc;
}
- method_code_map_.Put(method, old_code_ptr);
+ if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
+ return nullptr;
+ }
+
+ MutexLock mu(Thread::Current(), lock_);
+ if (method_code_map_.empty()) {
+ return nullptr;
+ }
+ // Find the compiled code with the greatest start address that is <= pc.
+ auto it = method_code_map_.upper_bound(reinterpret_cast<const void*>(pc));
+ if (it == method_code_map_.begin()) {
+ return nullptr;
+ }
+ --it;
+
+ const void* code_ptr = it->first;
+ OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (!method_header->Contains(pc)) {
+ return nullptr;
+ }
+ DCHECK_EQ(it->second, method)
+ << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
+ return method_header;
}
} // namespace jit
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fa90c18..4e415b8 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc/accounting/bitmap.h"
#include "gc/allocator/dlmalloc.h"
#include "gc_root.h"
#include "jni.h"
@@ -33,32 +34,40 @@
namespace art {
class ArtMethod;
-class CompiledMethod;
-class CompilerCallbacks;
+class LinearAlloc;
namespace jit {
class JitInstrumentationCache;
+// Alignment that will suit all architectures.
+static constexpr int kJitCodeAlignment = 16;
+using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
+
class JitCodeCache {
public:
static constexpr size_t kMaxCapacity = 1 * GB;
- static constexpr size_t kDefaultCapacity = 2 * MB;
+ // Use a very low default capacity for debug builds to stress the code cache
+ // collection.
+ static constexpr size_t kDefaultCapacity = kIsDebugBuild ? 20 * KB : 2 * MB;
// Create the code cache with a code + data capacity equal to "capacity", error message is passed
// in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
- size_t NumMethods() const {
- return num_methods_;
- }
-
+ // Number of bytes allocated in the code cache.
size_t CodeCacheSize() REQUIRES(!lock_);
+ // Number of bytes allocated in the data cache.
size_t DataCacheSize() REQUIRES(!lock_);
+ // Number of compiled code entries in the code cache. Note that this is not the
+ // number of methods that have been JIT compiled, as some may have been collected.
+ size_t NumberOfCompiledCode() REQUIRES(!lock_);
+
// Allocate and write code and its metadata to the code cache.
uint8_t* CommitCode(Thread* self,
+ ArtMethod* method,
const uint8_t* mapping_table,
const uint8_t* vmap_table,
const uint8_t* gc_map,
@@ -67,51 +76,89 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size)
+ SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- // Return true if the code cache contains the code pointer which si the entrypoint of the method.
- bool ContainsMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Return true if the code cache contains a code ptr.
- bool ContainsCodePtr(const void* ptr) const;
+ // Return true if the code cache contains this pc.
+ bool ContainsPc(const void* pc) const;
// Reserve a region of data of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
+ uint8_t* ReserveData(Thread* self, size_t size)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
// Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
+ SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- // Get code for a method, returns null if it is not in the jit cache.
- const void* GetCodeFor(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ CodeCacheBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
- // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
- // entrypoint isn't within the cache.
- void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ // Perform a collection on the code cache.
+ void GarbageCollectCache(Thread* self)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Given 'pc', try to find the JIT compiled code associated with it.
+ // Return null if 'pc' is not in the code cache. 'method' is only used
+ // for a sanity check.
+ OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
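+ // Remove all compiled code whose ArtMethod is allocated in 'alloc'.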
+ void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
+ REQUIRES(!lock_)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- // Takes ownership of code_mem_map.
+ // Take ownership of code_map and data_map.
JitCodeCache(MemMap* code_map, MemMap* data_map);
- // Lock which guards.
+ // Internal version of 'CommitCode' that does not retry if the allocation
+ // fails. Return null on allocation failure.
+ uint8_t* CommitCodeInternal(Thread* self,
+ ArtMethod* method,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // If a collection is in progress, wait for it to finish. Return
+ // whether the thread actually waited.
+ bool WaitForPotentialCollectionToComplete(Thread* self)
+ REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
+
+ // Free the code and data mspace allocations backing 'code_ptr'.
+ void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
+
+ // Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
+ // Condition to wait on during collection.
+ ConditionVariable lock_cond_ GUARDED_BY(lock_);
+ // Whether there is a code cache collection in progress.
+ bool collection_in_progress_ GUARDED_BY(lock_);
// Mem map which holds code.
std::unique_ptr<MemMap> code_map_;
// Mem map which holds data (stack maps and profiling info).
std::unique_ptr<MemMap> data_map_;
// The opaque mspace for allocating code.
- void* code_mspace_;
+ void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
- void* data_mspace_;
- // Number of compiled methods.
- size_t num_methods_;
- // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
- // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
- SafeMap<ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
+ void* data_mspace_ GUARDED_BY(lock_);
+ // Live bitmap over the code map, used during collections to mark the compiled
+ // code still in use.
+ std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+ // Maps each compiled code pointer to the ArtMethod it was compiled for.
+ SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 9b9c5d2..666b8e7 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -76,8 +76,7 @@
ScopedObjectAccessUnchecked soa(self);
// Since we don't have on-stack replacement, some methods can remain in the interpreter longer
// than we want resulting in samples even after the method is compiled.
- if (method->IsClassInitializer() || method->IsNative() ||
- Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+ if (method->IsClassInitializer() || method->IsNative()) {
return;
}
if (thread_pool_.get() == nullptr) {
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 0c039f2..7c5f78e 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -28,15 +28,10 @@
ProfilingInfo* ProfilingInfo::Create(ArtMethod* method) {
// Walk over the dex instructions of the method and keep track of
// instructions we are interested in profiling.
- const uint16_t* code_ptr = nullptr;
- const uint16_t* code_end = nullptr;
- {
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(!method->IsNative());
- const DexFile::CodeItem& code_item = *method->GetCodeItem();
- code_ptr = code_item.insns_;
- code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
- }
+ DCHECK(!method->IsNative());
+ const DexFile::CodeItem& code_item = *method->GetCodeItem();
+ const uint16_t* code_ptr = code_item.insns_;
+ const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
uint32_t dex_pc = 0;
std::vector<uint32_t> entries;
@@ -91,7 +86,7 @@
ScopedObjectAccess soa(self);
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
+ mirror::Class* existing = cache->classes_[i].Read();
if (existing == cls) {
// Receiver type is already in the cache, nothing else to do.
return;
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 73ca41a..7a2d1a8 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -36,7 +36,7 @@
*/
class ProfilingInfo {
public:
- static ProfilingInfo* Create(ArtMethod* method);
+ static ProfilingInfo* Create(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
// Add information from an executed INVOKE instruction to the profile.
void AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls);
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
index 43e81d9..f91b0ed 100644
--- a/runtime/linear_alloc.cc
+++ b/runtime/linear_alloc.cc
@@ -48,4 +48,8 @@
return allocator_.Contains(ptr);
}
+bool LinearAlloc::ContainsUnsafe(void* ptr) const {
+ return allocator_.Contains(ptr);
+}
+
} // namespace art
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 1b21527..df7f17d 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -47,6 +47,10 @@
// Return true if the linear alloc contrains an address.
bool Contains(void* ptr) const REQUIRES(!lock_);
+ // Unsafe version of 'Contains' only to be used when the allocator is going
+ // to be deleted.
+ bool ContainsUnsafe(void* ptr) const NO_THREAD_SAFETY_ANALYSIS;
+
private:
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ArenaAllocator allocator_ GUARDED_BY(lock_);
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 6eadd87..03cad08 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -21,6 +21,7 @@
#include "base/macros.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"
+#include "utils.h"
namespace art {
@@ -39,6 +40,18 @@
~OatQuickMethodHeader();
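+ // The OatQuickMethodHeader is laid out directly before the compiled code it
+ // describes, so its address is the code pointer minus the offset of the
+ // trailing 'code_' field.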
+ static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
+ uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
+ uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
+ DCHECK(IsAlignedParam(code, GetInstructionSetAlignment(kRuntimeISA)) ||
+ IsAlignedParam(header, GetInstructionSetAlignment(kRuntimeISA)));
+ return reinterpret_cast<OatQuickMethodHeader*>(header);
+ }
+
+ static OatQuickMethodHeader* FromEntryPoint(const void* entry_point) {
+ return FromCodePointer(EntryPointToCodePointer(entry_point));
+ }
+
OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
@@ -74,6 +87,11 @@
bool Contains(uintptr_t pc) const {
uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
+ static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+ if (kRuntimeISA == kArm) {
+ // On Thumb-2, the pc is offset by one (its low bit marks the Thumb state).
+ code_start++;
+ }
return code_start <= pc && pc <= (code_start + code_size_);
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 9359d27..b0727da 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -856,13 +856,11 @@
// If we are the JIT then we may have just compiled the method after the
// IsQuickToInterpreterBridge check.
jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) {
return;
}
- uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
- EntryPointToCodePointer(code))[-1].code_size_;
+ uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->code_size_;
uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
CHECK(code_start <= pc && pc <= (code_start + code_size))
<< PrettyMethod(method)