-rw-r--r--  runtime/interpreter/interpreter.cc |   4
-rw-r--r--  runtime/jit/jit.cc                 |   4
-rw-r--r--  runtime/jit/jit.h                  |   4
-rw-r--r--  runtime/jit/jit_code_cache.cc      | 199
-rw-r--r--  runtime/jit/jit_code_cache.h       |  13
-rw-r--r--  runtime/jit/jit_instrumentation.cc |   9
-rw-r--r--  runtime/jit/profiling_info.h       |  15
7 files changed, 98 insertions(+), 150 deletions(-)
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 9808e22927..a595d33f04 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -295,7 +295,9 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
}
jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && jit->CanInvokeCompiledCode(method)) {
+ if (UNLIKELY(jit != nullptr &&
+ jit->JitAtFirstUse() &&
+ jit->GetCodeCache()->ContainsMethod(method))) {
JValue result;
// Pop the shadow frame before calling into compiled code.
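The guard above replaces the removed Jit::CanInvokeCompiledCode() helper with an inlined check: JIT-at-first-use must be enabled and the code cache must actually contain the method. The whole condition sits under UNLIKELY because JIT-at-first-use is a rare testing configuration, so the common interpreter path stays on the predicted fall-through. A minimal sketch of that branch hint, assuming the usual __builtin_expect shape (ART's actual macros live in runtime/base/macros.h):

    #include <cstdio>

    // Assumed definitions; shown for illustration, not copied from ART.
    #define LIKELY(x)   __builtin_expect(!!(x), true)
    #define UNLIKELY(x) __builtin_expect(!!(x), false)

    int main() {
      bool jit_at_first_use = false;  // rare testing configuration
      if (UNLIKELY(jit_at_first_use)) {
        std::puts("invoke compiled code");
      } else {
        std::puts("stay in the interpreter");  // predicted fall-through
      }
      return 0;
    }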
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 91b006a3ea..3e66ce20eb 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -213,10 +213,6 @@ bool Jit::JitAtFirstUse() {
return false;
}
-bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
- return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
-}
-
Jit::~Jit() {
DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
if (dump_info_on_shutdown_) {
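The deleted helper was a constant-time range test: ContainsPc() only checks whether the method's current entry point lies inside the executable mapping. Its replacement at the interpreter call site, ContainsMethod(), walks the method-to-code map under the cache lock. A standalone sketch of the two lookup shapes, with hypothetical names (CacheSketch is illustrative, not ART's class):

    #include <cstdint>
    #include <map>

    struct CacheSketch {
      const uint8_t* code_begin = nullptr;
      const uint8_t* code_end = nullptr;
      std::map<const void*, const char*> method_code_map;  // code ptr -> method

      // O(1): is this pc inside the executable mapping?
      bool ContainsPc(const uint8_t* pc) const {
        return code_begin <= pc && pc < code_end;
      }

      // O(n): does any cache entry belong to this method?
      bool ContainsMethod(const char* method) const {
        for (const auto& entry : method_code_map) {
          if (entry.second == method) return true;
        }
        return false;
      }
    };

    int main() {
      CacheSketch cache;
      return cache.ContainsMethod("foo") ? 1 : 0;
    }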
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 3f54192d9f..109ca3dbd1 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -84,12 +84,8 @@ class Jit {
// into the specified class linker to the jit debug interface,
void DumpTypeInfoForLoadedTypes(ClassLinker* linker);
- // Return whether we should try to JIT compiled code as soon as an ArtMethod is invoked.
bool JitAtFirstUse();
- // Return whether we can invoke JIT code for `method`.
- bool CanInvokeCompiledCode(ArtMethod* method);
-
// If an OSR compiled version is available for `method`,
// and `dex_pc + dex_pc_offset` is an entry point of that compiled
// version, this method will jump to the compiled code, let it run,
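The comment that survives here describes the on-stack-replacement hand-off: OSR-compiled code is entered only when the branch target, dex_pc + dex_pc_offset, is an entry point of that compiled version. An illustrative sketch of the lookup under assumed names (FindOsrEntryPoint and the map layout are hypothetical; ART keys its osr_code_map_ by ArtMethod* and finds entry points via stack maps):

    #include <cstdint>
    #include <unordered_map>

    // dex_pc entry points of a method's OSR-compiled version (illustrative).
    using EntryPoints = std::unordered_map<uint32_t, const void*>;

    const void* FindOsrEntryPoint(
        const std::unordered_map<int, EntryPoints>& osr_map,
        int method_id, uint32_t dex_pc, int32_t dex_pc_offset) {
      auto method_it = osr_map.find(method_id);
      if (method_it == osr_map.end()) {
        return nullptr;  // no OSR-compiled version available
      }
      auto entry_it = method_it->second.find(dex_pc + dex_pc_offset);
      return entry_it == method_it->second.end() ? nullptr : entry_it->second;
    }

    int main() {
      std::unordered_map<int, EntryPoints> osr_map;
      return FindOsrEntryPoint(osr_map, 1, 10, 2) == nullptr ? 0 : 1;
    }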
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4500dbdf67..8858b486f9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -123,7 +123,7 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
current_capacity_(initial_code_capacity + initial_data_capacity),
code_end_(initial_code_capacity),
data_end_(initial_data_capacity),
- last_collection_increased_code_cache_(false),
+ has_done_full_collection_(false),
last_update_time_ns_(0),
garbage_collect_code_(garbage_collect_code),
used_memory_for_data_(0),
@@ -546,20 +546,34 @@ void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
}
}
-bool JitCodeCache::ShouldDoFullCollection() {
- if (current_capacity_ == max_capacity_) {
- // Always do a full collection when the code cache is full.
- return true;
- } else if (current_capacity_ < kReservedCapacity) {
- // Always do partial collection when the code cache size is below the reserved
- // capacity.
- return false;
- } else if (last_collection_increased_code_cache_) {
- // This time do a full collection.
- return true;
- } else {
- // This time do a partial collection.
- return false;
+void JitCodeCache::RemoveUnusedCode(Thread* self) {
+ // Clear the osr map; chances are most of the code in it is now dead.
+ {
+ MutexLock mu(self, lock_);
+ osr_code_map_.clear();
+ }
+
+ // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+ MarkCompiledCodeOnThreadStacks(self);
+
+ // Iterate over all compiled code and remove entries that are not marked and not
+ // the entrypoint of their corresponding ArtMethod.
+ {
+ MutexLock mu(self, lock_);
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if ((method->GetEntryPointFromQuickCompiledCode() != method_header->GetEntryPoint()) &&
+ !GetLiveBitmap()->Test(allocation)) {
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
}
}
@@ -585,10 +599,21 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
}
}
- bool do_full_collection = false;
+ // Check if we want to do a full collection.
+ bool do_full_collection = true;
{
MutexLock mu(self, lock_);
- do_full_collection = ShouldDoFullCollection();
+ if (current_capacity_ == max_capacity_) {
+ // Always do a full collection when the code cache is full.
+ do_full_collection = true;
+ } else if (current_capacity_ < kReservedCapacity) {
+ // Do a partial collection until we hit the reserved capacity limit.
+ do_full_collection = false;
+ } else if (has_done_full_collection_) {
+ // Do a partial collection if we have done a full collection in the last
+ // collection round.
+ do_full_collection = false;
+ }
}
if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
@@ -599,94 +624,45 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
<< ", data=" << PrettySize(DataCacheSize());
}
- DoCollection(self, /* collect_profiling_info */ do_full_collection);
-
- if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
- LOG(INFO) << "After code cache collection, code="
- << PrettySize(CodeCacheSize())
- << ", data=" << PrettySize(DataCacheSize());
+ if (do_full_collection) {
+ DoFullCollection(self);
+ } else {
+ RemoveUnusedCode(self);
}
{
MutexLock mu(self, lock_);
-
- // Increase the code cache only when we do partial collections.
- // TODO: base this strategy on how full the code cache is?
- if (do_full_collection) {
- last_collection_increased_code_cache_ = false;
- } else {
- last_collection_increased_code_cache_ = true;
+ if (!do_full_collection) {
+ has_done_full_collection_ = false;
IncreaseCodeCacheCapacity();
- }
-
- bool next_collection_will_be_full = ShouldDoFullCollection();
-
- // Start polling the liveness of compiled code to prepare for the next full collection.
- if (next_collection_will_be_full) {
- // Save the entry point of methods we have compiled, and update the entry
- // point of those methods to the interpreter. If the method is invoked, the
- // interpreter will update its entry point to the compiled code and call it.
- for (ProfilingInfo* info : profiling_infos_) {
- const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (ContainsPc(entry_point)) {
- info->SetSavedEntryPoint(entry_point);
- info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
- }
- }
-
- if (kIsDebugBuild) {
- // Check that methods we have compiled do have a ProfilingInfo object. We would
- // have memory leaks of compiled code otherwise.
- for (const auto& it : method_code_map_) {
- DCHECK(it.second->GetProfilingInfo(sizeof(void*)) != nullptr);
- }
- }
+ } else {
+ has_done_full_collection_ = true;
}
live_bitmap_.reset(nullptr);
NotifyCollectionDone(self);
}
-}
-void JitCodeCache::RemoveUnusedAndUnmarkedCode(Thread* self) {
- MutexLock mu(self, lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
- // Iterate over all compiled code and remove entries that are not marked and not
- // the entrypoint of their corresponding ArtMethod.
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
- ArtMethod* method = it->second;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- const void* entrypoint = method->GetEntryPointFromQuickCompiledCode();
- if ((entrypoint == method_header->GetEntryPoint()) || GetLiveBitmap()->Test(allocation)) {
- ++it;
- } else {
- if (entrypoint == GetQuickToInterpreterBridge()) {
- method->ClearCounter();
- }
- FreeCode(code_ptr, method);
- it = method_code_map_.erase(it);
- }
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "After code cache collection, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
}
}
-void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
+void JitCodeCache::DoFullCollection(Thread* self) {
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
{
MutexLock mu(self, lock_);
- if (collect_profiling_info) {
- // Clear the profiling info of methods that do not have compiled code as entrypoint.
- // Also remove the saved entry point from the ProfilingInfo objects.
- for (ProfilingInfo* info : profiling_infos_) {
- const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
- info->GetMethod()->SetProfilingInfo(nullptr);
- }
- info->SetSavedEntryPoint(nullptr);
- }
- } else if (kIsDebugBuild) {
- // Sanity check that the profiling infos do not have a dangling entry point.
- for (ProfilingInfo* info : profiling_infos_) {
- DCHECK(info->GetSavedEntryPoint() == nullptr);
+ // Walk over all compiled methods and set the entry points of these
+ // methods to the interpreter.
+ for (auto& it : method_code_map_) {
+ instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
+ }
+
+ // Clear the profiling info of methods that are not being compiled.
+ for (ProfilingInfo* info : profiling_infos_) {
+ if (!info->IsMethodBeingCompiled()) {
+ info->GetMethod()->SetProfilingInfo(nullptr);
}
}
@@ -698,22 +674,32 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
// Run a checkpoint on all threads to mark the JIT compiled code they are running.
MarkCompiledCodeOnThreadStacks(self);
- // Remove compiled code that is not the entrypoint of their method and not in the call
- // stack.
- RemoveUnusedAndUnmarkedCode(self);
-
- if (collect_profiling_info) {
+ {
MutexLock mu(self, lock_);
- // Free all profiling infos of methods not compiled nor being compiled.
+ // Free unused compiled code, and restore the entry point of used compiled code.
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (GetLiveBitmap()->Test(allocation)) {
+ instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
+ ++it;
+ } else {
+ method->ClearCounter();
+ DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ }
+ }
+ }
+
+ // Free all profiling infos of methods that were not being compiled.
auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
[this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
- const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
- // Make sure compiled methods have a ProfilingInfo object. It is needed for
- // code cache collection.
- info->GetMethod()->SetProfilingInfo(info);
- } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
- // No need for this ProfilingInfo object anymore.
+ if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
FreeData(reinterpret_cast<uint8_t*>(info));
return true;
}
@@ -863,13 +849,6 @@ size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
- ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
- if ((profiling_info != nullptr) &&
- (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
- // Prevent future uses of the compiled code.
- profiling_info->SetSavedEntryPoint(nullptr);
- }
-
if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
// The entrypoint is the one to invalidate, so we just update
// it to the interpreter entry point and clear the counter to get the method
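Taken together, the policy that used to live in ShouldDoFullCollection() is now inlined into GarbageCollectCache(): collect fully when the cache is at max capacity, partially while below the reserved capacity, and otherwise alternate, since each partial round clears has_done_full_collection_ and grows the cache. A condensed restatement as a pure function (signature assumed for illustration):

    #include <cstddef>

    bool ShouldDoFullCollection(std::size_t current_capacity,
                                std::size_t max_capacity,
                                std::size_t reserved_capacity,
                                bool has_done_full) {
      if (current_capacity == max_capacity) return true;       // cache is full
      if (current_capacity < reserved_capacity) return false;  // still small
      return !has_done_full;  // alternate full and partial rounds
    }

    int main() {
      // At half of max, above the reserve, right after a full round: partial.
      return ShouldDoFullCollection(512, 1024, 64, /*has_done_full=*/true) ? 1 : 0;
    }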
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 2cf3c4d0f4..4574edfb46 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -124,11 +124,6 @@ class JitCodeCache {
return live_bitmap_.get();
}
- // Return whether we should do a full collection given the current state of the cache.
- bool ShouldDoFullCollection()
- REQUIRES(lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
// Perform a collection on the code cache.
void GarbageCollectCache(Thread* self)
REQUIRES(!lock_)
@@ -240,11 +235,11 @@ class JitCodeCache {
// Set the footprint limit of the code cache.
void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
- void DoCollection(Thread* self, bool collect_profiling_info)
+ void DoFullCollection(Thread* self)
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- void RemoveUnusedAndUnmarkedCode(Thread* self)
+ void RemoveUnusedCode(Thread* self)
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -287,8 +282,8 @@ class JitCodeCache {
// The current footprint in bytes of the data portion of the code cache.
size_t data_end_ GUARDED_BY(lock_);
- // Whether the last collection round increased the code cache.
- bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
+ // Whether a full collection has already been done on the current capacity.
+ bool has_done_full_collection_ GUARDED_BY(lock_);
// Last time the code_cache was updated.
// It is atomic to avoid locking when reading it.
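The REQUIRES, GUARDED_BY, and SHARED_REQUIRES macros in this header wrap Clang's -Wthread-safety attributes, which statically verify that lock_ is held (or not held) wherever the annotations demand. A minimal self-contained sketch with simplified wrappers (ART's real ones live in base/mutex.h; compile with clang++ -Wthread-safety):

    #include <mutex>

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)  __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)  __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Lock {
     public:
      void Acquire() ACQUIRE() { mu_.lock(); }
      void Release() RELEASE() { mu_.unlock(); }
     private:
      std::mutex mu_;
    };

    class CacheSketch {
     public:
      void Collect() REQUIRES(lock_) { ++full_collections_; }  // caller holds lock_
     private:
      Lock lock_;
      int full_collections_ GUARDED_BY(lock_) = 0;
    };

    int main() { return 0; }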
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 86761e402f..a4e40ad3fd 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -183,14 +183,7 @@ void JitInstrumentationListener::MethodEntered(Thread* thread,
return;
}
- ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
- if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
- // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
- // instead of interpreting the method.
- method->SetEntryPointFromQuickCompiledCode(profiling_info->GetSavedEntryPoint());
- } else {
- instrumentation_cache_->AddSamples(thread, method, 1);
- }
+ instrumentation_cache_->AddSamples(thread, method, 1);
}
void JitInstrumentationListener::Branch(Thread* thread,
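With the saved-entry-point shortcut gone, MethodEntered() always funnels through AddSamples(), so hot methods reach the compiler only via the normal threshold path. An illustrative sketch of that counting scheme (MethodSketch, the threshold, and the queue flag are assumptions, not ART's bookkeeping):

    #include <cstdint>

    struct MethodSketch {
      uint16_t hotness = 0;
      bool compile_requested = false;
    };

    void AddSamples(MethodSketch& method, uint16_t count, uint16_t hot_threshold) {
      method.hotness += count;
      if (!method.compile_requested && method.hotness >= hot_threshold) {
        method.compile_requested = true;  // the real cache enqueues a compile task
      }
    }

    int main() {
      MethodSketch m;
      for (int i = 0; i < 100; ++i) AddSamples(m, 1, /*hot_threshold=*/100);
      return m.compile_requested ? 0 : 1;
    }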
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index d54f3dfc11..ab7237376b 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -126,20 +126,11 @@ class ProfilingInfo {
is_method_being_compiled_ = value;
}
- void SetSavedEntryPoint(const void* entry_point) {
- saved_entry_point_ = entry_point;
- }
-
- const void* GetSavedEntryPoint() const {
- return saved_entry_point_;
- }
-
private:
ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
: number_of_inline_caches_(entries.size()),
method_(method),
- is_method_being_compiled_(false),
- saved_entry_point_(nullptr) {
+ is_method_being_compiled_(false) {
memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc_ = entries[i];
@@ -157,10 +148,6 @@ class ProfilingInfo {
// TODO: Make the JIT code cache lock global.
bool is_method_being_compiled_;
- // Entry point of the corresponding ArtMethod, while the JIT code cache
- // is poking for the liveness of compiled code.
- const void* saved_entry_point_;
-
// Dynamically allocated array of size `number_of_inline_caches_`.
InlineCache cache_[0];
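The trailing cache_[0] that remains is the zero-length-array idiom: each ProfilingInfo is over-allocated so its inline caches sit directly behind the object, which is why dropping saved_entry_point_ shrinks every instance. A standalone sketch of that allocation pattern (simplified; real ProfilingInfo memory comes from the JIT data cache, not malloc):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>
    #include <new>

    struct InlineCache {
      uint32_t dex_pc_;
    };

    struct InfoSketch {
      std::size_t number_of_inline_caches_;
      InlineCache cache_[0];  // GNU zero-length array; gcc/clang extension

      static InfoSketch* Create(std::size_t n) {
        void* raw = std::malloc(sizeof(InfoSketch) + n * sizeof(InlineCache));
        InfoSketch* info = new (raw) InfoSketch;
        info->number_of_inline_caches_ = n;
        std::memset(info->cache_, 0, n * sizeof(InlineCache));
        return info;
      }
    };

    int main() {
      InfoSketch* info = InfoSketch::Create(4);
      info->cache_[3].dex_pc_ = 42;
      std::free(info);
      return 0;
    }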