Revert "Revert "Use the interpreter as a heartbeat for the JIT.""
Bug: 27398183
Bug: 23128949
Bug: 26846185
This reverts commit a96917a6983a5abbe973255a3846fda549fb1657.
Change-Id: I5c4f0d87d3293a6a7ab56a33396670704b66a347
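
Once the code cache has grown past its reserved capacity, collection now
alternates between partial and full rounds. At the end of a round, if the
next round will be a full one, the cache saves each compiled method's entry
point in its ProfilingInfo and points the method back at the interpreter.
The interpreter then acts as a heartbeat: when such a method is invoked
again, the JIT instrumentation listener restores the saved entry point
instead of counting a sample, which proves the compiled code is still in
use. Code whose entry point is never restored and that is not on any
thread's stack is freed by the full collection.

A condensed sketch of the two halves of that handshake (illustrative only;
locking, the exit-stub check and the OSR map are omitted, and the real
logic is in JitCodeCache::GarbageCollectCache and the method-entry callback
of the JIT instrumentation listener in the hunks below):

    // Collection side: park compiled methods on the interpreter while
    // remembering where their compiled code lives.
    for (ProfilingInfo* info : profiling_infos_) {
      const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
      if (ContainsPc(entry_point)) {
        info->SetSavedEntryPoint(entry_point);
        info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
      }
    }

    // Interpreter side: an invocation is the heartbeat. Restoring the
    // saved entry point marks the code as live; otherwise the sample
    // counter advances as before.
    ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
    if (info != nullptr && info->GetSavedEntryPoint() != nullptr) {
      method->SetEntryPointFromQuickCompiledCode(info->GetSavedEntryPoint());
    } else {
      instrumentation_cache_->AddSamples(thread, method, 1);
    }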
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index bfb1f9d..b7e5b30 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -295,9 +295,7 @@
}
jit::Jit* jit = Runtime::Current()->GetJit();
- if (UNLIKELY(jit != nullptr &&
- jit->JitAtFirstUse() &&
- jit->GetCodeCache()->ContainsMethod(method))) {
+ if (jit != nullptr && jit->CanInvokeCompiledCode(method)) {
JValue result;
// Pop the shadow frame before calling into compiled code.
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 3e66ce2..91b006a 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -213,6 +213,10 @@
return false;
}
+bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
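+ // The compiled code can be invoked directly when the method's entry point already
+ // points into the JIT code cache.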
+ return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
+}
+
Jit::~Jit() {
DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
if (dump_info_on_shutdown_) {
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 109ca3d..3f54192 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -84,8 +84,12 @@
// into the specified class linker to the jit debug interface.
void DumpTypeInfoForLoadedTypes(ClassLinker* linker);
+ // Return whether we should try to JIT compile code as soon as an ArtMethod is invoked.
bool JitAtFirstUse();
+ // Return whether we can invoke JIT code for `method`.
+ bool CanInvokeCompiledCode(ArtMethod* method);
+
// If an OSR compiled version is available for `method`,
// and `dex_pc + dex_pc_offset` is an entry point of that compiled
// version, this method will jump to the compiled code, let it run,
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 8858b48..e8a7189 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -123,7 +123,7 @@
current_capacity_(initial_code_capacity + initial_data_capacity),
code_end_(initial_code_capacity),
data_end_(initial_data_capacity),
- has_done_full_collection_(false),
+ last_collection_increased_code_cache_(false),
last_update_time_ns_(0),
garbage_collect_code_(garbage_collect_code),
used_memory_for_data_(0),
@@ -546,34 +546,20 @@
}
}
-void JitCodeCache::RemoveUnusedCode(Thread* self) {
- // Clear the osr map, chances are most of the code in it is now dead.
- {
- MutexLock mu(self, lock_);
- osr_code_map_.clear();
- }
-
- // Run a checkpoint on all threads to mark the JIT compiled code they are running.
- MarkCompiledCodeOnThreadStacks(self);
-
- // Iterate over all compiled code and remove entries that are not marked and not
- // the entrypoint of their corresponding ArtMethod.
- {
- MutexLock mu(self, lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
- ArtMethod* method = it->second;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- if ((method->GetEntryPointFromQuickCompiledCode() != method_header->GetEntryPoint()) &&
- !GetLiveBitmap()->Test(allocation)) {
- FreeCode(code_ptr, method);
- it = method_code_map_.erase(it);
- } else {
- ++it;
- }
- }
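+// Once the code cache has grown past its reserved capacity, alternate between partial
+// collections (which grow the cache) and full collections; always collect fully when
+// the maximum capacity has been reached.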
+bool JitCodeCache::ShouldDoFullCollection() {
+ if (current_capacity_ == max_capacity_) {
+ // Always do a full collection when the code cache is full.
+ return true;
+ } else if (current_capacity_ < kReservedCapacity) {
+ // Always do a partial collection when the code cache size is below the reserved
+ // capacity.
+ return false;
+ } else if (last_collection_increased_code_cache_) {
+ // This time do a full collection.
+ return true;
+ } else {
+ // This time do a partial collection.
+ return false;
}
}
@@ -599,21 +585,10 @@
}
}
- // Check if we want to do a full collection.
- bool do_full_collection = true;
+ bool do_full_collection = false;
{
MutexLock mu(self, lock_);
- if (current_capacity_ == max_capacity_) {
- // Always do a full collection when the code cache is full.
- do_full_collection = true;
- } else if (current_capacity_ < kReservedCapacity) {
- // Do a partial collection until we hit the reserved capacity limit.
- do_full_collection = false;
- } else if (has_done_full_collection_) {
- // Do a partial collection if we have done a full collection in the last
- // collection round.
- do_full_collection = false;
- }
+ do_full_collection = ShouldDoFullCollection();
}
if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
@@ -624,45 +599,91 @@
<< ", data=" << PrettySize(DataCacheSize());
}
- if (do_full_collection) {
- DoFullCollection(self);
- } else {
- RemoveUnusedCode(self);
- }
-
- {
- MutexLock mu(self, lock_);
- if (!do_full_collection) {
- has_done_full_collection_ = false;
- IncreaseCodeCacheCapacity();
- } else {
- has_done_full_collection_ = true;
- }
- live_bitmap_.reset(nullptr);
- NotifyCollectionDone(self);
- }
+ DoCollection(self, /* collect_profiling_info */ do_full_collection);
if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
LOG(INFO) << "After code cache collection, code="
<< PrettySize(CodeCacheSize())
<< ", data=" << PrettySize(DataCacheSize());
}
-}
-void JitCodeCache::DoFullCollection(Thread* self) {
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
{
MutexLock mu(self, lock_);
- // Walk over all compiled methods and set the entry points of these
- // methods to interpreter.
- for (auto& it : method_code_map_) {
- instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
+
+ // Increase the code cache only when we do partial collections.
+ // TODO: base this strategy on how full the code cache is?
+ if (do_full_collection) {
+ last_collection_increased_code_cache_ = false;
+ } else {
+ last_collection_increased_code_cache_ = true;
+ IncreaseCodeCacheCapacity();
}
- // Clear the profiling info of methods that are not being compiled.
- for (ProfilingInfo* info : profiling_infos_) {
- if (!info->IsMethodBeingCompiled()) {
- info->GetMethod()->SetProfilingInfo(nullptr);
+ bool next_collection_will_be_full = ShouldDoFullCollection();
+
+ // Start polling the liveness of compiled code to prepare for the next full collection.
+ // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+ // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+ if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
+ next_collection_will_be_full) {
+ // Save the entry point of methods we have compiled, and update the entry
+ // point of those methods to the interpreter. If the method is invoked, the
+ // interpreter will update its entry point to the compiled code and call it.
+ for (ProfilingInfo* info : profiling_infos_) {
+ const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (ContainsPc(entry_point)) {
+ info->SetSavedEntryPoint(entry_point);
+ info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
+ }
+
+ DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+ }
+ live_bitmap_.reset(nullptr);
+ NotifyCollectionDone(self);
+ }
+}
+
+void JitCodeCache::RemoveUnusedAndUnmarkedCode(Thread* self) {
+ MutexLock mu(self, lock_);
+ ScopedCodeCacheWrite scc(code_map_.get());
+ // Iterate over all compiled code and remove entries that are not marked and not
+ // the entrypoint of their corresponding ArtMethod.
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ const void* entrypoint = method->GetEntryPointFromQuickCompiledCode();
+ if ((entrypoint == method_header->GetEntryPoint()) || GetLiveBitmap()->Test(allocation)) {
+ ++it;
+ } else {
+ if (entrypoint == GetQuickToInterpreterBridge()) {
+ method->ClearCounter();
+ }
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ }
+ }
+}
+
+void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
+ {
+ MutexLock mu(self, lock_);
+ if (collect_profiling_info) {
+ // Clear the profiling info of methods that do not have compiled code as entrypoint.
+ // Also remove the saved entry point from the ProfilingInfo objects.
+ for (ProfilingInfo* info : profiling_infos_) {
+ const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+ info->GetMethod()->SetProfilingInfo(nullptr);
+ }
+ info->SetSavedEntryPoint(nullptr);
+ }
+ } else if (kIsDebugBuild) {
+ // Sanity check that the profiling infos do not have a dangling entry point.
+ for (ProfilingInfo* info : profiling_infos_) {
+ DCHECK(info->GetSavedEntryPoint() == nullptr);
}
}
@@ -674,41 +695,50 @@
// Run a checkpoint on all threads to mark the JIT compiled code they are running.
MarkCompiledCodeOnThreadStacks(self);
- {
- MutexLock mu(self, lock_);
- // Free unused compiled code, and restore the entry point of used compiled code.
- {
- ScopedCodeCacheWrite scc(code_map_.get());
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
- ArtMethod* method = it->second;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- if (GetLiveBitmap()->Test(allocation)) {
- instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
- ++it;
- } else {
- method->ClearCounter();
- DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
- FreeCode(code_ptr, method);
- it = method_code_map_.erase(it);
- }
- }
- }
+ // Remove compiled code that is neither the entrypoint of its method nor on any
+ // thread's call stack.
+ RemoveUnusedAndUnmarkedCode(self);
- // Free all profiling infos of methods that were not being compiled.
+ if (collect_profiling_info) {
+ MutexLock mu(self, lock_);
+ // Free all profiling infos of methods that are neither compiled nor being compiled.
auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
[this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
- if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ // Make sure compiled methods have a ProfilingInfo object. It is needed for
+ // code cache collection.
+ info->GetMethod()->SetProfilingInfo(info);
+ } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
+ // No need for this ProfilingInfo object anymore.
FreeData(reinterpret_cast<uint8_t*>(info));
return true;
}
return false;
});
profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
+ DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
}
}
+bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
+ // Check that methods we have compiled do have a ProfilingInfo object. We would
+ // have memory leaks of compiled code otherwise.
+ for (const auto& it : method_code_map_) {
+ ArtMethod* method = it.second;
+ if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ const void* code_ptr = it.first;
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+ // If the code is not dead, then we have a problem. Note that this can even
+ // happen just after a collection, as mutator threads are running in parallel
+ // and could deoptimize existing compiled code.
+ return false;
+ }
+ }
+ }
+ return true;
+}
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
@@ -849,6 +879,13 @@
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ if ((profiling_info != nullptr) &&
+ (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
+ // Prevent future uses of the compiled code.
+ profiling_info->SetSavedEntryPoint(nullptr);
+ }
+
if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
// The entrypoint is the one to invalidate, so we just update
// it to the interpreter entry point and clear the counter to get the method
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 4574edf..7b33b92 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -124,6 +124,11 @@
return live_bitmap_.get();
}
+ // Return whether we should do a full collection given the current state of the cache.
+ bool ShouldDoFullCollection()
+ REQUIRES(lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Perform a collection on the code cache.
void GarbageCollectCache(Thread* self)
REQUIRES(!lock_)
@@ -235,11 +240,11 @@
// Set the footprint limit of the code cache.
void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
- void DoFullCollection(Thread* self)
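+ // Free unused compiled code. When `collect_profiling_info` is true, also reclaim
+ // the ProfilingInfo objects of methods that are neither compiled nor being compiled.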
+ void DoCollection(Thread* self, bool collect_profiling_info)
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- void RemoveUnusedCode(Thread* self)
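+ // Free compiled code that is neither marked as live on a thread's stack nor the
+ // current entry point of its method.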
+ void RemoveUnusedAndUnmarkedCode(Thread* self)
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -247,6 +252,10 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
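+ // Check that every method whose compiled code is still its entry point also has a
+ // ProfilingInfo object; used in debug builds to catch leaks of compiled code.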
+ bool CheckLiveCompiledCodeHasProfilingInfo()
+ REQUIRES(lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
@@ -282,8 +291,8 @@
// The current footprint in bytes of the data portion of the code cache.
size_t data_end_ GUARDED_BY(lock_);
- // Whether a full collection has already been done on the current capacity.
- bool has_done_full_collection_ GUARDED_BY(lock_);
+ // Whether the last collection round increased the code cache.
+ bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
// Last time the code_cache was updated.
// It is atomic to avoid locking when reading it.
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 46c362a..d751e5a 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -187,7 +187,18 @@
return;
}
- instrumentation_cache_->AddSamples(thread, method, 1);
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ // Update the entrypoint if the ProfilingInfo has a saved one. The interpreter will call it
+ // instead of interpreting the method.
+ // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+ // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+ if ((profiling_info != nullptr) &&
+ (profiling_info->GetSavedEntryPoint() != nullptr) &&
+ !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
+ method->SetEntryPointFromQuickCompiledCode(profiling_info->GetSavedEntryPoint());
+ } else {
+ instrumentation_cache_->AddSamples(thread, method, 1);
+ }
}
void JitInstrumentationListener::Branch(Thread* thread,
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index ab72373..d54f3df 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -126,11 +126,20 @@
is_method_being_compiled_ = value;
}
+ void SetSavedEntryPoint(const void* entry_point) {
+ saved_entry_point_ = entry_point;
+ }
+
+ const void* GetSavedEntryPoint() const {
+ return saved_entry_point_;
+ }
+
private:
ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
: number_of_inline_caches_(entries.size()),
method_(method),
- is_method_being_compiled_(false) {
+ is_method_being_compiled_(false),
+ saved_entry_point_(nullptr) {
memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc_ = entries[i];
@@ -148,6 +157,10 @@
// TODO: Make the JIT code cache lock global.
bool is_method_being_compiled_;
+ // Entry point of the corresponding ArtMethod, saved while the JIT code cache
+ // is probing for the liveness of compiled code.
+ const void* saved_entry_point_;
+
// Dynamically allocated array of size `number_of_inline_caches_`.
InlineCache cache_[0];