Fix a braino when marking a method as being compiled.
We now compile without requiring ProfilingInfos. Move the "is being
compiled" flag from ProfilingInfo into per-mode sets in the JitCodeCache.
Test: test.py jit-at-first-use
Bug: 147207937
Change-Id: I1a372bb5534764278f5e9df674783cf918c690b3
diff --git a/libartbase/base/stl_util.h b/libartbase/base/stl_util.h
index fbafd53..cd7b812 100644
--- a/libartbase/base/stl_util.h
+++ b/libartbase/base/stl_util.h
@@ -19,6 +19,7 @@
#include <algorithm>
#include <iterator>
+#include <set>
#include <sstream>
#include <android-base/logging.h>
@@ -124,6 +125,11 @@
return it != container.end();
}
+template <typename T>
+bool ContainsElement(const std::set<T>& container, const T& value) {
+ return container.count(value) != 0u;
+}
+
// 32-bit FNV-1a hash function suitable for std::unordered_map.
// It can be used with any container which works with range-based for loop.
// See http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 2ef1cb4..c7db749 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -338,7 +338,7 @@
<< " osr=" << std::boolalpha << osr
<< " baseline=" << std::boolalpha << baseline;
bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, baseline, osr);
- code_cache_->DoneCompiling(method_to_compile, self, osr);
+ code_cache_->DoneCompiling(method_to_compile, self, osr, baseline);
if (!success) {
VLOG(jit) << "Failed to compile method "
<< ArtMethod::PrettyMethod(method_to_compile)
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index cf07fe5..166beef 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1287,6 +1287,40 @@
}
}
+void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) {
+ DCHECK(IsMethodBeingCompiled(method, osr, baseline));
+ if (osr) {
+ current_osr_compilations_.erase(method);
+ } else if (baseline) {
+ current_baseline_compilations_.erase(method);
+ } else {
+ current_optimized_compilations_.erase(method);
+ }
+}
+
+void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) {
+ DCHECK(!IsMethodBeingCompiled(method, osr, baseline));
+ if (osr) {
+ current_osr_compilations_.insert(method);
+ } else if (baseline) {
+ current_baseline_compilations_.insert(method);
+ } else {
+ current_optimized_compilations_.insert(method);
+ }
+}
+
+bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) {
+ return osr ? ContainsElement(current_osr_compilations_, method)
+ : baseline ? ContainsElement(current_baseline_compilations_, method)
+ : ContainsElement(current_optimized_compilations_, method);
+}
+
+bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method) {
+ return ContainsElement(current_optimized_compilations_, method) ||
+ ContainsElement(current_osr_compilations_, method) ||
+ ContainsElement(current_baseline_compilations_, method);
+}
+
void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
ScopedTrace trace(__FUNCTION__);
{
@@ -1317,7 +1351,10 @@
// Also remove the saved entry point from the ProfilingInfo objects.
for (ProfilingInfo* info : profiling_infos_) {
const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
+ if (!ContainsPc(ptr) &&
+ !IsMethodBeingCompiled(info->GetMethod()) &&
+ !info->IsInUseByCompiler() &&
+ !IsInZygoteDataSpace(info)) {
info->GetMethod()->SetProfilingInfo(nullptr);
}
@@ -1734,13 +1771,12 @@
ClearMethodCounter(method, /*was_warm=*/ false);
return false;
}
- } else {
- MutexLock mu(self, *Locks::jit_lock_);
- if (info->IsMethodBeingCompiled(osr)) {
- return false;
- }
- info->SetIsMethodBeingCompiled(true, osr);
}
+ MutexLock mu(self, *Locks::jit_lock_);
+ if (IsMethodBeingCompiled(method, osr, baseline)) {
+ return false;
+ }
+ AddMethodBeingCompiled(method, osr, baseline);
return true;
}
}
@@ -1764,7 +1800,7 @@
info->DecrementInlineUse();
}
-void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr, bool baseline) {
DCHECK_EQ(Thread::Current(), self);
MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
@@ -1777,11 +1813,7 @@
jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
} // else Commit() updated entrypoints of all methods in the JniStubData.
} else {
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- if (info != nullptr) {
- DCHECK(info->IsMethodBeingCompiled(osr));
- info->SetIsMethodBeingCompiled(false, osr);
- }
+ RemoveMethodBeingCompiled(method, osr, baseline);
}
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index cefbf25..7e00bcb 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -214,7 +214,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
- void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
+ void DoneCompiling(ArtMethod* method, Thread* self, bool osr, bool baseline)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -499,6 +499,22 @@
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Record that `method` is being compiled with the given mode.
+ // TODO: introduce an enum for the mode.
+ void AddMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline)
+ REQUIRES(Locks::jit_lock_);
+
+ // Remove `method` from the set of methods being compiled with the given mode.
+ void RemoveMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline)
+ REQUIRES(Locks::jit_lock_);
+
+ // Return whether `method` is being compiled with the given mode.
+ bool IsMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline)
+ REQUIRES(Locks::jit_lock_);
+
+ // Return whether `method` is being compiled in any mode.
+ bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);
+
class JniStubKey;
class JniStubData;
@@ -536,6 +552,11 @@
// ProfilingInfo objects we have allocated.
std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
+ // Methods we are currently compiling, one set for each kind of compilation.
+ std::set<ArtMethod*> current_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);
+ std::set<ArtMethod*> current_osr_compilations_ GUARDED_BY(Locks::jit_lock_);
+ std::set<ArtMethod*> current_baseline_compilations_ GUARDED_BY(Locks::jit_lock_);
+
// Methods that the zygote has compiled and can be shared across processes
// forked from the zygote.
ZygoteMap zygote_map_;
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 8c88760..d039bb3 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -30,9 +30,7 @@
method_(method),
saved_entry_point_(nullptr),
number_of_inline_caches_(entries.size()),
- current_inline_uses_(0),
- is_method_being_compiled_(false),
- is_osr_method_being_compiled_(false) {
+ current_inline_uses_(0) {
memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc_ = entries[i];
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 14d76d2..cb9e423 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -83,20 +83,6 @@
InlineCache* GetInlineCache(uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsMethodBeingCompiled(bool osr) const {
- return osr
- ? is_osr_method_being_compiled_
- : is_method_being_compiled_;
- }
-
- void SetIsMethodBeingCompiled(bool value, bool osr) {
- if (osr) {
- is_osr_method_being_compiled_ = value;
- } else {
- is_method_being_compiled_ = value;
- }
- }
-
void SetSavedEntryPoint(const void* entry_point) {
saved_entry_point_ = entry_point;
}
@@ -122,8 +108,7 @@
}
bool IsInUseByCompiler() const {
- return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) ||
- (current_inline_uses_ > 0);
+ return current_inline_uses_ > 0;
}
static constexpr MemberOffset BaselineHotnessCountOffset() {
@@ -162,12 +147,6 @@
// it updates this counter so that the GC does not try to clear the inline caches.
uint16_t current_inline_uses_;
- // Whether the ArtMethod is currently being compiled. This flag
- // is implicitly guarded by the JIT code cache lock.
- // TODO: Make the JIT code cache lock global.
- bool is_method_being_compiled_;
- bool is_osr_method_being_compiled_;
-
// Dynamically allocated array of size `number_of_inline_caches_`.
InlineCache cache_[0];