Diffstat (limited to 'runtime/jit/jit_code_cache.cc')
-rw-r--r--  runtime/jit/jit_code_cache.cc  58
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index cf28fe12df..4b9538d093 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -286,6 +286,15 @@ bool JitCodeCache::ContainsPc(const void* ptr) const {
   return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
 }
 
+bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
+  ScopedObjectAccess soa(art::Thread::Current());
+  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
+  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+    return true;
+  }
+  return false;
+}
+
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
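Note: the new WillExecuteJitCode() helper above reduces to a pointer range check: a method will run JIT code iff its current quick entry point lies inside the code cache's executable mappings (ContainsPc()). Below is a minimal standalone sketch of that idea; the names ExecRegion and WillExecuteJitCodeSketch are hypothetical and not taken from ART.

// Standalone sketch (not ART code): "contains PC" is a range check of an
// entry-point pointer against a set of executable [begin, end) regions.
#include <cstdint>
#include <vector>

// Hypothetical stand-in for one JIT executable mapping.
struct ExecRegion {
  const uint8_t* begin;
  const uint8_t* end;
  bool Contains(const void* ptr) const {
    const uint8_t* p = reinterpret_cast<const uint8_t*>(ptr);
    return begin <= p && p < end;
  }
};

// A method "will execute JIT code" if its entry point falls in any JIT region.
bool WillExecuteJitCodeSketch(const std::vector<ExecRegion>& regions,
                              const void* entry_point) {
  for (const ExecRegion& region : regions) {
    if (region.Contains(entry_point)) {
      return true;
    }
  }
  return false;
}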
@@ -1460,18 +1469,30 @@ OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
 
 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               ArtMethod* method,
-                                              const std::vector<uint32_t>& entries) {
+                                              const std::vector<uint32_t>& entries,
+                                              bool retry_allocation)
+    // No thread safety analysis as we are using TryLock/Unlock explicitly.
+    NO_THREAD_SAFETY_ANALYSIS {
   DCHECK(CanAllocateProfilingInfo());
   ProfilingInfo* info = nullptr;
-  {
-    MutexLock mu(self, *Locks::jit_lock_);
-    info = AddProfilingInfoInternal(self, method, entries);
-  }
+  if (!retry_allocation) {
+    // If we are allocating for the interpreter, just try to lock, to avoid
+    // lock contention with the JIT.
+    if (Locks::jit_lock_->ExclusiveTryLock(self)) {
+      info = AddProfilingInfoInternal(self, method, entries);
+      Locks::jit_lock_->ExclusiveUnlock(self);
+    }
+  } else {
+    {
+      MutexLock mu(self, *Locks::jit_lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
 
-  if (info == nullptr) {
-    GarbageCollectCache(self);
-    MutexLock mu(self, *Locks::jit_lock_);
-    info = AddProfilingInfoInternal(self, method, entries);
+    if (info == nullptr) {
+      GarbageCollectCache(self);
+      MutexLock mu(self, *Locks::jit_lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
   }
   return info;
 }
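Note: the retry_allocation split above follows a common locking pattern: the interpreter path only ExclusiveTryLock()s the jit lock, so it never stalls behind the JIT thread, while the JIT path may block, garbage-collect the cache, and retry the allocation once. Below is a minimal standalone sketch of the same pattern using std::mutex; the identifiers (Profile, g_cache_lock, AllocateLocked, CollectGarbage) are hypothetical, not ART APIs.

// Standalone sketch (not ART code) of "try-lock from latency-sensitive callers,
// block-and-retry from the owner thread".
#include <mutex>

struct Profile {};                         // hypothetical payload

std::mutex g_cache_lock;                   // plays the role of Locks::jit_lock_
Profile* AllocateLocked() { return nullptr; }  // stub: returns nullptr to model a full cache
void CollectGarbage() {}                   // stub: stands in for GarbageCollectCache()

Profile* AddProfileSketch(bool retry_allocation) {
  Profile* info = nullptr;
  if (!retry_allocation) {
    // Opportunistic path: give up immediately on contention instead of waiting.
    if (g_cache_lock.try_lock()) {
      info = AllocateLocked();
      g_cache_lock.unlock();
    }
  } else {
    {
      std::lock_guard<std::mutex> lock(g_cache_lock);
      info = AllocateLocked();
    }
    if (info == nullptr) {
      // Out of space: reclaim, then retry once under the lock.
      CollectGarbage();
      std::lock_guard<std::mutex> lock(g_cache_lock);
      info = AllocateLocked();
    }
  }
  return info;
}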
@@ -1604,7 +1625,8 @@ bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
                                        Thread* self,
                                        CompilationKind compilation_kind,
-                                       bool prejit) {
+                                       bool prejit,
+                                       JitMemoryRegion* region) {
   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
   if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
     OatQuickMethodHeader* method_header =
@@ -1678,11 +1700,21 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
     }
     return new_compilation;
   } else {
+    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (CanAllocateProfilingInfo() &&
         (compilation_kind == CompilationKind::kBaseline) &&
-        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
-      if (ProfilingInfo::Create(self, method) == nullptr) {
-        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled baseline";
+        (info == nullptr)) {
+      // We can retry allocation here as we're the JIT thread.
+      if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) {
+        info = method->GetProfilingInfo(kRuntimePointerSize);
+      }
+    }
+    if (info == nullptr) {
+      // When prejitting, we don't allocate a profiling info.
+      if (!prejit && !IsSharedRegion(*region)) {
+        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
+        // Because the counter is not atomic, there are some rare cases where we may not hit the
+        // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
         ClearMethodCounter(method, /*was_warm=*/ false);
         return false;
       }
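Note: the last hunk makes baseline compilation depend on a ProfilingInfo: the JIT thread may retry the allocation, and only when the info is still missing, the method is not being prejitted, and the target region is not the shared (zygote) region does it reset the hotness counter and defer compilation. Below is a standalone sketch of that decision flow; CompileRequest and ShouldCompileBaselineSketch are hypothetical names, not ART APIs.

// Standalone sketch (not ART code) of the bail-out decision: proceed without a
// ProfilingInfo only when prejitting or when targeting the shared region,
// otherwise defer and ask the caller to reset the method's hotness counter.
struct CompileRequest {
  bool has_profiling_info;  // ProfilingInfo present (possibly after a retried allocation)
  bool prejit;              // compiling ahead of first use
  bool shared_region;       // corresponds to IsSharedRegion(*region) in the diff
};

// Returns true if baseline compilation should go ahead now.
bool ShouldCompileBaselineSketch(const CompileRequest& req, bool* reset_counter) {
  *reset_counter = false;
  if (req.has_profiling_info) {
    return true;
  }
  if (req.prejit || req.shared_region) {
    // No ProfilingInfo is allocated in these modes.
    return true;
  }
  // Defer: the caller resets the hotness counter, as the diff does with
  // ClearMethodCounter(method, /*was_warm=*/ false).
  *reset_counter = true;
  return false;
}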