Move the profiling info out of ArtMethod.

Instead, keep a map in JitCodeCache.
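
ArtMethod no longer stores a ProfilingInfo pointer in its data field.
The JitCodeCache now owns the infos in a
SafeMap<ArtMethod*, ProfilingInfo*> guarded by jit_lock_, and compiler
code reaches them through ScopedProfilingInfoUse, a generalization of
the inliner's ScopedProfilingInfoInlineUse that pairs
JitCodeCache::NotifyCompilerUse with DoneCompilerUse:

  ScopedProfilingInfoUse spiu(
      Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
  ProfilingInfo* info = spiu.GetProfilingInfo();
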
Bug: 112676029
Test: test.py
Change-Id: I5ab769a9b7b3214af7832478d1b06c9e9adbf8b8
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index edc8321..b8f4011 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1141,8 +1141,9 @@
}
if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
uint64_t address = reinterpret_cast64<uint64_t>(info);
vixl::aarch64::Label done;
@@ -4289,8 +4290,9 @@
GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
uint64_t address = reinterpret_cast64<uint64_t>(cache);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index ccb0609..5c6f835 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2104,8 +2104,9 @@
}
if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
uint32_t address = reinterpret_cast32<uint32_t>(info);
vixl::aarch32::Label done;
@@ -3434,8 +3435,9 @@
GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
uint32_t address = reinterpret_cast32<uint32_t>(cache);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 370b839..3adf440 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1096,8 +1096,9 @@
}
if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
uint32_t address = reinterpret_cast32<uint32_t>(info);
NearLabel done;
@@ -2468,8 +2469,9 @@
GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
uint32_t address = reinterpret_cast32<uint32_t>(cache);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b421079..37265ec 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1416,8 +1416,9 @@
}
if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
uint64_t address = reinterpret_cast64<uint64_t>(info);
NearLabel done;
@@ -2689,8 +2690,9 @@
if (!instruction->GetLocations()->Intrinsified() &&
GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+ ScopedProfilingInfoUse spiu(
+ Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current());
+ ProfilingInfo* info = spiu.GetProfilingInfo();
if (info != nullptr) {
InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
uint64_t address = reinterpret_cast64<uint64_t>(cache);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d3a4407..4530f1d 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -319,33 +319,6 @@
return index;
}
-class ScopedProfilingInfoInlineUse {
- public:
- explicit ScopedProfilingInfoInlineUse(ArtMethod* method, Thread* self)
- : method_(method),
- self_(self),
- // Fetch the profiling info ahead of using it. If it's null when fetching,
- // we should not call JitCodeCache::DoneInlining.
- profiling_info_(
- Runtime::Current()->GetJit()->GetCodeCache()->NotifyCompilerUse(method, self)) {
- }
-
- ~ScopedProfilingInfoInlineUse() {
- if (profiling_info_ != nullptr) {
- PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- DCHECK_EQ(profiling_info_, method_->GetProfilingInfo(pointer_size));
- Runtime::Current()->GetJit()->GetCodeCache()->DoneCompilerUse(method_, self_);
- }
- }
-
- ProfilingInfo* GetProfilingInfo() const { return profiling_info_; }
-
- private:
- ArtMethod* const method_;
- Thread* const self_;
- ProfilingInfo* const profiling_info_;
-};
-
HInliner::InlineCacheType HInliner::GetInlineCacheType(
const Handle<mirror::ObjectArray<mirror::Class>>& classes)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -678,8 +651,8 @@
ArtMethod* caller = graph_->GetArtMethod();
// Under JIT, we should always know the caller.
DCHECK(caller != nullptr);
- ScopedProfilingInfoInlineUse spiis(caller, Thread::Current());
- ProfilingInfo* profiling_info = spiis.GetProfilingInfo();
+ ScopedProfilingInfoUse spiu(Runtime::Current()->GetJit(), caller, Thread::Current());
+ ProfilingInfo* profiling_info = spiu.GetProfilingInfo();
if (profiling_info == nullptr) {
return kInlineCacheNoData;
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 3d6b66a..b3101d8 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -515,30 +515,6 @@
SetDataPtrSize(table, pointer_size);
}
- ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (UNLIKELY(IsNative() || IsProxyMethod() || !IsInvokable())) {
- return nullptr;
- }
- return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
- }
-
- ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetProfilingInfoPtrSize(info, kRuntimePointerSize);
- }
-
- ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, PointerSize pointer_size)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(!IsProxyMethod());
- DCHECK(!IsNative());
- DCHECK(IsInvokable());
- SetDataPtrSize(info, pointer_size);
- }
-
- static MemberOffset ProfilingInfoOffset() {
- DCHECK(IsImagePointerSize(kRuntimePointerSize));
- return DataOffset(kRuntimePointerSize);
- }
-
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE bool HasSingleImplementation() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fbc843e..6a8cf69 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -432,7 +432,8 @@
}
}
// Walk over inline caches to clear entries containing unloaded classes.
- for (ProfilingInfo* info : profiling_infos_) {
+ for (const auto& entry : profiling_infos_) {
+ ProfilingInfo* info = entry.second;
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
InlineCache* cache = &info->cache_[i];
for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
@@ -540,9 +541,8 @@
}
}
for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
- ProfilingInfo* info = *it;
+ ProfilingInfo* info = it->second;
if (alloc.ContainsUnsafe(info->GetMethod())) {
- info->GetMethod()->SetProfilingInfo(nullptr);
private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
it = profiling_infos_.erase(it);
} else {
@@ -799,11 +799,10 @@
bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
if (LIKELY(!method->IsNative())) {
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- if (info != nullptr) {
- RemoveElement(profiling_infos_, info);
+ auto it = profiling_infos_.find(method);
+ if (it != profiling_infos_.end()) {
+ profiling_infos_.erase(it);
}
- method->SetProfilingInfo(nullptr);
}
bool in_cache = false;
@@ -868,17 +867,6 @@
}
return;
}
- // Update ProfilingInfo to the new one and remove it from the old_method.
- if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
- DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
- ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
- old_method->SetProfilingInfo(nullptr);
- // Since the JIT should be paused and all threads suspended by the time this is called these
- // checks should always pass.
- DCHECK(!info->IsInUseByCompiler());
- new_method->SetProfilingInfo(info);
- info->method_ = new_method;
- }
// Update method_code_map_ to point to the new method.
for (auto& it : method_code_map_) {
if (it.second == old_method) {
@@ -1162,8 +1150,8 @@
// Start polling the liveness of compiled code to prepare for the next full collection.
if (next_collection_will_be_full) {
- for (ProfilingInfo* info : profiling_infos_) {
- info->SetBaselineHotnessCount(0);
+ for (const auto& entry : profiling_infos_) {
+ entry.second->SetBaselineHotnessCount(0);
}
// Change entry points of native methods back to the GenericJNI entrypoint.
@@ -1296,7 +1284,8 @@
// hotness count is zero.
// Note that these methods may be in thread stack or concurrently revived
// between. That's OK, as the thread executing it will mark it.
- for (ProfilingInfo* info : profiling_infos_) {
+ for (const auto& entry : profiling_infos_) {
+ ProfilingInfo* info = entry.second;
if (info->GetBaselineHotnessCount() == 0) {
const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
if (ContainsPc(entry_point)) {
@@ -1355,28 +1344,7 @@
RemoveUnmarkedCode(self);
if (collect_profiling_info) {
- MutexLock mu(self, *Locks::jit_lock_);
- // Free all profiling infos of methods not compiled nor being compiled.
- auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
- [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
- const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
- // that the compiled code would not get revived. As mutator threads run concurrently,
- // they may have revived the compiled code, and now we are in the situation where
- // a method has compiled code but no ProfilingInfo.
- // We make sure compiled methods have a ProfilingInfo object. It is needed for
- // code cache collection.
- if (ContainsPc(ptr) &&
- info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
- info->GetMethod()->SetProfilingInfo(info);
- } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
- // No need for this ProfilingInfo object anymore.
- private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
- return true;
- }
- return false;
- });
- profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
+ // TODO: Collect unused profiling infos.
}
}
@@ -1479,29 +1447,24 @@
ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
ArtMethod* method,
const std::vector<uint32_t>& entries) {
+ // Check whether some other thread has concurrently created it.
+ auto it = profiling_infos_.find(method);
+ if (it != profiling_infos_.end()) {
+ return it->second;
+ }
+
size_t profile_info_size = RoundUp(
sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
sizeof(void*));
- // Check whether some other thread has concurrently created it.
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- if (info != nullptr) {
- return info;
- }
-
const uint8_t* data = private_region_.AllocateData(profile_info_size);
if (data == nullptr) {
return nullptr;
}
uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
- info = new (writable_data) ProfilingInfo(method, entries);
+ ProfilingInfo* info = new (writable_data) ProfilingInfo(method, entries);
- // Make sure other threads see the data in the profiling info object before the
- // store in the ArtMethod's ProfilingInfo pointer.
- std::atomic_thread_fence(std::memory_order_release);
-
- method->SetProfilingInfo(info);
- profiling_infos_.push_back(info);
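+ // profiling_infos_ is only accessed under jit_lock_, which now orders
+ // publication; the release fence for the lock-free ArtMethod pointer is
+ // no longer needed.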
+ profiling_infos_.Put(method, info);
histogram_profiling_info_memory_use_.AddValue(profile_info_size);
return info;
}
@@ -1519,7 +1482,8 @@
MutexLock mu(self, *Locks::jit_lock_);
ScopedTrace trace(__FUNCTION__);
uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
- for (const ProfilingInfo* info : profiling_infos_) {
+ for (const auto& entry : profiling_infos_) {
+ ProfilingInfo* info = entry.second;
ArtMethod* method = info->GetMethod();
const DexFile* dex_file = method->GetDexFile();
const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
@@ -1678,13 +1642,18 @@
}
return new_compilation;
} else {
- if (CanAllocateProfilingInfo() &&
- (compilation_kind == CompilationKind::kBaseline) &&
- (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
- if (ProfilingInfo::Create(self, method) == nullptr) {
- VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled baseline";
- ClearMethodCounter(method, /*was_warm=*/ false);
- return false;
+ if (CanAllocateProfilingInfo() && (compilation_kind == CompilationKind::kBaseline)) {
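+ // Look up the map in a scope of its own so jit_lock_ is released before
+ // ProfilingInfo::Create, which re-acquires it to allocate the info.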
+ bool has_profiling_info = false;
+ {
+ MutexLock mu(self, *Locks::jit_lock_);
+ has_profiling_info = (profiling_infos_.find(method) != profiling_infos_.end());
+ }
+ if (!has_profiling_info) {
+ if (ProfilingInfo::Create(self, method) == nullptr) {
+ VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled baseline";
+ ClearMethodCounter(method, /*was_warm=*/ false);
+ return false;
+ }
}
}
MutexLock mu(self, *Locks::jit_lock_);
@@ -1698,21 +1667,22 @@
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
MutexLock mu(self, *Locks::jit_lock_);
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- if (info != nullptr) {
- if (!info->IncrementInlineUse()) {
- // Overflow of inlining uses, just bail.
- return nullptr;
- }
+ auto it = profiling_infos_.find(method);
+ if (it == profiling_infos_.end()) {
+ return nullptr;
}
- return info;
+ if (!it->second->IncrementInlineUse()) {
+ // Overflow of inlining uses, just bail.
+ return nullptr;
+ }
+ return it->second;
}
void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
MutexLock mu(self, *Locks::jit_lock_);
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- DCHECK(info != nullptr);
- info->DecrementInlineUse();
+ auto it = profiling_infos_.find(method);
+ DCHECK(it != profiling_infos_.end());
+ it->second->DecrementInlineUse();
}
void JitCodeCache::DoneCompiling(ArtMethod* method,
@@ -1755,7 +1725,6 @@
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
DCHECK(!method->IsNative());
- ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
// Clear the method counter if we are running jitted code since we might want to jit this again in
@@ -1765,7 +1734,7 @@
// and clear the counter to get the method Jitted again.
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, GetQuickToInterpreterBridge());
- ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
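+ // The method was running compiled code that is now being invalidated, so
+ // it was necessarily warm.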
+ ClearMethodCounter(method, /*was_warm=*/ true);
} else {
MutexLock mu(Thread::Current(), *Locks::jit_lock_);
auto it = osr_code_map_.find(method);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 7c828ae..ea1e924 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -536,7 +536,7 @@
SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);
// ProfilingInfo objects we have allocated.
- std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
+ SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
// Methods we are currently compiling, one set for each kind of compilation.
std::set<ArtMethod*> current_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index fe551f3..332c6a7 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -386,9 +386,7 @@
const uint16_t counter = method.GetCounter();
// Mark startup methods as hot if they have more than hot_method_sample_threshold
// samples. This means they will get compiled by the compiler driver.
- if (method.GetProfilingInfo(kRuntimePointerSize) != nullptr ||
- method.PreviouslyWarm() ||
- counter >= hot_method_sample_threshold) {
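+ // The ProfilingInfo is no longer reachable from the ArtMethod, so rely on
+ // the PreviouslyWarm flag and the hotness counter instead.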
+ if (method.PreviouslyWarm() || counter >= hot_method_sample_threshold) {
hot_methods->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
} else if (counter != 0) {
sampled_methods->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 93951ee..d4b16b4 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -73,6 +73,7 @@
return &cache_[i];
}
}
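+ // GetInlineCache no longer requires the mutator lock; take it here only
+ // for the debugging output below, as PrettyMethod needs it.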
+ ScopedObjectAccess soa(Thread::Current());
LOG(FATAL) << "No inline cache found for " << ArtMethod::PrettyMethod(method_) << "@" << dex_pc;
UNREACHABLE();
}
@@ -107,4 +108,19 @@
// as the garbage collector might clear the entries concurrently.
}
+ScopedProfilingInfoUse::ScopedProfilingInfoUse(jit::Jit* jit, ArtMethod* method, Thread* self)
+ : jit_(jit),
+ method_(method),
+ self_(self),
+ // Fetch the profiling info ahead of using it. If it's null when fetching,
+ // we should not call JitCodeCache::DoneCompilerUse.
+ profiling_info_(jit->GetCodeCache()->NotifyCompilerUse(method, self)) {
+}
+
+ScopedProfilingInfoUse::~ScopedProfilingInfoUse() {
+ if (profiling_info_ != nullptr) {
+ jit_->GetCodeCache()->DoneCompilerUse(method_, self_);
+ }
+}
+
} // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index d136b4c..b1ea227 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/macros.h"
+#include "base/value_object.h"
#include "gc_root.h"
#include "offsets.h"
@@ -29,6 +30,7 @@
class ProfilingInfo;
namespace jit {
+class Jit;
class JitCodeCache;
} // namespace jit
@@ -78,9 +80,7 @@
return method_;
}
- // Mutator lock only required for debugging output.
- InlineCache* GetInlineCache(uint32_t dex_pc)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ InlineCache* GetInlineCache(uint32_t dex_pc);
// Increments the number of times this method is currently being inlined.
// Returns whether it was successful, that is it could increment without
@@ -142,6 +142,22 @@
DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
};
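+// RAII helper that marks a method's ProfilingInfo as in use by the compiler
+// via JitCodeCache::NotifyCompilerUse and releases it on destruction via
+// JitCodeCache::DoneCompilerUse. GetProfilingInfo() returns null when the
+// method has no info or its inline-use counter would overflow.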
+class ScopedProfilingInfoUse : public ValueObject {
+ public:
+ ScopedProfilingInfoUse(jit::Jit* jit, ArtMethod* method, Thread* self);
+ ~ScopedProfilingInfoUse();
+
+ ProfilingInfo* GetProfilingInfo() const { return profiling_info_; }
+
+ private:
+ jit::Jit* const jit_;
+ ArtMethod* const method_;
+ Thread* const self_;
+ ProfilingInfo* const profiling_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedProfilingInfoUse);
+};
+
} // namespace art
#endif // ART_RUNTIME_JIT_PROFILING_INFO_H_