Support garbage collection of JITted code.
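
The code cache now keeps a map from committed code to the ArtMethod it was
compiled for, together with a live bitmap over the code map. When an
allocation fails, GarbageCollectCache resets all entry points to the
interpreter bridge, runs a checkpoint so every thread marks the code it is
currently executing, then frees the unmarked code and restores the entry
points of the survivors. Allocations racing with a collection wait on a
condition variable, and RemoveMethodsIn drops the code of methods whose
LinearAlloc is being deleted.
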
Change-Id: I9afc544460ae4fb31149644b6196ac7f5182c784
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 0607493..5afd28e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -49,7 +49,7 @@
void Jit::DumpInfo(std::ostream& os) {
os << "Code cache size=" << PrettySize(code_cache_->CodeCacheSize())
<< " data cache size=" << PrettySize(code_cache_->DataCacheSize())
- << " num methods=" << code_cache_->NumMethods()
+ << " number of compiled code=" << code_cache_->NumberOfCompiledCode()
<< "\n";
cumulative_timings_.Dump(os);
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4187358..2d0a2a5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,8 +19,12 @@
#include <sstream>
#include "art_method-inl.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "gc/accounting/bitmap-inl.h"
+#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
+#include "thread_list.h"
namespace art {
namespace jit {
@@ -74,14 +78,10 @@
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
: lock_("Jit code cache", kJitCodeCacheLock),
+ lock_cond_("Jit code cache variable", lock_),
+ collection_in_progress_(false),
code_map_(code_map),
- data_map_(data_map),
- num_methods_(0) {
-
- VLOG(jit) << "Created jit code cache: data size="
- << PrettySize(data_map_->Size())
- << ", code size="
- << PrettySize(code_map_->Size());
+ data_map_(data_map) {
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
@@ -96,13 +96,22 @@
CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
+
+ live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
+ reinterpret_cast<uintptr_t>(code_map_->Begin()),
+ reinterpret_cast<uintptr_t>(code_map_->End())));
+
+ if (live_bitmap_.get() == nullptr) {
+ PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
+ }
+
+ VLOG(jit) << "Created jit code cache: data size="
+ << PrettySize(data_map_->Size())
+ << ", code size="
+ << PrettySize(code_map_->Size());
}
-bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
- return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
-}
-
-bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
+bool JitCodeCache::ContainsPc(const void* ptr) const {
return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
@@ -121,6 +130,7 @@
};
uint8_t* JitCodeCache::CommitCode(Thread* self,
+ ArtMethod* method,
const uint8_t* mapping_table,
const uint8_t* vmap_table,
const uint8_t* gc_map,
@@ -129,6 +139,93 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size) {
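+ // Commit once; if the allocation fails, collect the code cache and retry once.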
+ uint8_t* result = CommitCodeInternal(self,
+ method,
+ mapping_table,
+ vmap_table,
+ gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code,
+ code_size);
+ if (result == nullptr) {
+ // Retry.
+ GarbageCollectCache(self);
+ result = CommitCodeInternal(self,
+ method,
+ mapping_table,
+ vmap_table,
+ gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code,
+ code_size);
+ }
+ return result;
+}
+
+bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
+ bool in_collection = false;
+ while (collection_in_progress_) {
+ in_collection = true;
+ lock_cond_.Wait(self);
+ }
+ return in_collection;
+}
+
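+// The pointer handed out by CommitCode points just past the OatQuickMethodHeader;
+// the mspace allocation starts at the header, so step back by its aligned size.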
+static uintptr_t FromCodeToAllocation(const void* code) {
+ size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
+}
+
+void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ const uint8_t* data = method_header->GetNativeGcMap();
+ if (data != nullptr) {
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ data = method_header->GetMappingTable();
+ if (data != nullptr) {
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ // Use the offset directly to avoid the sanity check that the method is
+ // compiled with the optimizing compiler.
+ // TODO(ngeoffray): Clean up.
+ if (method_header->vmap_table_offset_ != 0) {
+ data = method_header->code_ - method_header->vmap_table_offset_;
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
+}
+
+void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
+ MutexLock mu(self, lock_);
+ // We do not check whether a code cache GC is in progress: this method is
+ // called with the classlinker_classes_lock_ held, and suspending ourselves
+ // could lead to a deadlock.
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ if (alloc.ContainsUnsafe(it->second)) {
+ FreeCode(it->first, it->second);
+ it = method_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
+ ArtMethod* method,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size) {
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -137,7 +234,9 @@
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
+ ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
{
ScopedCodeCacheWrite scc(code_map_.get());
uint8_t* result = reinterpret_cast<uint8_t*>(
@@ -149,7 +248,7 @@
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);
std::copy(code, code + code_size, code_ptr);
- method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
new (method_header) OatQuickMethodHeader(
(mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
(vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
@@ -162,8 +261,12 @@
__builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
-
- ++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
+ method_code_map_.Put(code_ptr, method);
+ // We checked above that no collection is in progress. If one were, setting
+ // the entry point of a method would be unsafe, as the collection could free
+ // the code it points to.
+ DCHECK(!collection_in_progress_);
+ method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
return reinterpret_cast<uint8_t*>(method_header);
}
@@ -181,10 +284,32 @@
return bytes_allocated;
}
+size_t JitCodeCache::NumberOfCompiledCode() {
+ MutexLock mu(Thread::Current(), lock_);
+ return method_code_map_.size();
+}
+
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
size = RoundUp(size, sizeof(void*));
- MutexLock mu(self, lock_);
- return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ uint8_t* result = nullptr;
+
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ }
+
+ if (result == nullptr) {
+ // Retry.
+ GarbageCollectCache(self);
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ }
+
+ return result;
}
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
@@ -196,29 +321,143 @@
return result;
}
-const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
- const void* code = method->GetEntryPointFromQuickCompiledCode();
- if (ContainsCodePtr(code)) {
- return code;
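+// Stack visitor that marks, in the code cache's live bitmap, the JIT compiled
+// code found on the visited thread's stack.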
+class MarkCodeVisitor FINAL : public StackVisitor {
+ public:
+ MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
+ : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+ code_cache_(code_cache_in),
+ bitmap_(code_cache_->GetLiveBitmap()) {}
+
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return true;
+ }
+ const void* code = method_header->GetCode();
+ if (code_cache_->ContainsPc(code)) {
+ // Use the atomic set version, as multiple threads may run this checkpoint
+ // concurrently.
+ bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
+ }
+ return true;
}
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return it->second;
+
+ private:
+ JitCodeCache* const code_cache_;
+ CodeCacheBitmap* const bitmap_;
+};
+
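+// Checkpoint closure run on each thread so that every thread marks the JIT
+// code it is currently executing.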
+class MarkCodeClosure FINAL : public Closure {
+ public:
+ MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
+ : code_cache_(code_cache), barrier_(barrier) {}
+
+ void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(thread == Thread::Current() || thread->IsSuspended());
+ MarkCodeVisitor visitor(thread, code_cache_);
+ visitor.WalkStack();
+ if (thread->GetState() == kRunnable) {
+ barrier_->Pass(Thread::Current());
+ }
}
- return nullptr;
+
+ private:
+ JitCodeCache* const code_cache_;
+ Barrier* const barrier_;
+};
+
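+// A collection runs in three steps: point every compiled method back at the
+// interpreter, run a checkpoint on all threads to mark code still on a stack,
+// then free the unmarked code and restore the entry points of the marked code.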
+void JitCodeCache::GarbageCollectCache(Thread* self) {
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "Clearing code cache, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
+
+ size_t map_size = 0;
+ ScopedThreadSuspension sts(self, kSuspended);
+
+ // Walk over all compiled methods and set their entry points to the
+ // interpreter.
+ {
+ MutexLock mu(self, lock_);
+ if (WaitForPotentialCollectionToComplete(self)) {
+ return;
+ }
+ collection_in_progress_ = true;
+ map_size = method_code_map_.size();
+ for (auto& it : method_code_map_) {
+ it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
+ }
+
+ // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+ {
+ Barrier barrier(0);
+ MarkCodeClosure closure(this, &barrier);
+ size_t threads_running_checkpoint =
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ if (threads_running_checkpoint != 0) {
+ barrier.Increment(self, threads_running_checkpoint);
+ }
+ }
+
+ // Free unused compiled code and restore the entry points of the code still in use.
+ {
+ MutexLock mu(self, lock_);
+ DCHECK_EQ(map_size, method_code_map_.size());
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (GetLiveBitmap()->Test(allocation)) {
+ method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+ ++it;
+ } else {
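+ // Clear the counter so the method has to warm up again before being recompiled.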
+ method->ClearCounter();
+ DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ }
+ }
+ GetLiveBitmap()->Bitmap::Clear();
+ collection_in_progress_ = false;
+ lock_cond_.Broadcast(self);
+ }
+
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "After clearing code cache, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
}
-void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) {
- DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
- DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
- << old_code_ptr;
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return;
+
+OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
+ static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+ if (kRuntimeISA == kArm) {
+ // On Thumb-2, the pc is offset by one.
+ --pc;
}
- method_code_map_.Put(method, old_code_ptr);
+ if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
+ return nullptr;
+ }
+
+ MutexLock mu(Thread::Current(), lock_);
+ if (method_code_map_.empty()) {
+ return nullptr;
+ }
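+ // 'lower_bound' returns the first entry whose code starts at or after 'pc';
+ // stepping back gives the entry that can actually contain 'pc'.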
+ auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
+ --it;
+
+ const void* code_ptr = it->first;
+ OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (!method_header->Contains(pc)) {
+ return nullptr;
+ }
+ DCHECK_EQ(it->second, method)
+ << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
+ return method_header;
}
} // namespace jit
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fa90c18..4e415b8 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc/accounting/bitmap.h"
#include "gc/allocator/dlmalloc.h"
#include "gc_root.h"
#include "jni.h"
@@ -33,32 +34,40 @@
namespace art {
class ArtMethod;
-class CompiledMethod;
-class CompilerCallbacks;
+class LinearAlloc;
namespace jit {
class JitInstrumentationCache;
+// Alignment that will suit all architectures.
+static constexpr int kJitCodeAlignment = 16;
+using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
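+// The live bitmap spans the code map at kJitCodeAlignment granularity; during a
+// collection, a set bit marks a code allocation that is still in use.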
+
class JitCodeCache {
public:
static constexpr size_t kMaxCapacity = 1 * GB;
- static constexpr size_t kDefaultCapacity = 2 * MB;
+ // Set the default to a very low value in debug builds to stress code cache
+ // collection.
+ static constexpr size_t kDefaultCapacity = kIsDebugBuild ? 20 * KB : 2 * MB;
// Create the code cache with a code + data capacity equal to "capacity", error message is passed
// in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
- size_t NumMethods() const {
- return num_methods_;
- }
-
+ // Number of bytes allocated in the code cache.
size_t CodeCacheSize() REQUIRES(!lock_);
+ // Number of bytes allocated in the data cache.
size_t DataCacheSize() REQUIRES(!lock_);
+ // Number of compiled code entries in the code cache. Note that this is not the
+ // number of methods that have been JIT compiled, as some may have been collected.
+ size_t NumberOfCompiledCode() REQUIRES(!lock_);
+
// Allocate and write code and its metadata to the code cache.
uint8_t* CommitCode(Thread* self,
+ ArtMethod* method,
const uint8_t* mapping_table,
const uint8_t* vmap_table,
const uint8_t* gc_map,
@@ -67,51 +76,89 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size)
+ SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- // Return true if the code cache contains the code pointer which si the entrypoint of the method.
- bool ContainsMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Return true if the code cache contains a code ptr.
- bool ContainsCodePtr(const void* ptr) const;
+ // Return true if the code cache contains this pc.
+ bool ContainsPc(const void* pc) const;
// Reserve a region of data of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
+ uint8_t* ReserveData(Thread* self, size_t size)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
// Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
+ SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- // Get code for a method, returns null if it is not in the jit cache.
- const void* GetCodeFor(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ CodeCacheBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
- // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
- // entrypoint isn't within the cache.
- void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ // Perform a collection on the code cache.
+ void GarbageCollectCache(Thread* self)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Given 'pc', try to find the JIT compiled code associated with it.
+ // Return null if 'pc' is not in the code cache. 'method' is passed for
+ // a sanity check.
+ OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
+ REQUIRES(!lock_)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- // Takes ownership of code_mem_map.
+ // Take ownership of code_map and data_map.
JitCodeCache(MemMap* code_map, MemMap* data_map);
- // Lock which guards.
+ // Internal version of 'CommitCode' that does not retry on failure. Returns
+ // null if the allocation fails.
+ uint8_t* CommitCodeInternal(Thread* self,
+ ArtMethod* method,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // If a collection is in progress, wait for it to finish. Return
+ // whether the thread actually waited.
+ bool WaitForPotentialCollectionToComplete(Thread* self)
+ REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
+
+ // Free the code and data mspace allocations associated with 'code_ptr'.
+ void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
+
+ // Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
+ // Condition to wait on during collection.
+ ConditionVariable lock_cond_ GUARDED_BY(lock_);
+ // Whether there is a code cache collection in progress.
+ bool collection_in_progress_ GUARDED_BY(lock_);
// Mem map which holds code.
std::unique_ptr<MemMap> code_map_;
// Mem map which holds data (stack maps and profiling info).
std::unique_ptr<MemMap> data_map_;
// The opaque mspace for allocating code.
- void* code_mspace_;
+ void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
- void* data_mspace_;
- // Number of compiled methods.
- size_t num_methods_;
- // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
- // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
- SafeMap<ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
+ void* data_mspace_ GUARDED_BY(lock_);
+ // Bitmap for collecting code and data.
+ std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+ // Maps each compiled code pointer to the ArtMethod it was compiled for.
+ SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 9b9c5d2..666b8e7 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -76,8 +76,7 @@
ScopedObjectAccessUnchecked soa(self);
// Since we don't have on-stack replacement, some methods can remain in the interpreter longer
// than we want resulting in samples even after the method is compiled.
- if (method->IsClassInitializer() || method->IsNative() ||
- Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+ if (method->IsClassInitializer() || method->IsNative()) {
return;
}
if (thread_pool_.get() == nullptr) {
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 0c039f2..7c5f78e 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -28,15 +28,10 @@
ProfilingInfo* ProfilingInfo::Create(ArtMethod* method) {
// Walk over the dex instructions of the method and keep track of
// instructions we are interested in profiling.
- const uint16_t* code_ptr = nullptr;
- const uint16_t* code_end = nullptr;
- {
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(!method->IsNative());
- const DexFile::CodeItem& code_item = *method->GetCodeItem();
- code_ptr = code_item.insns_;
- code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
- }
+ DCHECK(!method->IsNative());
+ const DexFile::CodeItem& code_item = *method->GetCodeItem();
+ const uint16_t* code_ptr = code_item.insns_;
+ const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
uint32_t dex_pc = 0;
std::vector<uint32_t> entries;
@@ -91,7 +86,7 @@
ScopedObjectAccess soa(self);
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
+ mirror::Class* existing = cache->classes_[i].Read();
if (existing == cls) {
// Receiver type is already in the cache, nothing else to do.
return;
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 73ca41a..7a2d1a8 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -36,7 +36,7 @@
*/
class ProfilingInfo {
public:
- static ProfilingInfo* Create(ArtMethod* method);
+ static ProfilingInfo* Create(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
// Add information from an executed INVOKE instruction to the profile.
void AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls);