-rw-r--r--  compiler/jit/jit_compiler.cc   |  3
-rw-r--r--  runtime/art_method.cc          | 10
-rw-r--r--  runtime/art_method.h           |  3
-rw-r--r--  runtime/debugger.cc            |  4
-rw-r--r--  runtime/jit/jit_code_cache.cc  | 85
-rw-r--r--  runtime/jit/jit_code_cache.h   |  9
6 files changed, 81 insertions, 33 deletions
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 5f4f47292b..2125c9a26a 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -177,7 +177,8 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
}
// Don't compile the method if we are supposed to be deoptimized.
- if (runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
return false;
}
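
Note: the hunk above hoists the Instrumentation pointer into a local so the existing process-wide check can be combined with a new per-method one. A minimal standalone sketch of that gating logic (Instrumentation and ArtMethod here are simplified stubs, not ART's real classes):

    #include <unordered_set>

    struct ArtMethod {};

    // Stub modeling the two queries the hunk above performs.
    class Instrumentation {
     public:
      bool AreAllMethodsDeoptimized() const { return all_deoptimized_; }
      bool IsDeoptimized(ArtMethod* m) const { return deoptimized_.count(m) != 0; }
     private:
      bool all_deoptimized_ = false;
      std::unordered_set<ArtMethod*> deoptimized_;
    };

    // JIT-compiling a method the debugger forced back to the interpreter
    // would undo the deoptimization, so bail out early.
    bool ShouldJitCompile(const Instrumentation& instr, ArtMethod* method) {
      return !instr.AreAllMethodsDeoptimized() && !instr.IsDeoptimized(method);
    }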
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 2a8cf9965c..dbb546da29 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -456,6 +456,16 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
return method_header;
}
+bool ArtMethod::HasAnyCompiledCode() {
+ // Check whether the JIT has compiled it.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && jit->GetCodeCache()->ContainsMethod(this)) {
+ return true;
+ }
+
+ // Check whether we have AOT code.
+ return Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(this) != nullptr;
+}
void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) {
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
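
HasAnyCompiledCode() consults the two places compiled code can live: the JIT code cache first, then the AOT code in the oat file. A hedged model of that lookup order (the types below are stand-in stubs for the runtime classes named in the hunk):

    struct ArtMethod {};

    struct JitCodeCache {
      bool ContainsMethod(ArtMethod*) const { return false; }  // stub
    };
    struct ClassLinker {
      const void* GetOatMethodQuickCodeFor(ArtMethod*) const { return nullptr; }  // stub
    };

    bool HasAnyCompiledCode(JitCodeCache* jit_cache, ClassLinker* linker, ArtMethod* m) {
      // A null cache (JIT disabled) just means there is no JIT code to find.
      if (jit_cache != nullptr && jit_cache->ContainsMethod(m)) {
        return true;
      }
      // Otherwise fall back to ahead-of-time compiled code from the oat file.
      return linker->GetOatMethodQuickCodeFor(m) != nullptr;
    }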
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce9f2025ce..201b3e64da 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -454,6 +454,9 @@ class ArtMethod FINAL {
const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Returns whether the method has any compiled code, JIT or AOT.
+ bool HasAnyCompiledCode() SHARED_REQUIRES(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e523fbb104..a25d0033ce 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3284,9 +3284,9 @@ static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
return DeoptimizationRequest::kFullDeoptimization;
} else {
// We don't need to deoptimize if the method has not been compiled.
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
+ const bool is_compiled = m->HasAnyCompiledCode();
if (is_compiled) {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
// If the method may be called through its direct code pointer (without loading
// its updated entrypoint), we need full deoptimization to not miss the breakpoint.
if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
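
The debugger change above now asks the method itself whether any compiled code exists (JIT or AOT) before choosing a deoptimization strategy. A simplified decision sketch; the enum values and the kNothing result for uncompiled methods are this sketch's assumptions, not a quote of debugger.cc:

    enum class DeoptKind { kNothing, kSelective, kFull };

    DeoptKind RequiredDeoptKind(bool has_any_compiled_code,
                                bool may_be_called_with_direct_code_pointer) {
      if (!has_any_compiled_code) {
        // Uncompiled methods run in the interpreter, which already honors
        // instrumentation, so no deoptimization is needed.
        return DeoptKind::kNothing;
      }
      // A caller that cached a direct code pointer bypasses the updated
      // entrypoint; only full deoptimization is guaranteed to catch it.
      return may_be_called_with_direct_code_pointer ? DeoptKind::kFull
                                                    : DeoptKind::kSelective;
    }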
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fbcba1b881..9dac5049ce 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -117,6 +117,16 @@ bool JitCodeCache::ContainsPc(const void* ptr) const {
return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
+bool JitCodeCache::ContainsMethod(ArtMethod* method) {
+ MutexLock mu(Thread::Current(), lock_);
+ for (auto& it : method_code_map_) {
+ if (it.second == method) {
+ return true;
+ }
+ }
+ return false;
+}
+
class ScopedCodeCacheWrite {
public:
explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
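
ContainsMethod() is a linear scan because method_code_map_ is keyed by code pointer (which keeps ContainsPc() and stack-walk lookups cheap); a reverse lookup by method therefore walks the values. A compilable model, with std::mutex standing in for ART's Mutex:

    #include <map>
    #include <mutex>

    struct ArtMethod {};

    class CodeMapModel {
     public:
      bool ContainsMethod(ArtMethod* method) {
        std::lock_guard<std::mutex> guard(lock_);
        for (const auto& entry : method_code_map_) {
          if (entry.second == method) {  // values are methods; keys are code ptrs
            return true;
          }
        }
        return false;
      }
     private:
      std::mutex lock_;
      std::map<const void*, ArtMethod*> method_code_map_;
    };

The O(n) walk is presumably acceptable here since the caller (HasAnyCompiledCode, on debugger paths) runs far less often than PC lookups.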
@@ -276,26 +286,36 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
__builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
+ }
+ // We need to update the entry point in the runnable state for the instrumentation.
+ {
+ MutexLock mu(self, lock_);
method_code_map_.Put(code_ptr, method);
- // We have checked there was no collection in progress earlier. If we
- // were, setting the entry point of a method would be unsafe, as the collection
- // could delete it.
- DCHECK(!collection_in_progress_);
- method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
- }
- VLOG(jit)
- << "JIT added "
- << PrettyMethod(method) << "@" << method
- << " ccache_size=" << PrettySize(CodeCacheSize()) << ": "
- << " dcache_size=" << PrettySize(DataCacheSize()) << ": "
- << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
- << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+ Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+ method, method_header->GetEntryPoint());
+ if (collection_in_progress_) {
+ // We need to update the live bitmap if there is a GC to ensure it sees this new
+ // code.
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+ }
+ VLOG(jit)
+ << "JIT added "
+ << PrettyMethod(method) << "@" << method
+ << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
+ << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
+ << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
+ << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+ }
return reinterpret_cast<uint8_t*>(method_header);
}
size_t JitCodeCache::CodeCacheSize() {
MutexLock mu(Thread::Current(), lock_);
+ return CodeCacheSizeLocked();
+}
+
+size_t JitCodeCache::CodeCacheSizeLocked() {
size_t bytes_allocated = 0;
mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
return bytes_allocated;
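
The CodeCacheSize()/CodeCacheSizeLocked() split is the usual pattern for a non-reentrant mutex: the public method acquires lock_ and delegates, while callers that already hold the lock (like the VLOG block moved inside the critical section in CommitCodeInternal above) call the *Locked variant directly. A minimal sketch with std::mutex in place of ART's annotated Mutex:

    #include <cstddef>
    #include <mutex>

    class CacheModel {
     public:
      size_t Size() {
        std::lock_guard<std::mutex> guard(lock_);
        return SizeLocked();
      }
      void Commit() {
        std::lock_guard<std::mutex> guard(lock_);
        // Calling Size() here would self-deadlock; the *Locked variant
        // reuses the logic without re-acquiring the mutex.
        last_logged_size_ = SizeLocked();
      }
     private:
      size_t SizeLocked() { return bytes_allocated_; }  // caller holds lock_
      std::mutex lock_;
      size_t bytes_allocated_ = 0;
      size_t last_logged_size_ = 0;
    };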
@@ -303,6 +323,10 @@ size_t JitCodeCache::CodeCacheSize() {
size_t JitCodeCache::DataCacheSize() {
MutexLock mu(Thread::Current(), lock_);
+ return DataCacheSizeLocked();
+}
+
+size_t JitCodeCache::DataCacheSizeLocked() {
size_t bytes_allocated = 0;
mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
return bytes_allocated;
@@ -417,19 +441,25 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
}
size_t map_size = 0;
- ScopedThreadSuspension sts(self, kSuspended);
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- // Walk over all compiled methods and set the entry points of these
- // methods to interpreter.
+ // Wait for an existing collection, or let everyone know we are starting one.
{
+ ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, lock_);
if (WaitForPotentialCollectionToComplete(self)) {
return;
+ } else {
+ collection_in_progress_ = true;
}
- collection_in_progress_ = true;
+ }
+ // Walk over all compiled methods and set the entry points of these
+ // methods to interpreter.
+ {
+ MutexLock mu(self, lock_);
map_size = method_code_map_.size();
for (auto& it : method_code_map_) {
- it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
}
for (ProfilingInfo* info : profiling_infos_) {
info->GetMethod()->SetProfilingInfo(nullptr);
@@ -440,16 +470,12 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
{
Barrier barrier(0);
size_t threads_running_checkpoint = 0;
- {
- // Walking the stack requires the mutator lock.
- // We only take the lock when running the checkpoint and not waiting so that
- // when we go back to suspended, we can execute checkpoints that were requested
- // concurrently, and then move to waiting for our own checkpoint to finish.
- ScopedObjectAccess soa(self);
- MarkCodeClosure closure(this, &barrier);
- threads_running_checkpoint =
- Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
- }
+ MarkCodeClosure closure(this, &barrier);
+ threads_running_checkpoint =
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ // Now that we have run our checkpoint, move to a suspended state and wait
+ // for other threads to run the checkpoint.
+ ScopedThreadSuspension sts(self, kSuspended);
if (threads_running_checkpoint != 0) {
barrier.Increment(self, threads_running_checkpoint);
}
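
The reordered collection runs the checkpoint while still runnable (stack walking needs the mutator lock), then drops to a suspended state and parks on the barrier until every checkpointed thread has checked in. A rough standard-C++ model of that rendezvous (ART's Barrier and ThreadList::RunCheckpoint differ in detail):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    class BarrierModel {
     public:
      // Each checkpointed thread calls this after marking the code it runs.
      void CheckIn() {
        std::lock_guard<std::mutex> guard(mu_);
        ++arrived_;
        cv_.notify_all();
      }
      // The collector blocks here, analogous to barrier.Increment(self, n).
      void WaitFor(size_t expected) {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [&] { return arrived_ >= expected; });
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      size_t arrived_ = 0;
    };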
@@ -457,7 +483,6 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
{
MutexLock mu(self, lock_);
- DCHECK_EQ(map_size, method_code_map_.size());
// Free unused compiled code, and restore the entry point of used compiled code.
{
ScopedCodeCacheWrite scc(code_map_.get());
@@ -467,7 +492,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
if (GetLiveBitmap()->Test(allocation)) {
- method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+ instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
++it;
} else {
method->ClearCounter();
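
The sweep above frees code whose allocation was not marked live by the checkpoint and restores, through the instrumentation so deoptimization state is respected, the entrypoints of code that survived. A simplified model of that loop; FreeCode is the real method's name, everything else here is a stub:

    #include <map>

    struct ArtMethod { void ClearCounter() {} };
    struct LiveBitmap { bool Test(const void*) const { return true; } };  // stub

    void Sweep(std::map<const void*, ArtMethod*>& method_code_map,
               const LiveBitmap& live) {
      for (auto it = method_code_map.begin(); it != method_code_map.end();) {
        const void* code_ptr = it->first;
        if (live.Test(code_ptr)) {
          ++it;  // still referenced by some thread's stack: keep it
        } else {
          it->second->ClearCounter();  // let the method become hot again later
          // FreeCode(code_ptr, method) would release the mspace allocation here.
          it = method_code_map.erase(it);
        }
      }
    }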
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index afff657880..131446c484 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -83,6 +83,9 @@ class JitCodeCache {
// Return true if the code cache contains this pc.
bool ContainsPc(const void* pc) const;
+ // Return true if the code cache contains this method.
+ bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
+
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size)
SHARED_REQUIRES(Locks::mutator_lock_)
@@ -163,6 +166,12 @@ class JitCodeCache {
// Free in the mspace allocations taken by 'method'.
void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
+ // Number of bytes allocated in the code cache.
+ size_t CodeCacheSizeLocked() REQUIRES(lock_);
+
+ // Number of bytes allocated in the data cache.
+ size_t DataCacheSizeLocked() REQUIRES(lock_);
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
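
The REQUIRES(lock_) / REQUIRES(!lock_) annotations on the new declarations are Clang thread-safety attributes: callers of ContainsMethod must not hold lock_ (it takes the lock itself), while the *Locked size helpers demand it already be held. A minimal illustration using the raw clang attributes (ART spells these via macros in base/mutex.h; check with clang++ -Wthread-safety, plus -Wthread-safety-negative for the !lock_ requirement):

    class __attribute__((capability("mutex"))) Mutex {
     public:
      void Acquire() __attribute__((acquire_capability()));
      void Release() __attribute__((release_capability()));
    };

    class CacheModel {
     public:
      bool ContainsMethod() __attribute__((requires_capability(!lock_)));
     private:
      size_t CodeCacheSizeLocked() __attribute__((requires_capability(lock_)));
      Mutex lock_;
    };

With the analysis enabled, calling CodeCacheSizeLocked() without holding lock_, or ContainsMethod() while holding it, becomes a compile-time warning instead of a runtime deadlock.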