Merge "Fix build: lint error in elf_builder.h"
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 5f4f472..2125c9a 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -177,7 +177,8 @@
}
// Don't compile the method if we are supposed to be deoptimized.
- if (runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
return false;
}
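
The guard above now bails out both under global deoptimization and when this particular method has been individually deoptimized (e.g. for a breakpoint). A minimal stand-alone model of that check, with stub types in place of ART's real `Instrumentation` (all names below are illustrative, not ART's API):

```cpp
#include <unordered_set>

// Simplified stand-in for runtime/instrumentation.h; illustrative only.
class Instrumentation {
 public:
  bool AreAllMethodsDeoptimized() const { return deopt_everything_; }
  bool IsDeoptimized(const void* method) const {
    return deoptimized_.count(method) != 0;
  }
  void Deoptimize(const void* method) { deoptimized_.insert(method); }

 private:
  bool deopt_everything_ = false;
  std::unordered_set<const void*> deoptimized_;
};

// Mirrors the patched check: the JIT must not produce code for a method the
// debugger wants interpreted, whether deoptimization is global or per-method.
bool ShouldJitCompile(const Instrumentation& instr, const void* method) {
  return !instr.AreAllMethodsDeoptimized() && !instr.IsDeoptimized(method);
}
```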
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 676e564..167c35d 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -998,7 +998,9 @@
return false;
}
- if (invoke->IsInvokeStaticOrDirect()) {
+ if (invoke->IsInvokeStaticOrDirect() &&
+ HInvokeStaticOrDirect::NeedsCurrentMethodInput(
+ invoke->AsInvokeStaticOrDirect()->GetMethodLoadKind())) {
invoke->SetArgumentAt(*argument_index, graph_->GetCurrentMethod());
(*argument_index)++;
}
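
With this change the builder appends the implicit `HCurrentMethod` argument only when the dispatch's method load kind actually reads it. A small self-contained sketch of the same predicate-guarded append, using a stand-in enum rather than ART's real types:

```cpp
#include <vector>

// Illustrative subset of HInvokeStaticOrDirect::MethodLoadKind.
enum class MethodLoadKind { kRecursive, kDexCacheViaMethod, kDirectAddress, kStringInit };

// Same predicate as the static helper added in nodes.h later in this diff.
bool NeedsCurrentMethodInput(MethodLoadKind kind) {
  return kind == MethodLoadKind::kRecursive ||
         kind == MethodLoadKind::kDexCacheViaMethod;
}

// Hypothetical argument-setup step: the current method is appended only for
// load kinds that consume it, so other invokes keep a shorter input list.
void AppendImplicitArgs(std::vector<int>* args, MethodLoadKind kind, int current_method) {
  if (NeedsCurrentMethodInput(kind)) {
    args->push_back(current_method);
  }
}
```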
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index c32ef51..dd380c2 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -257,10 +257,11 @@
}
size_t use_index = use_it.Current()->GetIndex();
if ((use_index >= use->InputCount()) || (use->InputAt(use_index) != instruction)) {
- AddError(StringPrintf("User %s:%d of instruction %d has a wrong "
+ AddError(StringPrintf("User %s:%d of instruction %s:%d has a wrong "
"UseListNode index.",
use->DebugName(),
use->GetId(),
+ instruction->DebugName(),
instruction->GetId()));
}
}
@@ -546,10 +547,14 @@
!use_it.Done(); use_it.Advance()) {
HInstruction* use = use_it.Current()->GetUser();
if (!use->IsPhi() && !instruction->StrictlyDominates(use)) {
- AddError(StringPrintf("Instruction %d in block %d does not dominate "
- "use %d in block %d.",
- instruction->GetId(), current_block_->GetBlockId(),
- use->GetId(), use->GetBlock()->GetBlockId()));
+ AddError(StringPrintf("Instruction %s:%d in block %d does not dominate "
+ "use %s:%d in block %d.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ current_block_->GetBlockId(),
+ use->DebugName(),
+ use->GetId(),
+ use->GetBlock()->GetBlockId()));
}
}
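
Both checker messages now print `DebugName()` alongside the numeric id, so an error identifies the instruction kind directly (e.g. `Add:5`) instead of an id that only makes sense next to a graph dump. A minimal illustration of the resulting format, using plain `snprintf` in place of ART's `StringPrintf`:

```cpp
#include <cstdio>

int main() {
  // Same format string as the new AddError() call; the values are made up.
  char buf[128];
  std::snprintf(buf, sizeof(buf),
                "Instruction %s:%d in block %d does not dominate use %s:%d in block %d.",
                "Add", 5, 2, "Return", 9, 1);
  std::puts(buf);  // Instruction Add:5 in block 2 does not dominate use Return:9 in block 1.
}
```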
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index af3d8f4..2d3dcf7 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1978,6 +1978,16 @@
return !opt.GetDoesNotNeedDexCache();
}
+void HInvokeStaticOrDirect::RemoveInputAt(size_t index) {
+ RemoveAsUserOfInput(index);
+ inputs_.erase(inputs_.begin() + index);
+ // Update indexes in use nodes of inputs that have been pulled forward by the erase().
+ for (size_t i = index, e = InputCount(); i < e; ++i) {
+ DCHECK_EQ(InputRecordAt(i).GetUseNode()->GetIndex(), i + 1u);
+ InputRecordAt(i).GetUseNode()->SetIndex(i);
+ }
+}
+
void HInstruction::RemoveEnvironmentUsers() {
for (HUseIterator<HEnvironment*> use_it(GetEnvUses()); !use_it.Done(); use_it.Advance()) {
HUseListNode<HEnvironment*>* user_node = use_it.Current();
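
`RemoveInputAt()` must do more than erase the vector slot: each input's use-list node caches its index, and every record that slides left after `erase()` still carries the old value. A toy model of that fix-up loop (simplified types, not ART's `HUserRecord`):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Each input's use-list node caches its position in the input list.
struct UseNode { std::size_t index; };

// Toy version of HInvokeStaticOrDirect::RemoveInputAt(): after erase(),
// every record that slid one slot to the left must have its cached index
// decremented to match its new position.
void RemoveInputAt(std::vector<UseNode>& inputs, std::size_t index) {
  inputs.erase(inputs.begin() + index);
  for (std::size_t i = index, e = inputs.size(); i < e; ++i) {
    assert(inputs[i].index == i + 1u);  // Stale: still the pre-erase slot.
    inputs[i].index = i;
  }
}
```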
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index ddd39a3..4e81248 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3408,11 +3408,12 @@
ClinitCheckRequirement clinit_check_requirement)
: HInvoke(arena,
number_of_arguments,
- // There is one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit, and one other
- // if the method is a string factory.
- 1u + (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u)
- + (dispatch_info.method_load_kind == MethodLoadKind::kStringInit ? 1u : 0u),
+ // There is potentially one extra argument for the HCurrentMethod node, and
+ // potentially one other if the clinit check is explicit, and potentially
+ // one other if the method is a string factory.
+ (NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) +
+ (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u) +
+ (dispatch_info.method_load_kind == MethodLoadKind::kStringInit ? 1u : 0u),
return_type,
dex_pc,
method_index,
@@ -3420,12 +3421,25 @@
invoke_type_(invoke_type),
clinit_check_requirement_(clinit_check_requirement),
target_method_(target_method),
- dispatch_info_(dispatch_info) {}
+ dispatch_info_(dispatch_info) { }
void SetDispatchInfo(const DispatchInfo& dispatch_info) {
+ bool had_current_method_input = HasCurrentMethodInput();
+ bool needs_current_method_input = NeedsCurrentMethodInput(dispatch_info.method_load_kind);
+
+ // Using the current method is the default and once we find a better
+ // method load kind, we should not go back to using the current method.
+ DCHECK(had_current_method_input || !needs_current_method_input);
+
+ if (had_current_method_input && !needs_current_method_input) {
+ DCHECK_EQ(InputAt(GetCurrentMethodInputIndex()), GetBlock()->GetGraph()->GetCurrentMethod());
+ RemoveInputAt(GetCurrentMethodInputIndex());
+ }
dispatch_info_ = dispatch_info;
}
+ void RemoveInputAt(size_t index);
+
bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
@@ -3447,6 +3461,17 @@
bool HasPcRelDexCache() const {
return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
}
+ bool HasCurrentMethodInput() const {
+ // This function can be called only after the invoke has been fully initialized by the builder.
+ if (NeedsCurrentMethodInput(GetMethodLoadKind())) {
+ DCHECK(InputAt(GetCurrentMethodInputIndex())->IsCurrentMethod());
+ return true;
+ } else {
+ DCHECK(InputCount() == GetCurrentMethodInputIndex() ||
+ !InputAt(GetCurrentMethodInputIndex())->IsCurrentMethod());
+ return false;
+ }
+ }
bool HasDirectCodePtr() const { return GetCodePtrLocation() == CodePtrLocation::kCallDirect; }
MethodReference GetTargetMethod() const { return target_method_; }
@@ -3495,8 +3520,8 @@
bool IsStringFactoryFor(HFakeString* str) const {
if (!IsStringInit()) return false;
- // +1 for the current method.
- if (InputCount() == (number_of_arguments_ + 1)) return false;
+ DCHECK(!HasCurrentMethodInput());
+    if (InputCount() == number_of_arguments_) return false;
return InputAt(InputCount() - 1)->AsFakeString() == str;
}
@@ -3522,6 +3547,11 @@
return IsStatic() && (clinit_check_requirement_ == ClinitCheckRequirement::kImplicit);
}
+ // Does this method load kind need the current method as an input?
+ static bool NeedsCurrentMethodInput(MethodLoadKind kind) {
+ return kind == MethodLoadKind::kRecursive || kind == MethodLoadKind::kDexCacheViaMethod;
+ }
+
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
protected:
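
The constructor now derives the number of extra inputs from three independent conditions. A worked, compilable sketch of that arithmetic under the same assumptions (`kStringInit` is not in `NeedsCurrentMethodInput()`, so it contributes only its own slot):

```cpp
#include <cstdint>
#include <iostream>

enum class MethodLoadKind { kRecursive, kDexCacheViaMethod, kDirectAddress, kStringInit };
enum class ClinitCheckRequirement { kExplicit, kImplicit, kNone };

bool NeedsCurrentMethodInput(MethodLoadKind kind) {
  return kind == MethodLoadKind::kRecursive ||
         kind == MethodLoadKind::kDexCacheViaMethod;
}

// Mirrors the constructor's extra-input arithmetic from the hunk above.
uint32_t ExtraInputs(MethodLoadKind kind, ClinitCheckRequirement clinit) {
  return (NeedsCurrentMethodInput(kind) ? 1u : 0u) +
         (clinit == ClinitCheckRequirement::kExplicit ? 1u : 0u) +
         (kind == MethodLoadKind::kStringInit ? 1u : 0u);
}

int main() {
  // kDexCacheViaMethod with an explicit clinit check: current method + check.
  std::cout << ExtraInputs(MethodLoadKind::kDexCacheViaMethod,
                           ClinitCheckRequirement::kExplicit) << "\n";  // 2
  // kDirectAddress needs neither extra input.
  std::cout << ExtraInputs(MethodLoadKind::kDirectAddress,
                           ClinitCheckRequirement::kImplicit) << "\n";  // 0
}
```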
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 2a8cf99..dbb546d 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -456,6 +456,16 @@
return method_header;
}
+bool ArtMethod::HasAnyCompiledCode() {
+ // Check whether the JIT has compiled it.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && jit->GetCodeCache()->ContainsMethod(this)) {
+ return true;
+ }
+
+ // Check whether we have AOT code.
+ return Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(this) != nullptr;
+}
void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) {
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
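
`HasAnyCompiledCode()` unifies the two places code can live: the JIT code cache and the AOT oat file. Reduced to its shape, with stub fields standing in for the real `jit->GetCodeCache()->ContainsMethod(this)` and `GetOatMethodQuickCodeFor()` lookups:

```cpp
// A method counts as compiled if either tier has produced code for it.
struct Method {
  bool in_jit_code_cache = false;        // JIT tier (stub).
  const void* aot_quick_code = nullptr;  // AOT tier (stub).

  bool HasAnyCompiledCode() const {
    return in_jit_code_cache || aot_quick_code != nullptr;
  }
};
```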
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce9f202..201b3e6 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -454,6 +454,9 @@
const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Returns whether the method has any compiled code, JIT or AOT.
+ bool HasAnyCompiledCode() SHARED_REQUIRES(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 6e4b7a3..13d0b84 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3284,9 +3284,9 @@
return DeoptimizationRequest::kFullDeoptimization;
} else {
// We don't need to deoptimize if the method has not been compiled.
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
+ const bool is_compiled = m->HasAnyCompiledCode();
if (is_compiled) {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
// If the method may be called through its direct code pointer (without loading
// its updated entrypoint), we need full deoptimization to not miss the breakpoint.
if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
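
Condensed, the debugger's choice now looks like the sketch below: no deoptimization for methods with no compiled code, and full deoptimization only when a caller might hold a direct code pointer that would bypass an updated entrypoint. Stub booleans stand in for the real queries, and the surrounding cases from debugger.cc are omitted:

```cpp
enum class DeoptKind { kNothing, kSelectiveDeoptimization, kFullDeoptimization };

// Illustrative condensation of the decision in the hunk above.
DeoptKind RequiredDeoptimizationKind(bool has_any_compiled_code,
                                     bool may_use_direct_code_pointer) {
  if (!has_any_compiled_code) {
    // The interpreter is already in charge; a breakpoint needs no deopt.
    return DeoptKind::kNothing;
  }
  // A caller holding a direct code pointer never reloads the updated
  // entrypoint, so only full deoptimization guarantees the breakpoint fires.
  return may_use_direct_code_pointer ? DeoptKind::kFullDeoptimization
                                     : DeoptKind::kSelectiveDeoptimization;
}
```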
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fbcba1b..a291a09 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -117,6 +117,16 @@
return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
+bool JitCodeCache::ContainsMethod(ArtMethod* method) {
+ MutexLock mu(Thread::Current(), lock_);
+ for (auto& it : method_code_map_) {
+ if (it.second == method) {
+ return true;
+ }
+ }
+ return false;
+}
+
class ScopedCodeCacheWrite {
public:
explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
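
Note that `method_code_map_` is keyed by code pointer, so `ContainsMethod()` is a linear scan over the values; that is acceptable because the query sits on a slow path (the debugger's deoptimization decision). A toy model with `std::mutex` in place of ART's `Mutex`:

```cpp
#include <map>
#include <mutex>

struct CodeCacheSketch {
  std::mutex lock;
  std::map<const void*, int> method_code_map;  // code ptr -> method id (stub)

  // Reverse lookup is O(n); fine for a slow-path debugger query.
  bool ContainsMethod(int method) {
    std::lock_guard<std::mutex> mu(lock);
    for (const auto& it : method_code_map) {
      if (it.second == method) return true;
    }
    return false;
  }
};
```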
@@ -276,26 +286,36 @@
__builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
- method_code_map_.Put(code_ptr, method);
- // We have checked there was no collection in progress earlier. If we
- // were, setting the entry point of a method would be unsafe, as the collection
- // could delete it.
- DCHECK(!collection_in_progress_);
- method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
}
- VLOG(jit)
- << "JIT added "
- << PrettyMethod(method) << "@" << method
- << " ccache_size=" << PrettySize(CodeCacheSize()) << ": "
- << " dcache_size=" << PrettySize(DataCacheSize()) << ": "
- << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
- << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+ // We need to update the entry point in the runnable state for the instrumentation.
+ {
+ MutexLock mu(self, lock_);
+ method_code_map_.Put(code_ptr, method);
+ Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+ method, method_header->GetEntryPoint());
+ if (collection_in_progress_) {
+ // We need to update the live bitmap if there is a GC to ensure it sees this new
+ // code.
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+ }
+ VLOG(jit)
+ << "JIT added "
+ << PrettyMethod(method) << "@" << method
+ << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
+ << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
+ << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
+ << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+ }
return reinterpret_cast<uint8_t*>(method_header);
}
size_t JitCodeCache::CodeCacheSize() {
MutexLock mu(Thread::Current(), lock_);
+ return CodeCacheSizeLocked();
+}
+
+size_t JitCodeCache::CodeCacheSizeLocked() {
size_t bytes_allocated = 0;
mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
return bytes_allocated;
@@ -303,6 +323,10 @@
size_t JitCodeCache::DataCacheSize() {
MutexLock mu(Thread::Current(), lock_);
+ return DataCacheSizeLocked();
+}
+
+size_t JitCodeCache::DataCacheSizeLocked() {
size_t bytes_allocated = 0;
mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
return bytes_allocated;
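
The `*Locked` split introduced here follows a common pattern: the public accessor takes the mutex and delegates, while code already inside a locked region (like the `VLOG` added above) calls the private variant directly, since the lock is not reentrant. A minimal sketch:

```cpp
#include <cstddef>
#include <mutex>

class CacheSketch {
 public:
  std::size_t CodeCacheSize() {
    std::lock_guard<std::mutex> mu(lock_);
    return CodeCacheSizeLocked();
  }

 private:
  // Precondition: lock_ is held by the caller.
  std::size_t CodeCacheSizeLocked() { return bytes_allocated_; }

  std::mutex lock_;
  std::size_t bytes_allocated_ = 0;
};
```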
@@ -416,20 +440,24 @@
<< ", data=" << PrettySize(DataCacheSize());
}
- size_t map_size = 0;
- ScopedThreadSuspension sts(self, kSuspended);
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ // Wait for an existing collection, or let everyone know we are starting one.
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ if (WaitForPotentialCollectionToComplete(self)) {
+ return;
+ } else {
+ collection_in_progress_ = true;
+ }
+ }
// Walk over all compiled methods and set the entry points of these
// methods to interpreter.
{
MutexLock mu(self, lock_);
- if (WaitForPotentialCollectionToComplete(self)) {
- return;
- }
- collection_in_progress_ = true;
- map_size = method_code_map_.size();
for (auto& it : method_code_map_) {
- it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
}
for (ProfilingInfo* info : profiling_infos_) {
info->GetMethod()->SetProfilingInfo(nullptr);
@@ -440,16 +468,12 @@
{
Barrier barrier(0);
size_t threads_running_checkpoint = 0;
- {
- // Walking the stack requires the mutator lock.
- // We only take the lock when running the checkpoint and not waiting so that
- // when we go back to suspended, we can execute checkpoints that were requested
- // concurrently, and then move to waiting for our own checkpoint to finish.
- ScopedObjectAccess soa(self);
- MarkCodeClosure closure(this, &barrier);
- threads_running_checkpoint =
- Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
- }
+ MarkCodeClosure closure(this, &barrier);
+ threads_running_checkpoint =
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ // Now that we have run our checkpoint, move to a suspended state and wait
+ // for other threads to run the checkpoint.
+ ScopedThreadSuspension sts(self, kSuspended);
if (threads_running_checkpoint != 0) {
barrier.Increment(self, threads_running_checkpoint);
}
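
Moving the `ScopedThreadSuspension` after `RunCheckpoint()` means the requesting thread stays runnable while dispatching the checkpoint, then suspends only to wait for the other threads to check in. A minimal model of that barrier handshake (ART's `Barrier` API differs; this is illustrative only):

```cpp
#include <condition_variable>
#include <mutex>

// The GC thread registers how many threads must run the checkpoint, then
// blocks (suspended, in ART terms) until every worker has checked in.
class BarrierSketch {
 public:
  void Init(int count) {
    std::lock_guard<std::mutex> l(m_);
    count_ = count;
  }
  // Called by each worker after running the checkpoint closure.
  void Pass() {
    std::lock_guard<std::mutex> l(m_);
    if (--count_ == 0) cv_.notify_all();
  }
  // Called by the requester; loosely corresponds to barrier.Increment().
  void Wait() {
    std::unique_lock<std::mutex> l(m_);
    cv_.wait(l, [this] { return count_ == 0; });
  }

 private:
  std::mutex m_;
  std::condition_variable cv_;
  int count_ = 0;
};
```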
@@ -457,7 +481,6 @@
{
MutexLock mu(self, lock_);
- DCHECK_EQ(map_size, method_code_map_.size());
// Free unused compiled code, and restore the entry point of used compiled code.
{
ScopedCodeCacheWrite scc(code_map_.get());
@@ -467,7 +490,7 @@
uintptr_t allocation = FromCodeToAllocation(code_ptr);
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
if (GetLiveBitmap()->Test(allocation)) {
- method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+ instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
++it;
} else {
method->ClearCounter();
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index afff657..131446c 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -83,6 +83,9 @@
// Return true if the code cache contains this pc.
bool ContainsPc(const void* pc) const;
+ // Return true if the code cache contains this method.
+ bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
+
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size)
SHARED_REQUIRES(Locks::mutator_lock_)
@@ -163,6 +166,12 @@
// Free in the mspace allocations taken by 'method'.
void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
+ // Number of bytes allocated in the code cache.
+ size_t CodeCacheSizeLocked() REQUIRES(lock_);
+
+ // Number of bytes allocated in the data cache.
+ size_t DataCacheSizeLocked() REQUIRES(lock_);
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
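
`REQUIRES(lock_)` and `REQUIRES(!lock_)` expand to Clang thread-safety attributes, so `-Wthread-safety` statically rejects calling `CodeCacheSizeLocked()` without the lock or `CodeCacheSize()` with it held. A minimal sketch using the raw attributes the macros presumably wrap (compile with `clang++ -c -Wthread-safety`):

```cpp
#include <cstddef>

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

class JitCodeCacheSketch {
 public:
  // REQUIRES(!lock_): the caller must not hold the lock (it is taken inside).
  std::size_t CodeCacheSize() __attribute__((locks_excluded(lock_)));

 private:
  // REQUIRES(lock_): the caller must already hold the lock.
  std::size_t CodeCacheSizeLocked() __attribute__((requires_capability(lock_)));

  Mutex lock_;
};
```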
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index 96302fb..6d6a4f2 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -19,7 +19,7 @@
/// CHECK-START: void Main.InlineVoid() inliner (before)
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
/// CHECK-DAG: InvokeStaticOrDirect
- /// CHECK-DAG: InvokeStaticOrDirect [<<Const42>>,{{[ij]\d+}}]
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Const42>>{{(,[ij]\d+)?}}]
/// CHECK-START: void Main.InlineVoid() inliner (after)
/// CHECK-NOT: InvokeStaticOrDirect
@@ -31,7 +31,7 @@
/// CHECK-START: int Main.InlineParameter(int) inliner (before)
/// CHECK-DAG: <<Param:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Param>>{{(,[ij]\d+)?}}]
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.InlineParameter(int) inliner (after)
@@ -44,7 +44,7 @@
/// CHECK-START: long Main.InlineWideParameter(long) inliner (before)
/// CHECK-DAG: <<Param:j\d+>> ParameterValue
- /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<Param>>{{(,[ij]\d+)?}}]
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: long Main.InlineWideParameter(long) inliner (after)
@@ -57,7 +57,7 @@
/// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (before)
/// CHECK-DAG: <<Param:l\d+>> ParameterValue
- /// CHECK-DAG: <<Result:l\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Result:l\d+>> InvokeStaticOrDirect [<<Param>>{{(,[ij]\d+)?}}]
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (after)
@@ -128,8 +128,8 @@
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Add:i\d+>> InvokeStaticOrDirect [<<Const1>>,<<Const3>>,{{[ij]\d+}}]
- /// CHECK-DAG: <<Sub:i\d+>> InvokeStaticOrDirect [<<Const5>>,<<Const3>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Add:i\d+>> InvokeStaticOrDirect [<<Const1>>,<<Const3>>{{(,[ij]\d+)?}}]
+ /// CHECK-DAG: <<Sub:i\d+>> InvokeStaticOrDirect [<<Const5>>,<<Const3>>{{(,[ij]\d+)?}}]
/// CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
/// CHECK-DAG: Return [<<Phi>>]
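
These test updates replace the mandatory trailing `,[ij]\d+` (the current-method input) with the optional group `{{(,[ij]\d+)?}}`, so the expectations pass whether or not sharpening has removed that input. The regex semantics, demonstrated standalone with `std::regex` (Checker's own matcher is not used here):

```cpp
#include <iostream>
#include <regex>

int main() {
  // "[<<Param>>{{(,[ij]\d+)?}}]" conceptually becomes a pattern like this:
  // one int-typed vreg, then an optional ",i<n>" or ",j<n>" suffix.
  std::regex pattern(R"(\[i\d+(,[ij]\d+)?\])");
  std::cout << std::regex_match("[i3,j7]", pattern) << "\n";  // 1: extra input present
  std::cout << std::regex_match("[i3]", pattern) << "\n";     // 1: extra input removed
  std::cout << std::regex_match("[i3,x7]", pattern) << "\n";  // 0: wrong vreg type
}
```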
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index e6aab63..cff6273 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -67,14 +67,14 @@
*/
/// CHECK-START: void Main.invokeStaticNotInlined() builder (after)
- /// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
- /// CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
- /// CHECK-DAG: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+ /// CHECK: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
+ /// CHECK: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
+ /// CHECK: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
/// CHECK-START: void Main.invokeStaticNotInlined() inliner (after)
- /// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
- /// CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
- /// CHECK-DAG: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+ /// CHECK: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
+ /// CHECK: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
+ /// CHECK: InvokeStaticOrDirect [{{([ij]\d+,)?}}<<ClinitCheck>>]
// The following checks ensure the clinit check and load class
// instructions added by the builder are pruned by the
@@ -83,7 +83,7 @@
// before the next pass (liveness analysis) instead.
/// CHECK-START: void Main.invokeStaticNotInlined() liveness (before)
- /// CHECK-DAG: InvokeStaticOrDirect
+ /// CHECK: InvokeStaticOrDirect
/// CHECK-START: void Main.invokeStaticNotInlined() liveness (before)
/// CHECK-NOT: LoadClass