Store inline caches in StackHandleScope<> in HInliner.
Avoid allocating a managed-heap ObjectArray<Class> holder for every
inline cache the inliner inspects; the classes are now kept as stack
handles instead.
Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 181943478
Change-Id: I7ce65c93ad2f59490dbfa2aaccba98b6ca1fd585
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 4d45739..7dcca75 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -321,15 +321,9 @@
}
HInliner::InlineCacheType HInliner::GetInlineCacheType(
- const Handle<mirror::ObjectArray<mirror::Class>>& classes)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint8_t number_of_types = 0;
- for (; number_of_types < InlineCache::kIndividualCacheSize; ++number_of_types) {
- if (classes->Get(number_of_types) == nullptr) {
- break;
- }
- }
-
+ const StackHandleScope<InlineCache::kIndividualCacheSize>& classes) {
+ DCHECK_EQ(classes.NumberOfReferences(), InlineCache::kIndividualCacheSize);
+ uint8_t number_of_types = InlineCache::kIndividualCacheSize - classes.RemainingSlots();
if (number_of_types == 0) {
return kInlineCacheUninitialized;
} else if (number_of_types == 1) {
@@ -341,10 +335,11 @@
}
}
-static ObjPtr<mirror::Class> GetMonomorphicType(Handle<mirror::ObjectArray<mirror::Class>> classes)
+static inline ObjPtr<mirror::Class> GetMonomorphicType(
+ const StackHandleScope<InlineCache::kIndividualCacheSize>& classes)
REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(classes->Get(0) != nullptr);
- return classes->Get(0);
+ DCHECK(classes.GetReference(0) != nullptr);
+ return classes.GetReference(0)->AsClass();
}
ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
@@ -522,26 +517,6 @@
return result;
}
-static Handle<mirror::ObjectArray<mirror::Class>> AllocateInlineCacheHolder(
- const DexCompilationUnit& compilation_unit,
- StackHandleScope<1>* hs)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- ClassLinker* class_linker = compilation_unit.GetClassLinker();
- Handle<mirror::ObjectArray<mirror::Class>> inline_cache = hs->NewHandle(
- mirror::ObjectArray<mirror::Class>::Alloc(
- self,
- GetClassRoot<mirror::ObjectArray<mirror::Class>>(class_linker),
- InlineCache::kIndividualCacheSize));
- if (inline_cache == nullptr) {
- // We got an OOME. Just clear the exception, and don't inline.
- DCHECK(self->IsExceptionPending());
- self->ClearException();
- VLOG(compiler) << "Out of memory in the compiler when trying to inline";
- }
- return inline_cache;
-}
-
bool HInliner::UseOnlyPolymorphicInliningWithNoDeopt() {
// If we are compiling AOT or OSR, pretend the call using inline caches is polymorphic and
// do not generate a deopt.
@@ -575,14 +550,13 @@
return false;
}
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::ObjectArray<mirror::Class>> inline_cache;
+ StackHandleScope<InlineCache::kIndividualCacheSize> classes(Thread::Current());
// The Zygote JIT compiles based on a profile, so we shouldn't use runtime inline caches
// for it.
InlineCacheType inline_cache_type =
(Runtime::Current()->IsAotCompiler() || Runtime::Current()->IsZygote())
- ? GetInlineCacheAOT(invoke_instruction, &hs, &inline_cache)
- : GetInlineCacheJIT(invoke_instruction, &hs, &inline_cache);
+ ? GetInlineCacheAOT(invoke_instruction, &classes)
+ : GetInlineCacheJIT(invoke_instruction, &classes);
switch (inline_cache_type) {
case kInlineCacheNoData: {
@@ -603,15 +577,15 @@
case kInlineCacheMonomorphic: {
MaybeRecordStat(stats_, MethodCompilationStat::kMonomorphicCall);
if (UseOnlyPolymorphicInliningWithNoDeopt()) {
- return TryInlinePolymorphicCall(invoke_instruction, inline_cache);
+ return TryInlinePolymorphicCall(invoke_instruction, classes);
} else {
- return TryInlineMonomorphicCall(invoke_instruction, inline_cache);
+ return TryInlineMonomorphicCall(invoke_instruction, classes);
}
}
case kInlineCachePolymorphic: {
MaybeRecordStat(stats_, MethodCompilationStat::kPolymorphicCall);
- return TryInlinePolymorphicCall(invoke_instruction, inline_cache);
+ return TryInlinePolymorphicCall(invoke_instruction, classes);
}
case kInlineCacheMegamorphic: {
@@ -636,9 +610,7 @@
HInliner::InlineCacheType HInliner::GetInlineCacheJIT(
HInvoke* invoke_instruction,
- StackHandleScope<1>* hs,
- /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
DCHECK(codegen_->GetCompilerOptions().IsJitCompiler());
ArtMethod* caller = graph_->GetArtMethod();
@@ -651,23 +623,18 @@
return kInlineCacheNoData;
}
- *inline_cache = AllocateInlineCacheHolder(caller_compilation_unit_, hs);
- if (inline_cache->Get() == nullptr) {
- // We can't extract any data if we failed to allocate;
- return kInlineCacheNoData;
- } else {
- Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(
- *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()),
- *inline_cache);
- return GetInlineCacheType(*inline_cache);
- }
+ Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(
+ *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()),
+ classes);
+ return GetInlineCacheType(*classes);
}
HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
HInvoke* invoke_instruction,
- StackHandleScope<1>* hs,
- /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
+ DCHECK_EQ(classes->NumberOfReferences(), InlineCache::kIndividualCacheSize);
+ DCHECK_EQ(classes->RemainingSlots(), InlineCache::kIndividualCacheSize);
+
const ProfileCompilationInfo* pci = codegen_->GetCompilerOptions().GetProfileCompilationInfo();
if (pci == nullptr) {
return kInlineCacheNoData;
@@ -695,13 +662,6 @@
}
DCHECK_LE(dex_pc_data.classes.size(), InlineCache::kIndividualCacheSize);
- Handle<mirror::ObjectArray<mirror::Class>> ic =
- AllocateInlineCacheHolder(caller_compilation_unit_, hs);
- if (ic == nullptr) {
- // We can't extract any data if we failed to allocate;
- return kInlineCacheNoData;
- }
-
Thread* self = Thread::Current();
// We need to resolve the class relative to the containing dex file.
// So first, build a mapping from the index of dex file in the profile to
@@ -729,7 +689,6 @@
// Walk over the classes and resolve them. If we cannot find a type we return
// kInlineCacheMissingTypes.
- int ic_index = 0;
for (const ProfileCompilationInfo::ClassReference& class_ref : dex_pc_data.classes) {
ObjPtr<mirror::DexCache> dex_cache =
dex_profile_index_to_dex_cache[class_ref.dex_profile_index];
@@ -745,7 +704,8 @@
dex_cache,
caller_compilation_unit_.GetClassLoader().Get());
if (clazz != nullptr) {
- ic->Set(ic_index++, clazz);
+ DCHECK_NE(classes->RemainingSlots(), 0u);
+ classes->NewHandle(clazz);
} else {
VLOG(compiler) << "Could not resolve class from inline cache in AOT mode "
<< invoke_instruction->GetMethodReference().PrettyMethod()
@@ -756,8 +716,7 @@
}
}
- *inline_cache = ic;
- return GetInlineCacheType(ic);
+ return GetInlineCacheType(*classes);
}
HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker,
@@ -806,8 +765,9 @@
return resolved_method;
}
-bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
- Handle<mirror::ObjectArray<mirror::Class>> classes) {
+bool HInliner::TryInlineMonomorphicCall(
+ HInvoke* invoke_instruction,
+ const StackHandleScope<InlineCache::kIndividualCacheSize>& classes) {
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
@@ -827,14 +787,13 @@
graph_->GetHandleCache()->NewHandle(GetMonomorphicType(classes));
ArtMethod* resolved_method = ResolveMethodFromInlineCache(
monomorphic_type, invoke_instruction, pointer_size);
-
if (resolved_method == nullptr) {
// Bogus AOT profile, bail.
DCHECK(Runtime::Current()->IsAotCompiler());
return false;
}
- LOG_NOTE() << "Try inline monomorphic call to " << resolved_method->PrettyMethod();
+ LOG_NOTE() << "Try inline monomorphic call to " << resolved_method->PrettyMethod();
HInstruction* receiver = invoke_instruction->InputAt(0);
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
@@ -966,8 +925,9 @@
return compare;
}
-bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
- Handle<mirror::ObjectArray<mirror::Class>> classes) {
+bool HInliner::TryInlinePolymorphicCall(
+ HInvoke* invoke_instruction,
+ const StackHandleScope<InlineCache::kIndividualCacheSize>& classes) {
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
@@ -980,14 +940,13 @@
bool all_targets_inlined = true;
bool one_target_inlined = false;
- for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- if (classes->Get(i) == nullptr) {
- break;
- }
- ArtMethod* method = nullptr;
-
- Handle<mirror::Class> handle = graph_->GetHandleCache()->NewHandle(classes->Get(i));
- method = ResolveMethodFromInlineCache(handle, invoke_instruction, pointer_size);
+ DCHECK_EQ(classes.NumberOfReferences(), InlineCache::kIndividualCacheSize);
+ uint8_t number_of_types = InlineCache::kIndividualCacheSize - classes.RemainingSlots();
+ for (size_t i = 0; i != number_of_types; ++i) {
+ DCHECK(classes.GetReference(i) != nullptr);
+ Handle<mirror::Class> handle =
+ graph_->GetHandleCache()->NewHandle(classes.GetReference(i)->AsClass());
+ ArtMethod* method = ResolveMethodFromInlineCache(handle, invoke_instruction, pointer_size);
if (method == nullptr) {
DCHECK(Runtime::Current()->IsAotCompiler());
// AOT profile is bogus. This loop expects to iterate over all entries,
@@ -1020,8 +979,7 @@
// we deoptimize instead of keeping the original invoke instruction.
bool deoptimize = !UseOnlyPolymorphicInliningWithNoDeopt() &&
all_targets_inlined &&
- (i != InlineCache::kIndividualCacheSize - 1) &&
- (classes->Get(i + 1) == nullptr);
+ (i + 1 == number_of_types);
HInstruction* compare = AddTypeGuard(receiver,
cursor,
@@ -1141,7 +1099,7 @@
bool HInliner::TryInlinePolymorphicCallToSameTarget(
HInvoke* invoke_instruction,
- Handle<mirror::ObjectArray<mirror::Class>> classes) {
+ const StackHandleScope<InlineCache::kIndividualCacheSize>& classes) {
// This optimization only works under JIT for now.
if (!codegen_->GetCompilerOptions().IsJitCompiler()) {
return false;
@@ -1157,13 +1115,13 @@
// Check whether we are actually calling the same method among
// the different types seen.
- for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- if (classes->Get(i) == nullptr) {
- break;
- }
+ DCHECK_EQ(classes.NumberOfReferences(), InlineCache::kIndividualCacheSize);
+ uint8_t number_of_types = InlineCache::kIndividualCacheSize - classes.RemainingSlots();
+ for (size_t i = 0; i != number_of_types; ++i) {
+ DCHECK(classes.GetReference(i) != nullptr);
ArtMethod* new_method = nullptr;
if (invoke_instruction->IsInvokeInterface()) {
- new_method = classes->Get(i)->GetImt(pointer_size)->Get(
+ new_method = classes.GetReference(i)->AsClass()->GetImt(pointer_size)->Get(
method_index, pointer_size);
if (new_method->IsRuntimeMethod()) {
// Bail out as soon as we see a conflict trampoline in one of the target's
@@ -1172,7 +1130,8 @@
}
} else {
DCHECK(invoke_instruction->IsInvokeVirtual());
- new_method = classes->Get(i)->GetEmbeddedVTableEntry(method_index, pointer_size);
+ new_method =
+ classes.GetReference(i)->AsClass()->GetEmbeddedVTableEntry(method_index, pointer_size);
}
DCHECK(new_method != nullptr);
if (actual_method == nullptr) {