Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc                 1
-rw-r--r--  compiler/optimizing/code_generator.h                  6
-rw-r--r--  compiler/optimizing/code_generator_arm.cc            20
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc          17
-rw-r--r--  compiler/optimizing/code_generator_mips.cc           10
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc         10
-rw-r--r--  compiler/optimizing/code_generator_x86.cc            16
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc         17
-rw-r--r--  compiler/optimizing/inliner.cc                        73
-rw-r--r--  compiler/optimizing/inliner.h                         20
-rw-r--r--  compiler/optimizing/nodes.h                           42
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc             4
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h        2
-rw-r--r--  compiler/optimizing/register_allocation_resolver.cc    4
14 files changed, 234 insertions, 8 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9f6b78a82c..fa6a5225e7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -304,6 +304,7 @@ void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
SetFrameSize(RoundUp(
first_register_slot_in_slow_path_
+ maximum_safepoint_spill_size
+ + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
+ FrameEntrySpillSize(),
kStackAlignment));
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a5d19abe92..4b11e7c699 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -307,6 +307,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
return POPCOUNT(GetSlowPathSpills(locations, core_registers));
}
+ size_t GetStackOffsetOfShouldDeoptimizeFlag() const {
+ DCHECK(GetGraph()->HasShouldDeoptimizeFlag());
+ DCHECK_GE(GetFrameSize(), FrameEntrySpillSize() + kShouldDeoptimizeFlagSize);
+ return GetFrameSize() - FrameEntrySpillSize() - kShouldDeoptimizeFlagSize;
+ }
+
// Record native to dex mapping for a suspend point. Required by runtime.
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
// Check whether we have already recorded mapping at this PC.
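The DCHECK and the subtraction in the new accessor only make sense together with the frame-size change in code_generator.cc: the 4-byte flag slot is added into the frame and sits directly below the frame-entry spills. A minimal stand-alone sketch of that arithmetic (names and sizes here are illustrative, not real ART values):

#include <cstddef>

constexpr size_t kShouldDeoptimizeFlagSize = 4;  // the 4-byte stack slot

constexpr size_t RoundUp(size_t x, size_t alignment) {
  return (x + alignment - 1) / alignment * alignment;
}

// Mirrors InitializeCodeGeneration's SetFrameSize(RoundUp(...)) followed by
// GetStackOffsetOfShouldDeoptimizeFlag(): the flag slot ends up immediately
// below the frame-entry spill area.
size_t FlagOffsetFromSp(size_t first_register_slot_in_slow_path,
                        size_t maximum_safepoint_spill_size,
                        size_t frame_entry_spill_size,
                        size_t stack_alignment) {
  const size_t frame_size = RoundUp(first_register_slot_in_slow_path +
                                        maximum_safepoint_spill_size +
                                        kShouldDeoptimizeFlagSize +
                                        frame_entry_spill_size,
                                    stack_alignment);
  return frame_size - frame_entry_spill_size - kShouldDeoptimizeFlagSize;
}

For example, with 48 bytes of slow-path/safepoint data, 32 bytes of entry spills, and 16-byte alignment, the frame grows from 80 to 96 bytes and the flag would be read at SP + 60.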
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 1f5981682b..ed6eef1b55 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1329,6 +1329,13 @@ void CodeGeneratorARM::GenerateFrameEntry() {
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
__ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ mov(IP, ShifterOperand(0));
+ __ StoreToOffset(kStoreWord, IP, SP, -kShouldDeoptimizeFlagSize);
+ }
+
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
@@ -1944,6 +1951,19 @@ void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
+void LocationsBuilderARM::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ LoadFromOffset(kLoadWord,
+ flag->GetLocations()->Out().AsRegister<Register>(),
+ SP,
+ codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
+}
+
void LocationsBuilderARM::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
if (Primitive::IsFloatingPointType(select->GetType())) {
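Note the two different addressing modes used for the same slot on ARM: GenerateFrameEntry stores through the SP that exists right after the callee-save pushes (hence the negative offset), while VisitShouldDeoptimizeFlag loads through the fully adjusted SP. A small self-contained check with made-up sizes (illustrative arithmetic only, not ART code) that both refer to the same word:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t sp_on_entry = 0x1000;    // hypothetical SP when the method is entered
  const uintptr_t entry_spill_size = 32;   // callee-save core + FP registers (example)
  const uintptr_t frame_size = 96;         // already includes the 4-byte flag slot
  const uintptr_t flag_size = 4;           // kShouldDeoptimizeFlagSize

  // Frame entry: spills pushed, remaining frame not yet allocated.
  const uintptr_t sp_after_spills = sp_on_entry - entry_spill_size;
  const uintptr_t store_addr = sp_after_spills - flag_size;       // StoreToOffset(..., SP, -4)

  // After the SP adjustment, the guard reads via the accessor's offset.
  const uintptr_t sp_final = sp_on_entry - frame_size;
  const uintptr_t offset = frame_size - entry_spill_size - flag_size;
  const uintptr_t load_addr = sp_final + offset;                  // LoadFromOffset(..., SP, offset)

  assert(store_addr == load_addr);  // both address the same stack word
  return 0;
}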
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ab6a33fbd9..6eebd69a04 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1273,6 +1273,12 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
frame_size - GetCoreSpillSize());
GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
frame_size - FrameEntrySpillSize());
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ Register wzr = Register(VIXLRegCodeFromART(WZR), kWRegSize);
+ __ Str(wzr, MemOperand(sp, GetStackOffsetOfShouldDeoptimizeFlag()));
+ }
}
}
@@ -3235,6 +3241,17 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
+void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ Ldr(OutputRegister(flag),
+ MemOperand(sp, codegen_->GetStackOffsetOfShouldDeoptimizeFlag()));
+}
+
static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) {
return condition->IsCondition() &&
Primitive::IsFloatingPointType(condition->InputAt(0)->GetType());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 572d900909..61dabfabaa 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -4688,6 +4688,16 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
}
}
+void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
+void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b5e98714e6..b1f9b1db53 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2636,6 +2636,16 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
+void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
+void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
if (Primitive::IsFloatingPointType(select->GetType())) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 12aa03c4af..d6e92ccb81 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1059,6 +1059,11 @@ void CodeGeneratorX86::GenerateFrameEntry() {
}
}
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(ESP, -kShouldDeoptimizeFlagSize), Immediate(0));
+ }
+
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
@@ -1676,6 +1681,17 @@ void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
+void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ movl(flag->GetLocations()->Out().AsRegister<Register>(),
+ Address(ESP, codegen_->GetStackOffsetOfShouldDeoptimizeFlag()));
+}
+
static bool SelectCanUseCMOV(HSelect* select) {
// There are no conditional move instructions for XMMs.
if (Primitive::IsFloatingPointType(select->GetType())) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 22f7f6b52b..4474decf59 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1326,6 +1326,12 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(CpuRegister(RSP), xmm_spill_location - kShouldDeoptimizeFlagSize),
+ Immediate(0));
+ }
+
// Save the current method if we need it. Note that we do not
// do this in HCurrentMethod, as the instruction might have been removed
// in the SSA graph.
@@ -1747,6 +1753,17 @@ void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
+void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ movl(flag->GetLocations()->Out().AsRegister<CpuRegister>(),
+ Address(CpuRegister(RSP), codegen_->GetStackOffsetOfShouldDeoptimizeFlag()));
+}
+
static bool SelectCanUseCMOV(HSelect* select) {
// There are no conditional move instructions for XMMs.
if (Primitive::IsFloatingPointType(select->GetType())) {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 01e89bb304..8d93867230 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -292,6 +292,21 @@ static bool IsPolymorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
classes->Get(InlineCache::kIndividualCacheSize - 1) == nullptr;
}
+ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
+ if (!resolved_method->HasSingleImplementation()) {
+ return nullptr;
+ }
+ if (Runtime::Current()->IsAotCompiler()) {
+ // No CHA-based devirtualization for the AOT compiler (yet).
+ return nullptr;
+ }
+ if (outermost_graph_->IsCompilingOsr()) {
+ // We do not support HDeoptimize in OSR methods.
+ return nullptr;
+ }
+ return resolved_method->GetSingleImplementation();
+}
+
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved()) {
return false; // Don't bother to move further if we know the method is unresolved.
@@ -317,10 +332,29 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
}
+ bool cha_devirtualize = false;
+ if (actual_method == nullptr) {
+ ArtMethod* method = TryCHADevirtualization(resolved_method);
+ if (method != nullptr) {
+ cha_devirtualize = true;
+ actual_method = method;
+ }
+ }
+
if (actual_method != nullptr) {
- bool result = TryInlineAndReplace(invoke_instruction, actual_method, /* do_rtp */ true);
+ bool result = TryInlineAndReplace(invoke_instruction,
+ actual_method,
+ /* do_rtp */ true,
+ cha_devirtualize);
if (result && !invoke_instruction->IsInvokeStaticOrDirect()) {
- MaybeRecordStat(kInlinedInvokeVirtualOrInterface);
+ if (cha_devirtualize) {
+ // Add a dependency due to the devirtualization: we have assumed that
+ // resolved_method has a single implementation.
+ outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
+ MaybeRecordStat(kCHAInline);
+ } else {
+ MaybeRecordStat(kInlinedInvokeVirtualOrInterface);
+ }
}
return result;
}
@@ -438,7 +472,10 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- if (!TryInlineAndReplace(invoke_instruction, resolved_method, /* do_rtp */ false)) {
+ if (!TryInlineAndReplace(invoke_instruction,
+ resolved_method,
+ /* do_rtp */ false,
+ /* cha_devirtualize */ false)) {
return false;
}
@@ -465,6 +502,25 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
return true;
}
+void HInliner::AddCHAGuard(HInstruction* invoke_instruction,
+ uint32_t dex_pc,
+ HInstruction* cursor,
+ HBasicBlock* bb_cursor) {
+ HInstruction* deopt_flag = new (graph_->GetArena()) HShouldDeoptimizeFlag(dex_pc);
+ HInstruction* should_deopt = new (graph_->GetArena()) HNotEqual(
+ deopt_flag, graph_->GetIntConstant(0, dex_pc));
+ HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(should_deopt, dex_pc);
+
+ if (cursor != nullptr) {
+ bb_cursor->InsertInstructionAfter(deopt_flag, cursor);
+ } else {
+ bb_cursor->InsertInstructionBefore(deopt_flag, bb_cursor->GetFirstInstruction());
+ }
+ bb_cursor->InsertInstructionAfter(should_deopt, deopt_flag);
+ bb_cursor->InsertInstructionAfter(deopt, should_deopt);
+ deopt->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+}
+
HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
HInstruction* cursor,
HBasicBlock* bb_cursor,
@@ -787,8 +843,14 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
return true;
}
-bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* method, bool do_rtp) {
+bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
+ ArtMethod* method,
+ bool do_rtp,
+ bool cha_devirtualize) {
HInstruction* return_replacement = nullptr;
+ uint32_t dex_pc = invoke_instruction->GetDexPc();
+ HInstruction* cursor = invoke_instruction->GetPrevious();
+ HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
if (!TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
// Turn an invoke-interface into an invoke-virtual. An invoke-virtual is always
@@ -826,6 +888,9 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* metho
return false;
}
}
+ if (cha_devirtualize) {
+ AddCHAGuard(invoke_instruction, dex_pc, cursor, bb_cursor);
+ }
if (return_replacement != nullptr) {
invoke_instruction->ReplaceWith(return_replacement);
}
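In source-level terms, the guard that AddCHAGuard builds (HShouldDeoptimizeFlag, HNotEqual, HDeoptimize) amounts to the following, sketched with hypothetical stand-in functions so it compiles on its own; this is not the ART API:

#include <cstdio>

// Stand-ins so the sketch is self-contained.
static int should_deoptimize_flag = 0;   // the zero-initialized 4-byte stack slot
static void Deoptimize() { std::puts("deoptimize: transfer to the interpreter"); }
static int InlinedSingleImplementation(int x) { return x + 1; }  // the inlined callee

// What the inserted guard means for a devirtualized, inlined call site:
// read the flag (HShouldDeoptimizeFlag), compare it with 0 (HNotEqual), and
// deoptimize when it is non-zero (HDeoptimize); otherwise run the inlined body.
int GuardedCallSite(int x) {
  if (should_deoptimize_flag != 0) {
    Deoptimize();
  }
  return InlinedSingleImplementation(x);
}

The runtime side that actually sets the slot when a single-implementation assumption is invalidated is not part of this diff.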
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index a2b4fc96c4..ffebd97cb8 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -62,8 +62,12 @@ class HInliner : public HOptimization {
// Try to inline `resolved_method` in place of `invoke_instruction`. `do_rtp` is whether
// reference type propagation can run after the inlining. If the inlining is successful, this
- // method will replace and remove the `invoke_instruction`.
- bool TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* resolved_method, bool do_rtp)
+ // method will replace and remove the `invoke_instruction`. If `cha_devirtualize` is true,
+ // a CHA guard needs to be added for the inlining.
+ bool TryInlineAndReplace(HInvoke* invoke_instruction,
+ ArtMethod* resolved_method,
+ bool do_rtp,
+ bool cha_devirtualize)
REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInline(HInvoke* invoke_instruction,
@@ -118,6 +122,18 @@ class HInliner : public HOptimization {
Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Try CHA-based devirtualization to change virtual method calls into
+ // direct calls.
+ // Returns the actual method that resolved_method can be devirtualized to.
+ ArtMethod* TryCHADevirtualization(ArtMethod* resolved_method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Add a CHA guard for a CHA-based devirtualized call. A CHA guard checks the
+ // should_deoptimize flag and deoptimizes if the flag is set.
+ void AddCHAGuard(HInstruction* invoke_instruction,
+ uint32_t dex_pc,
+ HInstruction* cursor,
+ HBasicBlock* bb_cursor);
HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
HInstruction* receiver,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7ab04e15fc..e3f4d8f035 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -333,7 +333,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
- osr_(osr) {
+ osr_(osr),
+ cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
@@ -536,6 +537,20 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
bool IsCompilingOsr() const { return osr_; }
+ ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
+ return cha_single_implementation_list_;
+ }
+
+ void AddCHASingleImplementationDependency(ArtMethod* method) {
+ cha_single_implementation_list_.insert(method);
+ }
+
+ bool HasShouldDeoptimizeFlag() const {
+ // TODO: if all CHA guards can be eliminated, there is no need for the flag
+ // even if cha_single_implementation_list_ is not empty.
+ return !cha_single_implementation_list_.empty();
+ }
+
bool HasTryCatch() const { return has_try_catch_; }
void SetHasTryCatch(bool value) { has_try_catch_ = value; }
@@ -672,6 +687,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// compiled code entries which the interpreter can directly jump to.
const bool osr_;
+ // List of methods that are assumed to have a single implementation.
+ ArenaSet<ArtMethod*> cha_single_implementation_list_;
+
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
friend class HInliner; // For the reverse post order.
@@ -1240,6 +1258,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(CurrentMethod, Instruction) \
+ M(ShouldDeoptimizeFlag, Instruction) \
M(Deoptimize, Instruction) \
M(Div, BinaryOperation) \
M(DivZeroCheck, Instruction) \
@@ -2875,6 +2894,27 @@ class HDeoptimize FINAL : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
+// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
+// The compiled code checks this flag in a guard before each devirtualized call and,
+// if the flag is set, deoptimizes.
+// The flag occupies a 4-byte slot on the stack.
+// TODO: allocate a register for this flag.
+class HShouldDeoptimizeFlag FINAL : public HExpression<0> {
+ public:
+ // TODO: use SideEffects to aid eliminating some CHA guards.
+ explicit HShouldDeoptimizeFlag(uint32_t dex_pc)
+ : HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
+ }
+
+ // We don't eliminate CHA guards yet.
+ bool CanBeMoved() const OVERRIDE { return false; }
+
+ DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HShouldDeoptimizeFlag);
+};
+
// Represents the ArtMethod that was passed as a first argument to
// the method. It is used by instructions that depend on it, like
// instructions that work with the dex cache.
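What "single implementation" means for the list above, illustrated with plain C++ classes as an analogy for the Java hierarchy the runtime tracks (not ART code):

// While Base::Compute() has exactly one implementation, calls through a
// Base* may be devirtualized to it and inlined; the graph then records the
// method in cha_single_implementation_list_.
struct Base {
  virtual int Compute() { return 0; }
  virtual ~Base() = default;
};
struct OnlyImpl : Base {
  int Compute() override { return 42; }
};
// Loading another override later, e.g.
//   struct LaterLoaded : Base { int Compute() override { return 7; } };
// breaks the assumption; the runtime is then expected to set the
// should_deoptimize slot of dependent frames so the guarded code deoptimizes.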
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2382b728df..e15e33dc78 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1208,7 +1208,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
code_allocator.GetMemory().data(),
code_allocator.GetSize(),
osr,
- roots);
+ roots,
+ codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+ codegen->GetGraph()->GetCHASingleImplementationList());
if (code == nullptr) {
code_cache->ClearData(self, stack_map_data, roots_data);
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index c8d1ce0bd5..203b1ec7ec 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -27,6 +27,7 @@ namespace art {
enum MethodCompilationStat {
kAttemptCompilation = 0,
+ kCHAInline,
kCompiled,
kInlinedInvoke,
kReplacedInvokeWithSimplePattern,
@@ -106,6 +107,7 @@ class OptimizingCompilerStats {
std::string name;
switch (stat) {
case kAttemptCompilation : name = "AttemptCompilation"; break;
+ case kCHAInline : name = "CHAInline"; break;
case kCompiled : name = "Compiled"; break;
case kInlinedInvoke : name = "InlinedInvoke"; break;
case kReplacedInvokeWithSimplePattern: name = "ReplacedInvokeWithSimplePattern"; break;
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 5991791a15..59523a93a0 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -87,6 +87,10 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
// Adjust the stack slot, now that we know the number of them for each type.
// The way this implementation lays out the stack is the following:
// [parameter slots ]
+ // [art method (caller) ]
+ // [entry spill (core) ]
+ // [entry spill (float) ]
+ // [should_deoptimize flag] (this is optional)
// [catch phi spill slots ]
// [double spill slots ]
// [long spill slots ]