Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.h                           |   4
-rw-r--r--  compiler/optimizing/code_generator_arm.cc               |   8
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc             |  10
-rw-r--r--  compiler/optimizing/code_generator_mips.cc              |   9
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc            |  11
-rw-r--r--  compiler/optimizing/code_generator_x86.cc               |   7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc            |   9
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc            |   9
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc           |  39
-rw-r--r--  compiler/optimizing/induction_var_analysis.h            |  11
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc      |  85
-rw-r--r--  compiler/optimizing/induction_var_range.cc              |  17
-rw-r--r--  compiler/optimizing/induction_var_range.h               |  10
-rw-r--r--  compiler/optimizing/inliner.h                           |   4
-rw-r--r--  compiler/optimizing/loop_optimization.cc                |  94
-rw-r--r--  compiler/optimizing/loop_optimization.h                 |  10
-rw-r--r--  compiler/optimizing/nodes.cc                            |   2
-rw-r--r--  compiler/optimizing/nodes.h                             |  21
-rw-r--r--  compiler/optimizing/optimizing_cfi_test_expected.inc    | 317
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc              |  14
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h              |   2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc       |   4
-rw-r--r--  compiler/optimizing/reference_type_propagation.h        |   6
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc  |   8
-rw-r--r--  compiler/optimizing/ssa_builder.h                       |   4
25 files changed, 425 insertions, 290 deletions
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 580ef72767..f896f1199e 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -43,7 +43,7 @@ class HGraphBuilder : public ValueObject {
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
Handle<mirror::DexCache> dex_cache,
- StackHandleScopeCollection* handles)
+ VariableSizedHandleScope* handles)
: graph_(graph),
dex_file_(dex_file),
code_item_(code_item),
@@ -68,7 +68,7 @@ class HGraphBuilder : public ValueObject {
// Only for unit testing.
HGraphBuilder(HGraph* graph,
const DexFile::CodeItem& code_item,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
Primitive::Type return_type = Primitive::kPrimInt)
: graph_(graph),
dex_file_(nullptr),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9870876879..77d6f23fff 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1129,7 +1129,13 @@ void CodeGeneratorARM::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
- __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
+
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
+ }
}
void CodeGeneratorARM::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 969d653f97..f02b028541 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1046,7 +1046,15 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
// ... : other preserved fp registers.
// ... : reserved frame space.
// sp[0] : current method.
- __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
+
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
+ } else {
+ __ Claim(frame_size);
+ }
GetAssembler()->cfi().AdjustCFAOffset(frame_size);
GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
frame_size - GetCoreSpillSize());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b826a2c537..e336df8c6c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -743,9 +743,12 @@ void CodeGeneratorMIPS::GenerateFrameEntry() {
// TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
}
- // Store the current method pointer.
- // TODO: can we not do this if RequiresCurrentMethod() returns false?
- __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ }
}
void CodeGeneratorMIPS::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 02576bda67..010bf24232 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -556,9 +556,14 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
- static_assert(IsInt<16>(kCurrentMethodStackOffset),
- "kCurrentMethodStackOffset must fit into int16_t");
- __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ static_assert(IsInt<16>(kCurrentMethodStackOffset),
+ "kCurrentMethodStackOffset must fit into int16_t");
+ __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ }
}
void CodeGeneratorMIPS64::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0b23599665..960f01ce9d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -898,7 +898,12 @@ void CodeGeneratorX86::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
- __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+ }
}
void CodeGeneratorX86::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 28638d721d..665d028338 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1140,8 +1140,13 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
- __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
- CpuRegister(kMethodRegisterArgument));
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
+ CpuRegister(kMethodRegisterArgument));
+ }
}
void CodeGeneratorX86_64::GenerateFrameExit() {
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index aa3f26809a..adfe09ba9f 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -343,14 +343,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
for (i.Advance(); !i.Done(); i.Advance()) {
HInstruction* inst = i.Current();
DCHECK(!inst->IsControlFlow());
- if (!inst->HasSideEffects()
- && !inst->CanThrow()
- && !inst->IsSuspendCheck()
- && !inst->IsNativeDebugInfo()
- // If we added an explicit barrier then we should keep it.
- && !inst->IsMemoryBarrier()
- && !inst->IsParameterValue()
- && !inst->HasUses()) {
+ if (inst->IsDeadAndRemovable()) {
block->RemoveInstruction(inst);
MaybeRecordStat(MethodCompilationStat::kRemovedDeadInstruction);
}
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index c501ccf80f..55fcb12fa8 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -87,11 +87,12 @@ HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
: HOptimization(graph, kInductionPassName),
global_depth_(0),
stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
- scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
map_(std::less<HInstruction*>(),
graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
cycle_(std::less<HInstruction*>(),
graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ type_(Primitive::kPrimVoid),
induction_(std::less<HLoopInformation*>(),
graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
}
@@ -103,7 +104,6 @@ void HInductionVarAnalysis::Run() {
for (HReversePostOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
HBasicBlock* graph_block = it_graph.Current();
// Don't analyze irreducible loops.
- // TODO(ajcbik): could/should we remove this restriction?
if (graph_block->IsLoopHeader() && !graph_block->GetLoopInformation()->IsIrreducible()) {
VisitLoop(graph_block->GetLoopInformation());
}
@@ -121,7 +121,7 @@ void HInductionVarAnalysis::VisitLoop(HLoopInformation* loop) {
HBasicBlock* loop_block = it_loop.Current();
DCHECK(loop_block->IsInLoop());
if (loop_block->GetLoopInformation() != loop) {
- continue; // Inner loops already visited.
+ continue; // Inner loops visited later.
}
// Visit phi-operations and instructions.
for (HInstructionIterator it(loop_block->GetPhis()); !it.Done(); it.Advance()) {
@@ -285,6 +285,9 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
} else if (instruction->IsSub()) {
update = SolveAddSub(
loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), kSub, true);
+ } else if (instruction->IsXor()) {
+ update = SolveXor(
+ loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), true);
} else if (instruction->IsTypeConversion()) {
update = SolveCnv(instruction->AsTypeConversion());
}
@@ -553,6 +556,27 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveAddSub(HLoopIn
return nullptr;
}
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveXor(HLoopInformation* loop,
+ HInstruction* entry_phi,
+ HInstruction* instruction,
+ HInstruction* x,
+ HInstruction* y,
+ bool is_first_call) {
+ InductionInfo* b = LookupInfo(loop, y);
+ // Solve within a tight cycle on x = x ^ c.
+ if (b != nullptr && b->induction_class == kInvariant) {
+ if (x == entry_phi && entry_phi->InputCount() == 2 && instruction == entry_phi->InputAt(1)) {
+ InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0));
+ return CreateInduction(kPeriodic, CreateInvariantOp(kXor, initial, b), initial, type_);
+ }
+ }
+ // Try the other way around if considered for first time.
+ if (is_first_call) {
+ return SolveXor(loop, entry_phi, instruction, y, x, false);
+ }
+ return nullptr;
+}
+
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveCnv(HTypeConversion* conversion) {
Primitive::Type from = conversion->GetInputType();
Primitive::Type to = conversion->GetResultType();
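
Note: the SolveXor case above recognizes the tight cycle x = x ^ c as a periodic induction. A minimal standalone sketch of the value sequence it classifies (plain C++, illustrative only, not part of the patch):

#include <cstdio>

// Illustrative sketch, not part of the patch: with k initialized to `init` and
// updated as k = k ^ c inside the loop, the loop phi alternates between init
// and init ^ c. The analysis encodes this as periodic((init), (init ^ c)) for
// the phi and periodic((init ^ c), (init)) for the xor result, matching the
// FindXorPeriodicInduction test below (init = 0, c = 1).
int main() {
  const int init = 0;
  const int c = 1;
  int k = init;
  for (int i = 0; i < 6; ++i) {
    int x = k ^ c;                             // periodic((init ^ c), (init))
    std::printf("i=%d k=%d x=%d\n", i, k, x);  // k: 0,1,0,1,...  x: 1,0,1,0,...
    k = x;                                     // loop phi: periodic((init), (init ^ c))
  }
  return 0;
}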
@@ -850,8 +874,8 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
int64_t value = -1;
if (IsExact(a, &value)) {
if (value == 0) {
- // Simplify 0 + b = b, 0 * b = 0.
- if (op == kAdd) {
+ // Simplify 0 + b = b, 0 ^ b = b, 0 * b = 0.
+ if (op == kAdd || op == kXor) {
return b;
} else if (op == kMul) {
return a;
@@ -867,8 +891,8 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
}
if (IsExact(b, &value)) {
if (value == 0) {
- // Simplify a + 0 = a, a - 0 = a, a * 0 = 0, -0 = 0.
- if (op == kAdd || op == kSub) {
+ // Simplify a + 0 = a, a - 0 = a, a ^ 0 = a, a * 0 = 0, -0 = 0.
+ if (op == kAdd || op == kSub || op == kXor) {
return a;
} else if (op == kMul || op == kNeg) {
return b;
@@ -939,6 +963,7 @@ std::string HInductionVarAnalysis::InductionToString(InductionInfo* info) {
case kNeg: inv += " - "; break;
case kMul: inv += " * "; break;
case kDiv: inv += " / "; break;
+ case kXor: inv += " ^ "; break;
case kLT: inv += " < "; break;
case kLE: inv += " <= "; break;
case kGT: inv += " > "; break;
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index cd4c830645..06aee31b88 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -64,6 +64,7 @@ class HInductionVarAnalysis : public HOptimization {
kNeg,
kMul,
kDiv,
+ kXor,
kFetch,
// Trip-counts.
kTripCountInLoop, // valid in full loop; loop is finite
@@ -171,7 +172,13 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* x,
HInstruction* y,
InductionOp op,
- bool is_first_call);
+ bool is_first_call); // possibly swaps x and y to try again
+ InductionInfo* SolveXor(HLoopInformation* loop,
+ HInstruction* entry_phi,
+ HInstruction* instruction,
+ HInstruction* x,
+ HInstruction* y,
+ bool is_first_call); // possibly swaps x and y to try again
InductionInfo* SolveCnv(HTypeConversion* conversion);
// Trip count information.
@@ -219,8 +226,8 @@ class HInductionVarAnalysis : public HOptimization {
// Temporary book-keeping during the analysis.
uint32_t global_depth_;
ArenaVector<HInstruction*> stack_;
- ArenaVector<HInstruction*> scc_;
ArenaSafeMap<HInstruction*, NodeInfo> map_;
+ ArenaVector<HInstruction*> scc_;
ArenaSafeMap<HInstruction*, InductionInfo*> cycle_;
Primitive::Type type_;
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 292bc4e06e..7c467f6c94 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -107,7 +107,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
}
// Builds if-statement at depth d.
- HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock **ifF) {
+ HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock** ifF) {
HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_);
HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_);
HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_);
@@ -259,15 +259,15 @@ TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
// k = - i;
// }
BuildLoopNest(1);
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, constant100_, basic_[0]), 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0);
- HInstruction *mul = InsertInstruction(
+ HInstruction* mul = InsertInstruction(
new (&allocator_) HMul(Primitive::kPrimInt, constant100_, basic_[0]), 0);
- HInstruction *shl = InsertInstruction(
+ HInstruction* shl = InsertInstruction(
new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0);
- HInstruction *neg = InsertInstruction(
+ HInstruction* neg = InsertInstruction(
new (&allocator_) HNeg(Primitive::kPrimInt, basic_[0]), 0);
PerformInductionVarAnalysis();
@@ -291,10 +291,10 @@ TEST_F(InductionVarAnalysisTest, FindChainInduction) {
HPhi* k = InsertLoopPhi(0, 0);
k->AddInput(constant0_);
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, k, constant100_), 0);
HInstruction* store1 = InsertArrayStore(add, 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, add, constant1_), 0);
HInstruction* store2 = InsertArrayStore(sub, 0);
k->AddInput(sub);
@@ -381,7 +381,7 @@ TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
k->AddInput(constant0_);
HInstruction* store = InsertArrayStore(k, 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0);
k->AddInput(sub);
PerformInductionVarAnalysis();
@@ -407,7 +407,7 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
HInstruction* store = InsertArrayStore(k, 0);
k->AddInput(t);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0], 0), 0);
t->AddInput(sub);
PerformInductionVarAnalysis();
@@ -431,15 +431,15 @@ TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) {
HPhi* k = InsertLoopPhi(0, 0);
k->AddInput(constant0_);
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, k, constant100_), 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, k, constant100_), 0);
- HInstruction *mul = InsertInstruction(
+ HInstruction* mul = InsertInstruction(
new (&allocator_) HMul(Primitive::kPrimInt, k, constant100_), 0);
- HInstruction *shl = InsertInstruction(
+ HInstruction* shl = InsertInstruction(
new (&allocator_) HShl(Primitive::kPrimInt, k, constant1_), 0);
- HInstruction *neg = InsertInstruction(
+ HInstruction* neg = InsertInstruction(
new (&allocator_) HNeg(Primitive::kPrimInt, k), 0);
k->AddInput(
InsertInstruction(new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0));
@@ -497,7 +497,7 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
k->AddInput(constant0_);
HInstruction* store = InsertArrayStore(k, 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, constant1_, k), 0);
k->AddInput(sub);
PerformInductionVarAnalysis();
@@ -506,6 +506,45 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(sub, 0).c_str());
}
+TEST_F(InductionVarAnalysisTest, FindXorPeriodicInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // a[k] = 0;
+ // k = k ^ 1;
+ // }
+ BuildLoopNest(1);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+
+ HInstruction* store = InsertArrayStore(k, 0);
+ HInstruction* x = InsertInstruction(
+ new (&allocator_) HXor(Primitive::kPrimInt, k, constant1_), 0);
+ k->AddInput(x);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("periodic((0), (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(x, 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindXor100PeriodicInduction) {
+ // Setup:
+ // k = 100;
+ // for (int i = 0; i < 100; i++) {
+ // k = k ^ 100;
+ // }
+ BuildLoopNest(1);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant100_);
+
+ HInstruction* x = InsertInstruction(
+ new (&allocator_) HXor(Primitive::kPrimInt, k, constant100_), 0);
+ k->AddInput(x);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("periodic(((100) ^ (100)), (100)):PrimInt", GetInductionInfo(x, 0).c_str());
+}
+
TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
// Setup:
// k = 0;
@@ -526,15 +565,15 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
k_header->AddInput(k_body);
// Derived expressions.
- HInstruction *add = InsertInstruction(
+ HInstruction* add = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, k_body, constant100_), 0);
- HInstruction *sub = InsertInstruction(
+ HInstruction* sub = InsertInstruction(
new (&allocator_) HSub(Primitive::kPrimInt, k_body, constant100_), 0);
- HInstruction *mul = InsertInstruction(
+ HInstruction* mul = InsertInstruction(
new (&allocator_) HMul(Primitive::kPrimInt, k_body, constant100_), 0);
- HInstruction *shl = InsertInstruction(
+ HInstruction* shl = InsertInstruction(
new (&allocator_) HShl(Primitive::kPrimInt, k_body, constant1_), 0);
- HInstruction *neg = InsertInstruction(
+ HInstruction* neg = InsertInstruction(
new (&allocator_) HNeg(Primitive::kPrimInt, k_body), 0);
PerformInductionVarAnalysis();
@@ -563,7 +602,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
k[d] = InsertLoopPhi(0, d);
}
- HInstruction *inc = InsertInstruction(
+ HInstruction* inc = InsertInstruction(
new (&allocator_) HAdd(Primitive::kPrimInt, constant1_, k[9]), 9);
HInstruction* store = InsertArrayStore(inc, 9);
@@ -597,7 +636,7 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) {
// a[i] = 0;
// }
BuildLoopNest(1);
- HInstruction *conv = InsertInstruction(
+ HInstruction* conv = InsertInstruction(
new (&allocator_) HTypeConversion(Primitive::kPrimByte, basic_[0], -1), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* store2 = InsertArrayStore(basic_[0], 0);
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index cd8b7c7960..140c7f0c40 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -525,6 +525,8 @@ InductionVarRange::Value InductionVarRange::GetVal(HInductionVarAnalysis::Induct
return GetMul(info->op_a, info->op_b, trip, in_body, is_min);
case HInductionVarAnalysis::kDiv:
return GetDiv(info->op_a, info->op_b, trip, in_body, is_min);
+ case HInductionVarAnalysis::kXor:
+ return GetXor(info->op_a, info->op_b);
case HInductionVarAnalysis::kFetch:
return GetFetch(info->fetch, trip, in_body, is_min);
case HInductionVarAnalysis::kTripCountInLoop:
@@ -626,6 +628,21 @@ InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::Induct
return Value();
}
+InductionVarRange::Value InductionVarRange::GetXor(
+ HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2) const {
+ int64_t v1 = 0;
+ int64_t v2 = 0;
+ // Only accept exact values.
+ if (IsConstant(info1, kExact, &v1) && IsConstant(info2, kExact, &v2)) {
+ int64_t value = v1 ^ v2;
+ if (CanLongValueFitIntoInt(value)) {
+ return Value(static_cast<int32_t>(value));
+ }
+ }
+ return Value();
+}
+
InductionVarRange::Value InductionVarRange::MulRangeAndConstant(
int64_t value,
HInductionVarAnalysis::InductionInfo* info,
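
Note: GetXor above only folds when both operands are exact constants, which is what would let range evaluation reduce the initial value of the k ^ 100 induction in the new test. A quick standalone check of that folding (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>
#include <limits>

// Illustrative sketch, not part of the patch: mirrors the arithmetic in
// InductionVarRange::GetXor(). Only exact constants fold, and the result is
// used only if it fits into an int32_t.
int main() {
  int64_t v1 = 100;
  int64_t v2 = 100;
  int64_t value = v1 ^ v2;
  assert(value == 0);  // so periodic(((100) ^ (100)), (100)) starts at 0
  assert(value >= std::numeric_limits<int32_t>::min() &&
         value <= std::numeric_limits<int32_t>::max());
  return 0;
}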
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 63850b34b8..895130064a 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -131,6 +131,14 @@ class InductionVarRange {
*/
void Replace(HInstruction* instruction, HInstruction* fetch, HInstruction* replacement);
+ /**
+ * Incrementally updates induction information for just the given loop.
+ */
+ void ReVisit(HLoopInformation* loop) {
+ induction_analysis_->induction_.erase(loop);
+ induction_analysis_->VisitLoop(loop);
+ }
+
private:
/*
* Enum used in IsConstant() request.
@@ -185,6 +193,8 @@ class InductionVarRange {
HInductionVarAnalysis::InductionInfo* trip,
bool in_body,
bool is_min) const;
+ Value GetXor(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2) const;
Value MulRangeAndConstant(int64_t value,
HInductionVarAnalysis::InductionInfo* info,
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 486626b1fe..a1dcd58a84 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -38,7 +38,7 @@ class HInliner : public HOptimization {
const DexCompilationUnit& outer_compilation_unit,
const DexCompilationUnit& caller_compilation_unit,
CompilerDriver* compiler_driver,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
OptimizingCompilerStats* stats,
size_t total_number_of_dex_registers,
size_t depth)
@@ -197,7 +197,7 @@ class HInliner : public HOptimization {
const size_t total_number_of_dex_registers_;
const size_t depth_;
size_t number_of_inlined_instructions_;
- StackHandleScopeCollection* const handles_;
+ VariableSizedHandleScope* const handles_;
DISALLOW_COPY_AND_ASSIGN(HInliner);
};
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 93c6c20d7c..33fa87d568 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -69,33 +69,6 @@ static bool IsEmptyBody(HBasicBlock* block, ArenaSet<HInstruction*>* iset) {
i->GetNext() != nullptr && i->GetNext()->IsGoto();
}
-static HBasicBlock* TryRemovePreHeader(HBasicBlock* preheader, HBasicBlock* entry_block) {
- if (preheader->GetPredecessors().size() == 1) {
- HBasicBlock* entry = preheader->GetSinglePredecessor();
- HInstruction* anchor = entry->GetLastInstruction();
- // If the pre-header has a single predecessor we can remove it too if
- // either the pre-header just contains a goto, or if the predecessor
- // is not the entry block so we can push instructions backward
- // (moving computation into the entry block is too dangerous!).
- if (preheader->GetFirstInstruction() == nullptr ||
- preheader->GetFirstInstruction()->IsGoto() ||
- (entry != entry_block && anchor->IsGoto())) {
- // Push non-goto statements backward to empty the pre-header.
- for (HInstructionIterator it(preheader->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (!instruction->IsGoto()) {
- if (!instruction->CanBeMoved()) {
- return nullptr; // pushing failed to move all
- }
- it.Current()->MoveBefore(anchor);
- }
- }
- return entry;
- }
- }
- return nullptr;
-}
-
static void RemoveFromCycle(HInstruction* instruction) {
// A bit more elaborate than the usual instruction removal,
// since there may be a cycle in the use structure.
@@ -115,7 +88,8 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
loop_allocator_(nullptr),
top_loop_(nullptr),
last_loop_(nullptr),
- iset_(nullptr) {
+ iset_(nullptr),
+ induction_simplication_count_(0) {
}
void HLoopOptimization::Run() {
@@ -211,11 +185,17 @@ void HLoopOptimization::RemoveLoop(LoopNode* node) {
void HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
for ( ; node != nullptr; node = node->next) {
+ int current_induction_simplification_count = induction_simplication_count_;
if (node->inner != nullptr) {
TraverseLoopsInnerToOuter(node->inner);
}
- // Visit loop after its inner loops have been visited.
+ // Visit loop after its inner loops have been visited. If the induction of any inner
+ // loop has been simplified, recompute the induction information of this loop first.
+ if (current_induction_simplification_count != induction_simplication_count_) {
+ induction_range_.ReVisit(node->loop_info);
+ }
SimplifyInduction(node);
+ SimplifyBlocks(node);
RemoveIfEmptyLoop(node);
}
}
@@ -233,11 +213,41 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
iset_->clear();
int32_t use_count = 0;
if (IsPhiInduction(phi, iset_) &&
- IsOnlyUsedAfterLoop(*node->loop_info, phi, &use_count) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, &use_count) &&
TryReplaceWithLastValue(phi, use_count, preheader)) {
for (HInstruction* i : *iset_) {
RemoveFromCycle(i);
}
+ induction_simplication_count_++;
+ }
+ }
+}
+
+void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
+ for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ // Remove instructions that are dead, usually resulting from eliminating induction cycles.
+ for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
+ HInstruction* instruction = i.Current();
+ if (instruction->IsDeadAndRemovable()) {
+ block->RemoveInstruction(instruction);
+ }
+ }
+ // Remove trivial control flow blocks from the loop body, again usually resulting
+ // from eliminating induction cycles.
+ if (block->GetPredecessors().size() == 1 &&
+ block->GetSuccessors().size() == 1 &&
+ block->GetFirstInstruction()->IsGoto()) {
+ HBasicBlock* pred = block->GetSinglePredecessor();
+ HBasicBlock* succ = block->GetSingleSuccessor();
+ if (succ->GetPredecessors().size() == 1) {
+ pred->ReplaceSuccessor(block, succ);
+ block->ClearDominanceInformation();
+ block->SetDominator(pred); // needed by next disconnect.
+ block->DisconnectAndDelete();
+ pred->AddDominatedBlock(succ);
+ succ->SetDominator(pred);
+ }
}
}
}
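
Note: SimplifyBlocks bypasses goto-only blocks and patches the dominator tree by hand. A self-contained sketch of that CFG surgery on stand-in types (illustrative only; the real code uses HBasicBlock and the ReplaceSuccessor/DisconnectAndDelete helpers shown above):

#include <cassert>
#include <vector>

// Illustrative sketch, not part of the patch. `Block` is a hypothetical
// stand-in for HBasicBlock: a block with one predecessor, one successor and
// only a goto is bypassed, and the successor's dominator is moved to the
// predecessor, mirroring the fix-up done around DisconnectAndDelete above.
struct Block {
  std::vector<Block*> preds, succs;
  Block* dominator = nullptr;
  bool only_goto = false;
};

void RemoveTrivialBlock(Block* block) {
  if (block->preds.size() == 1 && block->succs.size() == 1 && block->only_goto) {
    Block* pred = block->preds[0];
    Block* succ = block->succs[0];
    if (succ->preds.size() == 1) {  // successor reachable only through `block`
      for (Block*& s : pred->succs) {
        if (s == block) s = succ;   // pred->ReplaceSuccessor(block, succ)
      }
      succ->preds[0] = pred;
      succ->dominator = pred;       // dominator fix-up
    }
  }
}

int main() {
  Block pred, mid, succ;
  pred.succs = {&mid};
  mid.preds = {&pred};
  mid.succs = {&succ};
  mid.only_goto = true;
  succ.preds = {&mid};
  succ.dominator = &mid;
  RemoveTrivialBlock(&mid);
  assert(pred.succs[0] == &succ);
  assert(succ.preds[0] == &pred);
  assert(succ.dominator == &pred);
  return 0;
}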
@@ -272,41 +282,31 @@ void HLoopOptimization::RemoveIfEmptyLoop(LoopNode* node) {
int32_t use_count = 0;
if (IsEmptyHeader(header, iset_) &&
IsEmptyBody(body, iset_) &&
- IsOnlyUsedAfterLoop(*node->loop_info, header->GetFirstPhi(), &use_count) &&
+ IsOnlyUsedAfterLoop(node->loop_info, header->GetFirstPhi(), &use_count) &&
TryReplaceWithLastValue(header->GetFirstPhi(), use_count, preheader)) {
- HBasicBlock* entry = TryRemovePreHeader(preheader, graph_->GetEntryBlock());
body->DisconnectAndDelete();
exit->RemovePredecessor(header);
header->RemoveSuccessor(exit);
header->ClearDominanceInformation();
header->SetDominator(preheader); // needed by next disconnect.
header->DisconnectAndDelete();
- // If allowed, remove preheader too, which may expose next outer empty loop
- // Otherwise, link preheader directly to exit to restore the flow graph.
- if (entry != nullptr) {
- entry->ReplaceSuccessor(preheader, exit);
- entry->AddDominatedBlock(exit);
- exit->SetDominator(entry);
- preheader->DisconnectAndDelete();
- } else {
- preheader->AddSuccessor(exit);
- preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
- preheader->AddDominatedBlock(exit);
- exit->SetDominator(preheader);
- }
+ preheader->AddSuccessor(exit);
+ preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
+ preheader->AddDominatedBlock(exit);
+ exit->SetDominator(preheader);
// Update hierarchy.
RemoveLoop(node);
}
}
-bool HLoopOptimization::IsOnlyUsedAfterLoop(const HLoopInformation& loop_info,
+bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
/*out*/ int32_t* use_count) {
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (iset_->find(user) == iset_->end()) { // not excluded?
HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
- if (other_loop_info != nullptr && other_loop_info->IsIn(loop_info)) {
+ if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
return false;
}
++*use_count;
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index b2bf1c8507..9c4b462a1f 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -46,7 +46,7 @@ class HLoopOptimization : public HOptimization {
inner(nullptr),
previous(nullptr),
next(nullptr) {}
- const HLoopInformation* const loop_info;
+ HLoopInformation* const loop_info;
LoopNode* outer;
LoopNode* inner;
LoopNode* previous;
@@ -61,9 +61,10 @@ class HLoopOptimization : public HOptimization {
void TraverseLoopsInnerToOuter(LoopNode* node);
void SimplifyInduction(LoopNode* node);
+ void SimplifyBlocks(LoopNode* node);
void RemoveIfEmptyLoop(LoopNode* node);
- bool IsOnlyUsedAfterLoop(const HLoopInformation& loop_info,
+ bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
/*out*/ int32_t* use_count);
void ReplaceAllUses(HInstruction* instruction, HInstruction* replacement);
@@ -87,6 +88,11 @@ class HLoopOptimization : public HOptimization {
// Contents reside in phase-local heap memory.
ArenaSet<HInstruction*>* iset_;
+ // Counter that tracks how many induction cycles have been simplified. Useful
+ // to trigger incremental updates of induction variable analysis of outer loops
+ // when the induction of inner loops has changed.
+ int32_t induction_simplication_count_;
+
friend class LoopOptimizationTest;
DISALLOW_COPY_AND_ASSIGN(HLoopOptimization);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 874c1edf35..1e69966b98 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -35,7 +35,7 @@ namespace art {
// double).
static constexpr bool kEnableFloatingPointStaticEvaluation = (FLT_EVAL_METHOD == 0);
-void HGraph::InitializeInexactObjectRTI(StackHandleScopeCollection* handles) {
+void HGraph::InitializeInexactObjectRTI(VariableSizedHandleScope* handles) {
ScopedObjectAccess soa(Thread::Current());
// Create the inexact Object reference type and store it in the HGraph.
ClassLinker* linker = Runtime::Current()->GetClassLinker();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 828c0e51c8..6f4f3c9505 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -336,7 +336,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
}
// Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
- void InitializeInexactObjectRTI(StackHandleScopeCollection* handles);
+ void InitializeInexactObjectRTI(VariableSizedHandleScope* handles);
ArenaAllocator* GetArena() const { return arena_; }
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
@@ -1931,6 +1931,19 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return !HasEnvironmentUses() && GetUses().HasExactlyOneElement();
}
+ bool IsDeadAndRemovable() const {
+ return
+ !HasSideEffects() &&
+ !CanThrow() &&
+ !IsSuspendCheck() &&
+ !IsControlFlow() &&
+ !IsNativeDebugInfo() &&
+ !IsParameterValue() &&
+ !HasUses() &&
+ // If we added an explicit barrier then we should keep it.
+ !IsMemoryBarrier();
+ }
+
// Does this instruction strictly dominate `other_instruction`?
// Returns false if this instruction and `other_instruction` are the same.
// Aborts if this instruction and `other_instruction` are both phis.
@@ -2080,10 +2093,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// to the current method. Such instructions are:
// (1): Instructions that require an environment, as calling the runtime requires
// to walk the stack and have the current method stored at a specific stack address.
- // (2): Object literals like classes and strings, that are loaded from the dex cache
- // fields of the current method.
+ // (2): HCurrentMethod, potentially used by HInvokeStaticOrDirect, HLoadString, or HLoadClass
+ // to access the dex cache.
bool NeedsCurrentMethod() const {
- return NeedsEnvironment() || IsLoadClass() || IsLoadString();
+ return NeedsEnvironment() || IsCurrentMethod();
}
// Returns whether the code generation of the instruction will require to have access
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 6c5030c9cb..f735dc8cb3 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -1,10 +1,10 @@
static constexpr uint8_t expected_asm_kThumb2[] = {
- 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x0B, 0xB0,
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x0B, 0xB0,
0xBD, 0xEC, 0x02, 0x8A, 0x60, 0xBD,
};
static constexpr uint8_t expected_cfi_kThumb2[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
- 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x42, 0x0A, 0x42,
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x0A, 0x42,
0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B, 0x0E,
0x40,
};
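
Note: the expected CFI streams shrink because the entry-sequence advance over the removed method store disappears. For the Thumb2 case above, the dropped byte is the DW_CFA_advance_loc over the 2-byte `str r0, [sp, #0]`; a small check of that encoding (illustrative only, standard DWARF opcodes, not part of the patch):

#include <cassert>
#include <cstdint>

// Illustrative sketch, not part of the patch: DW_CFA_advance_loc packs a small
// code-address delta into the low 6 bits of a 0x40-prefixed byte. The 0x42
// removed before the 0x0A (DW_CFA_remember_state) above is the 2-byte advance
// over the dropped `str r0, [sp, #0]` at the ARM method entry.
int main() {
  const uint8_t dropped = 0x42;
  assert((dropped & 0xC0) == 0x40);  // primary opcode DW_CFA_advance_loc
  assert((dropped & 0x3F) == 2);     // delta: 2 bytes of Thumb2 code
  return 0;
}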
@@ -19,20 +19,19 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
// 0x00000006: .cfi_offset_extended: r81 at cfa-16
// 0x00000006: sub sp, sp, #44
// 0x00000008: .cfi_def_cfa_offset: 64
-// 0x00000008: str r0, [sp, #0]
-// 0x0000000a: .cfi_remember_state
-// 0x0000000a: add sp, sp, #44
-// 0x0000000c: .cfi_def_cfa_offset: 20
-// 0x0000000c: vpop.f32 {s16-s17}
-// 0x00000010: .cfi_def_cfa_offset: 12
-// 0x00000010: .cfi_restore_extended: r80
-// 0x00000010: .cfi_restore_extended: r81
-// 0x00000010: pop {r5, r6, pc}
-// 0x00000012: .cfi_restore_state
-// 0x00000012: .cfi_def_cfa_offset: 64
+// 0x00000008: .cfi_remember_state
+// 0x00000008: add sp, sp, #44
+// 0x0000000a: .cfi_def_cfa_offset: 20
+// 0x0000000a: vpop.f32 {s16-s17}
+// 0x0000000e: .cfi_def_cfa_offset: 12
+// 0x0000000e: .cfi_restore_extended: r80
+// 0x0000000e: .cfi_restore_extended: r81
+// 0x0000000e: pop {r5, r6, pc}
+// 0x00000010: .cfi_restore_state
+// 0x00000010: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
+ 0xFF, 0x03, 0x01, 0xD1, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0x17, 0x40, 0xF9,
0xF5, 0x7B, 0x43, 0xA9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
@@ -41,7 +40,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
0x44, 0xD4, 0x44, 0xD5, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: str x0, [sp, #-64]!
+// 0x00000000: sub sp, sp, #0x40 (64)
// 0x00000004: .cfi_def_cfa_offset: 64
// 0x00000004: str x20, [sp, #40]
// 0x00000008: .cfi_offset: r20 at cfa-24
@@ -67,12 +66,12 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
// 0x00000024: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kX86[] = {
- 0x56, 0x55, 0x83, 0xEC, 0x34, 0x89, 0x04, 0x24, 0x83, 0xC4, 0x34, 0x5D,
+ 0x56, 0x55, 0x83, 0xEC, 0x34, 0x83, 0xC4, 0x34, 0x5D,
0x5E, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86[] = {
0x41, 0x0E, 0x08, 0x86, 0x02, 0x41, 0x0E, 0x0C, 0x85, 0x03, 0x43, 0x0E,
- 0x40, 0x43, 0x0A, 0x43, 0x0E, 0x0C, 0x41, 0x0E, 0x08, 0xC5, 0x41, 0x0E,
+ 0x40, 0x0A, 0x43, 0x0E, 0x0C, 0x41, 0x0E, 0x08, 0xC5, 0x41, 0x0E,
0x04, 0xC6, 0x41, 0x0B, 0x0E, 0x40,
};
// 0x00000000: push esi
@@ -83,29 +82,28 @@ static constexpr uint8_t expected_cfi_kX86[] = {
// 0x00000002: .cfi_offset: r5 at cfa-12
// 0x00000002: sub esp, 52
// 0x00000005: .cfi_def_cfa_offset: 64
-// 0x00000005: mov [esp], eax
-// 0x00000008: .cfi_remember_state
-// 0x00000008: add esp, 52
-// 0x0000000b: .cfi_def_cfa_offset: 12
-// 0x0000000b: pop ebp
-// 0x0000000c: .cfi_def_cfa_offset: 8
-// 0x0000000c: .cfi_restore: r5
-// 0x0000000c: pop esi
-// 0x0000000d: .cfi_def_cfa_offset: 4
-// 0x0000000d: .cfi_restore: r6
-// 0x0000000d: ret
-// 0x0000000e: .cfi_restore_state
-// 0x0000000e: .cfi_def_cfa_offset: 64
+// 0x00000005: .cfi_remember_state
+// 0x00000005: add esp, 52
+// 0x00000008: .cfi_def_cfa_offset: 12
+// 0x00000008: pop ebp
+// 0x0000000a: .cfi_def_cfa_offset: 8
+// 0x0000000a: .cfi_restore: r5
+// 0x0000000a: pop esi
+// 0x0000000b: .cfi_def_cfa_offset: 4
+// 0x0000000b: .cfi_restore: r6
+// 0x0000000b: ret
+// 0x0000000c: .cfi_restore_state
+// 0x0000000c: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kX86_64[] = {
0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24,
- 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24,
+ 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18,
0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
0x24, 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E,
- 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x44, 0x0A, 0x47, 0xDD, 0x47,
+ 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x0A, 0x47, 0xDD, 0x47,
0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6,
0x41, 0x0B, 0x0E, 0x40,
};
@@ -121,34 +119,33 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x0000000d: .cfi_offset: r30 at cfa-32
// 0x0000000d: movsd [rsp + 24], xmm12
// 0x00000014: .cfi_offset: r29 at cfa-40
-// 0x00000014: movq [rsp], rdi
-// 0x00000018: .cfi_remember_state
-// 0x00000018: movsd xmm12, [rsp + 24]
-// 0x0000001f: .cfi_restore: r29
-// 0x0000001f: movsd xmm13, [rsp + 32]
-// 0x00000026: .cfi_restore: r30
-// 0x00000026: addq rsp, 40
-// 0x0000002a: .cfi_def_cfa_offset: 24
-// 0x0000002a: pop rbx
-// 0x0000002b: .cfi_def_cfa_offset: 16
-// 0x0000002b: .cfi_restore: r3
-// 0x0000002b: pop rbp
-// 0x0000002c: .cfi_def_cfa_offset: 8
-// 0x0000002c: .cfi_restore: r6
-// 0x0000002c: ret
-// 0x0000002d: .cfi_restore_state
-// 0x0000002d: .cfi_def_cfa_offset: 64
+// 0x00000014: .cfi_remember_state
+// 0x00000014: movsd xmm12, [rsp + 24]
+// 0x0000001c: .cfi_restore: r29
+// 0x0000001c: movsd xmm13, [rsp + 32]
+// 0x00000022: .cfi_restore: r30
+// 0x00000022: addq rsp, 40
+// 0x00000026: .cfi_def_cfa_offset: 24
+// 0x00000026: pop rbx
+// 0x00000027: .cfi_def_cfa_offset: 16
+// 0x00000027: .cfi_restore: r3
+// 0x00000027: pop rbp
+// 0x00000028: .cfi_def_cfa_offset: 8
+// 0x00000028: .cfi_restore: r6
+// 0x00000028: ret
+// 0x00000029: .cfi_restore_state
+// 0x00000029: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x00, 0x00, 0xA4, 0xAF, 0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
+ 0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
0x34, 0x00, 0xB0, 0x8F, 0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7,
0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_cfi_kMips[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x4C, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
+ 0x48, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
@@ -161,33 +158,33 @@ static constexpr uint8_t expected_cfi_kMips[] = {
// 0x00000010: .cfi_offset: r16 at cfa-12
// 0x00000010: sdc1 f22, +40(r29)
// 0x00000014: sdc1 f20, +32(r29)
-// 0x00000018: sw r4, +0(r29)
-// 0x0000001c: .cfi_remember_state
-// 0x0000001c: lw r31, +60(r29)
-// 0x00000020: .cfi_restore: r31
-// 0x00000020: lw r17, +56(r29)
-// 0x00000024: .cfi_restore: r17
-// 0x00000024: lw r16, +52(r29)
-// 0x00000028: .cfi_restore: r16
-// 0x00000028: ldc1 f22, +40(r29)
-// 0x0000002c: ldc1 f20, +32(r29)
-// 0x00000030: jr r31
-// 0x00000034: addiu r29, r29, 64
-// 0x00000038: .cfi_def_cfa_offset: 0
-// 0x00000038: .cfi_restore_state
-// 0x00000038: .cfi_def_cfa_offset: 64
+// 0x00000018: .cfi_remember_state
+// 0x00000018: lw r31, +60(r29)
+// 0x0000001c: .cfi_restore: r31
+// 0x0000001c: lw r17, +56(r29)
+// 0x00000020: .cfi_restore: r17
+// 0x00000020: lw r16, +52(r29)
+// 0x00000024: .cfi_restore: r16
+// 0x00000024: ldc1 f22, +40(r29)
+// 0x00000028: ldc1 f20, +32(r29)
+// 0x0000002c: jr r31
+// 0x00000030: addiu r29, r29, 64
+// 0x00000034: .cfi_def_cfa_offset: 0
+// 0x00000034: .cfi_restore_state
+// 0x00000034: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x00, 0x00, 0xA4, 0xFF, 0x18, 0x00, 0xBD, 0x67,
+ 0xE8, 0xFF, 0xBD, 0x67, 0x18, 0x00, 0xBD, 0x67,
0x00, 0x00, 0xB8, 0xD7, 0x08, 0x00, 0xB9, 0xD7, 0x10, 0x00, 0xB0, 0xDF,
0x18, 0x00, 0xB1, 0xDF, 0x20, 0x00, 0xBF, 0xDF, 0x28, 0x00, 0xBD, 0x67,
0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
+
static constexpr uint8_t expected_cfi_kMips64[] = {
0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x44, 0x0A, 0x44,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x0A, 0x44,
0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0, 0x44, 0xD1, 0x44, 0xDF,
0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
@@ -205,29 +202,28 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000018: .cfi_offset: r56 at cfa-40
// 0x00000018: daddiu r29, r29, -24
// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: sd r4, +0(r29)
-// 0x00000020: .cfi_remember_state
-// 0x00000020: daddiu r29, r29, 24
-// 0x00000024: .cfi_def_cfa_offset: 40
-// 0x00000024: ldc1 f24, +0(r29)
-// 0x00000028: .cfi_restore: r56
-// 0x00000028: ldc1 f25, +8(r29)
-// 0x0000002c: .cfi_restore: r57
-// 0x0000002c: ld r16, +16(r29)
-// 0x00000030: .cfi_restore: r16
-// 0x00000030: ld r17, +24(r29)
-// 0x00000034: .cfi_restore: r17
-// 0x00000034: ld r31, +32(r29)
-// 0x00000038: .cfi_restore: r31
-// 0x00000038: daddiu r29, r29, 40
-// 0x0000003c: .cfi_def_cfa_offset: 0
-// 0x0000003c: jr r31
-// 0x00000040: nop
-// 0x00000044: .cfi_restore_state
-// 0x00000044: .cfi_def_cfa_offset: 64
+// 0x0000001c: .cfi_remember_state
+// 0x0000001c: daddiu r29, r29, 24
+// 0x00000020: .cfi_def_cfa_offset: 40
+// 0x00000020: ldc1 f24, +0(r29)
+// 0x00000024: .cfi_restore: r56
+// 0x00000024: ldc1 f25, +8(r29)
+// 0x00000028: .cfi_restore: r57
+// 0x00000028: ld r16, +16(r29)
+// 0x0000002c: .cfi_restore: r16
+// 0x0000002c: ld r17, +24(r29)
+// 0x00000030: .cfi_restore: r17
+// 0x00000030: ld r31, +32(r29)
+// 0x00000034: .cfi_restore: r31
+// 0x00000034: daddiu r29, r29, 40
+// 0x00000038: .cfi_def_cfa_offset: 0
+// 0x00000038: jr r31
+// 0x0000003c: nop
+// 0x00000040: .cfi_restore_state
+// 0x00000040: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
- 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x00, 0x28,
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28,
0x40, 0xD0, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
@@ -243,7 +239,7 @@ static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
};
static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
- 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x88, 0x0A,
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x86, 0x0A,
0x42, 0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B,
0x0E, 0x40,
};
@@ -258,9 +254,9 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
// 0x00000006: .cfi_offset_extended: r81 at cfa-16
// 0x00000006: sub sp, sp, #44
// 0x00000008: .cfi_def_cfa_offset: 64
-// 0x00000008: str r0, [sp, #0]
-// 0x0000000a: cmp r0, #0
-// 0x0000000c: beq +128 (0x00000090)
+// 0x00000008: cmp r0, #0
+// 0x0000000a: beq +128 (0x00000090)
+// 0x0000000c: ldr r0, [r0, #0]
// 0x0000000e: ldr r0, [r0, #0]
// 0x00000010: ldr r0, [r0, #0]
// 0x00000012: ldr r0, [r0, #0]
@@ -325,22 +321,21 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
// 0x00000088: ldr r0, [r0, #0]
// 0x0000008a: ldr r0, [r0, #0]
// 0x0000008c: ldr r0, [r0, #0]
-// 0x0000008e: ldr r0, [r0, #0]
-// 0x00000090: .cfi_remember_state
-// 0x00000090: add sp, sp, #44
-// 0x00000092: .cfi_def_cfa_offset: 20
-// 0x00000092: vpop.f32 {s16-s17}
-// 0x00000096: .cfi_def_cfa_offset: 12
-// 0x00000096: .cfi_restore_extended: r80
-// 0x00000096: .cfi_restore_extended: r81
-// 0x00000096: pop {r5, r6, pc}
-// 0x00000098: .cfi_restore_state
-// 0x00000098: .cfi_def_cfa_offset: 64
+// 0x0000008e: .cfi_remember_state
+// 0x0000008e: add sp, sp, #44
+// 0x00000090: .cfi_def_cfa_offset: 20
+// 0x00000090: vpop.f32 {s16-s17}
+// 0x00000094: .cfi_def_cfa_offset: 12
+// 0x00000094: .cfi_restore_extended: r80
+// 0x00000094: .cfi_restore_extended: r81
+// 0x00000094: pop {r5, r6, pc}
+// 0x00000096: .cfi_restore_state
+// 0x00000096: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips_adjust_head[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x00, 0x00, 0xA4, 0xAF, 0x08, 0x00, 0x04, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
+ 0x08, 0x00, 0x04, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
0x00, 0x00, 0xBF, 0xAF, 0x00, 0x00, 0x10, 0x04, 0x02, 0x00, 0x01, 0x3C,
0x18, 0x00, 0x21, 0x34, 0x21, 0x08, 0x3F, 0x00, 0x00, 0x00, 0xBF, 0x8F,
0x09, 0x00, 0x20, 0x00, 0x04, 0x00, 0xBD, 0x27,
@@ -352,7 +347,7 @@ static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips_adjust[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x54, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
+ 0x50, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
@@ -365,41 +360,40 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000010: .cfi_offset: r16 at cfa-12
// 0x00000010: sdc1 f22, +40(r29)
// 0x00000014: sdc1 f20, +32(r29)
-// 0x00000018: sw r4, +0(r29)
-// 0x0000001c: bne r0, r4, 0x00000040 ; +36
-// 0x00000020: addiu r29, r29, -4
-// 0x00000024: .cfi_def_cfa_offset: 68
-// 0x00000024: sw r31, +0(r29)
-// 0x00000028: bltzal r0, 0x0000002c ; +4
-// 0x0000002c: lui r1, 0x20000
-// 0x00000030: ori r1, r1, 24
-// 0x00000034: addu r1, r1, r31
-// 0x00000038: lw r31, +0(r29)
-// 0x0000003c: jr r1
-// 0x00000040: addiu r29, r29, 4
-// 0x00000044: .cfi_def_cfa_offset: 64
-// 0x00000044: nop
+// 0x00000018: bne r0, r4, 0x00000040 ; +36
+// 0x0000001c: addiu r29, r29, -4
+// 0x00000020: .cfi_def_cfa_offset: 68
+// 0x00000020: sw r31, +0(r29)
+// 0x00000024: bltzal r0, 0x0000002c ; +4
+// 0x00000028: lui r1, 0x20000
+// 0x0000002c: ori r1, r1, 24
+// 0x00000030: addu r1, r1, r31
+// 0x00000034: lw r31, +0(r29)
+// 0x00000038: jr r1
+// 0x0000003c: addiu r29, r29, 4
+// 0x00000040: .cfi_def_cfa_offset: 64
+// 0x00000040: nop
// ...
-// 0x00020044: nop
-// 0x00020048: .cfi_remember_state
-// 0x00020048: lw r31, +60(r29)
-// 0x0002004c: .cfi_restore: r31
-// 0x0002004c: lw r17, +56(r29)
-// 0x00020050: .cfi_restore: r17
-// 0x00020050: lw r16, +52(r29)
-// 0x00020054: .cfi_restore: r16
-// 0x00020054: ldc1 f22, +40(r29)
-// 0x00020058: ldc1 f20, +32(r29)
-// 0x0002005c: jr r31
-// 0x00020060: addiu r29, r29, 64
-// 0x00020064: .cfi_def_cfa_offset: 0
-// 0x00020064: .cfi_restore_state
-// 0x00020064: .cfi_def_cfa_offset: 64
+// 0x00020040: nop
+// 0x00020044: .cfi_remember_state
+// 0x00020044: lw r31, +60(r29)
+// 0x00020048: .cfi_restore: r31
+// 0x00020048: lw r17, +56(r29)
+// 0x0002004c: .cfi_restore: r17
+// 0x0002004c: lw r16, +52(r29)
+// 0x00020050: .cfi_restore: r16
+// 0x00020050: ldc1 f22, +40(r29)
+// 0x00020054: ldc1 f20, +32(r29)
+// 0x00020058: jr r31
+// 0x0002005c: addiu r29, r29, 64
+// 0x00020060: .cfi_def_cfa_offset: 0
+// 0x00020060: .cfi_restore_state
+// 0x00020060: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64_adjust_head[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x00, 0x00, 0xA4, 0xFF, 0x02, 0x00, 0xA6, 0x60,
+ 0xE8, 0xFF, 0xBD, 0x67, 0x02, 0x00, 0xA6, 0x60,
0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
};
static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
@@ -409,7 +403,7 @@ static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x14, 0x00,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x10, 0x00,
0x02, 0x00, 0x0A, 0x44, 0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0,
0x44, 0xD1, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
@@ -427,29 +421,28 @@ static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
// 0x00000018: .cfi_offset: r56 at cfa-40
// 0x00000018: daddiu r29, r29, -24
// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: sd r4, +0(r29)
-// 0x00000020: bnec r5, r6, 0x0000002c ; +12
-// 0x00000024: auipc r1, 2
-// 0x00000028: jic r1, 12 ; b 0x00020030 ; +131080
-// 0x0000002c: nop
+// 0x0000001c: bnec r5, r6, 0x0000002c ; +12
+// 0x00000020: auipc r1, 2
+// 0x00000024: jic r1, 12 ; b 0x00020030 ; +131080
+// 0x00000028: nop
// ...
-// 0x0002002c: nop
-// 0x00020030: .cfi_remember_state
-// 0x00020030: daddiu r29, r29, 24
-// 0x00020034: .cfi_def_cfa_offset: 40
-// 0x00020034: ldc1 f24, +0(r29)
-// 0x00020038: .cfi_restore: r56
-// 0x00020038: ldc1 f25, +8(r29)
-// 0x0002003c: .cfi_restore: r57
-// 0x0002003c: ld r16, +16(r29)
-// 0x00020040: .cfi_restore: r16
-// 0x00020040: ld r17, +24(r29)
-// 0x00020044: .cfi_restore: r17
-// 0x00020044: ld r31, +32(r29)
-// 0x00020048: .cfi_restore: r31
-// 0x00020048: daddiu r29, r29, 40
-// 0x0002004c: .cfi_def_cfa_offset: 0
-// 0x0002004c: jr r31
-// 0x00020050: nop
-// 0x00020054: .cfi_restore_state
-// 0x00020054: .cfi_def_cfa_offset: 64
+// 0x00020028: nop
+// 0x0002002c: .cfi_remember_state
+// 0x0002002c: daddiu r29, r29, 24
+// 0x00020030: .cfi_def_cfa_offset: 40
+// 0x00020030: ldc1 f24, +0(r29)
+// 0x00020034: .cfi_restore: r56
+// 0x00020034: ldc1 f25, +8(r29)
+// 0x00020038: .cfi_restore: r57
+// 0x00020038: ld r16, +16(r29)
+// 0x0002003c: .cfi_restore: r16
+// 0x0002003c: ld r17, +24(r29)
+// 0x00020040: .cfi_restore: r17
+// 0x00020040: ld r31, +32(r29)
+// 0x00020044: .cfi_restore: r31
+// 0x00020044: daddiu r29, r29, 40
+// 0x00020048: .cfi_def_cfa_offset: 0
+// 0x00020048: jr r31
+// 0x0002004c: nop
+// 0x00020050: .cfi_restore_state
+// 0x00020050: .cfi_def_cfa_offset: 64
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d6f8307ac2..4370a84bd2 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -319,7 +319,7 @@ class OptimizingCompiler FINAL : public Compiler {
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const;
+ VariableSizedHandleScope* handles) const;
void RunOptimizations(HOptimization* optimizations[],
size_t length,
@@ -358,7 +358,7 @@ class OptimizingCompiler FINAL : public Compiler {
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const;
+ VariableSizedHandleScope* handles) const;
void RunArchOptimizations(InstructionSet instruction_set,
HGraph* graph,
@@ -442,7 +442,7 @@ static HOptimization* BuildOptimization(
CodeGenerator* codegen,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
SideEffectsAnalysis* most_recent_side_effects,
HInductionVarAnalysis* most_recent_induction) {
std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
@@ -524,7 +524,7 @@ static ArenaVector<HOptimization*> BuildOptimizations(
CodeGenerator* codegen,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
- StackHandleScopeCollection* handles) {
+ VariableSizedHandleScope* handles) {
// A few HOptimization constructors require SideEffectsAnalysis or HInductionVarAnalysis
// instances. This method assumes that each of them expects the nearest instance preceding it
// in the pass name list.
@@ -570,7 +570,7 @@ void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const {
+ VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
const CompilerOptions& compiler_options = driver->GetCompilerOptions();
bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
@@ -707,7 +707,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const {
+ VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
ArenaAllocator* arena = graph->GetArena();
if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
@@ -949,7 +949,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
{
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
// Do not hold `mutator_lock_` between optimizations.
ScopedThreadSuspension sts(soa.Self(), kNative);
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 2a23c92f1f..58d90176cd 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -90,7 +90,7 @@ inline HGraph* CreateCFG(ArenaAllocator* allocator,
{
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
HGraphBuilder builder(graph, *item, &handles, return_type);
bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
return graph_built ? graph : nullptr;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 45a3ce411e..83698adba4 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -35,7 +35,7 @@ static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
}
}
-static inline ReferenceTypeInfo::TypeHandle GetRootHandle(StackHandleScopeCollection* handles,
+static inline ReferenceTypeInfo::TypeHandle GetRootHandle(VariableSizedHandleScope* handles,
ClassLinker::ClassRoot class_root,
ReferenceTypeInfo::TypeHandle* cache) {
if (!ReferenceTypeInfo::IsValidHandle(*cache)) {
@@ -109,7 +109,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
Handle<mirror::DexCache> hint_dex_cache,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
bool is_first_run,
const char* name)
: HOptimization(graph, name),
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 61428b2a45..4663471729 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -34,7 +34,7 @@ class ReferenceTypePropagation : public HOptimization {
public:
ReferenceTypePropagation(HGraph* graph,
Handle<mirror::DexCache> hint_dex_cache,
- StackHandleScopeCollection* handles,
+ VariableSizedHandleScope* handles,
bool is_first_run,
const char* name = kReferenceTypePropagationPassName);
@@ -56,7 +56,7 @@ class ReferenceTypePropagation : public HOptimization {
private:
class HandleCache {
public:
- explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
+ explicit HandleCache(VariableSizedHandleScope* handles) : handles_(handles) { }
template <typename T>
MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -74,7 +74,7 @@ class ReferenceTypePropagation : public HOptimization {
ReferenceTypeInfo::TypeHandle GetThrowableClassHandle();
private:
- StackHandleScopeCollection* handles_;
+ VariableSizedHandleScope* handles_;
ReferenceTypeInfo::TypeHandle object_class_handle_;
ReferenceTypeInfo::TypeHandle class_class_handle_;
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 75a4eac538..b061c871b0 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -35,7 +35,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
~ReferenceTypePropagationTest() { }
- void SetupPropagation(StackHandleScopeCollection* handles) {
+ void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
Handle<mirror::DexCache>(),
@@ -79,7 +79,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
TEST_F(ReferenceTypePropagationTest, ProperSetup) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
SetupPropagation(&handles);
EXPECT_TRUE(propagation_ != nullptr);
@@ -88,7 +88,7 @@ TEST_F(ReferenceTypePropagationTest, ProperSetup) {
TEST_F(ReferenceTypePropagationTest, MergeInvalidTypes) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
SetupPropagation(&handles);
// Two invalid types.
@@ -120,7 +120,7 @@ TEST_F(ReferenceTypePropagationTest, MergeInvalidTypes) {
TEST_F(ReferenceTypePropagationTest, MergeValidTypes) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
+ VariableSizedHandleScope handles(soa.Self());
SetupPropagation(&handles);
// Same types.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index d7360adef8..45dac54115 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -49,7 +49,7 @@ class SsaBuilder : public ValueObject {
public:
SsaBuilder(HGraph* graph,
Handle<mirror::DexCache> dex_cache,
- StackHandleScopeCollection* handles)
+ VariableSizedHandleScope* handles)
: graph_(graph),
dex_cache_(dex_cache),
handles_(handles),
@@ -116,7 +116,7 @@ class SsaBuilder : public ValueObject {
HGraph* graph_;
Handle<mirror::DexCache> dex_cache_;
- StackHandleScopeCollection* const handles_;
+ VariableSizedHandleScope* const handles_;
// True if types of ambiguous ArrayGets have been resolved.
bool agets_fixed_;