summaryrefslogtreecommitdiff
path: root/compiler/optimizing
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--compiler/optimizing/boolean_simplifier.cc5
-rw-r--r--compiler/optimizing/builder.cc68
-rw-r--r--compiler/optimizing/builder.h11
-rw-r--r--compiler/optimizing/dead_code_elimination.cc2
-rw-r--r--compiler/optimizing/graph_checker.cc160
-rw-r--r--compiler/optimizing/graph_checker.h3
-rw-r--r--compiler/optimizing/graph_visualizer.cc48
-rw-r--r--compiler/optimizing/graph_visualizer.h2
-rw-r--r--compiler/optimizing/gvn.cc4
-rw-r--r--compiler/optimizing/gvn_test.cc25
-rw-r--r--compiler/optimizing/inliner.cc3
-rw-r--r--compiler/optimizing/licm.cc2
-rw-r--r--compiler/optimizing/licm_test.cc195
-rw-r--r--compiler/optimizing/nodes.cc137
-rw-r--r--compiler/optimizing/nodes.h253
-rw-r--r--compiler/optimizing/optimization.h2
-rw-r--r--compiler/optimizing/optimizing_compiler.cc49
-rw-r--r--compiler/optimizing/side_effects_analysis.cc9
-rw-r--r--compiler/optimizing/side_effects_test.cc219
-rw-r--r--compiler/optimizing/ssa_builder.cc41
-rw-r--r--compiler/optimizing/ssa_builder.h19
-rw-r--r--compiler/optimizing/ssa_phi_elimination.cc11
-rw-r--r--compiler/optimizing/stack_map_stream.cc2
-rw-r--r--compiler/optimizing/stack_map_test.cc15
24 files changed, 1094 insertions, 191 deletions
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index 329112a377..84201c39a7 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -154,11 +154,6 @@ void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
// entry block. Any following blocks would have had the join block
// as a dominator, and `MergeWith` handles changing that to the
// entry block.
-
- // Remove the original condition if it is now unused.
- if (!if_condition->HasUses()) {
- if_condition->GetBlock()->RemoveInstructionOrPhi(if_condition);
- }
}
void HBooleanSimplifier::Run() {
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 1319f2c62a..52a3a1534a 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -804,7 +804,9 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
invoke_type = kDirect;
break;
case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
invoke_type = kVirtual;
break;
case Instruction::INVOKE_INTERFACE:
@@ -1051,7 +1053,15 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_22c();
uint32_t obj_reg = instruction.VRegB_22c();
- uint16_t field_index = instruction.VRegC_22c();
+ uint16_t field_index;
+ if (instruction.IsQuickened()) {
+ if (!CanDecodeQuickenedInfo()) {
+ return false;
+ }
+ field_index = LookupQuickenedInfo(dex_pc);
+ } else {
+ field_index = instruction.VRegC_22c();
+ }
ScopedObjectAccess soa(Thread::Current());
ArtField* resolved_field =
@@ -1560,6 +1570,17 @@ void HGraphBuilder::PotentiallyAddSuspendCheck(HBasicBlock* target, uint32_t dex
}
}
+bool HGraphBuilder::CanDecodeQuickenedInfo() const {
+ return interpreter_metadata_ != nullptr;
+}
+
+uint16_t HGraphBuilder::LookupQuickenedInfo(uint32_t dex_pc) {
+ DCHECK(interpreter_metadata_ != nullptr);
+ uint32_t dex_pc_in_map = DecodeUnsignedLeb128(&interpreter_metadata_);
+ DCHECK_EQ(dex_pc, dex_pc_in_map);
+ return DecodeUnsignedLeb128(&interpreter_metadata_);
+}
+
bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
if (current_block_ == nullptr) {
return true; // Dead code
@@ -1657,6 +1678,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::RETURN_VOID_NO_BARRIER:
case Instruction::RETURN_VOID: {
BuildReturn(instruction, Primitive::kPrimVoid);
break;
@@ -1705,8 +1727,17 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_SUPER:
- case Instruction::INVOKE_VIRTUAL: {
- uint32_t method_idx = instruction.VRegB_35c();
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
+ uint16_t method_idx;
+ if (instruction.Opcode() == Instruction::INVOKE_VIRTUAL_QUICK) {
+ if (!CanDecodeQuickenedInfo()) {
+ return false;
+ }
+ method_idx = LookupQuickenedInfo(dex_pc);
+ } else {
+ method_idx = instruction.VRegB_35c();
+ }
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
@@ -1721,8 +1752,17 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::INVOKE_INTERFACE_RANGE:
case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_SUPER_RANGE:
- case Instruction::INVOKE_VIRTUAL_RANGE: {
- uint32_t method_idx = instruction.VRegB_3rc();
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ uint16_t method_idx;
+ if (instruction.Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK) {
+ if (!CanDecodeQuickenedInfo()) {
+ return false;
+ }
+ method_idx = LookupQuickenedInfo(dex_pc);
+ } else {
+ method_idx = instruction.VRegB_3rc();
+ }
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
if (!BuildInvoke(instruction, dex_pc, method_idx,
@@ -2375,12 +2415,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
case Instruction::IGET:
+ case Instruction::IGET_QUICK:
case Instruction::IGET_WIDE:
+ case Instruction::IGET_WIDE_QUICK:
case Instruction::IGET_OBJECT:
+ case Instruction::IGET_OBJECT_QUICK:
case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BOOLEAN_QUICK:
case Instruction::IGET_BYTE:
+ case Instruction::IGET_BYTE_QUICK:
case Instruction::IGET_CHAR:
- case Instruction::IGET_SHORT: {
+ case Instruction::IGET_CHAR_QUICK:
+ case Instruction::IGET_SHORT:
+ case Instruction::IGET_SHORT_QUICK: {
if (!BuildInstanceFieldAccess(instruction, dex_pc, false)) {
return false;
}
@@ -2388,12 +2435,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::IPUT:
+ case Instruction::IPUT_QUICK:
case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_OBJECT_QUICK:
case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BOOLEAN_QUICK:
case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_BYTE_QUICK:
case Instruction::IPUT_CHAR:
- case Instruction::IPUT_SHORT: {
+ case Instruction::IPUT_CHAR_QUICK:
+ case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_SHORT_QUICK: {
if (!BuildInstanceFieldAccess(instruction, dex_pc, true)) {
return false;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 76610f5be2..ad5d92345b 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -39,7 +39,8 @@ class HGraphBuilder : public ValueObject {
const DexCompilationUnit* const outer_compilation_unit,
const DexFile* dex_file,
CompilerDriver* driver,
- OptimizingCompilerStats* compiler_stats)
+ OptimizingCompilerStats* compiler_stats,
+ const uint8_t* interpreter_metadata)
: arena_(graph->GetArena()),
branch_targets_(graph->GetArena(), 0),
locals_(graph->GetArena(), 0),
@@ -55,7 +56,8 @@ class HGraphBuilder : public ValueObject {
code_start_(nullptr),
latest_result_(nullptr),
can_use_baseline_for_string_init_(true),
- compilation_stats_(compiler_stats) {}
+ compilation_stats_(compiler_stats),
+ interpreter_metadata_(interpreter_metadata) {}
// Only for unit testing.
HGraphBuilder(HGraph* graph, Primitive::Type return_type = Primitive::kPrimInt)
@@ -120,6 +122,9 @@ class HGraphBuilder : public ValueObject {
const DexFile::CodeItem& code_item,
const DexFile::TryItem& try_item);
+ bool CanDecodeQuickenedInfo() const;
+ uint16_t LookupQuickenedInfo(uint32_t dex_pc);
+
void InitializeLocals(uint16_t count);
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
@@ -307,6 +312,8 @@ class HGraphBuilder : public ValueObject {
OptimizingCompilerStats* compilation_stats_;
+ const uint8_t* interpreter_metadata_;
+
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 5de629d605..6269d1628e 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -128,7 +128,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
for (i.Advance(); !i.Done(); i.Advance()) {
HInstruction* inst = i.Current();
DCHECK(!inst->IsControlFlow());
- if (!inst->HasSideEffects()
+ if (!inst->DoesAnyWrite()
&& !inst->CanThrow()
&& !inst->IsSuspendCheck()
// If we added an explicit barrier then we should keep it.
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 9679d0ab70..cfebb77dd7 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -136,6 +136,33 @@ void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) {
VisitInstruction(check);
}
+void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
+ // Ensure that all exception handlers are catch blocks and that handlers
+ // are not listed multiple times.
+ // Note that a normal-flow successor may be a catch block before CFG
+ // simplification. We only test normal-flow successors in SsaChecker.
+ for (HExceptionHandlerIterator it(*try_boundary); !it.Done(); it.Advance()) {
+ HBasicBlock* handler = it.Current();
+ if (!handler->IsCatchBlock()) {
+ AddError(StringPrintf("Block %d with %s:%d has exceptional successor %d which "
+ "is not a catch block.",
+ current_block_->GetBlockId(),
+ try_boundary->DebugName(),
+ try_boundary->GetId(),
+ handler->GetBlockId()));
+ }
+ if (current_block_->GetSuccessors().Contains(
+ handler, /* start_from */ it.CurrentSuccessorIndex() + 1)) {
+ AddError(StringPrintf("Exception handler block %d of %s:%d is listed multiple times.",
+ handler->GetBlockId(),
+ try_boundary->DebugName(),
+ try_boundary->GetId()));
+ }
+ }
+
+ VisitInstruction(try_boundary);
+}
+
void GraphChecker::VisitInstruction(HInstruction* instruction) {
if (seen_ids_.IsBitSet(instruction->GetId())) {
AddError(StringPrintf("Instruction id %d is duplicate in graph.",
@@ -301,11 +328,32 @@ void GraphChecker::VisitInstanceOf(HInstanceOf* instruction) {
void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
super_type::VisitBasicBlock(block);
+ // Ensure that catch blocks are not normal successors, and normal blocks are
+ // never exceptional successors.
+ const size_t num_normal_successors = block->NumberOfNormalSuccessors();
+ for (size_t j = 0; j < num_normal_successors; ++j) {
+ HBasicBlock* successor = block->GetSuccessors().Get(j);
+ if (successor->IsCatchBlock()) {
+ AddError(StringPrintf("Catch block %d is a normal successor of block %d.",
+ successor->GetBlockId(),
+ block->GetBlockId()));
+ }
+ }
+ for (size_t j = num_normal_successors, e = block->GetSuccessors().Size(); j < e; ++j) {
+ HBasicBlock* successor = block->GetSuccessors().Get(j);
+ if (!successor->IsCatchBlock()) {
+ AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.",
+ successor->GetBlockId(),
+ block->GetBlockId()));
+ }
+ }
+
// Ensure there is no critical edge (i.e., an edge connecting a
// block with multiple successors to a block with multiple
- // predecessors).
- if (block->GetSuccessors().Size() > 1) {
- for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
+ // predecessors). Exceptional edges are synthesized and hence
+ // not accounted for.
+ if (block->NumberOfNormalSuccessors() > 1) {
+ for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) {
HBasicBlock* successor = block->GetSuccessors().Get(j);
if (successor->GetPredecessors().Size() > 1) {
AddError(StringPrintf("Critical edge between blocks %d and %d.",
@@ -326,6 +374,54 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
}
}
+ // Ensure try membership information is consistent.
+ HTryBoundary* try_entry = block->GetTryEntry();
+ if (block->IsCatchBlock()) {
+ if (try_entry != nullptr) {
+ AddError(StringPrintf("Catch blocks should not be try blocks but catch block %d "
+ "has try entry %s:%d.",
+ block->GetBlockId(),
+ try_entry->DebugName(),
+ try_entry->GetId()));
+ }
+
+ if (block->IsLoopHeader()) {
+ AddError(StringPrintf("Catch blocks should not be loop headers but catch block %d is.",
+ block->GetBlockId()));
+ }
+ } else {
+ for (size_t i = 0; i < block->GetPredecessors().Size(); ++i) {
+ HBasicBlock* predecessor = block->GetPredecessors().Get(i);
+ HTryBoundary* incoming_try_entry = predecessor->ComputeTryEntryOfSuccessors();
+ if (try_entry == nullptr) {
+ if (incoming_try_entry != nullptr) {
+ AddError(StringPrintf("Block %d has no try entry but try entry %s:%d follows "
+ "from predecessor %d.",
+ block->GetBlockId(),
+ incoming_try_entry->DebugName(),
+ incoming_try_entry->GetId(),
+ predecessor->GetBlockId()));
+ }
+ } else if (incoming_try_entry == nullptr) {
+ AddError(StringPrintf("Block %d has try entry %s:%d but no try entry follows "
+ "from predecessor %d.",
+ block->GetBlockId(),
+ try_entry->DebugName(),
+ try_entry->GetId(),
+ predecessor->GetBlockId()));
+ } else if (!incoming_try_entry->HasSameExceptionHandlersAs(*try_entry)) {
+ AddError(StringPrintf("Block %d has try entry %s:%d which is not consistent "
+ "with %s:%d that follows from predecessor %d.",
+ block->GetBlockId(),
+ try_entry->DebugName(),
+ try_entry->GetId(),
+ incoming_try_entry->DebugName(),
+ incoming_try_entry->GetId(),
+ predecessor->GetBlockId()));
+ }
+ }
+ }
+
if (block->IsLoopHeader()) {
CheckLoop(block);
}
@@ -472,32 +568,6 @@ void SSAChecker::VisitPhi(HPhi* phi) {
phi->GetBlock()->GetBlockId()));
}
- // Ensure the number of inputs of a phi is the same as the number of
- // its predecessors.
- const GrowableArray<HBasicBlock*>& predecessors =
- phi->GetBlock()->GetPredecessors();
- if (phi->InputCount() != predecessors.Size()) {
- AddError(StringPrintf(
- "Phi %d in block %d has %zu inputs, "
- "but block %d has %zu predecessors.",
- phi->GetId(), phi->GetBlock()->GetBlockId(), phi->InputCount(),
- phi->GetBlock()->GetBlockId(), predecessors.Size()));
- } else {
- // Ensure phi input at index I either comes from the Ith
- // predecessor or from a block that dominates this predecessor.
- for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
- HInstruction* input = phi->InputAt(i);
- HBasicBlock* predecessor = predecessors.Get(i);
- if (!(input->GetBlock() == predecessor
- || input->GetBlock()->Dominates(predecessor))) {
- AddError(StringPrintf(
- "Input %d at index %zu of phi %d from block %d is not defined in "
- "predecessor number %zu nor in a block dominating it.",
- input->GetId(), i, phi->GetId(), phi->GetBlock()->GetBlockId(),
- i));
- }
- }
- }
// Ensure that the inputs have the same primitive kind as the phi.
for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
HInstruction* input = phi->InputAt(i);
@@ -516,6 +586,38 @@ void SSAChecker::VisitPhi(HPhi* phi) {
phi->GetBlock()->GetBlockId(),
Primitive::PrettyDescriptor(phi->GetType())));
}
+
+ if (phi->IsCatchPhi()) {
+ // The number of inputs of a catch phi corresponds to the total number of
+ // throwing instructions caught by this catch block.
+ } else {
+ // Ensure the number of inputs of a non-catch phi is the same as the number
+ // of its predecessors.
+ const GrowableArray<HBasicBlock*>& predecessors =
+ phi->GetBlock()->GetPredecessors();
+ if (phi->InputCount() != predecessors.Size()) {
+ AddError(StringPrintf(
+ "Phi %d in block %d has %zu inputs, "
+ "but block %d has %zu predecessors.",
+ phi->GetId(), phi->GetBlock()->GetBlockId(), phi->InputCount(),
+ phi->GetBlock()->GetBlockId(), predecessors.Size()));
+ } else {
+ // Ensure phi input at index I either comes from the Ith
+ // predecessor or from a block that dominates this predecessor.
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ HInstruction* input = phi->InputAt(i);
+ HBasicBlock* predecessor = predecessors.Get(i);
+ if (!(input->GetBlock() == predecessor
+ || input->GetBlock()->Dominates(predecessor))) {
+ AddError(StringPrintf(
+ "Input %d at index %zu of phi %d from block %d is not defined in "
+ "predecessor number %zu nor in a block dominating it.",
+ input->GetId(), i, phi->GetId(), phi->GetBlock()->GetBlockId(),
+ i));
+ }
+ }
+ }
+ }
}
void SSAChecker::HandleBooleanInput(HInstruction* instruction, size_t input_index) {
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 7c72e23e2d..0e270dbe18 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -48,6 +48,9 @@ class GraphChecker : public HGraphDelegateVisitor {
// Check that the HasBoundsChecks() flag is set for bounds checks.
void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
+ // Check successors of blocks ending in TryBoundary.
+ void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE;
+
// Check that HCheckCast and HInstanceOf have HLoadClass as second input.
void VisitCheckCast(HCheckCast* check) OVERRIDE;
void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index aaf7a6d8f5..694b68ce94 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -158,12 +158,14 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
std::ostream& output,
const char* pass_name,
bool is_after_pass,
+ bool graph_in_bad_state,
const CodeGenerator& codegen,
const DisassemblyInformation* disasm_info = nullptr)
: HGraphDelegateVisitor(graph),
output_(output),
pass_name_(pass_name),
is_after_pass_(is_after_pass),
+ graph_in_bad_state_(graph_in_bad_state),
codegen_(codegen),
disasm_info_(disasm_info),
disassembler_(disasm_info_ != nullptr
@@ -251,11 +253,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintSuccessors(HBasicBlock* block) {
AddIndent();
output_ << "successors";
- for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
- if (!block->IsExceptionalSuccessor(i)) {
- HBasicBlock* successor = block->GetSuccessors().Get(i);
- output_ << " \"B" << successor->GetBlockId() << "\" ";
- }
+ for (size_t i = 0; i < block->NumberOfNormalSuccessors(); ++i) {
+ HBasicBlock* successor = block->GetSuccessors().Get(i);
+ output_ << " \"B" << successor->GetBlockId() << "\" ";
}
output_<< std::endl;
}
@@ -263,11 +263,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintExceptionHandlers(HBasicBlock* block) {
AddIndent();
output_ << "xhandlers";
- for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
- if (block->IsExceptionalSuccessor(i)) {
- HBasicBlock* handler = block->GetSuccessors().Get(i);
- output_ << " \"B" << handler->GetBlockId() << "\" ";
- }
+ for (size_t i = block->NumberOfNormalSuccessors(); i < block->GetSuccessors().Size(); ++i) {
+ HBasicBlock* handler = block->GetSuccessors().Get(i);
+ output_ << " \"B" << handler->GetBlockId() << "\" ";
}
if (block->IsExitBlock() &&
(disasm_info_ != nullptr) &&
@@ -351,6 +349,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitPhi(HPhi* phi) OVERRIDE {
StartAttributeStream("reg") << phi->GetRegNumber();
+ StartAttributeStream("is_catch_phi") << std::boolalpha << phi->IsCatchPhi() << std::noboolalpha;
}
void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE {
@@ -586,7 +585,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void Run() {
StartTag("cfg");
- std::string pass_desc = std::string(pass_name_) + (is_after_pass_ ? " (after)" : " (before)");
+ std::string pass_desc = std::string(pass_name_)
+ + " ("
+ + (is_after_pass_ ? "after" : "before")
+ + (graph_in_bad_state_ ? ", bad_state" : "")
+ + ")";
PrintProperty("name", pass_desc.c_str());
if (disasm_info_ != nullptr) {
DumpDisassemblyBlockForFrameEntry();
@@ -655,6 +658,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
std::ostream& output_;
const char* pass_name_;
const bool is_after_pass_;
+ const bool graph_in_bad_state_;
const CodeGenerator& codegen_;
const DisassemblyInformation* disasm_info_;
std::unique_ptr<HGraphVisualizerDisassembler> disassembler_;
@@ -670,7 +674,7 @@ HGraphVisualizer::HGraphVisualizer(std::ostream* output,
void HGraphVisualizer::PrintHeader(const char* method_name) const {
DCHECK(output_ != nullptr);
- HGraphVisualizerPrinter printer(graph_, *output_, "", true, codegen_);
+ HGraphVisualizerPrinter printer(graph_, *output_, "", true, false, codegen_);
printer.StartTag("compilation");
printer.PrintProperty("name", method_name);
printer.PrintProperty("method", method_name);
@@ -678,10 +682,17 @@ void HGraphVisualizer::PrintHeader(const char* method_name) const {
printer.EndTag("compilation");
}
-void HGraphVisualizer::DumpGraph(const char* pass_name, bool is_after_pass) const {
+void HGraphVisualizer::DumpGraph(const char* pass_name,
+ bool is_after_pass,
+ bool graph_in_bad_state) const {
DCHECK(output_ != nullptr);
if (!graph_->GetBlocks().IsEmpty()) {
- HGraphVisualizerPrinter printer(graph_, *output_, pass_name, is_after_pass, codegen_);
+ HGraphVisualizerPrinter printer(graph_,
+ *output_,
+ pass_name,
+ is_after_pass,
+ graph_in_bad_state,
+ codegen_);
printer.Run();
}
}
@@ -689,8 +700,13 @@ void HGraphVisualizer::DumpGraph(const char* pass_name, bool is_after_pass) cons
void HGraphVisualizer::DumpGraphWithDisassembly() const {
DCHECK(output_ != nullptr);
if (!graph_->GetBlocks().IsEmpty()) {
- HGraphVisualizerPrinter printer(
- graph_, *output_, "disassembly", true, codegen_, codegen_.GetDisassemblyInformation());
+ HGraphVisualizerPrinter printer(graph_,
+ *output_,
+ "disassembly",
+ /* is_after_pass */ true,
+ /* graph_in_bad_state */ false,
+ codegen_,
+ codegen_.GetDisassemblyInformation());
printer.Run();
}
}
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index b6b66df601..66588f6e36 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -104,7 +104,7 @@ class HGraphVisualizer : public ValueObject {
const CodeGenerator& codegen);
void PrintHeader(const char* method_name) const;
- void DumpGraph(const char* pass_name, bool is_after_pass = true) const;
+ void DumpGraph(const char* pass_name, bool is_after_pass, bool graph_in_bad_state) const;
void DumpGraphWithDisassembly() const;
private:
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 708733e28c..39006465d5 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -120,7 +120,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
// Removes all instructions in the set affected by the given side effects.
void Kill(SideEffects side_effects) {
DeleteAllImpureWhich([side_effects](Node* node) {
- return node->GetInstruction()->GetSideEffects().DependsOn(side_effects);
+ return node->GetInstruction()->GetSideEffects().MayDependOn(side_effects);
});
}
@@ -264,7 +264,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
// odd buckets to speed up deletion.
size_t HashCode(HInstruction* instruction) const {
size_t hash_code = instruction->ComputeHashCode();
- if (instruction->GetSideEffects().HasDependencies()) {
+ if (instruction->GetSideEffects().DoesAnyRead()) {
return (hash_code << 1) | 0;
} else {
return (hash_code << 1) | 1;
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index d8a09ffc38..5c6239b3f9 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -206,7 +206,7 @@ TEST(GVNTest, LoopFieldElimination) {
// and the body to be GVN'ed.
loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
- Primitive::kPrimNot,
+ Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
@@ -323,9 +323,10 @@ TEST(GVNTest, LoopSideEffects) {
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
- ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects());
- ASSERT_FALSE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects());
- ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects());
+ ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
}
// Check that the side effects of the outer loop does not affect the inner loop.
@@ -343,10 +344,10 @@ TEST(GVNTest, LoopSideEffects) {
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
- ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects());
- ASSERT_TRUE(side_effects.GetBlockEffects(outer_loop_body).HasSideEffects());
- ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects());
- ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects());
+ ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
}
// Check that the side effects of the inner loop affects the outer loop.
@@ -365,10 +366,10 @@ TEST(GVNTest, LoopSideEffects) {
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
- ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects());
- ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).HasSideEffects());
- ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects());
- ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects());
+ ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
}
}
} // namespace art
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3efe7c77fa..cea7dd9b8d 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -326,7 +326,8 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
&outer_compilation_unit_,
resolved_method->GetDexFile(),
compiler_driver_,
- &inline_stats);
+ &inline_stats,
+ resolved_method->GetQuickenedInfo());
if (!builder.BuildGraph(*code_item)) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 2535ea274a..5b89b4ec74 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -115,7 +115,7 @@ void LICM::Run() {
HInstruction* instruction = inst_it.Current();
if (instruction->CanBeMoved()
&& (!instruction->CanThrow() || !found_first_non_hoisted_throwing_instruction_in_loop)
- && !instruction->GetSideEffects().DependsOn(loop_effects)
+ && !instruction->GetSideEffects().MayDependOn(loop_effects)
&& InputsAreDefinedBeforeLoop(instruction)) {
// We need to update the environment if the instruction has a loop header
// phi in it.
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
new file mode 100644
index 0000000000..6e6e0b5803
--- /dev/null
+++ b/compiler/optimizing/licm_test.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/arena_allocator.h"
+#include "builder.h"
+#include "gtest/gtest.h"
+#include "licm.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "side_effects_analysis.h"
+
+namespace art {
+
+/**
+ * Fixture class for the LICM tests.
+ */
+class LICMTest : public testing::Test {
+ public:
+ LICMTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ }
+
+ ~LICMTest() { }
+
+ // Builds a singly-nested loop structure in CFG. Tests can further populate
+ // the basic blocks with instructions to set up interesting scenarios.
+ void BuildLoop() {
+ entry_ = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ loop_header_ = new (&allocator_) HBasicBlock(graph_);
+ loop_body_ = new (&allocator_) HBasicBlock(graph_);
+ exit_ = new (&allocator_) HBasicBlock(graph_);
+
+ graph_->AddBlock(entry_);
+ graph_->AddBlock(loop_preheader_);
+ graph_->AddBlock(loop_header_);
+ graph_->AddBlock(loop_body_);
+ graph_->AddBlock(exit_);
+
+ graph_->SetEntryBlock(entry_);
+ graph_->SetExitBlock(exit_);
+
+ // Set up loop flow in CFG.
+ entry_->AddSuccessor(loop_preheader_);
+ loop_preheader_->AddSuccessor(loop_header_);
+ loop_header_->AddSuccessor(loop_body_);
+ loop_header_->AddSuccessor(exit_);
+ loop_body_->AddSuccessor(loop_header_);
+
+ // Provide boiler-plate instructions.
+ parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ entry_->AddInstruction(parameter_);
+ constant_ = new (&allocator_) HConstant(Primitive::kPrimInt);
+ loop_preheader_->AddInstruction(constant_);
+ loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
+ loop_body_->AddInstruction(new (&allocator_) HGoto());
+ exit_->AddInstruction(new (&allocator_) HExit());
+ }
+
+ // Performs LICM optimizations (after proper set up).
+ void PerformLICM() {
+ ASSERT_TRUE(graph_->TryBuildingSsa());
+ SideEffectsAnalysis side_effects(graph_);
+ side_effects.Run();
+ LICM licm(graph_, side_effects);
+ licm.Run();
+ }
+
+ // General building fields.
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+
+ // Specific basic blocks.
+ HBasicBlock* entry_;
+ HBasicBlock* loop_preheader_;
+ HBasicBlock* loop_header_;
+ HBasicBlock* loop_body_;
+ HBasicBlock* exit_;
+
+ HInstruction* parameter_; // "this"
+ HInstruction* constant_;
+};
+
+//
+// The actual LICM tests.
+//
+
+TEST_F(LICMTest, ConstantHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set array to constant.
+ HInstruction* constant = new (&allocator_) HConstant(Primitive::kPrimDouble);
+ loop_body_->InsertInstructionBefore(constant, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, constant_, constant, Primitive::kPrimDouble, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ CHECK_EQ(constant->GetBlock(), loop_body_);
+ CHECK_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ CHECK_EQ(constant->GetBlock(), loop_preheader_);
+ CHECK_EQ(set_array->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, FieldHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get field with different types.
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
+ parameter_, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
+ HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
+
+ CHECK_EQ(get_field->GetBlock(), loop_body_);
+ CHECK_EQ(set_field->GetBlock(), loop_body_);
+ PerformLICM();
+ CHECK_EQ(get_field->GetBlock(), loop_preheader_);
+ CHECK_EQ(set_field->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, NoFieldHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get field with same types.
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
+ parameter_, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
+ HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
+ false, kUnknownFieldIndex, graph_->GetDexFile());
+ loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
+
+ CHECK_EQ(get_field->GetBlock(), loop_body_);
+ CHECK_EQ(set_field->GetBlock(), loop_body_);
+ PerformLICM();
+ CHECK_EQ(get_field->GetBlock(), loop_body_);
+ CHECK_EQ(set_field->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, ArrayHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get array with different types.
+ HInstruction* get_array = new (&allocator_) HArrayGet(
+ parameter_, constant_, Primitive::kPrimLong);
+ loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, constant_, constant_, Primitive::kPrimInt, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ CHECK_EQ(get_array->GetBlock(), loop_body_);
+ CHECK_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ CHECK_EQ(get_array->GetBlock(), loop_preheader_);
+ CHECK_EQ(set_array->GetBlock(), loop_body_);
+}
+
+TEST_F(LICMTest, NoArrayHoisting) {
+ BuildLoop();
+
+ // Populate the loop with instructions: set/get array with same types.
+ HInstruction* get_array = new (&allocator_) HArrayGet(
+ parameter_, constant_, Primitive::kPrimLong);
+ loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
+ HInstruction* set_array = new (&allocator_) HArraySet(
+ parameter_, get_array, constant_, Primitive::kPrimLong, 0);
+ loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
+
+ CHECK_EQ(get_array->GetBlock(), loop_body_);
+ CHECK_EQ(set_array->GetBlock(), loop_body_);
+ PerformLICM();
+ CHECK_EQ(get_array->GetBlock(), loop_body_);
+ CHECK_EQ(set_array->GetBlock(), loop_body_);
+}
+
+} // namespace art
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 588ab70001..519fa005a6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -98,26 +98,31 @@ void HGraph::VisitBlockForBackEdges(HBasicBlock* block,
}
void HGraph::BuildDominatorTree() {
+ // (1) Simplify the CFG so that catch blocks have only exceptional incoming
+ // edges. This invariant simplifies building SSA form because Phis cannot
+ // collect both normal- and exceptional-flow values at the same time.
+ SimplifyCatchBlocks();
+
ArenaBitVector visited(arena_, blocks_.Size(), false);
- // (1) Find the back edges in the graph doing a DFS traversal.
+ // (2) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
- // (2) Remove instructions and phis from blocks not visited during
+ // (3) Remove instructions and phis from blocks not visited during
// the initial DFS as users from other instructions, so that
// users can be safely removed before uses later.
RemoveInstructionsAsUsersFromDeadBlocks(visited);
- // (3) Remove blocks not visited during the initial DFS.
+ // (4) Remove blocks not visited during the initial DFS.
- // Step (4) requires dead blocks to be removed from the
+ // Step (5) requires dead blocks to be removed from the
// predecessors list of live blocks.
RemoveDeadBlocks(visited);
- // (4) Simplify the CFG now, so that we don't need to recompute
+ // (5) Simplify the CFG now, so that we don't need to recompute
// dominators and the reverse post order.
SimplifyCFG();
- // (5) Compute the dominance information and the reverse post order.
+ // (6) Compute the dominance information and the reverse post order.
ComputeDominanceInformation();
}
@@ -261,6 +266,83 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
info->SetSuspendCheck(first_instruction->AsSuspendCheck());
}
+static bool CheckIfPredecessorAtIsExceptional(const HBasicBlock& block, size_t pred_idx) {
+ HBasicBlock* predecessor = block.GetPredecessors().Get(pred_idx);
+ if (!predecessor->EndsWithTryBoundary()) {
+ // Only edges from HTryBoundary can be exceptional.
+ return false;
+ }
+ HTryBoundary* try_boundary = predecessor->GetLastInstruction()->AsTryBoundary();
+ if (try_boundary->GetNormalFlowSuccessor() == &block) {
+ // This block is the normal-flow successor of `try_boundary`, but it could
+ // also be one of its exception handlers if catch blocks have not been
+ // simplified yet. Predecessors are unordered, so we will consider the first
+ // occurrence to be the normal edge and a possible second occurrence to be
+ // the exceptional edge.
+ return !block.IsFirstIndexOfPredecessor(predecessor, pred_idx);
+ } else {
+ // This is not the normal-flow successor of `try_boundary`, hence it must be
+ // one of its exception handlers.
+ DCHECK(try_boundary->HasExceptionHandler(block));
+ return true;
+ }
+}
+
+void HGraph::SimplifyCatchBlocks() {
+ for (size_t i = 0; i < blocks_.Size(); ++i) {
+ HBasicBlock* catch_block = blocks_.Get(i);
+ if (!catch_block->IsCatchBlock()) {
+ continue;
+ }
+
+ bool exceptional_predecessors_only = true;
+ for (size_t j = 0; j < catch_block->GetPredecessors().Size(); ++j) {
+ if (!CheckIfPredecessorAtIsExceptional(*catch_block, j)) {
+ exceptional_predecessors_only = false;
+ break;
+ }
+ }
+
+ if (!exceptional_predecessors_only) {
+ // Catch block has normal-flow predecessors and needs to be simplified.
+ // Splitting the block before its first instruction moves all its
+ // instructions into `normal_block` and links the two blocks with a Goto.
+ // Afterwards, incoming normal-flow edges are re-linked to `normal_block`,
+ // leaving `catch_block` with the exceptional edges only.
+ // Note that catch blocks with normal-flow predecessors cannot begin with
+ // a MOVE_EXCEPTION instruction, as guaranteed by the verifier.
+ DCHECK(!catch_block->GetFirstInstruction()->IsLoadException());
+ HBasicBlock* normal_block = catch_block->SplitBefore(catch_block->GetFirstInstruction());
+ for (size_t j = 0; j < catch_block->GetPredecessors().Size(); ++j) {
+ if (!CheckIfPredecessorAtIsExceptional(*catch_block, j)) {
+ catch_block->GetPredecessors().Get(j)->ReplaceSuccessor(catch_block, normal_block);
+ --j;
+ }
+ }
+ }
+ }
+}
+
+void HGraph::ComputeTryBlockInformation() {
+ // Iterate in reverse post order to propagate try membership information from
+ // predecessors to their successors.
+ for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ if (block->IsEntryBlock() || block->IsCatchBlock()) {
+ // Catch blocks after simplification have only exceptional predecessors
+ // and hence are never in tries.
+ continue;
+ }
+
+ // Infer try membership from the first predecessor. Having simplified loops,
+ // the first predecessor can never be a back edge and therefore it must have
+ // been visited already and had its try membership set.
+ HBasicBlock* first_predecessor = block->GetPredecessors().Get(0);
+ DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
+ block->SetTryEntry(first_predecessor->ComputeTryEntryOfSuccessors());
+ }
+}
+
void HGraph::SimplifyCFG() {
// Simplify the CFG for future analysis, and code generation:
// (1): Split critical edges.
@@ -268,9 +350,10 @@ void HGraph::SimplifyCFG() {
for (size_t i = 0; i < blocks_.Size(); ++i) {
HBasicBlock* block = blocks_.Get(i);
if (block == nullptr) continue;
- if (block->GetSuccessors().Size() > 1) {
+ if (block->NumberOfNormalSuccessors() > 1) {
for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
HBasicBlock* successor = block->GetSuccessors().Get(j);
+ DCHECK(!successor->IsCatchBlock());
if (successor->GetPredecessors().Size() > 1) {
SplitCriticalEdge(block, successor);
--j;
@@ -288,6 +371,11 @@ bool HGraph::AnalyzeNaturalLoops() const {
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
if (block->IsLoopHeader()) {
+ if (block->IsCatchBlock()) {
+ // TODO: Dealing with exceptional back edges could be tricky because
+ // they only approximate the real control flow. Bail out for now.
+ return false;
+ }
HLoopInformation* info = block->GetLoopInformation();
if (!info->Populate()) {
// Abort if the loop is non natural. We currently bailout in such cases.
@@ -1086,10 +1174,20 @@ HBasicBlock* HBasicBlock::SplitAfter(HInstruction* cursor) {
return new_block;
}
-bool HBasicBlock::IsExceptionalSuccessor(size_t idx) const {
- return !GetInstructions().IsEmpty()
- && GetLastInstruction()->IsTryBoundary()
- && GetLastInstruction()->AsTryBoundary()->IsExceptionalSuccessor(idx);
+HTryBoundary* HBasicBlock::ComputeTryEntryOfSuccessors() const {
+ if (EndsWithTryBoundary()) {
+ HTryBoundary* try_boundary = GetLastInstruction()->AsTryBoundary();
+ if (try_boundary->IsEntry()) {
+ DCHECK(try_entry_ == nullptr);
+ return try_boundary;
+ } else {
+ DCHECK(try_entry_ != nullptr);
+ DCHECK(try_entry_->HasSameExceptionHandlersAs(*try_boundary));
+ return nullptr;
+ }
+ } else {
+ return try_entry_;
+ }
}
static bool HasOnlyOneInstruction(const HBasicBlock& block) {
@@ -1114,10 +1212,29 @@ bool HBasicBlock::EndsWithIf() const {
return !GetInstructions().IsEmpty() && GetLastInstruction()->IsIf();
}
+bool HBasicBlock::EndsWithTryBoundary() const {
+ return !GetInstructions().IsEmpty() && GetLastInstruction()->IsTryBoundary();
+}
+
bool HBasicBlock::HasSinglePhi() const {
return !GetPhis().IsEmpty() && GetFirstPhi()->GetNext() == nullptr;
}
+bool HTryBoundary::HasSameExceptionHandlersAs(const HTryBoundary& other) const {
+ if (GetBlock()->GetSuccessors().Size() != other.GetBlock()->GetSuccessors().Size()) {
+ return false;
+ }
+
+ // Exception handler lists cannot contain duplicates, which makes it
+ // sufficient to test inclusion only in one direction.
+ for (HExceptionHandlerIterator it(other); !it.Done(); it.Advance()) {
+ if (!HasExceptionHandler(*it.Current())) {
+ return false;
+ }
+ }
+ return true;
+}
+
size_t HInstructionList::CountSize() const {
size_t size = 0;
HInstruction* current = first_instruction_;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 57c7829c88..903e02e0ea 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -49,6 +49,7 @@ class HLongConstant;
class HNullConstant;
class HPhi;
class HSuspendCheck;
+class HTryBoundary;
class LiveInterval;
class LocationSummary;
class SlowPathCode;
@@ -182,6 +183,10 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
// visit for eliminating dead phis: a dead phi can only have loop header phi
// users remaining when being visited.
if (!AnalyzeNaturalLoops()) return false;
+ // Precompute per-block try membership before entering the SSA builder,
+ // which needs the information to build catch block phis from values of
+ // locals at throwing instructions inside try blocks.
+ ComputeTryBlockInformation();
TransformToSsa();
in_ssa_form_ = true;
return true;
@@ -193,12 +198,17 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
void BuildDominatorTree();
void TransformToSsa();
void SimplifyCFG();
+ void SimplifyCatchBlocks();
// Analyze all natural loops in this graph. Returns false if one
// loop is not natural, that is the header does not dominate the
// back edge.
bool AnalyzeNaturalLoops() const;
+ // Iterate over blocks to compute try block membership. Needs reverse post
+ // order and loop information.
+ void ComputeTryBlockInformation();
+
// Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
void InlineInto(HGraph* outer_graph, HInvoke* invoke);
@@ -730,8 +740,11 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
return GetPredecessorIndexOf(predecessor) == idx;
}
- // Returns whether successor at index `idx` is an exception handler.
- bool IsExceptionalSuccessor(size_t idx) const;
+ // Returns the number of non-exceptional successors. SsaChecker ensures that
+ // these are stored at the beginning of the successor list.
+ size_t NumberOfNormalSuccessors() const {
+ return EndsWithTryBoundary() ? 1 : GetSuccessors().Size();
+ }
// Split the block into two blocks just before `cursor`. Returns the newly
// created, latter block. Note that this method will add the block to the
@@ -830,6 +843,15 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
bool IsInLoop() const { return loop_information_ != nullptr; }
+ HTryBoundary* GetTryEntry() const { return try_entry_; }
+ void SetTryEntry(HTryBoundary* try_entry) { try_entry_ = try_entry; }
+ bool IsInTry() const { return try_entry_ != nullptr; }
+
+ // Returns the try entry that this block's successors should have. They will
+ // be in the same try, unless the block ends in a try boundary. In that case,
+ // the appropriate try entry will be returned.
+ HTryBoundary* ComputeTryEntryOfSuccessors() const;
+
// Returns whether this block dominates the blocked passed as parameter.
bool Dominates(HBasicBlock* block) const;
@@ -846,6 +868,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
bool EndsWithControlFlowInstruction() const;
bool EndsWithIf() const;
+ bool EndsWithTryBoundary() const;
bool HasSinglePhi() const;
private:
@@ -864,6 +887,10 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
size_t lifetime_end_;
bool is_catch_block_;
+ // If this block is in a try block, `try_entry_` stores one of, possibly
+ // several, TryBoundary instructions entering it.
+ HTryBoundary* try_entry_;
+
friend class HGraph;
friend class HInstruction;
@@ -1155,13 +1182,25 @@ class HUserRecord : public ValueObject {
HUseListNode<T>* use_node_;
};
-// TODO: Add better documentation to this class and maybe refactor with more suggestive names.
-// - Has(All)SideEffects suggests that all the side effects are present but only ChangesSomething
-// flag is consider.
-// - DependsOn suggests that there is a real dependency between side effects but it only
-// checks DependendsOnSomething flag.
-//
-// Represents the side effects an instruction may have.
+/**
+ * Side-effects representation for write/read dependences on fields/arrays.
+ *
+ * The dependence analysis uses type disambiguation (e.g. a float field write
+ * cannot modify the value of an integer field read) and the access type (e.g.
+ * a reference array write cannot modify the value of a reference field read
+ * [although it may modify the reference fetch prior to reading the field,
+ * which is represented by its own write/read dependence]). The analysis
+ * makes conservative points-to assumptions on reference types (e.g. two same-
+ * typed arrays are assumed to be the same, and any reference read depends
+ * on any reference read without further regard of its type).
+ *
+ * The internal representation uses the following 36-bit flags assignments:
+ *
+ * |ARRAY-R |FIELD-R |ARRAY-W |FIELD-W |
+ * +---------+---------+---------+---------+
+ * |543210987|654321098|765432109|876543210|
+ * |DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|
+ */
class SideEffects : public ValueObject {
public:
SideEffects() : flags_(0) {}
@@ -1171,57 +1210,125 @@ class SideEffects : public ValueObject {
}
static SideEffects All() {
- return SideEffects(ChangesSomething().flags_ | DependsOnSomething().flags_);
+ return SideEffects(kAllWrites | kAllReads);
}
- static SideEffects ChangesSomething() {
- return SideEffects((1 << kFlagChangesCount) - 1);
+ static SideEffects AllWrites() {
+ return SideEffects(kAllWrites);
}
- static SideEffects DependsOnSomething() {
- int count = kFlagDependsOnCount - kFlagChangesCount;
- return SideEffects(((1 << count) - 1) << kFlagChangesCount);
+ static SideEffects AllReads() {
+ return SideEffects(kAllReads);
}
+ static SideEffects FieldWriteOfType(Primitive::Type type, bool is_volatile) {
+ return is_volatile
+ ? All()
+ : SideEffects(TypeFlagWithAlias(type, kFieldWriteOffset));
+ }
+
+ static SideEffects ArrayWriteOfType(Primitive::Type type) {
+ return SideEffects(TypeFlagWithAlias(type, kArrayWriteOffset));
+ }
+
+ static SideEffects FieldReadOfType(Primitive::Type type, bool is_volatile) {
+ return is_volatile
+ ? All()
+ : SideEffects(TypeFlagWithAlias(type, kFieldReadOffset));
+ }
+
+ static SideEffects ArrayReadOfType(Primitive::Type type) {
+ return SideEffects(TypeFlagWithAlias(type, kArrayReadOffset));
+ }
+
+ // Combines the side-effects of this and the other.
SideEffects Union(SideEffects other) const {
return SideEffects(flags_ | other.flags_);
}
- bool HasSideEffects() const {
- size_t all_bits_set = (1 << kFlagChangesCount) - 1;
- return (flags_ & all_bits_set) != 0;
+ // Returns true if something is written.
+ bool DoesAnyWrite() const {
+ return (flags_ & kAllWrites);
}
- bool HasAllSideEffects() const {
- size_t all_bits_set = (1 << kFlagChangesCount) - 1;
- return all_bits_set == (flags_ & all_bits_set);
+ // Returns true if something is read.
+ bool DoesAnyRead() const {
+ return (flags_ & kAllReads);
}
- bool DependsOn(SideEffects other) const {
- size_t depends_flags = other.ComputeDependsFlags();
- return (flags_ & depends_flags) != 0;
+ // Returns true if nothing is written or read.
+ bool DoesNothing() const {
+ return flags_ == 0;
}
- bool HasDependencies() const {
- int count = kFlagDependsOnCount - kFlagChangesCount;
- size_t all_bits_set = (1 << count) - 1;
- return ((flags_ >> kFlagChangesCount) & all_bits_set) != 0;
+ // Returns true if potentially everything is written and read
+ // (every type and every kind of access).
+ bool DoesAll() const {
+ return flags_ == (kAllWrites | kAllReads);
}
- private:
- static constexpr int kFlagChangesSomething = 0;
- static constexpr int kFlagChangesCount = kFlagChangesSomething + 1;
+ // Returns true if this may read something written by other.
+ bool MayDependOn(SideEffects other) const {
+ const uint64_t reads = (flags_ & kAllReads) >> kFieldReadOffset;
+ return (other.flags_ & reads);
+ }
- static constexpr int kFlagDependsOnSomething = kFlagChangesCount;
- static constexpr int kFlagDependsOnCount = kFlagDependsOnSomething + 1;
+ // Returns string representation of flags (for debugging only).
+ // Format: |DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|
+ std::string ToString() const {
+ static const char *kDebug = "LZBCSIJFD";
+ std::string flags = "|";
+ for (int s = 35; s >= 0; s--) {
+ const int t = s % kBits;
+ if ((flags_ >> s) & 1)
+ flags += kDebug[t];
+ if (t == 0)
+ flags += "|";
+ }
+ return flags;
+ }
- explicit SideEffects(size_t flags) : flags_(flags) {}
+ private:
+ static constexpr int kBits = 9;
+ static constexpr int kFieldWriteOffset = 0 * kBits;
+ static constexpr int kArrayWriteOffset = 1 * kBits;
+ static constexpr int kFieldReadOffset = 2 * kBits;
+ static constexpr int kArrayReadOffset = 3 * kBits;
+
+ static constexpr uint64_t kAllWrites = 0x0003ffff;
+ static constexpr uint64_t kAllReads = kAllWrites << kFieldReadOffset;
+
+ // Work around the fact that HIR aliases I/F and J/D.
+ // TODO: remove this interceptor once HIR types are clean
+ static uint64_t TypeFlagWithAlias(Primitive::Type type, int offset) {
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ return TypeFlag(Primitive::kPrimInt, offset) |
+ TypeFlag(Primitive::kPrimFloat, offset);
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ return TypeFlag(Primitive::kPrimLong, offset) |
+ TypeFlag(Primitive::kPrimDouble, offset);
+ default:
+ return TypeFlag(type, offset);
+ }
+ }
- size_t ComputeDependsFlags() const {
- return flags_ << kFlagChangesCount;
+ // Translates type to bit flag.
+ static uint64_t TypeFlag(Primitive::Type type, int offset) {
+ CHECK_NE(type, Primitive::kPrimVoid);
+ const uint64_t one = 1;
+ const int shift = type; // 0-based consecutive enum
+ DCHECK_LE(kFieldWriteOffset, shift);
+ DCHECK_LT(shift, kArrayWriteOffset);
+ return one << (type + offset);
}
- size_t flags_;
+ // Private constructor on direct flags value.
+ explicit SideEffects(uint64_t flags) : flags_(flags) {}
+
+ uint64_t flags_;
};
// A HEnvironment object contains the values of virtual registers at a given location.
@@ -1484,7 +1591,8 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
}
virtual bool IsControlFlow() const { return false; }
virtual bool CanThrow() const { return false; }
- bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
+
+ bool DoesAnyWrite() const { return side_effects_.DoesAnyWrite(); }
// Does not apply for all instructions, but having this at top level greatly
// simplifies the null check elimination.
@@ -1976,29 +2084,24 @@ class HTryBoundary : public HTemplateInstruction<0> {
// Returns whether `handler` is among its exception handlers (non-zero index
// successors).
- bool HasExceptionHandler(HBasicBlock* handler) const {
- DCHECK(handler->IsCatchBlock());
- return GetBlock()->GetSuccessors().Contains(handler, /* start_from */ 1);
- }
-
- // Returns whether successor at index `idx` is an exception handler.
- bool IsExceptionalSuccessor(size_t idx) const {
- DCHECK_LT(idx, GetBlock()->GetSuccessors().Size());
- bool is_handler = (idx != 0);
- DCHECK(!is_handler || GetBlock()->GetSuccessors().Get(idx)->IsCatchBlock());
- return is_handler;
+ bool HasExceptionHandler(const HBasicBlock& handler) const {
+ DCHECK(handler.IsCatchBlock());
+ return GetBlock()->GetSuccessors().Contains(
+ const_cast<HBasicBlock*>(&handler), /* start_from */ 1);
}
// If not present already, adds `handler` to its block's list of exception
// handlers.
void AddExceptionHandler(HBasicBlock* handler) {
- if (!HasExceptionHandler(handler)) {
+ if (!HasExceptionHandler(*handler)) {
GetBlock()->AddSuccessor(handler);
}
}
bool IsEntry() const { return kind_ == BoundaryKind::kEntry; }
+ bool HasSameExceptionHandlersAs(const HTryBoundary& other) const;
+
DECLARE_INSTRUCTION(TryBoundary);
private:
@@ -2007,6 +2110,24 @@ class HTryBoundary : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HTryBoundary);
};
+// Iterator over exception handlers of a given HTryBoundary, i.e. over
+// exceptional successors of its basic block.
+class HExceptionHandlerIterator : public ValueObject {
+ public:
+ explicit HExceptionHandlerIterator(const HTryBoundary& try_boundary)
+ : block_(*try_boundary.GetBlock()), index_(block_.NumberOfNormalSuccessors()) {}
+
+ bool Done() const { return index_ == block_.GetSuccessors().Size(); }
+ HBasicBlock* Current() const { return block_.GetSuccessors().Get(index_); }
+ size_t CurrentSuccessorIndex() const { return index_; }
+ void Advance() { ++index_; }
+
+ private:
+ const HBasicBlock& block_;
+ size_t index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HExceptionHandlerIterator);
+};
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize : public HTemplateInstruction<1> {
@@ -2693,7 +2814,7 @@ class HInvoke : public HInstruction {
uint32_t dex_pc,
uint32_t dex_method_index,
InvokeType original_invoke_type)
- : HInstruction(SideEffects::All()),
+ : HInstruction(SideEffects::All()), // assume write/read on all fields/arrays
number_of_arguments_(number_of_arguments),
inputs_(arena, number_of_arguments),
return_type_(return_type),
@@ -3368,6 +3489,8 @@ class HPhi : public HInstruction {
}
}
+ bool IsCatchPhi() const { return GetBlock()->IsCatchBlock(); }
+
size_t InputCount() const OVERRIDE { return inputs_.Size(); }
void AddInput(HInstruction* input);
@@ -3483,7 +3606,9 @@ class HInstanceFieldGet : public HExpression<1> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HExpression(field_type, SideEffects::DependsOnSomething()),
+ : HExpression(
+ field_type,
+ SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, value);
}
@@ -3525,7 +3650,8 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HTemplateInstruction(SideEffects::ChangesSomething()),
+ : HTemplateInstruction(
+ SideEffects::FieldWriteOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file),
value_can_be_null_(true) {
SetRawInputAt(0, object);
@@ -3556,7 +3682,7 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
class HArrayGet : public HExpression<2> {
public:
HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type)
- : HExpression(type, SideEffects::DependsOnSomething()) {
+ : HExpression(type, SideEffects::ArrayReadOfType(type)) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
}
@@ -3594,7 +3720,7 @@ class HArraySet : public HTemplateInstruction<3> {
HInstruction* value,
Primitive::Type expected_component_type,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::ChangesSomething()),
+ : HTemplateInstruction(SideEffects::ArrayWriteOfType(expected_component_type)),
dex_pc_(dex_pc),
expected_component_type_(expected_component_type),
needs_type_check_(value->GetType() == Primitive::kPrimNot),
@@ -3893,7 +4019,9 @@ class HLoadString : public HExpression<1> {
class HClinitCheck : public HExpression<1> {
public:
explicit HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
- : HExpression(Primitive::kPrimNot, SideEffects::ChangesSomething()),
+ : HExpression(
+ Primitive::kPrimNot,
+ SideEffects::AllWrites()), // assume write on all fields/arrays
dex_pc_(dex_pc) {
SetRawInputAt(0, constant);
}
@@ -3929,7 +4057,9 @@ class HStaticFieldGet : public HExpression<1> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HExpression(field_type, SideEffects::DependsOnSomething()),
+ : HExpression(
+ field_type,
+ SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, cls);
}
@@ -3968,7 +4098,8 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HTemplateInstruction(SideEffects::ChangesSomething()),
+ : HTemplateInstruction(
+ SideEffects::FieldWriteOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file),
value_can_be_null_(true) {
SetRawInputAt(0, cls);
@@ -4155,7 +4286,8 @@ class HCheckCast : public HTemplateInstruction<2> {
class HMemoryBarrier : public HTemplateInstruction<0> {
public:
explicit HMemoryBarrier(MemBarrierKind barrier_kind)
- : HTemplateInstruction(SideEffects::None()),
+ : HTemplateInstruction(
+ SideEffects::All()), // assume write/read on all fields/arrays
barrier_kind_(barrier_kind) {}
MemBarrierKind GetBarrierKind() { return barrier_kind_; }
@@ -4176,7 +4308,8 @@ class HMonitorOperation : public HTemplateInstruction<1> {
};
HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::ChangesSomething()), kind_(kind), dex_pc_(dex_pc) {
+ : HTemplateInstruction(SideEffects::All()), // assume write/read on all fields/arrays
+ kind_(kind), dex_pc_(dex_pc) {
SetRawInputAt(0, object);
}
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index bc565468b2..f793a65bf3 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -40,7 +40,7 @@ class HOptimization : public ArenaObject<kArenaAllocMisc> {
// Return the name of the pass.
const char* GetPassName() const { return pass_name_; }
- // Peform the analysis itself.
+ // Perform the analysis itself.
virtual void Run() = 0;
protected:
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index ae1958afcb..601d668995 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -35,6 +35,7 @@
#include "dex/verified_method.h"
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "elf_writer_quick.h"
@@ -132,7 +133,7 @@ class PassObserver : public ValueObject {
void StartPass(const char* pass_name) {
// Dump graph first, then start timer.
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ false);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
}
if (timing_logger_enabled_) {
timing_logger_.StartTiming(pass_name);
@@ -145,7 +146,7 @@ class PassObserver : public ValueObject {
timing_logger_.EndTiming();
}
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ true);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
}
// Validate the HGraph if running in debug mode.
@@ -556,8 +557,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
}
// Implementation of the space filter: do not compile a code item whose size in
- // code units is bigger than 256.
- static constexpr size_t kSpaceFilterOptimizingThreshold = 256;
+ // code units is bigger than 128.
+ static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
&& (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
@@ -566,7 +567,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
}
DexCompilationUnit dex_compilation_unit(
- nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
+ nullptr, class_loader, Runtime::Current()->GetClassLinker(), dex_file, code_item,
class_def_idx, method_idx, access_flags,
compiler_driver->GetVerifiedMethod(&dex_file, method_idx));
@@ -603,12 +604,29 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
visualizer_output_.get(),
compiler_driver);
+ const uint8_t* interpreter_metadata = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<4> hs(soa.Self());
+ ClassLinker* class_linker = dex_compilation_unit.GetClassLinker();
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(class_loader)));
+ ArtMethod* art_method = compiler_driver->ResolveMethod(
+ soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
+ // We may not get a method, for example if its class is erroneous.
+ // TODO: Clean this up, the compiler driver should just pass the ArtMethod to compile.
+ if (art_method != nullptr) {
+ interpreter_metadata = art_method->GetQuickenedInfo();
+ }
+ }
HGraphBuilder builder(graph,
&dex_compilation_unit,
&dex_compilation_unit,
&dex_file,
compiler_driver,
- compilation_stats_.get());
+ compilation_stats_.get(),
+ interpreter_metadata);
VLOG(compiler) << "Building " << method_name;
@@ -629,7 +647,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
// or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
// to Quick.
bool can_use_baseline = !run_optimizations_ && builder.CanUseBaselineForStringInit();
- if (run_optimizations_ && can_optimize && can_allocate_registers) {
+ if (run_optimizations_ && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << method_name;
{
@@ -638,16 +656,21 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
// We could not transform the graph to SSA, bailout.
LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop";
MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
+ pass_observer.SetGraphInBadState();
return nullptr;
}
}
- return CompileOptimized(graph,
- codegen.get(),
- compiler_driver,
- dex_compilation_unit,
- &pass_observer);
- } else if (shouldOptimize && can_allocate_registers) {
+ if (can_optimize) {
+ return CompileOptimized(graph,
+ codegen.get(),
+ compiler_driver,
+ dex_compilation_unit,
+ &pass_observer);
+ }
+ }
+
+ if (shouldOptimize && can_allocate_registers) {
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
UNREACHABLE();
} else if (can_use_baseline) {
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index ea1ca5a731..9dbf638442 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -24,14 +24,15 @@ void SideEffectsAnalysis::Run() {
block_effects_.SetSize(graph_->GetBlocks().Size());
loop_effects_.SetSize(graph_->GetBlocks().Size());
+ // In DEBUG mode, ensure side effects are properly initialized to empty.
if (kIsDebugBuild) {
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
SideEffects effects = GetBlockEffects(block);
- DCHECK(!effects.HasSideEffects() && !effects.HasDependencies());
+ DCHECK(effects.DoesNothing());
if (block->IsLoopHeader()) {
effects = GetLoopEffects(block);
- DCHECK(!effects.HasSideEffects() && !effects.HasDependencies());
+ DCHECK(effects.DoesNothing());
}
}
}
@@ -46,7 +47,9 @@ void SideEffectsAnalysis::Run() {
inst_it.Advance()) {
HInstruction* instruction = inst_it.Current();
effects = effects.Union(instruction->GetSideEffects());
- if (effects.HasAllSideEffects()) {
+ // If every possible write/read is represented, scanning further
+ // will not add any more information to side-effects of this block.
+ if (effects.DoesAll()) {
break;
}
}
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
new file mode 100644
index 0000000000..8db5a8a350
--- /dev/null
+++ b/compiler/optimizing/side_effects_test.cc
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+#include "nodes.h"
+#include "primitive.h"
+
+namespace art {
+
+/**
+ * Tests for the SideEffects class.
+ */
+
+//
+// Helper methods.
+//
+
+void testWriteAndReadSanity(SideEffects write, SideEffects read) {
+ EXPECT_FALSE(write.DoesNothing());
+ EXPECT_FALSE(read.DoesNothing());
+
+ EXPECT_TRUE(write.DoesAnyWrite());
+ EXPECT_FALSE(write.DoesAnyRead());
+ EXPECT_FALSE(read.DoesAnyWrite());
+ EXPECT_TRUE(read.DoesAnyRead());
+
+ // All-dependences.
+ SideEffects all = SideEffects::All();
+ EXPECT_TRUE(all.MayDependOn(write));
+ EXPECT_FALSE(write.MayDependOn(all));
+ EXPECT_FALSE(all.MayDependOn(read));
+ EXPECT_TRUE(read.MayDependOn(all));
+
+ // None-dependences.
+ SideEffects none = SideEffects::None();
+ EXPECT_FALSE(none.MayDependOn(write));
+ EXPECT_FALSE(write.MayDependOn(none));
+ EXPECT_FALSE(none.MayDependOn(read));
+ EXPECT_FALSE(read.MayDependOn(none));
+}
+
+void testWriteAndReadDependence(SideEffects write, SideEffects read) {
+ testWriteAndReadSanity(write, read);
+
+ // Dependence only in one direction.
+ EXPECT_FALSE(write.MayDependOn(read));
+ EXPECT_TRUE(read.MayDependOn(write));
+}
+
+void testNoWriteAndReadDependence(SideEffects write, SideEffects read) {
+ testWriteAndReadSanity(write, read);
+
+ // No dependence in any direction.
+ EXPECT_FALSE(write.MayDependOn(read));
+ EXPECT_FALSE(read.MayDependOn(write));
+}
+
+//
+// Actual tests.
+//
+
+TEST(SideEffectsTest, All) {
+ SideEffects all = SideEffects::All();
+ EXPECT_TRUE(all.DoesAnyWrite());
+ EXPECT_TRUE(all.DoesAnyRead());
+ EXPECT_FALSE(all.DoesNothing());
+ EXPECT_TRUE(all.DoesAll());
+}
+
+TEST(SideEffectsTest, None) {
+ SideEffects none = SideEffects::None();
+ EXPECT_FALSE(none.DoesAnyWrite());
+ EXPECT_FALSE(none.DoesAnyRead());
+ EXPECT_TRUE(none.DoesNothing());
+ EXPECT_FALSE(none.DoesAll());
+}
+
+TEST(SideEffectsTest, DependencesAndNoDependences) {
+ // Apply test to each individual primitive type.
+ for (Primitive::Type type = Primitive::kPrimNot;
+ type < Primitive::kPrimVoid;
+ type = Primitive::Type(type + 1)) {
+ // Same primitive type and access type: proper write/read dep.
+ testWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(type, false),
+ SideEffects::FieldReadOfType(type, false));
+ testWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(type),
+ SideEffects::ArrayReadOfType(type));
+ // Same primitive type but different access type: no write/read dep.
+ testNoWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(type, false),
+ SideEffects::ArrayReadOfType(type));
+ testNoWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(type),
+ SideEffects::FieldReadOfType(type, false));
+ }
+}
+
+TEST(SideEffectsTest, NoDependences) {
+ // Different primitive type, same access type: no write/read dep.
+ testNoWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false),
+ SideEffects::FieldReadOfType(Primitive::kPrimDouble, false));
+ testNoWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimInt),
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+ // Everything different: no write/read dep.
+ testNoWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false),
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+ testNoWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimInt),
+ SideEffects::FieldReadOfType(Primitive::kPrimDouble, false));
+}
+
+TEST(SideEffectsTest, VolatileDependences) {
+ SideEffects volatile_write =
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, true);
+ SideEffects any_write =
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false);
+ SideEffects volatile_read =
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, true);
+ SideEffects any_read =
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, false);
+
+ EXPECT_FALSE(volatile_write.MayDependOn(any_read));
+ EXPECT_TRUE(any_read.MayDependOn(volatile_write));
+ EXPECT_TRUE(volatile_write.MayDependOn(any_write));
+ EXPECT_FALSE(any_write.MayDependOn(volatile_write));
+
+ EXPECT_FALSE(volatile_read.MayDependOn(any_read));
+ EXPECT_TRUE(any_read.MayDependOn(volatile_read));
+ EXPECT_TRUE(volatile_read.MayDependOn(any_write));
+ EXPECT_FALSE(any_write.MayDependOn(volatile_read));
+}
+
+TEST(SideEffectsTest, SameWidthTypes) {
+ // Type I/F.
+ testWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false),
+ SideEffects::FieldReadOfType(Primitive::kPrimFloat, false));
+ testWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimInt),
+ SideEffects::ArrayReadOfType(Primitive::kPrimFloat));
+ // Type L/D.
+ testWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimLong, false),
+ SideEffects::FieldReadOfType(Primitive::kPrimDouble, false));
+ testWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimLong),
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+}
+
+TEST(SideEffectsTest, AllWritesAndReads) {
+ SideEffects s = SideEffects::None();
+ // Keep taking the union of different writes and reads.
+ for (Primitive::Type type = Primitive::kPrimNot;
+ type < Primitive::kPrimVoid;
+ type = Primitive::Type(type + 1)) {
+ s = s.Union(SideEffects::FieldWriteOfType(type, false));
+ s = s.Union(SideEffects::ArrayWriteOfType(type));
+ s = s.Union(SideEffects::FieldReadOfType(type, false));
+ s = s.Union(SideEffects::ArrayReadOfType(type));
+ }
+ EXPECT_TRUE(s.DoesAll());
+}
+
+TEST(SideEffectsTest, BitStrings) {
+ EXPECT_STREQ(
+ "|||||",
+ SideEffects::None().ToString().c_str());
+ EXPECT_STREQ(
+ "|DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|",
+ SideEffects::All().ToString().c_str());
+ EXPECT_STREQ(
+ "|||DFJISCBZL|DFJISCBZL|",
+ SideEffects::AllWrites().ToString().c_str());
+ EXPECT_STREQ(
+ "|DFJISCBZL|DFJISCBZL|||",
+ SideEffects::AllReads().ToString().c_str());
+ EXPECT_STREQ(
+ "||||L|",
+ SideEffects::FieldWriteOfType(Primitive::kPrimNot, false).ToString().c_str());
+ EXPECT_STREQ(
+ "|||Z||",
+ SideEffects::ArrayWriteOfType(Primitive::kPrimBoolean).ToString().c_str());
+ EXPECT_STREQ(
+ "||B|||",
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, false).ToString().c_str());
+ EXPECT_STREQ(
+ "|DJ||||", // note: DJ alias
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble).ToString().c_str());
+ SideEffects s = SideEffects::None();
+ s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimChar, false));
+ s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimLong, false));
+ s = s.Union(SideEffects::ArrayWriteOfType(Primitive::kPrimShort));
+ s = s.Union(SideEffects::FieldReadOfType(Primitive::kPrimInt, false));
+ s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimFloat));
+ s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+ EXPECT_STREQ(
+ "|DFJI|FI|S|DJC|", // note: DJ/FI alias.
+ s.ToString().c_str());
+}
+
+} // namespace art
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index c37b1995fa..ff2e6ad821 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -350,7 +350,9 @@ HInstruction* SsaBuilder::ValueOfLocal(HBasicBlock* block, size_t local) {
void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
current_locals_ = GetLocalsFor(block);
- if (block->IsLoopHeader()) {
+ if (block->IsCatchBlock()) {
+ // Catch phis were already created and inputs collected from throwing sites.
+ } else if (block->IsLoopHeader()) {
// If the block is a loop header, we know we only have visited the pre header
// because we are visiting in reverse post order. We create phis for all initialized
// locals from the pre header. Their inputs will be populated at the end of
@@ -551,19 +553,32 @@ void SsaBuilder::VisitStoreLocal(HStoreLocal* store) {
}
void SsaBuilder::VisitInstruction(HInstruction* instruction) {
- if (!instruction->NeedsEnvironment()) {
- return;
+ if (instruction->NeedsEnvironment()) {
+ HEnvironment* environment = new (GetGraph()->GetArena()) HEnvironment(
+ GetGraph()->GetArena(),
+ current_locals_->Size(),
+ GetGraph()->GetDexFile(),
+ GetGraph()->GetMethodIdx(),
+ instruction->GetDexPc(),
+ GetGraph()->GetInvokeType(),
+ instruction);
+ environment->CopyFrom(*current_locals_);
+ instruction->SetRawEnvironment(environment);
+ }
+
+ // If in a try block, propagate values of locals into catch blocks.
+ if (instruction->GetBlock()->IsInTry() && instruction->CanThrow()) {
+ HTryBoundary* try_block = instruction->GetBlock()->GetTryEntry();
+ for (HExceptionHandlerIterator it(*try_block); !it.Done(); it.Advance()) {
+ GrowableArray<HInstruction*>* handler_locals = GetLocalsFor(it.Current());
+ for (size_t i = 0, e = current_locals_->Size(); i < e; ++i) {
+ HInstruction* local_value = current_locals_->Get(i);
+ if (local_value != nullptr) {
+ handler_locals->Get(i)->AsPhi()->AddInput(local_value);
+ }
+ }
+ }
}
- HEnvironment* environment = new (GetGraph()->GetArena()) HEnvironment(
- GetGraph()->GetArena(),
- current_locals_->Size(),
- GetGraph()->GetDexFile(),
- GetGraph()->GetMethodIdx(),
- instruction->GetDexPc(),
- GetGraph()->GetInvokeType(),
- instruction);
- environment->CopyFrom(*current_locals_);
- instruction->SetRawEnvironment(environment);
}
void SsaBuilder::VisitTemporary(HTemporary* temp) {
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 1c83c4ba48..64600db648 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -61,9 +61,22 @@ class SsaBuilder : public HGraphVisitor {
GrowableArray<HInstruction*>* GetLocalsFor(HBasicBlock* block) {
GrowableArray<HInstruction*>* locals = locals_for_.Get(block->GetBlockId());
if (locals == nullptr) {
- locals = new (GetGraph()->GetArena()) GrowableArray<HInstruction*>(
- GetGraph()->GetArena(), GetGraph()->GetNumberOfVRegs());
- locals->SetSize(GetGraph()->GetNumberOfVRegs());
+ const size_t vregs = GetGraph()->GetNumberOfVRegs();
+ ArenaAllocator* arena = GetGraph()->GetArena();
+ locals = new (arena) GrowableArray<HInstruction*>(arena, vregs);
+ locals->SetSize(vregs);
+
+ if (block->IsCatchBlock()) {
+ // We record incoming inputs of catch phis at throwing instructions and
+ // must therefore eagerly create the phis. Unused phis will be removed
+ // in the dead phi analysis.
+ for (size_t i = 0; i < vregs; ++i) {
+ HPhi* phi = new (arena) HPhi(arena, i, 0, Primitive::kPrimVoid);
+ block->AddPhi(phi);
+ locals->Put(i, phi);
+ }
+ }
+
locals_for_.Put(block->GetBlockId(), locals);
}
return locals;
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 2f2e2d1fab..917341a1e7 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -114,6 +114,12 @@ void SsaRedundantPhiElimination::Run() {
continue;
}
+ if (phi->InputCount() == 0) {
+ DCHECK(phi->IsCatchPhi());
+ DCHECK(phi->IsDead());
+ continue;
+ }
+
// Find if the inputs of the phi are the same instruction.
HInstruction* candidate = phi->InputAt(0);
// A loop phi cannot have itself as the first phi. Note that this
@@ -137,6 +143,11 @@ void SsaRedundantPhiElimination::Run() {
continue;
}
+ // The candidate may not dominate a phi in a catch block.
+ if (phi->IsCatchPhi() && !candidate->StrictlyDominates(phi)) {
+ continue;
+ }
+
if (phi->IsInLoop()) {
// Because we're updating the users of this phi, we may have new
// phis candidate for elimination if this phi is in a loop. Add phis that
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 65610d54a6..1f1530fa1e 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -248,7 +248,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
DCHECK_EQ(code_info.GetStackMapsSize(code_info.ExtractEncoding()), stack_maps_size_);
// Set the Dex register location catalog.
- code_info.SetNumberOfDexRegisterLocationCatalogEntries(location_catalog_entries_.Size());
+ code_info.SetNumberOfLocationCatalogEntries(location_catalog_entries_.Size());
MemoryRegion dex_register_location_catalog_region = region.Subregion(
dex_register_location_catalog_start_, dex_register_location_catalog_size_);
DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index b4ac1b4d1a..33207d92d2 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -55,8 +55,7 @@ TEST(StackMapTest, Test1) {
ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(2u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
@@ -154,8 +153,7 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(2u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(4u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
@@ -304,8 +302,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(1u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
@@ -398,8 +395,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
// The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
// and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
// has a size of 1 bit.
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(2u, number_of_location_catalog_entries);
ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_location_catalog_entries));
@@ -501,8 +497,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(0u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
ASSERT_EQ(0u, location_catalog.Size());