Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/code_generator_arm.cc          | 162
 compiler/optimizing/code_generator_mips64.cc       |   2
 compiler/optimizing/code_generator_mips64.h        |   9
 compiler/optimizing/code_generator_x86.cc          |   2
 compiler/optimizing/dead_code_elimination.cc       |  12
 compiler/optimizing/graph_checker.cc               |  37
 compiler/optimizing/graph_visualizer.cc            |  15
 compiler/optimizing/inliner.cc                     |  17
 compiler/optimizing/nodes.cc                       | 166
 compiler/optimizing/nodes.h                        |  58
 compiler/optimizing/optimizing_compiler.cc         |  16
 compiler/optimizing/reference_type_propagation.cc  |  24
 compiler/optimizing/reference_type_propagation.h   |   2
 compiler/optimizing/register_allocator.cc          |   6
 compiler/optimizing/ssa_builder.cc                 |   3
 15 files changed, 338 insertions(+), 193 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6d05293277..54c6cc8890 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2694,7 +2694,7 @@ void LocationsBuilderARM::VisitDiv(HDiv* div) {
case Primitive::kPrimInt: {
if (div->InputAt(1)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
+ locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
if (abs_imm <= 1) {
@@ -2818,7 +2818,7 @@ void LocationsBuilderARM::VisitRem(HRem* rem) {
case Primitive::kPrimInt: {
if (rem->InputAt(1)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
+ locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
if (abs_imm <= 1) {
@@ -2989,17 +2989,29 @@ void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
- // Make the output overlap, as it will be used to hold the masked
- // second input.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ if (op->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Make the output overlap, as it will be used to hold the masked
+ // second input.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
break;
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ if (op->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
+ // For simplicity, use kOutputOverlap even though we only require that low registers
+ // don't clash with high registers which the register allocator currently guarantees.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
break;
}
default:
@@ -3020,9 +3032,9 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
case Primitive::kPrimInt: {
Register out_reg = out.AsRegister<Register>();
Register first_reg = first.AsRegister<Register>();
- // Arm doesn't mask the shift count so we need to do it ourselves.
if (second.IsRegister()) {
Register second_reg = second.AsRegister<Register>();
+ // Arm doesn't mask the shift count so we need to do it ourselves.
__ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
if (op->IsShl()) {
__ Lsl(out_reg, first_reg, out_reg);
@@ -3050,57 +3062,103 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
Register o_h = out.AsRegisterPairHigh<Register>();
Register o_l = out.AsRegisterPairLow<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
-
Register high = first.AsRegisterPairHigh<Register>();
Register low = first.AsRegisterPairLow<Register>();
- Register second_reg = second.AsRegister<Register>();
-
- if (op->IsShl()) {
- __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
- // Shift the high part
- __ Lsl(o_h, high, o_l);
- // Shift the low part and `or` what overflew on the high part
- __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
- __ Lsr(temp, low, temp);
- __ orr(o_h, o_h, ShifterOperand(temp));
- // If the shift is > 32 bits, override the high part
- __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
- __ it(PL);
- __ Lsl(o_h, low, temp, PL);
- // Shift the low part
- __ Lsl(o_l, low, o_l);
- } else if (op->IsShr()) {
- __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
- // Shift the low part
- __ Lsr(o_l, low, o_h);
- // Shift the high part and `or` what underflew on the low part
- __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ Lsl(temp, high, temp);
- __ orr(o_l, o_l, ShifterOperand(temp));
- // If the shift is > 32 bits, override the low part
- __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ it(PL);
- __ Asr(o_l, high, temp, PL);
- // Shift the high part
- __ Asr(o_h, high, o_h);
+ if (second.IsRegister()) {
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+
+ Register second_reg = second.AsRegister<Register>();
+
+ if (op->IsShl()) {
+ __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
+ // Shift the high part
+ __ Lsl(o_h, high, o_l);
+ // Shift the low part and `or` what overflew on the high part
+ __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
+ __ Lsr(temp, low, temp);
+ __ orr(o_h, o_h, ShifterOperand(temp));
+ // If the shift is > 32 bits, override the high part
+ __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
+ __ it(PL);
+ __ Lsl(o_h, low, temp, PL);
+ // Shift the low part
+ __ Lsl(o_l, low, o_l);
+ } else if (op->IsShr()) {
+ __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
+ // Shift the low part
+ __ Lsr(o_l, low, o_h);
+ // Shift the high part and `or` what underflew on the low part
+ __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ Lsl(temp, high, temp);
+ __ orr(o_l, o_l, ShifterOperand(temp));
+ // If the shift is > 32 bits, override the low part
+ __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ it(PL);
+ __ Asr(o_l, high, temp, PL);
+ // Shift the high part
+ __ Asr(o_h, high, o_h);
+ } else {
+ __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
+ // same as Shr except we use `Lsr`s and not `Asr`s
+ __ Lsr(o_l, low, o_h);
+ __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ Lsl(temp, high, temp);
+ __ orr(o_l, o_l, ShifterOperand(temp));
+ __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ it(PL);
+ __ Lsr(o_l, high, temp, PL);
+ __ Lsr(o_h, high, o_h);
+ }
} else {
- __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
- // same as Shr except we use `Lsr`s and not `Asr`s
- __ Lsr(o_l, low, o_h);
- __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ Lsl(temp, high, temp);
- __ orr(o_l, o_l, ShifterOperand(temp));
- __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ it(PL);
- __ Lsr(o_l, high, temp, PL);
- __ Lsr(o_h, high, o_h);
+ // Register allocator doesn't create partial overlap.
+ DCHECK_NE(o_l, high);
+ DCHECK_NE(o_h, low);
+ int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
+ uint32_t shift_value = static_cast<uint32_t>(cst & kMaxLongShiftValue);
+ if (shift_value > 32) {
+ if (op->IsShl()) {
+ __ Lsl(o_h, low, shift_value - 32);
+ __ LoadImmediate(o_l, 0);
+ } else if (op->IsShr()) {
+ __ Asr(o_l, high, shift_value - 32);
+ __ Asr(o_h, high, 31);
+ } else {
+ __ Lsr(o_l, high, shift_value - 32);
+ __ LoadImmediate(o_h, 0);
+ }
+ } else if (shift_value == 32) {
+ if (op->IsShl()) {
+ __ mov(o_h, ShifterOperand(low));
+ __ LoadImmediate(o_l, 0);
+ } else if (op->IsShr()) {
+ __ mov(o_l, ShifterOperand(high));
+ __ Asr(o_h, high, 31);
+ } else {
+ __ mov(o_l, ShifterOperand(high));
+ __ LoadImmediate(o_h, 0);
+ }
+ } else { // shift_value < 32
+ if (op->IsShl()) {
+ __ Lsl(o_h, high, shift_value);
+ __ orr(o_h, o_h, ShifterOperand(low, LSR, 32 - shift_value));
+ __ Lsl(o_l, low, shift_value);
+ } else if (op->IsShr()) {
+ __ Lsr(o_l, low, shift_value);
+ __ orr(o_l, o_l, ShifterOperand(high, LSL, 32 - shift_value));
+ __ Asr(o_h, high, shift_value);
+ } else {
+ __ Lsr(o_l, low, shift_value);
+ __ orr(o_l, o_l, ShifterOperand(high, LSL, 32 - shift_value));
+ __ Lsr(o_h, high, shift_value);
+ }
+ }
}
break;
}
default:
LOG(FATAL) << "Unexpected operation type " << type;
+ UNREACHABLE();
}
}
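
The constant-shift path added above open-codes a 64-bit shift on 32-bit ARM as operations on the low/high register pair, split into the cases shift > 32, shift == 32 and shift < 32. Below is a minimal, standalone C++ model of the left-shift decomposition (plain integers instead of the ART assembler macros; the explicit shift == 0 case is only there to keep the C++ well-defined):

    #include <cstdint>

    // `low`/`high` model the input pair, `lo`/`hi` the output pair o_l/o_h.
    static void ShlLongByConstant(uint32_t low, uint32_t high, uint32_t shift,
                                  uint32_t* lo, uint32_t* hi) {
      shift &= 63;  // kMaxLongShiftValue: ARM does not mask the count itself.
      if (shift == 0) {
        *lo = low;
        *hi = high;
      } else if (shift < 32) {
        *hi = (high << shift) | (low >> (32 - shift));  // Lsl o_h, orr with LSR.
        *lo = low << shift;
      } else if (shift == 32) {
        *hi = low;  // mov o_h, low
        *lo = 0;    // LoadImmediate o_l, 0
      } else {      // 32 < shift < 64
        *hi = low << (shift - 32);
        *lo = 0;
      }
    }

The Shr and UShr cases follow the same three-way split with Asr/Lsr in place of Lsl.
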
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b36a04216d..9b78dec6c4 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2977,7 +2977,7 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
CodeGenerator::CreateLoadClassLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(A0));
+ calling_convention.GetReturnLocation(cls->GetType()));
}
void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 58c6e0fa83..ac3162f736 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -119,9 +119,12 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::RegisterLocation(V0);
}
- Location GetSetValueLocation(
- Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
- return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterLocation(A2)
+ : (is_instance
+ ? Location::RegisterLocation(A2)
+ : Location::RegisterLocation(A1));
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::FpuRegisterLocation(F0);
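
The new GetSetValueLocation above encodes, via a nested ternary, that A1 is used only for 32-bit static field sets. An equivalent, flattened sketch of the same decision (same ART types, only the condition rewritten):

    Location GetSetValueLocation(Primitive::Type type, bool is_instance) {
      // 64-bit values and all instance sets pass the new value in A2;
      // only 32-bit static sets use A1.
      bool use_a2 = Primitive::Is64BitType(type) || is_instance;
      return Location::RegisterLocation(use_a2 ? A2 : A1);
    }
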
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d5d6c210bf..0147b010f2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3024,7 +3024,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
DCHECK_EQ(EAX, first.AsRegister<Register>());
DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>());
- if (instruction->InputAt(1)->IsIntConstant()) {
+ if (second.IsConstant()) {
int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
if (imm == 0) {
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9754043f32..02e5dab3d4 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -123,20 +123,21 @@ void HDeadCodeElimination::RemoveDeadBlocks() {
}
// If we removed at least one block, we need to recompute the full
- // dominator tree.
+ // dominator tree and try block membership.
if (removed_one_or_more_blocks) {
graph_->ClearDominanceInformation();
graph_->ComputeDominanceInformation();
+ graph_->ComputeTryBlockInformation();
}
// Connect successive blocks created by dead branches. Order does not matter.
for (HReversePostOrderIterator it(*graph_); !it.Done();) {
HBasicBlock* block = it.Current();
- if (block->IsEntryBlock() || block->GetSuccessors().size() != 1u) {
+ if (block->IsEntryBlock() || !block->GetLastInstruction()->IsGoto()) {
it.Advance();
continue;
}
- HBasicBlock* successor = block->GetSuccessors()[0];
+ HBasicBlock* successor = block->GetSingleSuccessor();
if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
it.Advance();
continue;
@@ -176,10 +177,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
}
void HDeadCodeElimination::Run() {
- if (!graph_->HasTryCatch()) {
- // TODO: Update dead block elimination and enable for try/catch.
- RemoveDeadBlocks();
- }
+ RemoveDeadBlocks();
SsaRedundantPhiElimination(graph_).Run();
RemoveDeadInstructions();
}
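
The block-connecting loop above now keys on "ends with an HGoto" rather than "has exactly one successor", which is what actually makes a block a trivial forwarding block. A hedged sketch of the merge test it applies, using only the accessors visible in the hunk:

    // A block is merged into its successor only when it is a trivial Goto
    // block and the edge is the successor's sole incoming edge.
    static bool CanMergeWithSuccessor(HBasicBlock* block) {
      if (block->IsEntryBlock() || !block->GetLastInstruction()->IsGoto()) {
        return false;
      }
      HBasicBlock* successor = block->GetSingleSuccessor();
      return !successor->IsExitBlock() &&
             successor->GetPredecessors().size() == 1u;
    }
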
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 0d7c796837..5814d7556f 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -163,12 +163,12 @@ void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) {
}
void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
- // Ensure that all exception handlers are catch blocks and that handlers
- // are not listed multiple times.
+ ArrayRef<HBasicBlock* const> handlers = try_boundary->GetExceptionHandlers();
+
+ // Ensure that all exception handlers are catch blocks.
// Note that a normal-flow successor may be a catch block before CFG
// simplification. We only test normal-flow successors in SsaChecker.
- for (HExceptionHandlerIterator it(*try_boundary); !it.Done(); it.Advance()) {
- HBasicBlock* handler = it.Current();
+ for (HBasicBlock* handler : handlers) {
if (!handler->IsCatchBlock()) {
AddError(StringPrintf("Block %d with %s:%d has exceptional successor %d which "
"is not a catch block.",
@@ -177,9 +177,13 @@ void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
try_boundary->GetId(),
handler->GetBlockId()));
}
- if (current_block_->HasSuccessor(handler, it.CurrentSuccessorIndex() + 1)) {
- AddError(StringPrintf("Exception handler block %d of %s:%d is listed multiple times.",
- handler->GetBlockId(),
+ }
+
+ // Ensure that handlers are not listed multiple times.
+ for (size_t i = 0, e = handlers.size(); i < e; ++i) {
+ if (ContainsElement(handlers, handlers[i], i + 1)) {
+ AddError(StringPrintf("Exception handler block %d of %s:%d is listed multiple times.",
+ handlers[i]->GetBlockId(),
try_boundary->DebugName(),
try_boundary->GetId()));
}
@@ -371,17 +375,14 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// Ensure that catch blocks are not normal successors, and normal blocks are
// never exceptional successors.
- const size_t num_normal_successors = block->NumberOfNormalSuccessors();
- for (size_t j = 0; j < num_normal_successors; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
if (successor->IsCatchBlock()) {
AddError(StringPrintf("Catch block %d is a normal successor of block %d.",
successor->GetBlockId(),
block->GetBlockId()));
}
}
- for (size_t j = num_normal_successors, e = block->GetSuccessors().size(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
+ for (HBasicBlock* successor : block->GetExceptionalSuccessors()) {
if (!successor->IsCatchBlock()) {
AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.",
successor->GetBlockId(),
@@ -393,10 +394,14 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// block with multiple successors to a block with multiple
// predecessors). Exceptional edges are synthesized and hence
// not accounted for.
- if (block->NumberOfNormalSuccessors() > 1) {
- for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
- if (successor->GetPredecessors().size() > 1) {
+ if (block->GetSuccessors().size() > 1) {
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
+ if (successor->IsExitBlock() &&
+ block->IsSingleTryBoundary() &&
+ block->GetPredecessors().size() == 1u &&
+ block->GetSinglePredecessor()->GetLastInstruction()->IsThrow()) {
+ // Allowed critical edge Throw->TryBoundary->Exit.
+ } else if (successor->GetPredecessors().size() > 1) {
AddError(StringPrintf("Critical edge between blocks %d and %d.",
block->GetBlockId(),
successor->GetBlockId()));
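
The duplicate-handler check above scans the remainder of the handler list for each entry, so a handler listed k times is reported k - 1 times. The same pattern on a plain vector, as a self-contained sketch:

    #include <algorithm>
    #include <vector>

    // Mirrors ContainsElement(handlers, handlers[i], i + 1): does the element
    // at index i occur again later in the list?
    static bool HasDuplicateAfter(const std::vector<int>& handlers, size_t i) {
      return std::find(handlers.begin() + i + 1, handlers.end(), handlers[i]) !=
             handlers.end();
    }
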
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 4111671a9b..2b7790184a 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -24,6 +24,7 @@
#include "code_generator.h"
#include "dead_code_elimination.h"
#include "disassembler.h"
+#include "inliner.h"
#include "licm.h"
#include "nodes.h"
#include "optimization.h"
@@ -252,8 +253,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintSuccessors(HBasicBlock* block) {
AddIndent();
output_ << "successors";
- for (size_t i = 0; i < block->NumberOfNormalSuccessors(); ++i) {
- HBasicBlock* successor = block->GetSuccessors()[i];
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
output_ << " \"B" << successor->GetBlockId() << "\" ";
}
output_<< std::endl;
@@ -262,8 +262,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintExceptionHandlers(HBasicBlock* block) {
AddIndent();
output_ << "xhandlers";
- for (size_t i = block->NumberOfNormalSuccessors(); i < block->GetSuccessors().size(); ++i) {
- HBasicBlock* handler = block->GetSuccessors()[i];
+ for (HBasicBlock* handler : block->GetExceptionalSuccessors()) {
output_ << " \"B" << handler->GetBlockId() << "\" ";
}
if (block->IsExitBlock() &&
@@ -424,11 +423,6 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
return strcmp(pass_name_, name) == 0;
}
- bool IsReferenceTypePropagationPass() {
- return strstr(pass_name_, ReferenceTypePropagation::kReferenceTypePropagationPassName)
- != nullptr;
- }
-
void PrintInstruction(HInstruction* instruction) {
output_ << instruction->DebugName();
if (instruction->InputCount() > 0) {
@@ -492,7 +486,8 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
} else {
StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
}
- } else if (IsReferenceTypePropagationPass()
+ } else if ((IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName)
+ || IsPass(HInliner::kInlinerPassName))
&& (instruction->GetType() == Primitive::kPrimNot)) {
ReferenceTypeInfo info = instruction->IsLoadClass()
? instruction->AsLoadClass()->GetLoadedClassRTI()
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 353881e47a..0363f203b2 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -148,7 +148,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
// the target method. Since we check above the exact type of the receiver,
// the only reason this can happen is an IncompatibleClassChangeError.
return nullptr;
- } else if (resolved_method->IsAbstract()) {
+ } else if (!resolved_method->IsInvokable()) {
// The information we had on the receiver was not enough to find
// the target method. Since we check above the exact type of the receiver,
// the only reason this can happen is an IncompatibleClassChangeError.
@@ -406,8 +406,8 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
&type_propagation,
&sharpening,
&simplify,
- &dce,
&fold,
+ &dce,
};
for (size_t i = 0; i < arraysize(optimizations); ++i) {
@@ -534,6 +534,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
ReferenceTypeInfo::Create(obj_handle, false /* is_exact */));
}
+ // Check the integrity of reference types and run another type propagation if needed.
if ((return_replacement != nullptr)
&& (return_replacement->GetType() == Primitive::kPrimNot)) {
if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
@@ -544,10 +545,20 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
DCHECK(return_replacement->IsPhi());
size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ReferenceTypeInfo::TypeHandle return_handle =
- handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
+ handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
return_handle, return_handle->CannotBeAssignedFromOtherTypes() /* is_exact */));
}
+
+ // If the return type is a refinement of the declared type run the type propagation again.
+ ReferenceTypeInfo return_rti = return_replacement->GetReferenceTypeInfo();
+ ReferenceTypeInfo invoke_rti = invoke_instruction->GetReferenceTypeInfo();
+ if (invoke_rti.IsStrictSupertypeOf(return_rti)
+ || (return_rti.IsExact() && !invoke_rti.IsExact())
+ || !return_replacement->CanBeNull()) {
+ ReferenceTypePropagation rtp_fixup(graph_, handles_);
+ rtp_fixup.Run();
+ }
}
return true;
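
The new fixup above re-runs ReferenceTypePropagation only when inlining produced strictly better type information than the invoke advertised. A sketch of that test in isolation (the rti accessors are the ones used in the hunk; lock annotations omitted):

    static bool ReturnTypeRefinesInvokeType(HInstruction* return_replacement,
                                            HInvoke* invoke_instruction) {
      ReferenceTypeInfo return_rti = return_replacement->GetReferenceTypeInfo();
      ReferenceTypeInfo invoke_rti = invoke_instruction->GetReferenceTypeInfo();
      return invoke_rti.IsStrictSupertypeOf(return_rti) ||       // narrower class
             (return_rti.IsExact() && !invoke_rti.IsExact()) ||  // gained exactness
             !return_replacement->CanBeNull();                   // proven non-null
    }
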
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 83f53d791b..73a44ee2cb 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -362,7 +362,11 @@ void HGraph::ComputeTryBlockInformation() {
HBasicBlock* first_predecessor = block->GetPredecessors()[0];
DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
- if (try_entry != nullptr) {
+ if (try_entry != nullptr &&
+ (block->GetTryCatchInformation() == nullptr ||
+ try_entry != &block->GetTryCatchInformation()->GetTryEntry())) {
+ // We are either setting try block membership for the first time or it
+ // has changed.
block->SetTryCatchInformation(new (arena_) TryCatchInformation(*try_entry));
}
}
@@ -381,8 +385,9 @@ void HGraph::SimplifyCFG() {
// Only split normal-flow edges. We cannot split exceptional edges as they
// are synthesized (approximate real control flow), and we do not need to
// anyway. Moves that would be inserted there are performed by the runtime.
- for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
+ ArrayRef<HBasicBlock* const> normal_successors = block->GetNormalSuccessors();
+ for (size_t j = 0, e = normal_successors.size(); j < e; ++j) {
+ HBasicBlock* successor = normal_successors[j];
DCHECK(!successor->IsCatchBlock());
if (successor == exit_block_) {
// Throw->TryBoundary->Exit. Special case which we do not want to split
@@ -391,7 +396,11 @@ void HGraph::SimplifyCFG() {
DCHECK(block->GetSinglePredecessor()->GetLastInstruction()->IsThrow());
} else if (successor->GetPredecessors().size() > 1) {
SplitCriticalEdge(block, successor);
- --j;
+ // SplitCriticalEdge could have invalidated the `normal_successors`
+ // ArrayRef. We must re-acquire it.
+ normal_successors = block->GetNormalSuccessors();
+ DCHECK_EQ(normal_successors[j]->GetSingleSuccessor(), successor);
+ DCHECK_EQ(e, normal_successors.size());
}
}
}
@@ -1086,6 +1095,8 @@ HConstant* HBinaryOperation::TryStaticEvaluation() const {
} else if (GetRight()->IsLongConstant()) {
return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsLongConstant());
}
+ } else if (GetLeft()->IsNullConstant() && GetRight()->IsNullConstant()) {
+ return Evaluate(GetLeft()->AsNullConstant(), GetRight()->AsNullConstant());
}
return nullptr;
}
@@ -1325,17 +1336,38 @@ bool HBasicBlock::HasSinglePhi() const {
return !GetPhis().IsEmpty() && GetFirstPhi()->GetNext() == nullptr;
}
+ArrayRef<HBasicBlock* const> HBasicBlock::GetNormalSuccessors() const {
+ if (EndsWithTryBoundary()) {
+ // The normal-flow successor of HTryBoundary is always stored at index zero.
+ DCHECK_EQ(successors_[0], GetLastInstruction()->AsTryBoundary()->GetNormalFlowSuccessor());
+ return ArrayRef<HBasicBlock* const>(successors_).SubArray(0u, 1u);
+ } else {
+ // All successors of blocks not ending with TryBoundary are normal.
+ return ArrayRef<HBasicBlock* const>(successors_);
+ }
+}
+
+ArrayRef<HBasicBlock* const> HBasicBlock::GetExceptionalSuccessors() const {
+ if (EndsWithTryBoundary()) {
+ return GetLastInstruction()->AsTryBoundary()->GetExceptionHandlers();
+ } else {
+ // Blocks not ending with TryBoundary do not have exceptional successors.
+ return ArrayRef<HBasicBlock* const>();
+ }
+}
+
bool HTryBoundary::HasSameExceptionHandlersAs(const HTryBoundary& other) const {
- if (GetBlock()->GetSuccessors().size() != other.GetBlock()->GetSuccessors().size()) {
+ ArrayRef<HBasicBlock* const> handlers1 = GetExceptionHandlers();
+ ArrayRef<HBasicBlock* const> handlers2 = other.GetExceptionHandlers();
+
+ size_t length = handlers1.size();
+ if (length != handlers2.size()) {
return false;
}
// Exception handlers need to be stored in the same order.
- for (HExceptionHandlerIterator it1(*this), it2(other);
- !it1.Done();
- it1.Advance(), it2.Advance()) {
- DCHECK(!it2.Done());
- if (it1.Current() != it2.Current()) {
+ for (size_t i = 0; i < length; ++i) {
+ if (handlers1[i] != handlers2[i]) {
return false;
}
}
@@ -1388,7 +1420,7 @@ void HBasicBlock::DisconnectAndDelete() {
// iteration.
DCHECK(dominated_blocks_.empty());
- // Remove the block from all loops it is included in.
+ // (1) Remove the block from all loops it is included in.
for (HLoopInformationOutwardIterator it(*this); !it.Done(); it.Advance()) {
HLoopInformation* loop_info = it.Current();
loop_info->Remove(this);
@@ -1400,17 +1432,34 @@ void HBasicBlock::DisconnectAndDelete() {
}
}
- // Disconnect the block from its predecessors and update their control-flow
- // instructions.
+ // (2) Disconnect the block from its predecessors and update their
+ // control-flow instructions.
for (HBasicBlock* predecessor : predecessors_) {
HInstruction* last_instruction = predecessor->GetLastInstruction();
+ if (last_instruction->IsTryBoundary() && !IsCatchBlock()) {
+ // This block is the only normal-flow successor of the TryBoundary which
+ // makes `predecessor` dead. Since DCE removes blocks in post order,
+ // exception handlers of this TryBoundary were already visited and any
+ // remaining handlers therefore must be live. We remove `predecessor` from
+ // their list of predecessors.
+ DCHECK_EQ(last_instruction->AsTryBoundary()->GetNormalFlowSuccessor(), this);
+ while (predecessor->GetSuccessors().size() > 1) {
+ HBasicBlock* handler = predecessor->GetSuccessors()[1];
+ DCHECK(handler->IsCatchBlock());
+ predecessor->RemoveSuccessor(handler);
+ handler->RemovePredecessor(predecessor);
+ }
+ }
+
predecessor->RemoveSuccessor(this);
uint32_t num_pred_successors = predecessor->GetSuccessors().size();
if (num_pred_successors == 1u) {
// If we have one successor after removing one, then we must have
- // had an HIf or HPackedSwitch, as they have more than one successor.
- // Replace those with a HGoto.
- DCHECK(last_instruction->IsIf() || last_instruction->IsPackedSwitch());
+ // had an HIf, HPackedSwitch or HTryBoundary, as they have more than one
+ // successor. Replace those with a HGoto.
+ DCHECK(last_instruction->IsIf() ||
+ last_instruction->IsPackedSwitch() ||
+ (last_instruction->IsTryBoundary() && IsCatchBlock()));
predecessor->RemoveInstruction(last_instruction);
predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc()));
} else if (num_pred_successors == 0u) {
@@ -1419,15 +1468,17 @@ void HBasicBlock::DisconnectAndDelete() {
// SSAChecker fails unless it is not removed during the pass too.
predecessor->RemoveInstruction(last_instruction);
} else {
- // There are multiple successors left. This must come from a HPackedSwitch
- // and we are in the middle of removing the HPackedSwitch. Like above, leave
- // this alone, and the SSAChecker will fail if it is not removed as well.
- DCHECK(last_instruction->IsPackedSwitch());
+ // There are multiple successors left. The removed block might be a successor
+ // of a PackedSwitch which will be completely removed (perhaps replaced with
+ // a Goto), or we are deleting a catch block from a TryBoundary. In either
+ // case, leave `last_instruction` as is for now.
+ DCHECK(last_instruction->IsPackedSwitch() ||
+ (last_instruction->IsTryBoundary() && IsCatchBlock()));
}
}
predecessors_.clear();
- // Disconnect the block from its successors and update their phis.
+ // (3) Disconnect the block from its successors and update their phis.
for (HBasicBlock* successor : successors_) {
// Delete this block from the list of predecessors.
size_t this_index = successor->GetPredecessorIndexOf(this);
@@ -1437,30 +1488,57 @@ void HBasicBlock::DisconnectAndDelete() {
// dominator of `successor` which violates the order DCHECKed at the top.
DCHECK(!successor->predecessors_.empty());
- // Remove this block's entries in the successor's phis.
- if (successor->predecessors_.size() == 1u) {
- // The successor has just one predecessor left. Replace phis with the only
- // remaining input.
- for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
- HPhi* phi = phi_it.Current()->AsPhi();
- phi->ReplaceWith(phi->InputAt(1 - this_index));
- successor->RemovePhi(phi);
- }
- } else {
- for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
- phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
+ // Remove this block's entries in the successor's phis. Skip exceptional
+ // successors because catch phi inputs do not correspond to predecessor
+ // blocks but throwing instructions. Their inputs will be updated in step (4).
+ if (!successor->IsCatchBlock()) {
+ if (successor->predecessors_.size() == 1u) {
+ // The successor has just one predecessor left. Replace phis with the only
+ // remaining input.
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* phi = phi_it.Current()->AsPhi();
+ phi->ReplaceWith(phi->InputAt(1 - this_index));
+ successor->RemovePhi(phi);
+ }
+ } else {
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
+ }
}
}
}
successors_.clear();
+ // (4) Remove instructions and phis. Instructions should have no remaining uses
+ // except in catch phis. If an instruction is used by a catch phi at `index`,
+ // remove `index`-th input of all phis in the catch block since they are
+ // guaranteed dead. Note that we may miss dead inputs this way but the
+ // graph will always remain consistent.
+ for (HBackwardInstructionIterator it(GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* insn = it.Current();
+ while (insn->HasUses()) {
+ DCHECK(IsTryBlock());
+ HUseListNode<HInstruction*>* use = insn->GetUses().GetFirst();
+ size_t use_index = use->GetIndex();
+ HBasicBlock* user_block = use->GetUser()->GetBlock();
+ DCHECK(use->GetUser()->IsPhi() && user_block->IsCatchBlock());
+ for (HInstructionIterator phi_it(user_block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ phi_it.Current()->AsPhi()->RemoveInputAt(use_index);
+ }
+ }
+
+ RemoveInstruction(insn);
+ }
+ for (HInstructionIterator it(GetPhis()); !it.Done(); it.Advance()) {
+ RemovePhi(it.Current()->AsPhi());
+ }
+
// Disconnect from the dominator.
dominator_->RemoveDominatedBlock(this);
SetDominator(nullptr);
- // Delete from the graph. The function safely deletes remaining instructions
- // and updates the reverse post order.
- graph_->DeleteDeadBlock(this);
+ // Delete from the graph, update reverse post order.
+ graph_->DeleteDeadEmptyBlock(this);
SetGraph(nullptr);
}
@@ -1507,7 +1585,7 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
other->predecessors_.clear();
// Delete `other` from the graph. The function updates reverse post order.
- graph_->DeleteDeadBlock(other);
+ graph_->DeleteDeadEmptyBlock(other);
other->SetGraph(nullptr);
}
@@ -1571,19 +1649,14 @@ static void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
}
-void HGraph::DeleteDeadBlock(HBasicBlock* block) {
+void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) {
DCHECK_EQ(block->GetGraph(), this);
DCHECK(block->GetSuccessors().empty());
DCHECK(block->GetPredecessors().empty());
DCHECK(block->GetDominatedBlocks().empty());
DCHECK(block->GetDominator() == nullptr);
-
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- block->RemoveInstruction(it.Current());
- }
- for (HBackwardInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- block->RemovePhi(it.Current()->AsPhi());
- }
+ DCHECK(block->GetInstructions().IsEmpty());
+ DCHECK(block->GetPhis().IsEmpty());
if (block->IsExitBlock()) {
exit_block_ = nullptr;
@@ -1686,6 +1759,9 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// (2) the reverse post order of that graph,
// (3) the potential loop information they are now in,
// (4) try block membership.
+ // Note that we do not need to update catch phi inputs because they
+ // correspond to the register file of the outer method which the inlinee
+ // cannot modify.
// We don't add the entry block, the exit block, and the first block, which
// has been merged with `at`.
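
GetNormalSuccessors and GetExceptionalSuccessors above expose the successor list as two disjoint ArrayRef slices: for a block ending in a TryBoundary, index 0 is the normal-flow edge and indices 1..n are the handlers; otherwise every successor is normal. Typical usage, as in the graph checker and visualizer changes in this diff:

    for (HBasicBlock* successor : block->GetNormalSuccessors()) {
      // Normal control-flow edges only.
    }
    for (HBasicBlock* handler : block->GetExceptionalSuccessors()) {
      // Exception edges; empty unless the block ends with an HTryBoundary.
    }
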
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4c4d0f268d..2878ac9899 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -35,6 +35,7 @@
#include "mirror/class.h"
#include "offsets.h"
#include "primitive.h"
+#include "utils/array_ref.h"
namespace art {
@@ -240,8 +241,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// put deoptimization instructions, etc.
void TransformLoopHeaderForBCE(HBasicBlock* header);
- // Removes `block` from the graph.
- void DeleteDeadBlock(HBasicBlock* block);
+ // Removes `block` from the graph. Assumes `block` has been disconnected from
+ // other blocks and has no instructions or phis.
+ void DeleteDeadEmptyBlock(HBasicBlock* block);
// Splits the edge between `block` and `successor` while preserving the
// indices in the predecessor/successor lists. If there are multiple edges
@@ -659,6 +661,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
return successors_;
}
+ ArrayRef<HBasicBlock* const> GetNormalSuccessors() const;
+ ArrayRef<HBasicBlock* const> GetExceptionalSuccessors() const;
+
bool HasSuccessor(const HBasicBlock* block, size_t start_from = 0u) {
return ContainsElement(successors_, block, start_from);
}
@@ -809,12 +814,6 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
return GetPredecessorIndexOf(predecessor) == idx;
}
- // Returns the number of non-exceptional successors. SsaChecker ensures that
- // these are stored at the beginning of the successor list.
- size_t NumberOfNormalSuccessors() const {
- return EndsWithTryBoundary() ? 1 : GetSuccessors().size();
- }
-
// Create a new block between this block and its predecessors. The new block
// is added to the graph, all predecessor edges are relinked to it and an edge
// is created to `this`. Returns the new empty block. Reverse post order or
@@ -1732,6 +1731,13 @@ class ReferenceTypeInfo : ValueObject {
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
+ bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ DCHECK(rti.IsValid());
+ return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
+ GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
+ }
+
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
@@ -2397,6 +2403,10 @@ class HTryBoundary : public HTemplateInstruction<0> {
// Returns the block's non-exceptional successor (index zero).
HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
+ ArrayRef<HBasicBlock* const> GetExceptionHandlers() const {
+ return ArrayRef<HBasicBlock* const>(GetBlock()->GetSuccessors()).SubArray(1u);
+ }
+
// Returns whether `handler` is among its exception handlers (non-zero index
// successors).
bool HasExceptionHandler(const HBasicBlock& handler) const {
@@ -2424,25 +2434,6 @@ class HTryBoundary : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HTryBoundary);
};
-// Iterator over exception handlers of a given HTryBoundary, i.e. over
-// exceptional successors of its basic block.
-class HExceptionHandlerIterator : public ValueObject {
- public:
- explicit HExceptionHandlerIterator(const HTryBoundary& try_boundary)
- : block_(*try_boundary.GetBlock()), index_(block_.NumberOfNormalSuccessors()) {}
-
- bool Done() const { return index_ == block_.GetSuccessors().size(); }
- HBasicBlock* Current() const { return block_.GetSuccessors()[index_]; }
- size_t CurrentSuccessorIndex() const { return index_; }
- void Advance() { ++index_; }
-
- private:
- const HBasicBlock& block_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HExceptionHandlerIterator);
-};
-
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize : public HTemplateInstruction<1> {
public:
@@ -2611,6 +2602,11 @@ class HBinaryOperation : public HExpression<2> {
VLOG(compiler) << DebugName() << " is not defined for the (long, int) case.";
return nullptr;
}
+ virtual HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
+ HNullConstant* y ATTRIBUTE_UNUSED) const {
+ VLOG(compiler) << DebugName() << " is not defined for the (null, null) case.";
+ return nullptr;
+ }
// Returns an input that can legally be used as the right input and is
// constant, or null.
@@ -2701,6 +2697,10 @@ class HEqual : public HCondition {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
+ HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(1);
+ }
DECLARE_INSTRUCTION(Equal);
@@ -2733,6 +2733,10 @@ class HNotEqual : public HCondition {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
+ HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(0);
+ }
DECLARE_INSTRUCTION(NotEqual);
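
The new (null, null) Evaluate overloads let HBinaryOperation::TryStaticEvaluation (see the nodes.cc hunk above) fold comparisons of two null constants: HEqual yields the int constant 1, HNotEqual yields 0, and every other binary operation keeps the default overload and returns nullptr. A sketch of the dispatch path that reaches them:

    HConstant* TryStaticEvaluationSketch(HBinaryOperation* op) {
      HInstruction* left = op->GetLeft();
      HInstruction* right = op->GetRight();
      if (left->IsNullConstant() && right->IsNullConstant()) {
        return op->Evaluate(left->AsNullConstant(), right->AsNullConstant());
      }
      return nullptr;  // Int/long constant cases elided in this sketch.
    }
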
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6e85a82849..2be0680561 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -405,20 +405,9 @@ static void MaybeRunInliner(HGraph* graph,
if (!should_inline) {
return;
}
-
- ArenaAllocator* arena = graph->GetArena();
- HInliner* inliner = new (arena) HInliner(
+ HInliner* inliner = new (graph->GetArena()) HInliner(
graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
- ReferenceTypePropagation* type_propagation =
- new (arena) ReferenceTypePropagation(graph, handles,
- "reference_type_propagation_after_inlining");
-
- HOptimization* optimizations[] = {
- inliner,
- // Run another type propagation phase: inlining will open up more opportunities
- // to remove checkcast/instanceof and null checks.
- type_propagation,
- };
+ HOptimization* optimizations[] = { inliner };
RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}
@@ -530,6 +519,7 @@ static void RunOptimizations(HGraph* graph,
// pipeline for all methods.
if (graph->HasTryCatch()) {
HOptimization* optimizations2[] = {
+ boolean_simplify,
side_effects,
gvn,
dce2,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 659da068a9..ecc085b985 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -99,17 +99,9 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
}
}
-void ReferenceTypePropagation::Run() {
- // To properly propagate type info we need to visit in the dominator-based order.
- // Reverse post order guarantees a node's dominators are visited first.
- // We take advantage of this order in `VisitBasicBlock`.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
- }
- ProcessWorklist();
-
+void ReferenceTypePropagation::ValidateTypes() {
+ // TODO: move this to the graph checker.
if (kIsDebugBuild) {
- // TODO: move this to the graph checker.
ScopedObjectAccess soa(Thread::Current());
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
@@ -135,6 +127,18 @@ void ReferenceTypePropagation::Run() {
}
}
+void ReferenceTypePropagation::Run() {
+ // To properly propagate type info we need to visit in the dominator-based order.
+ // Reverse post order guarantees a node's dominators are visited first.
+ // We take advantage of this order in `VisitBasicBlock`.
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ VisitBasicBlock(it.Current());
+ }
+
+ ProcessWorklist();
+ ValidateTypes();
+}
+
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
RTPVisitor visitor(graph_,
handles_,
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 5493601adc..5c05592726 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -56,6 +56,8 @@ class ReferenceTypePropagation : public HOptimization {
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void ValidateTypes();
+
StackHandleScopeCollection* handles_;
ArenaVector<HInstruction*> worklist_;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index ef22c816a0..d399bc2d7a 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1525,7 +1525,7 @@ void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
DCHECK(IsValidDestination(destination)) << destination;
if (source.Equals(destination)) return;
- DCHECK_EQ(block->NumberOfNormalSuccessors(), 1u);
+ DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
HInstruction* last = block->GetLastInstruction();
// We insert moves at exit for phi predecessors and connecting blocks.
// A block ending with an if or a packed switch cannot branch to a block
@@ -1752,7 +1752,7 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
// If `from` has only one successor, we can put the moves at the exit of it. Otherwise
// we need to put the moves at the entry of `to`.
- if (from->NumberOfNormalSuccessors() == 1) {
+ if (from->GetNormalSuccessors().size() == 1) {
InsertParallelMoveAtExitOf(from,
interval->GetParent()->GetDefinedBy(),
source->ToLocation(),
@@ -1894,7 +1894,7 @@ void RegisterAllocator::Resolve() {
HInstruction* phi = inst_it.Current();
for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
HBasicBlock* predecessor = current->GetPredecessors()[i];
- DCHECK_EQ(predecessor->NumberOfNormalSuccessors(), 1u);
+ DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
HInstruction* input = phi->InputAt(i);
Location source = input->GetLiveInterval()->GetLocationAt(
predecessor->GetLifetimeEnd() - 1);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 4565590bc3..5190eb3b26 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -660,8 +660,7 @@ void SsaBuilder::VisitInstruction(HInstruction* instruction) {
if (instruction->CanThrowIntoCatchBlock()) {
const HTryBoundary& try_entry =
instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry();
- for (HExceptionHandlerIterator it(try_entry); !it.Done(); it.Advance()) {
- HBasicBlock* catch_block = it.Current();
+ for (HBasicBlock* catch_block : try_entry.GetExceptionHandlers()) {
ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
DCHECK_EQ(handler_locals->size(), current_locals_->size());
for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {