diff options
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- | compiler/optimizing/code_generator.cc | 6 | ||||
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 257 | ||||
-rw-r--r-- | compiler/optimizing/dead_code_elimination.cc | 1 | ||||
-rw-r--r-- | compiler/optimizing/dead_code_elimination.h | 7 | ||||
-rw-r--r-- | compiler/optimizing/graph_checker.cc | 25 | ||||
-rw-r--r-- | compiler/optimizing/inliner.cc | 2 | ||||
-rw-r--r-- | compiler/optimizing/instruction_simplifier.cc | 4 | ||||
-rw-r--r-- | compiler/optimizing/intrinsics_arm.cc | 2 | ||||
-rw-r--r-- | compiler/optimizing/intrinsics_arm64.cc | 2 | ||||
-rw-r--r-- | compiler/optimizing/intrinsics_x86.cc | 2 | ||||
-rw-r--r-- | compiler/optimizing/intrinsics_x86_64.cc | 65 | ||||
-rw-r--r-- | compiler/optimizing/nodes.cc | 72 | ||||
-rw-r--r-- | compiler/optimizing/nodes.h | 41 | ||||
-rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 8 | ||||
-rw-r--r-- | compiler/optimizing/optimizing_compiler_stats.h | 6 |
15 files changed, 347 insertions, 153 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 8ab759d393..b14b69ba39 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -827,7 +827,9 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) { HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves(); - return (first_next_not_move != nullptr) && first_next_not_move->CanDoImplicitNullCheck(); + + return (first_next_not_move != nullptr) + && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0)); } void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) { @@ -842,7 +844,7 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) { return; } - if (!instr->CanDoImplicitNullCheck()) { + if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) { return; } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index d8d2ae381b..b404f8de3d 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -1023,14 +1023,14 @@ void LocationsBuilderX86_64::VisitCompare(HCompare* compare) { switch (compare->InputAt(0)->GetType()) { case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrInt32LongConstant(compare->InputAt(1))); + locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } case Primitive::kPrimFloat: case Primitive::kPrimDouble: { locations->SetInAt(0, Location::RequiresFpuRegister()); - locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresRegister()); break; } @@ -1052,24 +1052,46 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) { CpuRegister left_reg = 
left.AsRegister<CpuRegister>(); if (right.IsConstant()) { int64_t value = right.GetConstant()->AsLongConstant()->GetValue(); - DCHECK(IsInt<32>(value)); - if (value == 0) { - __ testq(left_reg, left_reg); + if (IsInt<32>(value)) { + if (value == 0) { + __ testq(left_reg, left_reg); + } else { + __ cmpq(left_reg, Immediate(static_cast<int32_t>(value))); + } } else { - __ cmpq(left_reg, Immediate(static_cast<int32_t>(value))); + // Value won't fit in an int. + __ cmpq(left_reg, codegen_->LiteralInt64Address(value)); } + } else if (right.IsDoubleStackSlot()) { + __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); } else { __ cmpq(left_reg, right.AsRegister<CpuRegister>()); } break; } case Primitive::kPrimFloat: { - __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); + XmmRegister left_reg = left.AsFpuRegister<XmmRegister>(); + if (right.IsConstant()) { + float value = right.GetConstant()->AsFloatConstant()->GetValue(); + __ ucomiss(left_reg, codegen_->LiteralFloatAddress(value)); + } else if (right.IsStackSlot()) { + __ ucomiss(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); + } else { + __ ucomiss(left_reg, right.AsFpuRegister<XmmRegister>()); + } __ j(kUnordered, compare->IsGtBias() ? &greater : &less); break; } case Primitive::kPrimDouble: { - __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); + XmmRegister left_reg = left.AsFpuRegister<XmmRegister>(); + if (right.IsConstant()) { + double value = right.GetConstant()->AsDoubleConstant()->GetValue(); + __ ucomisd(left_reg, codegen_->LiteralDoubleAddress(value)); + } else if (right.IsDoubleStackSlot()) { + __ ucomisd(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); + } else { + __ ucomisd(left_reg, right.AsFpuRegister<XmmRegister>()); + } __ j(kUnordered, compare->IsGtBias() ? 
&greater : &less); break; } @@ -1178,8 +1200,7 @@ void LocationsBuilderX86_64::VisitReturn(HReturn* ret) { case Primitive::kPrimFloat: case Primitive::kPrimDouble: - locations->SetInAt(0, - Location::FpuRegisterLocation(XMM0)); + locations->SetInAt(0, Location::FpuRegisterLocation(XMM0)); break; default: @@ -1419,7 +1440,6 @@ void LocationsBuilderX86_64::VisitNeg(HNeg* neg) { case Primitive::kPrimDouble: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); - locations->AddTemp(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); break; @@ -1447,26 +1467,22 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) { case Primitive::kPrimFloat: { DCHECK(in.Equals(out)); - CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>(); - XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>(); + XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); // Implement float negation with an exclusive or with value // 0x80000000 (mask for bit 31, representing the sign of a // single-precision floating-point number). - __ movq(constant, Immediate(INT64_C(0x80000000))); - __ movd(mask, constant); + __ movss(mask, codegen_->LiteralInt32Address(0x80000000)); __ xorps(out.AsFpuRegister<XmmRegister>(), mask); break; } case Primitive::kPrimDouble: { DCHECK(in.Equals(out)); - CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>(); - XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>(); + XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); // Implement double negation with an exclusive or with value // 0x8000000000000000 (mask for bit 63, representing the sign of // a double-precision floating-point number). 
- __ movq(constant, Immediate(INT64_C(0x8000000000000000))); - __ movd(mask, constant); + __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000))); __ xorpd(out.AsFpuRegister<XmmRegister>(), mask); break; } @@ -1613,19 +1629,19 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-float' instruction. - locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimLong: // Processing a Dex `long-to-float' instruction. - locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimDouble: // Processing a Dex `double-to-float' instruction. - locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -1644,19 +1660,19 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-double' instruction. - locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimLong: // Processing a Dex `long-to-double' instruction. - locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimFloat: // Processing a Dex `float-to-double' instruction. 
- locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -1910,17 +1926,56 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-float' instruction. - __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false); + if (in.IsRegister()) { + __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false); + } else if (in.IsConstant()) { + int32_t v = in.GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dest = out.AsFpuRegister<XmmRegister>(); + if (v == 0) { + __ xorps(dest, dest); + } else { + __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v))); + } + } else { + __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), in.GetStackIndex()), false); + } break; case Primitive::kPrimLong: // Processing a Dex `long-to-float' instruction. - __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true); + if (in.IsRegister()) { + __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true); + } else if (in.IsConstant()) { + int64_t v = in.GetConstant()->AsLongConstant()->GetValue(); + XmmRegister dest = out.AsFpuRegister<XmmRegister>(); + if (v == 0) { + __ xorps(dest, dest); + } else { + __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v))); + } + } else { + __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), in.GetStackIndex()), true); + } break; case Primitive::kPrimDouble: // Processing a Dex `double-to-float' instruction. 
- __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); + if (in.IsFpuRegister()) { + __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); + } else if (in.IsConstant()) { + double v = in.GetConstant()->AsDoubleConstant()->GetValue(); + XmmRegister dest = out.AsFpuRegister<XmmRegister>(); + if (bit_cast<int64_t, double>(v) == 0) { + __ xorps(dest, dest); + } else { + __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v))); + } + } else { + __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), in.GetStackIndex())); + } break; default: @@ -1938,17 +1993,56 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-double' instruction. - __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false); + if (in.IsRegister()) { + __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false); + } else if (in.IsConstant()) { + int32_t v = in.GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dest = out.AsFpuRegister<XmmRegister>(); + if (v == 0) { + __ xorpd(dest, dest); + } else { + __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v))); + } + } else { + __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), in.GetStackIndex()), false); + } break; case Primitive::kPrimLong: // Processing a Dex `long-to-double' instruction. 
- __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true); + if (in.IsRegister()) { + __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true); + } else if (in.IsConstant()) { + int64_t v = in.GetConstant()->AsLongConstant()->GetValue(); + XmmRegister dest = out.AsFpuRegister<XmmRegister>(); + if (v == 0) { + __ xorpd(dest, dest); + } else { + __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v))); + } + } else { + __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), in.GetStackIndex()), true); + } break; case Primitive::kPrimFloat: // Processing a Dex `float-to-double' instruction. - __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); + if (in.IsFpuRegister()) { + __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); + } else if (in.IsConstant()) { + float v = in.GetConstant()->AsFloatConstant()->GetValue(); + XmmRegister dest = out.AsFpuRegister<XmmRegister>(); + if (bit_cast<int32_t, float>(v) == 0) { + __ xorpd(dest, dest); + } else { + __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v))); + } + } else { + __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), in.GetStackIndex())); + } break; default: @@ -3128,7 +3222,7 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction, if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) { locations->SetInAt(1, Location::RequiresFpuRegister()); } else { - locations->SetInAt(1, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1))); } if (needs_write_barrier) { // Temporary registers for the write barrier. 
@@ -3155,24 +3249,46 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, switch (field_type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: { - __ movb(Address(base, offset), value.AsRegister<CpuRegister>()); + if (value.IsConstant()) { + int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + __ movb(Address(base, offset), Immediate(v)); + } else { + __ movb(Address(base, offset), value.AsRegister<CpuRegister>()); + } break; } case Primitive::kPrimShort: case Primitive::kPrimChar: { - __ movw(Address(base, offset), value.AsRegister<CpuRegister>()); + if (value.IsConstant()) { + int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + __ movw(Address(base, offset), Immediate(v)); + } else { + __ movw(Address(base, offset), value.AsRegister<CpuRegister>()); + } break; } case Primitive::kPrimInt: case Primitive::kPrimNot: { - __ movl(Address(base, offset), value.AsRegister<CpuRegister>()); + if (value.IsConstant()) { + int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + __ movl(Address(base, offset), Immediate(v)); + } else { + __ movl(Address(base, offset), value.AsRegister<CpuRegister>()); + } break; } case Primitive::kPrimLong: { - __ movq(Address(base, offset), value.AsRegister<CpuRegister>()); + if (value.IsConstant()) { + int64_t v = value.GetConstant()->AsLongConstant()->GetValue(); + DCHECK(IsInt<32>(v)); + int32_t v_32 = v; + __ movq(Address(base, offset), Immediate(v_32)); + } else { + __ movq(Address(base, offset), value.AsRegister<CpuRegister>()); + } break; } @@ -3291,8 +3407,7 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt( - 1, Location::RegisterOrConstant(instruction->InputAt(1))); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if 
(Primitive::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { @@ -3431,7 +3546,7 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) { 1, Location::RegisterOrConstant(instruction->InputAt(1))); locations->SetInAt(2, Location::RequiresRegister()); if (value_type == Primitive::kPrimLong) { - locations->SetInAt(2, Location::RequiresRegister()); + locations->SetInAt(2, Location::RegisterOrInt32LongConstant(instruction->InputAt(2))); } else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) { locations->SetInAt(2, Location::RequiresFpuRegister()); } else { @@ -3519,8 +3634,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { __ movl(Address(obj, offset), value.AsRegister<CpuRegister>()); } else { DCHECK(value.IsConstant()) << value; - __ movl(Address(obj, offset), - Immediate(value.GetConstant()->AsIntConstant()->GetValue())); + int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + __ movl(Address(obj, offset), Immediate(v)); } } else { DCHECK(index.IsRegister()) << index; @@ -3529,8 +3644,9 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { value.AsRegister<CpuRegister>()); } else { DCHECK(value.IsConstant()) << value; + int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), - Immediate(value.GetConstant()->AsIntConstant()->GetValue())); + Immediate(v)); } } codegen_->MaybeRecordImplicitNullCheck(instruction); @@ -3554,12 +3670,25 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - DCHECK(value.IsRegister()); - __ movq(Address(obj, offset), 
value.AsRegister<CpuRegister>()); + if (value.IsRegister()) { + __ movq(Address(obj, offset), value.AsRegister<CpuRegister>()); + } else { + int64_t v = value.GetConstant()->AsLongConstant()->GetValue(); + DCHECK(IsInt<32>(v)); + int32_t v_32 = v; + __ movq(Address(obj, offset), Immediate(v_32)); + } } else { - DCHECK(value.IsRegister()); - __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset), - value.AsRegister<CpuRegister>()); + if (value.IsRegister()) { + __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset), + value.AsRegister<CpuRegister>()); + } else { + int64_t v = value.GetConstant()->AsLongConstant()->GetValue(); + DCHECK(IsInt<32>(v)); + int32_t v_32 = v; + __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset), + Immediate(v_32)); + } } codegen_->MaybeRecordImplicitNullCheck(instruction); break; @@ -4145,13 +4274,7 @@ void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instructio DCHECK(instruction->GetResultType() == Primitive::kPrimInt || instruction->GetResultType() == Primitive::kPrimLong); locations->SetInAt(0, Location::RequiresRegister()); - if (instruction->GetType() == Primitive::kPrimInt) { - locations->SetInAt(1, Location::Any()); - } else { - // We can handle 32 bit constants. 
- locations->SetInAt(1, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1))); - } + locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); } @@ -4212,25 +4335,43 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in if (second.IsConstant()) { second_is_constant = true; value = second.GetConstant()->AsLongConstant()->GetValue(); - DCHECK(IsInt<32>(value)); } + bool is_int32_value = IsInt<32>(value); if (instruction->IsAnd()) { if (second_is_constant) { - __ andq(first_reg, Immediate(static_cast<int32_t>(value))); + if (is_int32_value) { + __ andq(first_reg, Immediate(static_cast<int32_t>(value))); + } else { + __ andq(first_reg, codegen_->LiteralInt64Address(value)); + } + } else if (second.IsDoubleStackSlot()) { + __ andq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex())); } else { __ andq(first_reg, second.AsRegister<CpuRegister>()); } } else if (instruction->IsOr()) { if (second_is_constant) { - __ orq(first_reg, Immediate(static_cast<int32_t>(value))); + if (is_int32_value) { + __ orq(first_reg, Immediate(static_cast<int32_t>(value))); + } else { + __ orq(first_reg, codegen_->LiteralInt64Address(value)); + } + } else if (second.IsDoubleStackSlot()) { + __ orq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex())); } else { __ orq(first_reg, second.AsRegister<CpuRegister>()); } } else { DCHECK(instruction->IsXor()); if (second_is_constant) { - __ xorq(first_reg, Immediate(static_cast<int32_t>(value))); + if (is_int32_value) { + __ xorq(first_reg, Immediate(static_cast<int32_t>(value))); + } else { + __ xorq(first_reg, codegen_->LiteralInt64Address(value)); + } + } else if (second.IsDoubleStackSlot()) { + __ xorq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex())); } else { __ xorq(first_reg, second.AsRegister<CpuRegister>()); } diff --git a/compiler/optimizing/dead_code_elimination.cc 
b/compiler/optimizing/dead_code_elimination.cc index 94990402e5..8045cc5027 100644 --- a/compiler/optimizing/dead_code_elimination.cc +++ b/compiler/optimizing/dead_code_elimination.cc @@ -41,6 +41,7 @@ void HDeadCodeElimination::Run() { && !inst->IsMemoryBarrier() // If we added an explicit barrier then we should keep it. && !inst->HasUses()) { block->RemoveInstruction(inst); + MaybeRecordStat(MethodCompilationStat::kRemovedDeadInstruction); } } } diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h index 3db2c3ff3f..cee9364c84 100644 --- a/compiler/optimizing/dead_code_elimination.h +++ b/compiler/optimizing/dead_code_elimination.h @@ -19,6 +19,7 @@ #include "nodes.h" #include "optimization.h" +#include "optimizing_compiler_stats.h" namespace art { @@ -28,8 +29,10 @@ namespace art { */ class HDeadCodeElimination : public HOptimization { public: - explicit HDeadCodeElimination(HGraph* graph) - : HOptimization(graph, true, kDeadCodeEliminationPassName) {} + HDeadCodeElimination(HGraph* graph, + OptimizingCompilerStats* stats = nullptr, + const char* name = kDeadCodeEliminationPassName) + : HOptimization(graph, true, name, stats) {} void Run() OVERRIDE; diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index e743d8eca8..8950635d6a 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -88,23 +88,36 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) { // Visit this block's list of phis. for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + HInstruction* current = it.Current(); // Ensure this block's list of phis contains only phis. 
- if (!it.Current()->IsPhi()) { + if (!current->IsPhi()) { AddError(StringPrintf("Block %d has a non-phi in its phi list.", current_block_->GetBlockId())); } - it.Current()->Accept(this); + if (current->GetNext() == nullptr && current != block->GetLastPhi()) { + AddError(StringPrintf("The recorded last phi of block %d does not match " + "the actual last phi %d.", + current_block_->GetBlockId(), + current->GetId())); + } + current->Accept(this); } // Visit this block's list of instructions. - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); - it.Advance()) { + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { + HInstruction* current = it.Current(); // Ensure this block's list of instructions does not contains phis. - if (it.Current()->IsPhi()) { + if (current->IsPhi()) { AddError(StringPrintf("Block %d has a phi in its non-phi list.", current_block_->GetBlockId())); } - it.Current()->Accept(this); + if (current->GetNext() == nullptr && current != block->GetLastInstruction()) { + AddError(StringPrintf("The recorded last instruction of block %d does not match " + "the actual last instruction %d.", + current_block_->GetBlockId(), + current->GetId())); + } + current->Accept(this); } } diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 6d2a8d77e2..bffd639e83 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -190,7 +190,7 @@ bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method, } // Run simple optimizations on the graph. 
- HDeadCodeElimination dce(callee_graph); + HDeadCodeElimination dce(callee_graph, stats_); HConstantFolding fold(callee_graph); InstructionSimplifier simplify(callee_graph, stats_); diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index f30c9a6cef..98c0eedeb2 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -89,10 +89,6 @@ void InstructionSimplifierVisitor::Run() { // current index, so don't advance the iterator. continue; } - if (simplifications_at_current_position_ >= kMaxSamePositionSimplifications) { - LOG(WARNING) << "Too many simplifications (" << simplifications_at_current_position_ - << ") occurred at the current position."; - } simplifications_at_current_position_ = 0; it.Advance(); } diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 9a6062fedf..932192e4fd 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -863,7 +863,7 @@ void IntrinsicCodeGeneratorARM::VisitStringCompareTo(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); // Note that the null check must have been done earlier. - DCHECK(!invoke->CanDoImplicitNullCheck()); + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); Register argument = locations->InAt(1).AsRegister<Register>(); __ cmp(argument, ShifterOperand(0)); diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index d3a4e6ca15..117d6a4279 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -1007,7 +1007,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); // Note that the null check must have been done earlier. 
- DCHECK(!invoke->CanDoImplicitNullCheck()); + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); Register argument = WRegisterFrom(locations->InAt(1)); __ Cmp(argument, 0); diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 95ab90de23..a8e2cdf1f6 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -962,7 +962,7 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); // Note that the null check must have been done earlier. - DCHECK(!invoke->CanDoImplicitNullCheck()); + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); Register argument = locations->InAt(1).AsRegister<Register>(); __ testl(argument, argument); diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index d9a1c31c77..5d24d1fbfb 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -704,7 +704,6 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena, locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); - locations->AddTemp(Location::RequiresFpuRegister()); return; } @@ -732,14 +731,12 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int. XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>(); CpuRegister out = locations->Out().AsRegister<CpuRegister>(); - XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); - XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>(); + XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); Label done, nan; X86_64Assembler* assembler = GetAssembler(); - // Generate 0.5 into inPlusPointFive. 
- __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f))); - __ movd(inPlusPointFive, out, false); + // Load 0.5 into inPlusPointFive. + __ movss(inPlusPointFive, codegen_->LiteralFloatAddress(0.5f)); // Add in the input. __ addss(inPlusPointFive, in); @@ -747,12 +744,8 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { // And truncate to an integer. __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1)); - __ movl(out, Immediate(kPrimIntMax)); - // maxInt = int-to-float(out) - __ cvtsi2ss(maxInt, out); - // if inPlusPointFive >= maxInt goto done - __ comiss(inPlusPointFive, maxInt); + __ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax))); __ j(kAboveEqual, &done); // if input == NaN goto nan @@ -782,14 +775,12 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) { // Implement RoundDouble as t1 = floor(input + 0.5); convert to long. XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>(); CpuRegister out = locations->Out().AsRegister<CpuRegister>(); - XmmRegister maxLong = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); - XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>(); + XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); Label done, nan; X86_64Assembler* assembler = GetAssembler(); - // Generate 0.5 into inPlusPointFive. - __ movq(out, Immediate(bit_cast<int64_t, double>(0.5))); - __ movd(inPlusPointFive, out, true); + // Load 0.5 into inPlusPointFive. + __ movsd(inPlusPointFive, codegen_->LiteralDoubleAddress(0.5)); // Add in the input. __ addsd(inPlusPointFive, in); @@ -797,12 +788,8 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) { // And truncate to an integer. 
__ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1)); - __ movq(out, Immediate(kPrimLongMax)); - // maxLong = long-to-double(out) - __ cvtsi2sd(maxLong, out, true); - // if inPlusPointFive >= maxLong goto done - __ comisd(inPlusPointFive, maxLong); + __ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax))); __ j(kAboveEqual, &done); // if input == NaN goto nan @@ -886,7 +873,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); // Note that the null check must have been done earlier. - DCHECK(!invoke->CanDoImplicitNullCheck()); + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>(); __ testl(argument, argument); @@ -960,26 +947,48 @@ static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrInt32LongConstant(invoke->InputAt(1))); } static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) { CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>(); - CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>(); + Location value = locations->InAt(1); // x86 allows unaligned access. We do not have to check the input or use specific instructions // to avoid a SIGBUS. 
switch (size) { case Primitive::kPrimByte: - __ movb(Address(address, 0), value); + if (value.IsConstant()) { + __ movb(Address(address, 0), + Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); + } else { + __ movb(Address(address, 0), value.AsRegister<CpuRegister>()); + } break; case Primitive::kPrimShort: - __ movw(Address(address, 0), value); + if (value.IsConstant()) { + __ movw(Address(address, 0), + Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); + } else { + __ movw(Address(address, 0), value.AsRegister<CpuRegister>()); + } break; case Primitive::kPrimInt: - __ movl(Address(address, 0), value); + if (value.IsConstant()) { + __ movl(Address(address, 0), + Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); + } else { + __ movl(Address(address, 0), value.AsRegister<CpuRegister>()); + } break; case Primitive::kPrimLong: - __ movq(Address(address, 0), value); + if (value.IsConstant()) { + int64_t v = value.GetConstant()->AsLongConstant()->GetValue(); + DCHECK(IsInt<32>(v)); + int32_t v_32 = v; + __ movq(Address(address, 0), Immediate(v_32)); + } else { + __ movq(Address(address, 0), value.AsRegister<CpuRegister>()); + } break; default: LOG(FATAL) << "Type not recognized for poke: " << size; diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 4b9d4fc26b..bef5896491 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -416,26 +416,6 @@ static void UpdateInputsUsers(HInstruction* instruction) { DCHECK(!instruction->HasEnvironment()); } -void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) { - DCHECK(!cursor->IsPhi()); - DCHECK(!instruction->IsPhi()); - DCHECK_EQ(instruction->GetId(), -1); - DCHECK_NE(cursor->GetId(), -1); - DCHECK_EQ(cursor->GetBlock(), this); - DCHECK(!instruction->IsControlFlow()); - instruction->next_ = cursor; - instruction->previous_ = cursor->previous_; - cursor->previous_ = instruction; - if 
(GetFirstInstruction() == cursor) { - instructions_.first_instruction_ = instruction; - } else { - instruction->previous_->next_ = instruction; - } - instruction->SetBlock(this); - instruction->SetId(GetGraph()->GetNextInstructionId()); - UpdateInputsUsers(instruction); -} - void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial, HInstruction* replacement) { DCHECK(initial->GetBlock() == this); @@ -463,23 +443,27 @@ void HBasicBlock::AddPhi(HPhi* phi) { Add(&phis_, this, phi); } +void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) { + DCHECK(!cursor->IsPhi()); + DCHECK(!instruction->IsPhi()); + DCHECK_EQ(instruction->GetId(), -1); + DCHECK_NE(cursor->GetId(), -1); + DCHECK_EQ(cursor->GetBlock(), this); + DCHECK(!instruction->IsControlFlow()); + instruction->SetBlock(this); + instruction->SetId(GetGraph()->GetNextInstructionId()); + UpdateInputsUsers(instruction); + instructions_.InsertInstructionBefore(instruction, cursor); +} + void HBasicBlock::InsertPhiAfter(HPhi* phi, HPhi* cursor) { DCHECK_EQ(phi->GetId(), -1); DCHECK_NE(cursor->GetId(), -1); DCHECK_EQ(cursor->GetBlock(), this); - if (cursor->next_ == nullptr) { - cursor->next_ = phi; - phi->previous_ = cursor; - DCHECK(phi->next_ == nullptr); - } else { - phi->next_ = cursor->next_; - phi->previous_ = cursor; - cursor->next_ = phi; - phi->next_->previous_ = phi; - } phi->SetBlock(this); phi->SetId(GetGraph()->GetNextInstructionId()); UpdateInputsUsers(phi); + phis_.InsertInstructionAfter(phi, cursor); } static void Remove(HInstructionList* instruction_list, @@ -546,6 +530,34 @@ void HInstructionList::AddInstruction(HInstruction* instruction) { } } +void HInstructionList::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) { + DCHECK(Contains(cursor)); + if (cursor == first_instruction_) { + cursor->previous_ = instruction; + instruction->next_ = cursor; + first_instruction_ = instruction; + } else { + instruction->previous_ = 
cursor->previous_; + instruction->next_ = cursor; + cursor->previous_ = instruction; + instruction->previous_->next_ = instruction; + } +} + +void HInstructionList::InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor) { + DCHECK(Contains(cursor)); + if (cursor == last_instruction_) { + cursor->next_ = instruction; + instruction->previous_ = cursor; + last_instruction_ = instruction; + } else { + instruction->next_ = cursor->next_; + instruction->previous_ = cursor; + cursor->next_ = instruction; + instruction->next_->previous_ = instruction; + } +} + void HInstructionList::RemoveInstruction(HInstruction* instruction) { if (instruction->previous_ != nullptr) { instruction->previous_->next_ = instruction->next_; diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 1565f58977..1a24cb516b 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -75,6 +75,10 @@ class HInstructionList { void AddInstruction(HInstruction* instruction); void RemoveInstruction(HInstruction* instruction); + // Insert `instruction` before/after an existing instruction `cursor`. + void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor); + void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor); + // Return true if this list contains `instruction`. 
bool Contains(HInstruction* instruction) const; @@ -467,8 +471,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { HInstruction* GetFirstInstruction() const { return instructions_.first_instruction_; } HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; } const HInstructionList& GetInstructions() const { return instructions_; } - const HInstructionList& GetPhis() const { return phis_; } HInstruction* GetFirstPhi() const { return phis_.first_instruction_; } + HInstruction* GetLastPhi() const { return phis_.last_instruction_; } + const HInstructionList& GetPhis() const { return phis_; } void AddSuccessor(HBasicBlock* block) { successors_.Add(block); @@ -1158,7 +1163,10 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> { return true; } - virtual bool CanDoImplicitNullCheck() const { return false; } + virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const { + UNUSED(obj); + return false; + } void SetReferenceTypeInfo(ReferenceTypeInfo reference_type_info) { DCHECK_EQ(GetType(), Primitive::kPrimNot); @@ -2225,7 +2233,8 @@ class HInvokeStaticOrDirect : public HInvoke { invoke_type_(invoke_type), is_recursive_(is_recursive) {} - bool CanDoImplicitNullCheck() const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + UNUSED(obj); // We access the method via the dex cache so we can't do an implicit null check. // TODO: for intrinsics we can generate implicit null checks. return false; @@ -2257,9 +2266,9 @@ class HInvokeVirtual : public HInvoke { : HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index), vtable_index_(vtable_index) {} - bool CanDoImplicitNullCheck() const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { // TODO: Add implicit null checks in intrinsics. 
- return !GetLocations()->Intrinsified(); + return (obj == InputAt(0)) && !GetLocations()->Intrinsified(); } uint32_t GetVTableIndex() const { return vtable_index_; } @@ -2283,9 +2292,9 @@ class HInvokeInterface : public HInvoke { : HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index), imt_index_(imt_index) {} - bool CanDoImplicitNullCheck() const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { // TODO: Add implicit null checks in intrinsics. - return !GetLocations()->Intrinsified(); + return (obj == InputAt(0)) && !GetLocations()->Intrinsified(); } uint32_t GetImtIndex() const { return imt_index_; } @@ -2855,8 +2864,8 @@ class HInstanceFieldGet : public HExpression<1> { return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue(); } - bool CanDoImplicitNullCheck() const OVERRIDE { - return GetFieldOffset().Uint32Value() < kPageSize; + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize; } size_t ComputeHashCode() const OVERRIDE { @@ -2889,8 +2898,8 @@ class HInstanceFieldSet : public HTemplateInstruction<2> { SetRawInputAt(1, value); } - bool CanDoImplicitNullCheck() const OVERRIDE { - return GetFieldOffset().Uint32Value() < kPageSize; + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize; } const FieldInfo& GetFieldInfo() const { return field_info_; } @@ -2920,7 +2929,8 @@ class HArrayGet : public HExpression<2> { UNUSED(other); return true; } - bool CanDoImplicitNullCheck() const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + UNUSED(obj); // TODO: We can be smarter here. // Currently, the array access is always preceded by an ArrayLength or a NullCheck // which generates the implicit null check. 
There are cases when these can be removed @@ -2962,7 +2972,8 @@ class HArraySet : public HTemplateInstruction<3> { return needs_type_check_; } - bool CanDoImplicitNullCheck() const OVERRIDE { + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + UNUSED(obj); // TODO: Same as for ArrayGet. return false; } @@ -3014,7 +3025,9 @@ class HArrayLength : public HExpression<1> { UNUSED(other); return true; } - bool CanDoImplicitNullCheck() const OVERRIDE { return true; } + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { + return obj == InputAt(0); + } DECLARE_INSTRUCTION(ArrayLength); diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 2ec8536cdf..218894fe02 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -320,7 +320,8 @@ static void RunOptimizations(HGraph* graph, const DexCompilationUnit& dex_compilation_unit, PassInfoPrinter* pass_info_printer, StackHandleScopeCollection* handles) { - HDeadCodeElimination dce(graph); + HDeadCodeElimination dce1(graph, stats); + HDeadCodeElimination dce2(graph, stats, "dead_code_elimination_final"); HConstantFolding fold1(graph); InstructionSimplifier simplify1(graph, stats); HBooleanSimplifier boolean_not(graph); @@ -339,7 +340,7 @@ static void RunOptimizations(HGraph* graph, HOptimization* optimizations[] = { &intrinsics, - &dce, + &dce1, &fold1, &simplify1, // BooleanSimplifier depends on the InstructionSimplifier removing redundant @@ -352,7 +353,8 @@ static void RunOptimizations(HGraph* graph, &licm, &bce, &type_propagation, - &simplify2 + &simplify2, + &dce2, }; RunOptimizations(optimizations, arraysize(optimizations), pass_info_printer); diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h index 9bfa543401..e6508c9851 100644 --- a/compiler/optimizing/optimizing_compiler_stats.h +++ 
b/compiler/optimizing/optimizing_compiler_stats.h @@ -29,6 +29,7 @@ enum MethodCompilationStat { kCompiledBaseline, kCompiledOptimized, kCompiledQuick, + kInstructionSimplifications, kInlinedInvoke, kNotCompiledUnsupportedIsa, kNotCompiledPathological, @@ -48,8 +49,8 @@ enum MethodCompilationStat { kNotCompiledVerifyAtRuntime, kNotCompiledClassNotVerified, kRemovedCheckedCast, + kRemovedDeadInstruction, kRemovedNullCheck, - kInstructionSimplifications, kLastStat }; @@ -96,6 +97,7 @@ class OptimizingCompilerStats { case kCompiledOptimized : return "kCompiledOptimized"; case kCompiledQuick : return "kCompiledQuick"; case kInlinedInvoke : return "kInlinedInvoke"; + case kInstructionSimplifications: return "kInstructionSimplifications"; case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa"; case kNotCompiledPathological : return "kNotCompiledPathological"; case kNotCompiledHugeMethod : return "kNotCompiledHugeMethod"; @@ -114,8 +116,8 @@ class OptimizingCompilerStats { case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime"; case kNotCompiledClassNotVerified : return "kNotCompiledClassNotVerified"; case kRemovedCheckedCast: return "kRemovedCheckedCast"; + case kRemovedDeadInstruction: return "kRemovedDeadInstruction"; case kRemovedNullCheck: return "kRemovedNullCheck"; - case kInstructionSimplifications: return "kInstructionSimplifications"; default: LOG(FATAL) << "invalid stat"; } return ""; |