Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc | 28
-rw-r--r--  compiler/optimizing/code_generator.h | 3
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 122
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 3
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 35
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 6
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 10
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 10
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 25
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 25
-rw-r--r--  compiler/optimizing/codegen_test.cc | 2
-rw-r--r--  compiler/optimizing/common_arm64.h | 9
-rw-r--r--  compiler/optimizing/dead_code_elimination.h | 6
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_arm.h | 4
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_mips.h | 4
-rw-r--r--  compiler/optimizing/induction_var_analysis.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.h | 4
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.h | 5
-rw-r--r--  compiler/optimizing/intrinsics.h | 3
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 32
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 93
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 125
-rw-r--r--  compiler/optimizing/locations.h | 35
-rw-r--r--  compiler/optimizing/optimization.h | 5
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 188
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.h | 2
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc | 1
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.h | 4
-rw-r--r--  compiler/optimizing/register_allocator.cc | 16
-rw-r--r--  compiler/optimizing/register_allocator.h | 3
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.cc | 1987
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.h | 201
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.h | 1
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 126
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 21
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 74
-rw-r--r--  compiler/optimizing/x86_memory_gen.cc | 4
-rw-r--r--  compiler/optimizing/x86_memory_gen.h | 4
41 files changed, 2888 insertions(+), 364 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 4a4b98cc48..5152075499 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -765,16 +765,24 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
LocationSummary* locations = instruction->GetLocations();
uint32_t register_mask = locations->GetRegisterMask();
- if (locations->OnlyCallsOnSlowPath()) {
- // In case of slow path, we currently set the location of caller-save registers
- // to register (instead of their stack location when pushed before the slow-path
- // call). Therefore register_mask contains both callee-save and caller-save
- // registers that hold objects. We must remove the caller-save from the mask, since
- // they will be overwritten by the callee.
- register_mask &= core_callee_save_mask_;
+ if (instruction->IsSuspendCheck()) {
+ // The suspend check has a special ABI that saves the caller-save registers in the callee,
+ // so we want to emit stack maps containing those registers.
+ // TODO: Register allocator still reserves space for the caller-save registers.
+ // We should add slow-path-specific caller-save information into LocationSummary
+ // and refactor the code here as well as in the register allocator to use it.
+ } else {
+ if (locations->OnlyCallsOnSlowPath()) {
+ // In case of slow path, we currently set the location of caller-save registers
+ // to register (instead of their stack location when pushed before the slow-path
+ // call). Therefore register_mask contains both callee-save and caller-save
+ // registers that hold objects. We must remove the caller-save from the mask, since
+ // they will be overwritten by the callee.
+ register_mask &= core_callee_save_mask_;
+ }
+ // The register mask must be a subset of callee-save registers.
+ DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
}
- // The register mask must be a subset of callee-save registers.
- DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
native_pc,
register_mask,
@@ -1174,7 +1182,7 @@ void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCod
<< "instruction->DebugName()=" << instruction->DebugName()
<< " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString();
} else {
- DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal())
+ DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
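(Aside, not part of the diff: a standalone C++ sketch of the register-mask filtering that RecordPcInfo now skips for HSuspendCheck. The mask values and register numbering below are made up for illustration; only the `mask &= callee_save_mask` step mirrors the code above.)

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical layout: bits 0-3 are caller-save, bits 4-7 are callee-save.
      const uint32_t core_callee_save_mask = 0x000000F0u;
      uint32_t register_mask = 0x00000035u;  // objects live in r0, r2, r4, r5

      // For a slow-path-only call the callee clobbers the caller-save registers,
      // so only the callee-save subset may be reported in the stack map.
      // A suspend check keeps the full mask, since its entrypoint saves the
      // caller-save registers itself.
      register_mask &= core_callee_save_mask;
      std::printf("stack-map register mask: 0x%08x\n", register_mask);  // 0x00000030
      return 0;
    }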
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index ad02ecf609..fd396c474c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -340,6 +340,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }
+ bool IsBlockedCoreRegister(size_t i) { return blocked_core_registers_[i]; }
+ bool IsBlockedFloatingPointRegister(size_t i) { return blocked_fpu_registers_[i]; }
+
// Helper that returns the pointer offset of an index in an object array.
// Note: this method assumes we always have the same pointer size, regardless
// of the architecture.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index cd7a90e280..4c4128c5f8 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -59,8 +59,8 @@ static constexpr DRegister DTMP = D31;
static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value()
class NullCheckSlowPathARM : public SlowPathCode {
@@ -119,11 +119,9 @@ class SuspendCheckSlowPathARM : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ b(GetReturnLabel());
} else {
@@ -698,8 +696,8 @@ class ReadBarrierForRootSlowPathARM : public SlowPathCode {
};
#undef __
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<ArmAssembler*>(GetAssembler())-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<ArmAssembler*>(GetAssembler())-> // NOLINT
inline Condition ARMCondition(IfCondition cond) {
switch (cond) {
@@ -2523,7 +2521,7 @@ void LocationsBuilderARM::VisitAdd(HAdd* add) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ArmEncodableConstantOrRegister(add->InputAt(1), ADD));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -2560,13 +2558,18 @@ void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
break;
case Primitive::kPrimLong: {
- DCHECK(second.IsRegisterPair());
- __ adds(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ adc(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ if (second.IsConstant()) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+ GenerateAddLongConst(out, first, value);
+ } else {
+ DCHECK(second.IsRegisterPair());
+ __ adds(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ adc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
break;
}
@@ -2600,7 +2603,7 @@ void LocationsBuilderARM::VisitSub(HSub* sub) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ArmEncodableConstantOrRegister(sub->InputAt(1), SUB));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -2636,13 +2639,18 @@ void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
}
case Primitive::kPrimLong: {
- DCHECK(second.IsRegisterPair());
- __ subs(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ sbc(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ if (second.IsConstant()) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+ GenerateAddLongConst(out, first, -value);
+ } else {
+ DCHECK(second.IsRegisterPair());
+ __ subs(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ sbc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
break;
}
@@ -4044,31 +4052,51 @@ bool LocationsBuilderARM::CanEncodeConstantAsImmediate(HConstant* input_cst,
Opcode opcode) {
uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
if (Primitive::Is64BitType(input_cst->GetType())) {
- return CanEncodeConstantAsImmediate(Low32Bits(value), opcode) &&
- CanEncodeConstantAsImmediate(High32Bits(value), opcode);
+ Opcode high_opcode = opcode;
+ SetCc low_set_cc = kCcDontCare;
+ switch (opcode) {
+ case SUB:
+ // Flip the operation to an ADD.
+ value = -value;
+ opcode = ADD;
+ FALLTHROUGH_INTENDED;
+ case ADD:
+ if (Low32Bits(value) == 0u) {
+ return CanEncodeConstantAsImmediate(High32Bits(value), opcode, kCcDontCare);
+ }
+ high_opcode = ADC;
+ low_set_cc = kCcSet;
+ break;
+ default:
+ break;
+ }
+ return CanEncodeConstantAsImmediate(Low32Bits(value), opcode, low_set_cc) &&
+ CanEncodeConstantAsImmediate(High32Bits(value), high_opcode, kCcDontCare);
} else {
return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
}
}
-bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode) {
+bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value,
+ Opcode opcode,
+ SetCc set_cc) {
ShifterOperand so;
ArmAssembler* assembler = codegen_->GetAssembler();
- if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, &so)) {
+ if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, set_cc, &so)) {
return true;
}
Opcode neg_opcode = kNoOperand;
switch (opcode) {
- case AND:
- neg_opcode = BIC;
- break;
- case ORR:
- neg_opcode = ORN;
- break;
+ case AND: neg_opcode = BIC; value = ~value; break;
+ case ORR: neg_opcode = ORN; value = ~value; break;
+ case ADD: neg_opcode = SUB; value = -value; break;
+ case ADC: neg_opcode = SBC; value = ~value; break;
+ case SUB: neg_opcode = ADD; value = -value; break;
+ case SBC: neg_opcode = ADC; value = ~value; break;
default:
return false;
}
- return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, ~value, &so);
+ return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, value, set_cc, &so);
}
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
@@ -6198,6 +6226,34 @@ void InstructionCodeGeneratorARM::GenerateEorConst(Register out, Register first,
__ eor(out, first, ShifterOperand(value));
}
+void InstructionCodeGeneratorARM::GenerateAddLongConst(Location out,
+ Location first,
+ uint64_t value) {
+ Register out_low = out.AsRegisterPairLow<Register>();
+ Register out_high = out.AsRegisterPairHigh<Register>();
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
+ uint32_t value_low = Low32Bits(value);
+ uint32_t value_high = High32Bits(value);
+ if (value_low == 0u) {
+ if (out_low != first_low) {
+ __ mov(out_low, ShifterOperand(first_low));
+ }
+ __ AddConstant(out_high, first_high, value_high);
+ return;
+ }
+ __ AddConstantSetFlags(out_low, first_low, value_low);
+ ShifterOperand so;
+ if (__ ShifterOperandCanHold(out_high, first_high, ADC, value_high, kCcDontCare, &so)) {
+ __ adc(out_high, first_high, so);
+ } else if (__ ShifterOperandCanHold(out_low, first_low, SBC, ~value_high, kCcDontCare, &so)) {
+ __ sbc(out_high, first_high, so);
+ } else {
+ LOG(FATAL) << "Unexpected constant " << value_high;
+ UNREACHABLE();
+ }
+}
+
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location first = locations->InAt(0);
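(Aside, not part of the diff: the long add/sub-with-constant paths above split the 64-bit immediate into 32-bit halves and chain the carry, ADDS for the low word then ADC for the high word, with a 64-bit SUB folded into an ADD of the negated constant. A standalone C++ sketch of that carry propagation, using illustrative names and values rather than ART code:)

    #include <cstdint>
    #include <cstdio>

    // Emulates the ADDS/ADC pair: add the low words (recording the carry out),
    // then add the high words plus that carry.
    static uint64_t AddLongConst(uint64_t first, uint64_t value) {
      uint32_t first_lo = static_cast<uint32_t>(first);
      uint32_t first_hi = static_cast<uint32_t>(first >> 32);
      uint32_t value_lo = static_cast<uint32_t>(value);
      uint32_t value_hi = static_cast<uint32_t>(value >> 32);
      uint32_t out_lo = first_lo + value_lo;           // ADDS
      uint32_t carry = (out_lo < first_lo) ? 1u : 0u;  // carry flag from the low add
      uint32_t out_hi = first_hi + value_hi + carry;   // ADC
      return (static_cast<uint64_t>(out_hi) << 32) | out_lo;
    }

    int main() {
      uint64_t a = 0x00000001FFFFFFFFull;
      // SUB a, #1 is emitted as ADD a, #-1 (i.e. 0xFFFFFFFFFFFFFFFF).
      std::printf("%llx\n", static_cast<unsigned long long>(AddLongConst(a, 1)));     // 200000000
      std::printf("%llx\n", static_cast<unsigned long long>(AddLongConst(a, -1ull))); // 1fffffffe
      return 0;
    }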
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index fa7709b9a3..5d9b2dce1c 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -183,7 +183,7 @@ class LocationsBuilderARM : public HGraphVisitor {
Location ArithmeticZeroOrFpuRegister(HInstruction* input);
Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
- bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode);
+ bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode, SetCc set_cc = kCcDontCare);
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitorARM parameter_visitor_;
@@ -220,6 +220,7 @@ class InstructionCodeGeneratorARM : public InstructionCodeGenerator {
void GenerateAndConst(Register out, Register first, uint32_t value);
void GenerateOrrConst(Register out, Register first, uint32_t value);
void GenerateEorConst(Register out, Register first, uint32_t value);
+ void GenerateAddLongConst(Location out, Location first, uint64_t value);
void HandleBitwiseOperation(HBinaryOperation* operation);
void HandleCondition(HCondition* condition);
void HandleIntegerRotate(LocationSummary* locations);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 115cee6492..d95e7df6b4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -131,8 +131,8 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type retur
return ARM64ReturnLocation(return_type);
}
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, x).Int32Value()
// Calculate memory accessing operand for save/restore live registers.
@@ -398,11 +398,9 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
@@ -609,6 +607,8 @@ class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK_NE(obj_.reg(), LR);
DCHECK_NE(obj_.reg(), WSP);
DCHECK_NE(obj_.reg(), WZR);
+ // WIP0 is used by the slow path as a temp; it cannot be the object register.
+ DCHECK_NE(obj_.reg(), IP0);
DCHECK(0 <= obj_.reg() && obj_.reg() < kNumberOfWRegisters) << obj_.reg();
// "Compact" slow path, saving two moves.
//
@@ -751,10 +751,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
<< instruction_->AsInvoke()->GetIntrinsic();
DCHECK_EQ(offset_, 0U);
- DCHECK(index_.IsRegisterPair());
- // UnsafeGet's offset location is a register pair, the low
- // part contains the correct offset.
- index = index_.ToLow();
+ DCHECK(index_.IsRegister());
}
}
@@ -1284,17 +1281,21 @@ void CodeGeneratorARM64::MoveLocation(Location destination,
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
- if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
- temp = temps.AcquireW();
- } else if (src_cst->IsLongConstant()) {
- temp = temps.AcquireX();
- } else if (src_cst->IsFloatConstant()) {
- temp = temps.AcquireS();
+ if (src_cst->IsZeroBitPattern()) {
+ temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant()) ? xzr : wzr;
} else {
- DCHECK(src_cst->IsDoubleConstant());
- temp = temps.AcquireD();
+ if (src_cst->IsIntConstant()) {
+ temp = temps.AcquireW();
+ } else if (src_cst->IsLongConstant()) {
+ temp = temps.AcquireX();
+ } else if (src_cst->IsFloatConstant()) {
+ temp = temps.AcquireS();
+ } else {
+ DCHECK(src_cst->IsDoubleConstant());
+ temp = temps.AcquireD();
+ }
+ MoveConstant(temp, src_cst);
}
- MoveConstant(temp, src_cst);
__ Str(temp, StackOperandFrom(destination));
} else {
DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
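(Aside, not part of the diff: the MoveLocation change above stores wzr/xzr directly when the constant is a zero bit pattern, instead of acquiring a scratch register. A standalone sketch of the "zero bit pattern" idea, not ART's HConstant API; note that -0.0 does not qualify:)

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // True only when every bit of the value is zero, i.e. when storing the
    // zero register would produce exactly the right bit pattern.
    static bool IsZeroBitPattern(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits == 0u;
    }

    int main() {
      std::printf("+0.0 -> %d, -0.0 -> %d, 1.0 -> %d\n",
                  IsZeroBitPattern(0.0), IsZeroBitPattern(-0.0), IsZeroBitPattern(1.0));
      // Expected output: +0.0 -> 1, -0.0 -> 0, 1.0 -> 0
      return 0;
    }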
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 1b5fa857e7..921ce10aaa 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -27,11 +27,11 @@
#include "utils/arm64/assembler_arm64.h"
#include "utils/type_reference.h"
-// TODO: make vixl clean wrt -Wshadow.
+// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
-#include "a64/disasm-a64.h"
-#include "a64/macro-assembler-a64.h"
+#include "aarch64/disasm-aarch64.h"
+#include "aarch64/macro-assembler-aarch64.h"
#pragma GCC diagnostic pop
namespace art {
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 8dd82ef9cb..58879bc2f1 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -145,8 +145,8 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type)
return MipsReturnLocation(type);
}
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
@@ -351,14 +351,12 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this,
IsDirectEntrypoint(kQuickTestSuspend));
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
@@ -503,8 +501,8 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
}
#undef __
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 3472830379..4e7a2728b1 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -102,8 +102,8 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type)
return Mips64ReturnLocation(type);
}
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
@@ -300,13 +300,11 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ Bc(GetReturnLabel());
} else {
@@ -429,8 +427,8 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
}
#undef __
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a2fa24542c..7a561bb4ad 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -47,8 +47,8 @@ static constexpr int kC2ConditionMask = 0x400;
static constexpr int kFakeReturnRegister = Register(8);
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value()
class NullCheckSlowPathX86 : public SlowPathCode {
@@ -192,13 +192,11 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
@@ -731,8 +729,8 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
};
#undef __
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<X86Assembler*>(GetAssembler())-> /* NOLINT */
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT
inline Condition X86Condition(IfCondition cond) {
switch (cond) {
@@ -7101,12 +7099,6 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
// /* LockWord */ lock_word = LockWord(monitor)
static_assert(sizeof(LockWord) == sizeof(int32_t),
"art::LockWord and int32_t have different sizes.");
- // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
- __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift));
- __ andl(temp_reg, Immediate(LockWord::kReadBarrierStateMask));
- static_assert(
- LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
- "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
// Load fence to prevent load-load reordering.
// Note that this is a no-op, thanks to the x86 memory model.
@@ -7126,8 +7118,13 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
// if (rb_state == ReadBarrier::gray_ptr_)
// ref = ReadBarrier::Mark(ref);
- __ cmpl(temp_reg, Immediate(ReadBarrier::gray_ptr_));
- __ j(kEqual, slow_path->GetEntryLabel());
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with SHR.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift + 1));
+ __ j(kCarrySet, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
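(Aside, not part of the diff: the SHR/carry test above works because gray is the only read barrier state with the low bit set, so shifting the lock word right by kReadBarrierStateShift + 1 moves exactly that bit into the carry flag. A standalone C++ model of the check, with a made-up shift value rather than ART's real LockWord layout:)

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kReadBarrierStateShift = 28;  // illustrative only
    constexpr uint32_t kWhite = 0, kGray = 1, kBlack = 2;

    // Equivalent of "shrl temp, #(shift + 1); jc slow_path": the bit shifted
    // out last is the low bit of rb_state, which is 1 only for gray.
    static bool IsGray(uint32_t lock_word) {
      return ((lock_word >> kReadBarrierStateShift) & 1u) != 0u;
    }

    int main() {
      const uint32_t states[] = {kWhite, kGray, kBlack};
      for (uint32_t state : states) {
        std::printf("rb_state=%u -> gray=%d\n", state, IsGray(state << kReadBarrierStateShift));
      }
      // Expected: only rb_state=1 reports gray=1.
      return 0;
    }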
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5d5fa8504a..cf01a791ee 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -51,8 +51,8 @@ static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15
static constexpr int kC2ConditionMask = 0x400;
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, x).Int32Value()
class NullCheckSlowPathX86_64 : public SlowPathCode {
@@ -149,13 +149,11 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
@@ -750,8 +748,8 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
};
#undef __
-// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
-#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
inline Condition X86_64IntegerCondition(IfCondition cond) {
switch (cond) {
@@ -6553,12 +6551,6 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
// /* LockWord */ lock_word = LockWord(monitor)
static_assert(sizeof(LockWord) == sizeof(int32_t),
"art::LockWord and int32_t have different sizes.");
- // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
- __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift));
- __ andl(temp_reg, Immediate(LockWord::kReadBarrierStateMask));
- static_assert(
- LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
- "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
// Load fence to prevent load-load reordering.
// Note that this is a no-op, thanks to the x86-64 memory model.
@@ -6578,8 +6570,13 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
// if (rb_state == ReadBarrier::gray_ptr_)
// ref = ReadBarrier::Mark(ref);
- __ cmpl(temp_reg, Immediate(ReadBarrier::gray_ptr_));
- __ j(kEqual, slow_path->GetEntryLabel());
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with SHR.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift + 1));
+ __ j(kCarrySet, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index fe9a7af250..18db507c48 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -247,7 +247,7 @@ static void RunCode(InstructionSet target_isa,
} else if (target_isa == kX86) {
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
+ TestCodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
RunCode(&codegenX86, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kX86_64) {
std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index af0ee4e197..cc949c5275 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -22,8 +22,13 @@
#include "nodes.h"
#include "utils/arm64/assembler_arm64.h"
-#include "a64/disasm-a64.h"
-#include "a64/macro-assembler-a64.h"
+// TODO(VIXL): Make VIXL compile with -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch64/disasm-aarch64.h"
+#include "aarch64/macro-assembler-aarch64.h"
+#include "aarch64/simulator-aarch64.h"
+#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 0ce0ec1402..58e700deba 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -31,13 +31,11 @@ class HDeadCodeElimination : public HOptimization {
public:
HDeadCodeElimination(HGraph* graph,
OptimizingCompilerStats* stats = nullptr,
- const char* name = kInitialDeadCodeEliminationPassName)
+ const char* name = kDeadCodeEliminationPassName)
: HOptimization(graph, name, stats) {}
void Run() OVERRIDE;
-
- static constexpr const char* kInitialDeadCodeEliminationPassName = "dead_code_elimination";
- static constexpr const char* kFinalDeadCodeEliminationPassName = "dead_code_elimination_final";
+ static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
private:
void MaybeRecordDeadBlock(HBasicBlock* block);
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.h b/compiler/optimizing/dex_cache_array_fixups_arm.h
index 015f910328..9142e29eff 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.h
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.h
@@ -26,7 +26,9 @@ namespace arm {
class DexCacheArrayFixups : public HOptimization {
public:
DexCacheArrayFixups(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "dex_cache_array_fixups_arm", stats) {}
+ : HOptimization(graph, kDexCacheArrayFixupsArmPassName, stats) {}
+
+ static constexpr const char* kDexCacheArrayFixupsArmPassName = "dex_cache_array_fixups_arm";
void Run() OVERRIDE;
};
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.h b/compiler/optimizing/dex_cache_array_fixups_mips.h
index 21056e130a..861a199d6c 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.h
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.h
@@ -29,9 +29,11 @@ namespace mips {
class DexCacheArrayFixups : public HOptimization {
public:
DexCacheArrayFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "dex_cache_array_fixups_mips", stats),
+ : HOptimization(graph, kDexCacheArrayFixupsMipsPassName, stats),
codegen_(codegen) {}
+ static constexpr const char* kDexCacheArrayFixupsMipsPassName = "dex_cache_array_fixups_mips";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 7c74816c26..cd4c830645 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -39,9 +39,9 @@ class HInductionVarAnalysis : public HOptimization {
void Run() OVERRIDE;
- private:
static constexpr const char* kInductionPassName = "induction_var_analysis";
+ private:
struct NodeInfo {
explicit NodeInfo(uint32_t d) : depth(d), done(false) {}
uint32_t depth;
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index 3d297dacc0..782110c40a 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -48,7 +48,9 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
class InstructionSimplifierArm : public HOptimization {
public:
InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_arm", stats) {}
+ : HOptimization(graph, kInstructionSimplifierArmPassName, stats) {}
+
+ static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
void Run() OVERRIDE {
InstructionSimplifierArmVisitor visitor(graph_, stats_);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 28648b3bea..f71684efe9 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -82,8 +82,9 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
class InstructionSimplifierArm64 : public HOptimization {
public:
InstructionSimplifierArm64(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_arm64", stats) {}
-
+ : HOptimization(graph, kInstructionSimplifierArm64PassName, stats) {}
+ static constexpr const char* kInstructionSimplifierArm64PassName
+ = "instruction_simplifier_arm64";
void Run() OVERRIDE {
InstructionSimplifierArm64Visitor visitor(graph_, stats_);
visitor.VisitReversePostOrder();
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 3429a8fdbb..1a8eb58857 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -27,9 +27,6 @@ namespace art {
class CompilerDriver;
class DexFile;
-// Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751
-static constexpr bool kRoundIsPlusPointFive = false;
-
// Positive floating-point infinities.
static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U;
static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000);
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index be061f53f7..27d9d48560 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1212,7 +1212,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
void IntrinsicLocationsBuilderARM::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1232,7 +1232,7 @@ void IntrinsicCodeGeneratorARM::VisitStringIndexOf(HInvoke* invoke) {
void IntrinsicLocationsBuilderARM::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1250,7 +1250,7 @@ void IntrinsicCodeGeneratorARM::VisitStringIndexOfAfter(HInvoke* invoke) {
void IntrinsicLocationsBuilderARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1311,7 +1311,7 @@ void IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) {
void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index e3a9d27a53..9cfe3ce569 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -29,11 +29,11 @@
using namespace vixl::aarch64; // NOLINT(build/namespaces)
-// TODO: make vixl clean wrt -Wshadow.
+// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
-#include "a64/disasm-a64.h"
-#include "a64/macro-assembler-a64.h"
+#include "aarch64/disasm-aarch64.h"
+#include "aarch64/macro-assembler-aarch64.h"
#pragma GCC diagnostic pop
namespace art {
@@ -1160,8 +1160,10 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
- Register str = XRegisterFrom(locations->InAt(0));
- Register arg = XRegisterFrom(locations->InAt(1));
+ Register str = InputRegisterAt(invoke, 0);
+ Register arg = InputRegisterAt(invoke, 1);
+ DCHECK(str.IsW());
+ DCHECK(arg.IsW());
Register out = OutputRegister(invoke);
Register temp0 = WRegisterFrom(locations->GetTemp(0));
@@ -1192,8 +1194,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
__ Subs(out, str, arg);
__ B(&end, eq);
// Load lengths of this and argument strings.
- __ Ldr(temp0, MemOperand(str.X(), count_offset));
- __ Ldr(temp1, MemOperand(arg.X(), count_offset));
+ __ Ldr(temp0, HeapOperand(str, count_offset));
+ __ Ldr(temp1, HeapOperand(arg, count_offset));
// Return zero if both strings are empty.
__ Orr(out, temp0, temp1);
__ Cbz(out, &end);
@@ -1222,8 +1224,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
// Loop to compare 4x16-bit characters at a time (ok because of string data alignment).
__ Bind(&loop);
- __ Ldr(temp4, MemOperand(str.X(), temp1));
- __ Ldr(temp0, MemOperand(arg.X(), temp1));
+ __ Ldr(temp4, MemOperand(str.X(), temp1.X()));
+ __ Ldr(temp0, MemOperand(arg.X(), temp1.X()));
__ Cmp(temp4, temp0);
__ B(ne, &find_char_diff);
__ Add(temp1, temp1, char_size * 4);
@@ -1242,14 +1244,14 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
__ Clz(temp1, temp1);
// If the number of 16-bit chars remaining <= the index where the difference occurs (0-3), then
// the difference occurs outside the remaining string data, so just return length diff (out).
- __ Cmp(temp2, Operand(temp1, LSR, 4));
+ __ Cmp(temp2, Operand(temp1.W(), LSR, 4));
__ B(le, &end);
// Extract the characters and calculate the difference.
__ Bic(temp1, temp1, 0xf);
__ Lsr(temp0, temp0, temp1);
__ Lsr(temp4, temp4, temp1);
__ And(temp4, temp4, 0xffff);
- __ Sub(out, temp4, Operand(temp0, UXTH));
+ __ Sub(out, temp4.W(), Operand(temp0.W(), UXTH));
__ Bind(&end);
@@ -1408,7 +1410,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1428,7 +1430,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1446,7 +1448,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -1505,7 +1507,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke)
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 9449f79169..55e1ab2451 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2070,7 +2070,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -2095,7 +2095,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -2121,7 +2121,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -2186,7 +2186,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke)
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 8d4d3e5e91..1e18540e1a 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1707,7 +1707,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -1728,7 +1728,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -1748,7 +1748,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1816,7 +1816,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 65f4def48b..22f4181b92 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -752,20 +752,20 @@ void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) {
GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);
}
-// Note that 32 bit x86 doesn't have the capability to inline MathRoundDouble,
-// as it needs 64 bit instructions.
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
- // See intrinsics.h.
- if (!kRoundIsPlusPointFive) {
- return;
- }
-
// Do we have instruction support?
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
+ HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(static_or_direct != nullptr);
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
+ if (static_or_direct->HasSpecialInput() &&
+ invoke->InputAt(
+ static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -774,7 +774,7 @@ void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
// We have to fall back to a call to the intrinsic.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -784,47 +784,54 @@ void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- if (locations->WillCall()) {
+ if (locations->WillCall()) { // TODO: can we reach this?
InvokeOutOfLineIntrinsic(codegen_, invoke);
return;
}
- // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
Register out = locations->Out().AsRegister<Register>();
- XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ NearLabel skip_incr, done;
X86Assembler* assembler = GetAssembler();
- // Generate 0.5 into inPlusPointFive.
- __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f)));
- __ movd(inPlusPointFive, out);
-
- // Add in the input.
- __ addss(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
-
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5f)
+ // result = result + 1.0f;
+ __ movss(t2, in);
+ __ roundss(t1, in, Immediate(1));
+ __ subss(t2, t1);
+ if (locations->GetInputCount() == 2 && locations->InAt(1).IsValid()) {
+ // Direct constant area available.
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+ __ comiss(t2, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(0.5f), constant_area));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(1.0f), constant_area));
+ __ Bind(&skip_incr);
+ } else {
+ // No constant area: go through stack.
+ __ pushl(Immediate(bit_cast<int32_t, float>(0.5f)));
+ __ pushl(Immediate(bit_cast<int32_t, float>(1.0f)));
+ __ comiss(t2, Address(ESP, 4));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, Address(ESP, 0));
+ __ Bind(&skip_incr);
+ __ addl(ESP, Immediate(8));
+ }
+
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
__ movl(out, Immediate(kPrimIntMax));
- // maxInt = int-to-float(out)
- __ cvtsi2ss(maxInt, out);
-
- // if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, maxInt);
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = float-to-int-truncate(input)
- __ cvttss2si(out, inPlusPointFive);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ __ cvtsi2ss(t2, out);
+ __ comiss(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttss2si(out, t1);
__ Bind(&done);
}
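(Aside, not part of the diff: a standalone C++ reference for the rounding the rewritten intrinsic implements, floor plus a conditional increment, NaN mapped to 0, and out-of-range results clipped in line with what comiss/cvttss2si produce. This illustrates the semantics only and is not ART code; the diff additionally shows two ways of materializing the 0.5f/1.0f literals, via the constant area when a base-address register input is available and via the stack otherwise.)

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    static int32_t RoundFloat(float in) {
      float result = std::floor(in);            // roundss with mode 1 (floor)
      if (in - result >= 0.5f) {                // comiss against 0.5f
        result += 1.0f;                         // addss 1.0f
      }
      if (std::isnan(in)) {
        return 0;                               // NaN maps to 0
      }
      if (result >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
        return std::numeric_limits<int32_t>::max();   // clipped to kPrimIntMax
      }
      if (result <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
        return std::numeric_limits<int32_t>::min();   // what cvttss2si yields out of range
      }
      return static_cast<int32_t>(result);      // cvttss2si truncation
    }

    int main() {
      std::printf("%d %d %d\n", RoundFloat(2.5f), RoundFloat(-2.5f), RoundFloat(std::nanf("")));
      // Expected: 3 -2 0
      return 0;
    }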
@@ -1216,7 +1223,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1490,7 +1497,7 @@ void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1543,7 +1550,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7e0d72930c..ab8b05c3d4 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -583,6 +583,7 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
return;
}
@@ -597,10 +598,7 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
- // See intrinsics.h.
- if (kRoundIsPlusPointFive) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
- }
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -610,47 +608,41 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
return;
}
- // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ NearLabel skip_incr, done;
X86_64Assembler* assembler = GetAssembler();
- // Load 0.5 into inPlusPointFive.
- __ movss(inPlusPointFive, codegen_->LiteralFloatAddress(0.5f));
-
- // Add in the input.
- __ addss(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
-
- // Load maxInt into out.
- codegen_->Load64BitValue(out, kPrimIntMax);
-
- // if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = float-to-int-truncate(input)
- __ cvttss2si(out, inPlusPointFive);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5f)
+ // result = result + 1.0f;
+ __ movss(t2, in);
+ __ roundss(t1, in, Immediate(1));
+ __ subss(t2, t1);
+ __ comiss(t2, codegen_->LiteralFloatAddress(0.5f));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, codegen_->LiteralFloatAddress(1.0f));
+ __ Bind(&skip_incr);
+
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
+ codegen_->Load32BitValue(out, kPrimIntMax);
+ __ cvtsi2ss(t2, out);
+ __ comiss(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttss2si(out, t1);
__ Bind(&done);
}
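The comment block above describes the new sequence operationally. As a reading aid, here is an editor's scalar sketch of the semantics it implements for the float case (not part of the patch; the function name is invented, and the explicit range checks stand in for what comiss/cvttss2si do in hardware):

  #include <cmath>
  #include <cstdint>
  #include <limits>

  // Editor's illustration only: scalar equivalent of the instruction sequence above.
  int32_t RoundFloatSketch(float in) {
    float result = std::floor(in);        // roundss(in, Immediate(1)): round toward -infinity
    if (in - result >= 0.5f) {            // comiss against 0.5f; false when in is NaN
      result += 1.0f;
    }
    if (std::isnan(in)) {
      return 0;                           // NaN maps to 0
    }
    if (result >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
      return std::numeric_limits<int32_t>::max();  // large positive values clip to the maximum
    }
    if (result <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
      return std::numeric_limits<int32_t>::min();  // cvttss2si yields this for out-of-range inputs
    }
    return static_cast<int32_t>(result);  // cvttss2si truncation
  }

The double variant below is identical except for the types, the 64-bit conversion, and kPrimLongMax.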
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
- // See intrinsics.h.
- if (kRoundIsPlusPointFive) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
- }
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -660,39 +652,36 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
return;
}
- // Implement RoundDouble as t1 = floor(input + 0.5); convert to long.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ NearLabel skip_incr, done;
X86_64Assembler* assembler = GetAssembler();
- // Load 0.5 into inPlusPointFive.
- __ movsd(inPlusPointFive, codegen_->LiteralDoubleAddress(0.5));
-
- // Add in the input.
- __ addsd(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1));
-
- // Load maxLong into out.
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5)
+ // result = result + 1.0f;
+ __ movsd(t2, in);
+ __ roundsd(t1, in, Immediate(1));
+ __ subsd(t2, t1);
+ __ comisd(t2, codegen_->LiteralDoubleAddress(0.5));
+ __ j(kBelow, &skip_incr);
+ __ addsd(t1, codegen_->LiteralDoubleAddress(1.0f));
+ __ Bind(&skip_incr);
+
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
codegen_->Load64BitValue(out, kPrimLongMax);
-
- // if inPlusPointFive >= maxLong goto done
- __ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = double-to-long-truncate(input)
- __ cvttsd2si(out, inPlusPointFive, /* is64bit */ true);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ __ cvtsi2sd(t2, out, /* is64bit */ true);
+ __ comisd(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags, implicit zero extension to 64-bit
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttsd2si(out, t1, /* is64bit */ true);
__ Bind(&done);
}
@@ -1303,7 +1292,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1577,7 +1566,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1634,7 +1623,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 7a78bfdc8d..5fdfb9b6ca 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -376,6 +376,10 @@ class Location : public ValueObject {
return PolicyField::Decode(GetPayload());
}
+ bool RequiresRegisterKind() const {
+ return GetPolicy() == kRequiresRegister || GetPolicy() == kRequiresFpuRegister;
+ }
+
uintptr_t GetEncoding() const {
return GetPayload();
}
@@ -480,6 +484,7 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
public:
enum CallKind {
kNoCall,
+ kCallOnMainAndSlowPath,
kCallOnSlowPath,
kCallOnMainOnly
};
@@ -540,10 +545,29 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
Location Out() const { return output_; }
- bool CanCall() const { return call_kind_ != kNoCall; }
- bool WillCall() const { return call_kind_ == kCallOnMainOnly; }
- bool OnlyCallsOnSlowPath() const { return call_kind_ == kCallOnSlowPath; }
- bool NeedsSafepoint() const { return CanCall(); }
+ bool CanCall() const {
+ return call_kind_ != kNoCall;
+ }
+
+ bool WillCall() const {
+ return call_kind_ == kCallOnMainOnly || call_kind_ == kCallOnMainAndSlowPath;
+ }
+
+ bool CallsOnSlowPath() const {
+ return call_kind_ == kCallOnSlowPath || call_kind_ == kCallOnMainAndSlowPath;
+ }
+
+ bool OnlyCallsOnSlowPath() const {
+ return call_kind_ == kCallOnSlowPath;
+ }
+
+ bool CallsOnMainAndSlowPath() const {
+ return call_kind_ == kCallOnMainAndSlowPath;
+ }
+
+ bool NeedsSafepoint() const {
+ return CanCall();
+ }
void SetStackBit(uint32_t index) {
stack_mask_->SetBit(index);
@@ -629,8 +653,7 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
// Whether these are locations for an intrinsified call.
bool intrinsified_;
- ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
- ART_FRIEND_TEST(RegisterAllocatorTest, SameAsFirstInputHint);
+ friend class RegisterAllocatorTest;
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
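For quick reference, here is how the four call kinds map onto the predicates added above (editor's summary derived from the code, not part of the patch):

  call_kind_                 WillCall  CallsOnSlowPath  OnlyCallsOnSlowPath  NeedsSafepoint
  kNoCall                    false     false            false                false
  kCallOnMainOnly            true      false            false                true
  kCallOnSlowPath            false     true             true                 true
  kCallOnMainAndSlowPath     true      true             false                true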
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 2f59d4cd5b..0819fb01ac 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -37,7 +37,10 @@ class HOptimization : public ArenaObject<kArenaAllocOptimization> {
virtual ~HOptimization() {}
- // Return the name of the pass.
+  // Return the name of the pass. Pass names for a single HOptimization should be of the form
+  // <optimization_name> or <optimization_name>$<pass_name>, where <optimization_name> is the
+  // prefix shared by all instances of that optimization.
+  // Example: 'instruction_simplifier', 'instruction_simplifier$after_bce',
+  // 'instruction_simplifier$before_codegen'.
const char* GetPassName() const { return pass_name_; }
// Perform the analysis itself.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d5b0d77fe5..f7c82d1987 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -95,6 +95,8 @@ namespace art {
static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;
+static constexpr const char* kPassNameSeparator = "$";
+
/**
* Used by the code generator, to allocate the code in a vector.
*/
@@ -266,7 +268,7 @@ class PassScope : public ValueObject {
class OptimizingCompiler FINAL : public Compiler {
public:
explicit OptimizingCompiler(CompilerDriver* driver);
- ~OptimizingCompiler();
+ ~OptimizingCompiler() OVERRIDE;
bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;
@@ -305,17 +307,17 @@ class OptimizingCompiler FINAL : public Compiler {
OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
- protected:
- virtual void RunOptimizations(HGraph* graph,
- CodeGenerator* codegen,
- CompilerDriver* driver,
- const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const;
+ private:
+ void RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) const;
- virtual void RunOptimizations(HOptimization* optimizations[],
- size_t length,
- PassObserver* pass_observer) const;
+ void RunOptimizations(HOptimization* optimizations[],
+ size_t length,
+ PassObserver* pass_observer) const;
private:
// Create a 'CompiledMethod' for an optimized graph.
@@ -420,6 +422,117 @@ static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
|| instruction_set == kX86_64;
}
+static HOptimization* BuildOptimization(
+ const std::string& opt_name,
+ ArenaAllocator* arena,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ StackHandleScopeCollection* handles,
+ SideEffectsAnalysis* most_recent_side_effects,
+ HInductionVarAnalysis* most_recent_induction) {
+ if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
+ return new (arena) arm::InstructionSimplifierArm(graph, stats);
+ } else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
+ return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
+ } else if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
+ CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
+ return new (arena) BoundsCheckElimination(graph,
+ *most_recent_side_effects,
+ most_recent_induction);
+ } else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) GVNOptimization(graph, *most_recent_side_effects);
+ } else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
+ return new (arena) HConstantFolding(graph);
+ } else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
+ return new (arena) HDeadCodeElimination(graph, stats);
+ } else if (opt_name == HInliner::kInlinerPassName) {
+ size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
+ return new (arena) HInliner(graph, // outer_graph
+ graph, // outermost_graph
+ codegen,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* depth */ 0);
+ } else if (opt_name == HSharpening::kSharpeningPassName) {
+ return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
+ } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
+ return new (arena) HSelectGenerator(graph, stats);
+ } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
+ return new (arena) HInductionVarAnalysis(graph);
+ } else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
+ return new (arena) InstructionSimplifier(graph, stats);
+ } else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
+ return new (arena) IntrinsicsRecognizer(graph, driver, stats);
+ } else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) LICM(graph, *most_recent_side_effects, stats);
+ } else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) LoadStoreElimination(graph, *most_recent_side_effects);
+ } else if (opt_name == mips::DexCacheArrayFixups::kDexCacheArrayFixupsMipsPassName) {
+ return new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
+ } else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
+ return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ } else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
+ return new (arena) SideEffectsAnalysis(graph);
+ } else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
+ return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ } else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
+ return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ }
+ return nullptr;
+}
+
+static ArenaVector<HOptimization*> BuildOptimizations(
+ const std::vector<std::string>& pass_names,
+ ArenaAllocator* arena,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ StackHandleScopeCollection* handles) {
+  // A few HOptimization constructors require a SideEffectsAnalysis or HInductionVarAnalysis
+  // instance. This method assumes that each such pass uses the nearest instance preceding it
+  // in the pass name list.
+ SideEffectsAnalysis* most_recent_side_effects = nullptr;
+ HInductionVarAnalysis* most_recent_induction = nullptr;
+ ArenaVector<HOptimization*> ret(arena->Adapter());
+ for (std::string pass_name : pass_names) {
+ size_t pos = pass_name.find(kPassNameSeparator); // Strip suffix to get base pass name.
+ std::string opt_name = pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
+
+ HOptimization* opt = BuildOptimization(
+ opt_name,
+ arena,
+ graph,
+ stats,
+ codegen,
+ driver,
+ dex_compilation_unit,
+ handles,
+ most_recent_side_effects,
+ most_recent_induction);
+ CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
+ ret.push_back(opt);
+
+ if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
+ most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
+ } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
+ most_recent_induction = down_cast<HInductionVarAnalysis*>(opt);
+ }
+ }
+ return ret;
+}
+
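As a reading aid for BuildOptimizations() above, this is the suffix handling in isolation (editor's sketch; StripPassSuffix is an invented name, not a helper in the patch):

  #include <string>

  // Strip the optional "$<suffix>" so the base optimization name can be matched
  // against the pass-name constants checked in BuildOptimization().
  std::string StripPassSuffix(const std::string& pass_name) {
    size_t pos = pass_name.find('$');  // kPassNameSeparator
    return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
  }
  // StripPassSuffix("instruction_simplifier$after_bce") yields "instruction_simplifier";
  // the suffix only distinguishes instances of the same pass in the list returned by
  // CompilerOptions::GetPassesToRun() and in pass dumps.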
void OptimizingCompiler::RunOptimizations(HOptimization* optimizations[],
size_t length,
PassObserver* pass_observer) const {
@@ -444,11 +557,11 @@ void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
}
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
HInliner* inliner = new (graph->GetArena()) HInliner(
- graph,
- graph,
+ graph, // outer_graph
+ graph, // outermost_graph
codegen,
- dex_compilation_unit,
- dex_compilation_unit,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
driver,
handles,
stats,
@@ -473,7 +586,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
arm::InstructionSimplifierArm* simplifier =
new (arena) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* arm_optimizations[] = {
simplifier,
side_effects,
@@ -489,7 +602,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
arm64::InstructionSimplifierArm64* simplifier =
new (arena) arm64::InstructionSimplifierArm64(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* arm64_optimizations[] = {
simplifier,
side_effects,
@@ -518,7 +631,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
x86::PcRelativeFixups* pc_relative_fixups =
new (arena) x86::PcRelativeFixups(graph, codegen, stats);
x86::X86MemoryOperandGeneration* memory_gen =
- new(arena) x86::X86MemoryOperandGeneration(graph, stats, codegen);
+ new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_optimizations[] = {
pc_relative_fixups,
memory_gen
@@ -530,7 +643,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
x86::X86MemoryOperandGeneration* memory_gen =
- new(arena) x86::X86MemoryOperandGeneration(graph, stats, codegen);
+ new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_64_optimizations[] = {
memory_gen
};
@@ -546,7 +659,8 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
NO_INLINE // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
CodeGenerator* codegen,
- PassObserver* pass_observer) {
+ PassObserver* pass_observer,
+ RegisterAllocator::Strategy strategy) {
{
PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
pass_observer);
@@ -559,7 +673,7 @@ static void AllocateRegisters(HGraph* graph,
}
{
PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
+ RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
}
}
@@ -571,15 +685,30 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
StackHandleScopeCollection* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
ArenaAllocator* arena = graph->GetArena();
+ if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
+ ArenaVector<HOptimization*> optimizations = BuildOptimizations(
+ *driver->GetCompilerOptions().GetPassesToRun(),
+ arena,
+ graph,
+ stats,
+ codegen,
+ driver,
+ dex_compilation_unit,
+ handles);
+ RunOptimizations(&optimizations[0], optimizations.size(), pass_observer);
+ return;
+ }
+
HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
- graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
+ graph, stats, "dead_code_elimination$initial");
HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
- graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
+ graph, stats, "dead_code_elimination$final");
HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
- HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
- HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding_after_bce");
+ HConstantFolding* fold2 = new (arena) HConstantFolding(
+ graph, "constant_folding$after_inlining");
+ HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
LICM* licm = new (arena) LICM(graph, *side_effects, stats);
@@ -588,9 +717,9 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier_after_bce");
+ graph, stats, "instruction_simplifier$after_bce");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier_before_codegen");
+ graph, stats, "instruction_simplifier$before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver, stats);
HOptimization* optimizations1[] = {
@@ -626,7 +755,6 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, pass_observer);
- AllocateRegisters(graph, codegen, pass_observer);
}
static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
@@ -841,6 +969,10 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
&pass_observer,
&handles);
+ RegisterAllocator::Strategy regalloc_strategy =
+ compiler_options.GetRegisterAllocationStrategy();
+ AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);
+
codegen->Compile(code_allocator);
pass_observer.DumpDisassembly();
}
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 1e8b071bb3..5a7397bf9d 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -32,6 +32,8 @@ class PcRelativeFixups : public HOptimization {
: HOptimization(graph, "pc_relative_fixups_mips", stats),
codegen_(codegen) {}
+ static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 921f3dfff6..ad0921d7e6 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -227,6 +227,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMinDoubleDouble:
case Intrinsics::kMathMinFloatFloat:
+ case Intrinsics::kMathRoundFloat:
if (!base_added) {
DCHECK(invoke_static_or_direct != nullptr);
DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index 03de2fcece..72fa71ea94 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -29,9 +29,11 @@ namespace x86 {
class PcRelativeFixups : public HOptimization {
public:
PcRelativeFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "pc_relative_fixups_x86", stats),
+ : HOptimization(graph, kPcRelativeFixupsX86PassName, stats),
codegen_(codegen) {}
+ static constexpr const char* kPcRelativeFixupsX86PassName = "pc_relative_fixups_x86";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 2367ce1aeb..5b768d5d67 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -21,6 +21,7 @@
#include "base/bit_vector-inl.h"
#include "code_generator.h"
+#include "register_allocator_graph_color.h"
#include "register_allocator_linear_scan.h"
#include "ssa_liveness_analysis.h"
@@ -41,6 +42,8 @@ RegisterAllocator* RegisterAllocator::Create(ArenaAllocator* allocator,
switch (strategy) {
case kRegisterAllocatorLinearScan:
return new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis);
+ case kRegisterAllocatorGraphColor:
+ return new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis);
default:
LOG(FATAL) << "Invalid register allocation strategy: " << strategy;
UNREACHABLE();
@@ -163,6 +166,19 @@ bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& inte
} else {
codegen.DumpFloatingPointRegister(message, current->GetRegister());
}
+ for (LiveInterval* interval : intervals) {
+ if (interval->HasRegister()
+ && interval->GetRegister() == current->GetRegister()
+ && interval->CoversSlow(j)) {
+ message << std::endl;
+ if (interval->GetDefinedBy() != nullptr) {
+ message << interval->GetDefinedBy()->GetKind() << " ";
+ } else {
+ message << "physical ";
+ }
+ interval->Dump(message);
+ }
+ }
LOG(FATAL) << message.str();
} else {
return false;
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 729eede66e..7e1fff8e2b 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -40,7 +40,8 @@ class SsaLivenessAnalysis;
class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
public:
enum Strategy {
- kRegisterAllocatorLinearScan
+ kRegisterAllocatorLinearScan,
+ kRegisterAllocatorGraphColor
};
static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan;
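A hypothetical call site for the new strategy (editor's sketch, not in the patch; AllocateWithGraphColor is an invented helper). The real AllocateRegisters() in optimizing_compiler.cc does the same thing, but obtains the strategy from CompilerOptions:

  static void AllocateWithGraphColor(HGraph* graph,
                                     CodeGenerator* codegen,
                                     const SsaLivenessAnalysis& liveness) {
    RegisterAllocator::Create(graph->GetArena(), codegen, liveness,
                              RegisterAllocator::kRegisterAllocatorGraphColor)
        ->AllocateRegisters();
  }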
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
new file mode 100644
index 0000000000..cfdb41ab62
--- /dev/null
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -0,0 +1,1987 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "register_allocator_graph_color.h"
+
+#include "code_generator.h"
+#include "register_allocation_resolver.h"
+#include "ssa_liveness_analysis.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// Highest number of registers that we support for any platform. This can be used for std::bitset,
+// for example, which needs to know its size at compile time.
+static constexpr size_t kMaxNumRegs = 32;
+
+// The maximum number of graph coloring attempts before triggering a DCHECK.
+// This is meant to catch changes to the graph coloring algorithm that undermine its forward
+// progress guarantees. Forward progress for the algorithm means splitting live intervals on
+// every graph coloring attempt so that eventually the interference graph will be sparse enough
+// to color. The main threat to forward progress is trying to split short intervals which cannot be
+// split further; this could cause infinite looping because the interference graph would never
+// change. This is avoided by prioritizing short intervals before long ones, so that long
+// intervals are split when coloring fails.
+static constexpr size_t kMaxGraphColoringAttemptsDebug = 100;
+
+// We always want to avoid spilling inside loops.
+static constexpr size_t kLoopSpillWeightMultiplier = 10;
+
+// If we avoid moves in single jump blocks, we can avoid jumps to jumps.
+static constexpr size_t kSingleJumpBlockWeightMultiplier = 2;
+
+// We avoid moves in blocks that dominate the exit block, since these blocks will
+// be executed on every path through the method.
+static constexpr size_t kDominatesExitBlockWeightMultiplier = 2;
+
+enum class CoalesceKind {
+ kAdjacentSibling, // Prevents moves at interval split points.
+ kFixedOutputSibling, // Prevents moves from a fixed output location.
+ kFixedInput, // Prevents moves into a fixed input location.
+ kNonlinearControlFlow, // Prevents moves between blocks.
+ kPhi, // Prevents phi resolution moves.
+ kFirstInput, // Prevents a single input move.
+ kAnyInput, // May lead to better instruction selection / smaller encodings.
+};
+
+std::ostream& operator<<(std::ostream& os, const CoalesceKind& kind) {
+ return os << static_cast<typename std::underlying_type<CoalesceKind>::type>(kind);
+}
+
+static size_t LoopDepthAt(HBasicBlock* block) {
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ size_t depth = 0;
+ while (loop_info != nullptr) {
+ ++depth;
+ loop_info = loop_info->GetPreHeader()->GetLoopInformation();
+ }
+ return depth;
+}
+
+// Return the runtime cost of inserting a move instruction at the specified location.
+static size_t CostForMoveAt(size_t position, const SsaLivenessAnalysis& liveness) {
+ HBasicBlock* block = liveness.GetBlockFromPosition(position / 2);
+ DCHECK(block != nullptr);
+ size_t cost = 1;
+ if (block->IsSingleJump()) {
+ cost *= kSingleJumpBlockWeightMultiplier;
+ }
+ if (block->Dominates(block->GetGraph()->GetExitBlock())) {
+ cost *= kDominatesExitBlockWeightMultiplier;
+ }
+ for (size_t loop_depth = LoopDepthAt(block); loop_depth > 0; --loop_depth) {
+ cost *= kLoopSpillWeightMultiplier;
+ }
+ return cost;
+}
+
+// In general, we estimate coalesce priority by whether it will definitely avoid a move,
+// and by how likely it is to create an interference graph that's harder to color.
+static size_t ComputeCoalescePriority(CoalesceKind kind,
+ size_t position,
+ const SsaLivenessAnalysis& liveness) {
+ if (kind == CoalesceKind::kAnyInput) {
+ // This type of coalescing can affect instruction selection, but not moves, so we
+ // give it the lowest priority.
+ return 0;
+ } else {
+ return CostForMoveAt(position, liveness);
+ }
+}
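Editor's worked example of the weights above (not part of the patch): a move in a single-jump block nested two loops deep costs 1 * 2 * 10 * 10 = 200, a move in a block that dominates the exit block but sits outside any loop costs 1 * 2 = 2, and a move in an ordinary block costs 1; kAnyInput opportunities always get priority 0 and therefore rank below any opportunity that removes an actual move.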
+
+enum class CoalesceStage {
+ kWorklist, // Currently in the iterative coalescing worklist.
+ kActive, // Not in a worklist, but could be considered again during iterative coalescing.
+ kInactive, // No longer considered until last-chance coalescing.
+ kDefunct, // Either the two nodes interfere, or have already been coalesced.
+};
+
+std::ostream& operator<<(std::ostream& os, const CoalesceStage& stage) {
+ return os << static_cast<typename std::underlying_type<CoalesceStage>::type>(stage);
+}
+
+// Represents a coalesce opportunity between two nodes.
+struct CoalesceOpportunity : public ArenaObject<kArenaAllocRegisterAllocator> {
+ CoalesceOpportunity(InterferenceNode* a,
+ InterferenceNode* b,
+ CoalesceKind kind,
+ size_t position,
+ const SsaLivenessAnalysis& liveness)
+ : node_a(a),
+ node_b(b),
+ stage(CoalesceStage::kWorklist),
+ priority(ComputeCoalescePriority(kind, position, liveness)) {}
+
+ // Compare two coalesce opportunities based on their priority.
+ // Return true if lhs has a lower priority than that of rhs.
+ static bool CmpPriority(const CoalesceOpportunity* lhs,
+ const CoalesceOpportunity* rhs) {
+ return lhs->priority < rhs->priority;
+ }
+
+ InterferenceNode* const node_a;
+ InterferenceNode* const node_b;
+
+ // The current stage of this coalesce opportunity, indicating whether it is in a worklist,
+ // and whether it should still be considered.
+ CoalesceStage stage;
+
+ // The priority of this coalesce opportunity, based on heuristics.
+ const size_t priority;
+};
+
+enum class NodeStage {
+ kInitial, // Uninitialized.
+ kPrecolored, // Marks fixed nodes.
+ kSafepoint, // Marks safepoint nodes.
+ kPrunable, // Marks uncolored nodes in the interference graph.
+ kSimplifyWorklist, // Marks non-move-related nodes with degree less than the number of registers.
+ kFreezeWorklist, // Marks move-related nodes with degree less than the number of registers.
+ kSpillWorklist, // Marks nodes with degree greater or equal to the number of registers.
+ kPruned // Marks nodes already pruned from the interference graph.
+};
+
+std::ostream& operator<<(std::ostream& os, const NodeStage& stage) {
+ return os << static_cast<typename std::underlying_type<NodeStage>::type>(stage);
+}
+
+// Returns the estimated cost of spilling a particular live interval.
+static float ComputeSpillWeight(LiveInterval* interval, const SsaLivenessAnalysis& liveness) {
+ if (interval->HasRegister()) {
+ // Intervals with a fixed register cannot be spilled.
+ return std::numeric_limits<float>::min();
+ }
+
+ size_t length = interval->GetLength();
+ if (length == 1) {
+ // Tiny intervals should have maximum priority, since they cannot be split any further.
+ return std::numeric_limits<float>::max();
+ }
+
+ size_t use_weight = 0;
+ if (interval->GetDefinedBy() != nullptr && interval->DefinitionRequiresRegister()) {
+ // Cost for spilling at a register definition point.
+ use_weight += CostForMoveAt(interval->GetStart() + 1, liveness);
+ }
+
+ UsePosition* use = interval->GetFirstUse();
+ while (use != nullptr && use->GetPosition() <= interval->GetStart()) {
+ // Skip uses before the start of this live interval.
+ use = use->GetNext();
+ }
+
+ while (use != nullptr && use->GetPosition() <= interval->GetEnd()) {
+ if (use->GetUser() != nullptr && use->RequiresRegister()) {
+ // Cost for spilling at a register use point.
+ use_weight += CostForMoveAt(use->GetUser()->GetLifetimePosition() - 1, liveness);
+ }
+ use = use->GetNext();
+ }
+
+ // We divide by the length of the interval because we want to prioritize
+ // short intervals; we do not benefit much if we split them further.
+ return static_cast<float>(use_weight) / static_cast<float>(length);
+}
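Editor's worked example (not part of the patch): an interval of length 20 whose definition and one use both require a register inside a loop of depth one accumulates use_weight 10 + 10 = 20, giving spill weight 20 / 20 = 1.0; the same two uses spread over an interval of length 200 give only 0.1, so the shorter interval is favored when handing out registers. Fixed-register intervals and length-1 intervals bypass the formula entirely via the min/max special cases above.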
+
+// Interference nodes make up the interference graph, which is the primary data structure in
+// graph coloring register allocation. Each node represents a single live interval, and contains
+// a set of adjacent nodes corresponding to intervals overlapping with its own. To save memory,
+// pre-colored nodes never contain outgoing edges (only incoming ones).
+//
+// As nodes are pruned from the interference graph, incoming edges of the pruned node are removed,
+// but outgoing edges remain in order to later color the node based on the colors of its neighbors.
+//
+// Note that a pair interval is represented by a single node in the interference graph, which
+// essentially requires two colors. One consequence of this is that the degree of a node is not
+// necessarily equal to the number of adjacent nodes--instead, the degree reflects the maximum
+// number of colors with which a node could interfere. We model this by giving edges different
+// weights (1 or 2) to control how much it increases the degree of adjacent nodes.
+// For example, the edge between two single nodes will have weight 1. On the other hand,
+// the edge between a single node and a pair node will have weight 2. This is because the pair
+// node could block up to two colors for the single node, and because the single node could
+// block an entire two-register aligned slot for the pair node.
+// The degree is defined this way because we use it to decide whether a node is guaranteed a color,
+// and thus whether it is safe to prune it from the interference graph early on.
+class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> {
+ public:
+ InterferenceNode(ArenaAllocator* allocator,
+ LiveInterval* interval,
+ const SsaLivenessAnalysis& liveness)
+ : stage(NodeStage::kInitial),
+ interval_(interval),
+ adjacent_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ coalesce_opportunities_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ out_degree_(interval->HasRegister() ? std::numeric_limits<size_t>::max() : 0),
+ alias_(this),
+ spill_weight_(ComputeSpillWeight(interval, liveness)),
+ requires_color_(interval->RequiresRegister()) {
+ DCHECK(!interval->IsHighInterval()) << "Pair nodes should be represented by the low interval";
+ }
+
+ void AddInterference(InterferenceNode* other, bool guaranteed_not_interfering_yet) {
+ DCHECK(!IsPrecolored()) << "To save memory, fixed nodes should not have outgoing interferences";
+ DCHECK_NE(this, other) << "Should not create self loops in the interference graph";
+ DCHECK_EQ(this, alias_) << "Should not add interferences to a node that aliases another";
+ DCHECK_NE(stage, NodeStage::kPruned);
+ DCHECK_NE(other->stage, NodeStage::kPruned);
+ if (guaranteed_not_interfering_yet) {
+ DCHECK(std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other)
+ == adjacent_nodes_.end());
+ adjacent_nodes_.push_back(other);
+ out_degree_ += EdgeWeightWith(other);
+ } else {
+ auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
+ if (it == adjacent_nodes_.end()) {
+ adjacent_nodes_.push_back(other);
+ out_degree_ += EdgeWeightWith(other);
+ }
+ }
+ }
+
+ void RemoveInterference(InterferenceNode* other) {
+ DCHECK_EQ(this, alias_) << "Should not remove interferences from a coalesced node";
+ DCHECK_EQ(other->stage, NodeStage::kPruned) << "Should only remove interferences when pruning";
+ auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
+ if (it != adjacent_nodes_.end()) {
+ adjacent_nodes_.erase(it);
+ out_degree_ -= EdgeWeightWith(other);
+ }
+ }
+
+ bool ContainsInterference(InterferenceNode* other) const {
+ DCHECK(!IsPrecolored()) << "Should not query fixed nodes for interferences";
+ DCHECK_EQ(this, alias_) << "Should not query a coalesced node for interferences";
+ auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
+ return it != adjacent_nodes_.end();
+ }
+
+ LiveInterval* GetInterval() const {
+ return interval_;
+ }
+
+ const ArenaVector<InterferenceNode*>& GetAdjacentNodes() const {
+ return adjacent_nodes_;
+ }
+
+ size_t GetOutDegree() const {
+ // Pre-colored nodes have infinite degree.
+ DCHECK(!IsPrecolored() || out_degree_ == std::numeric_limits<size_t>::max());
+ return out_degree_;
+ }
+
+ void AddCoalesceOpportunity(CoalesceOpportunity* opportunity) {
+ coalesce_opportunities_.push_back(opportunity);
+ }
+
+ void ClearCoalesceOpportunities() {
+ coalesce_opportunities_.clear();
+ }
+
+ bool IsMoveRelated() const {
+ for (CoalesceOpportunity* opportunity : coalesce_opportunities_) {
+ if (opportunity->stage == CoalesceStage::kWorklist ||
+ opportunity->stage == CoalesceStage::kActive) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Return whether this node already has a color.
+ // Used to find fixed nodes in the interference graph before coloring.
+ bool IsPrecolored() const {
+ return interval_->HasRegister();
+ }
+
+ bool IsPair() const {
+ return interval_->HasHighInterval();
+ }
+
+ void SetAlias(InterferenceNode* rep) {
+ DCHECK_NE(rep->stage, NodeStage::kPruned);
+ DCHECK_EQ(this, alias_) << "Should only set a node's alias once";
+ alias_ = rep;
+ }
+
+ InterferenceNode* GetAlias() {
+ if (alias_ != this) {
+ // Recurse in order to flatten tree of alias pointers.
+ alias_ = alias_->GetAlias();
+ }
+ return alias_;
+ }
+
+ const ArenaVector<CoalesceOpportunity*>& GetCoalesceOpportunities() const {
+ return coalesce_opportunities_;
+ }
+
+ float GetSpillWeight() const {
+ return spill_weight_;
+ }
+
+ bool RequiresColor() const {
+ return requires_color_;
+ }
+
+ // We give extra weight to edges adjacent to pair nodes. See the general comment on the
+ // interference graph above.
+ size_t EdgeWeightWith(const InterferenceNode* other) const {
+ return (IsPair() || other->IsPair()) ? 2 : 1;
+ }
+
+ // The current stage of this node, indicating which worklist it belongs to.
+ NodeStage stage;
+
+ private:
+ // The live interval that this node represents.
+ LiveInterval* const interval_;
+
+ // All nodes interfering with this one.
+ // We use an unsorted vector as a set, since a tree or hash set is too heavy for the
+ // set sizes that we encounter. Using a vector leads to much better performance.
+ ArenaVector<InterferenceNode*> adjacent_nodes_;
+
+ // Interference nodes that this node should be coalesced with to reduce moves.
+ ArenaVector<CoalesceOpportunity*> coalesce_opportunities_;
+
+ // The maximum number of colors with which this node could interfere. This could be more than
+ // the number of adjacent nodes if this is a pair node, or if some adjacent nodes are pair nodes.
+ // We use "out" degree because incoming edges come from nodes already pruned from the graph,
+ // and do not affect the coloring of this node.
+ // Pre-colored nodes are treated as having infinite degree.
+ size_t out_degree_;
+
+ // The node representing this node in the interference graph.
+ // Initially set to `this`, and only changed if this node is coalesced into another.
+ InterferenceNode* alias_;
+
+ // The cost of splitting and spilling this interval to the stack.
+ // Nodes with a higher spill weight should be prioritized when assigning registers.
+ // This is essentially based on use density and location; short intervals with many uses inside
+ // deeply nested loops have a high spill weight.
+ const float spill_weight_;
+
+ const bool requires_color_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterferenceNode);
+};
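Editor's worked example of the weighted degree described above (not part of the patch): a single-register node adjacent to one other single node and one pair node has out-degree 1 + 2 = 3, and the pair node counts that same edge with weight 2 as well, reflecting that a pair can block two colors of its single neighbor and that the single node can block a two-register aligned slot of the pair.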
+
+// The order in which we color nodes is important. To guarantee forward progress,
+// we prioritize intervals that require registers, and after that we prioritize
+// short intervals. That way, if we fail to color a node, it either won't require a
+// register, or it will be a long interval that can be split in order to make the
+// interference graph sparser.
+// To improve code quality, we prioritize intervals used frequently in deeply nested loops.
+// (This metric is secondary to the forward progress requirements above.)
+// TODO: May also want to consider:
+// - Constants (since they can be rematerialized)
+// - Allocated spill slots
+static bool HasGreaterNodePriority(const InterferenceNode* lhs,
+ const InterferenceNode* rhs) {
+ // (1) Prioritize the node that requires a color.
+ if (lhs->RequiresColor() != rhs->RequiresColor()) {
+ return lhs->RequiresColor();
+ }
+
+ // (2) Prioritize the interval that has a higher spill weight.
+ return lhs->GetSpillWeight() > rhs->GetSpillWeight();
+}
+
+// A ColoringIteration holds the many data structures needed for a single graph coloring attempt,
+// and provides methods for each phase of the attempt.
+class ColoringIteration {
+ public:
+ ColoringIteration(RegisterAllocatorGraphColor* register_allocator,
+ ArenaAllocator* allocator,
+ bool processing_core_regs,
+ size_t num_regs)
+ : register_allocator_(register_allocator),
+ allocator_(allocator),
+ processing_core_regs_(processing_core_regs),
+ num_regs_(num_regs),
+ interval_node_map_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ prunable_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ pruned_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ simplify_worklist_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ freeze_worklist_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ spill_worklist_(HasGreaterNodePriority, allocator->Adapter(kArenaAllocRegisterAllocator)),
+ coalesce_worklist_(CoalesceOpportunity::CmpPriority,
+ allocator->Adapter(kArenaAllocRegisterAllocator)) {}
+
+ // Use the intervals collected from instructions to construct an
+ // interference graph mapping intervals to adjacency lists.
+ // Also, collect synthesized safepoint nodes, used to keep
+ // track of live intervals across safepoints.
+ // TODO: Should build safepoints elsewhere.
+ void BuildInterferenceGraph(const ArenaVector<LiveInterval*>& intervals,
+ const ArenaVector<InterferenceNode*>& physical_nodes,
+ ArenaVector<InterferenceNode*>* safepoints);
+
+ // Add coalesce opportunities to interference nodes.
+ void FindCoalesceOpportunities();
+
+ // Prune nodes from the interference graph to be colored later. Build
+ // a stack (pruned_nodes) containing these intervals in an order determined
+ // by various heuristics.
+ void PruneInterferenceGraph();
+
+  // Process the nodes on pruned_nodes_ to color the interference graph, spilling when
+ // necessary. Returns true if successful. Else, some intervals have been
+ // split, and the interference graph should be rebuilt for another attempt.
+ bool ColorInterferenceGraph();
+
+ // Return prunable nodes.
+ // The register allocator will need to access prunable nodes after coloring
+ // in order to tell the code generator which registers have been assigned.
+ const ArenaVector<InterferenceNode*>& GetPrunableNodes() const {
+ return prunable_nodes_;
+ }
+
+ private:
+ // Create a coalesce opportunity between two nodes.
+ void CreateCoalesceOpportunity(InterferenceNode* a,
+ InterferenceNode* b,
+ CoalesceKind kind,
+ size_t position);
+
+ // Add an edge in the interference graph, if valid.
+ // Note that `guaranteed_not_interfering_yet` is used to optimize adjacency set insertion
+ // when possible.
+ void AddPotentialInterference(InterferenceNode* from,
+ InterferenceNode* to,
+ bool guaranteed_not_interfering_yet,
+ bool both_directions = true);
+
+ // Invalidate all coalesce opportunities this node has, so that it (and possibly its neighbors)
+ // may be pruned from the interference graph.
+ void FreezeMoves(InterferenceNode* node);
+
+ // Prune a node from the interference graph, updating worklists if necessary.
+ void PruneNode(InterferenceNode* node);
+
+ // Add coalesce opportunities associated with this node to the coalesce worklist.
+ void EnableCoalesceOpportunities(InterferenceNode* node);
+
+  // If needed, move `node` from the freeze worklist to the simplify worklist.
+ void CheckTransitionFromFreezeWorklist(InterferenceNode* node);
+
+ // Return true if `into` is colored, and `from` can be coalesced with `into` conservatively.
+ bool PrecoloredHeuristic(InterferenceNode* from, InterferenceNode* into);
+
+ // Return true if `from` and `into` are uncolored, and can be coalesced conservatively.
+ bool UncoloredHeuristic(InterferenceNode* from, InterferenceNode* into);
+
+ void Coalesce(CoalesceOpportunity* opportunity);
+
+ // Merge `from` into `into` in the interference graph.
+ void Combine(InterferenceNode* from, InterferenceNode* into);
+
+ // A reference to the register allocator instance,
+ // needed to split intervals and assign spill slots.
+ RegisterAllocatorGraphColor* register_allocator_;
+
+ // An arena allocator used for a single graph coloring attempt.
+ ArenaAllocator* allocator_;
+
+ const bool processing_core_regs_;
+
+ const size_t num_regs_;
+
+ // A map from live intervals to interference nodes.
+ ArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_;
+
+ // Uncolored nodes that should be pruned from the interference graph.
+ ArenaVector<InterferenceNode*> prunable_nodes_;
+
+  // A stack of nodes pruned from the interference graph, waiting to be colored.
+ ArenaStdStack<InterferenceNode*> pruned_nodes_;
+
+  // A queue containing low degree, non-move-related nodes that can be pruned immediately.
+ ArenaDeque<InterferenceNode*> simplify_worklist_;
+
+ // A queue containing low degree, move-related nodes.
+ ArenaDeque<InterferenceNode*> freeze_worklist_;
+
+ // A queue containing high degree nodes.
+ // If we have to prune from the spill worklist, we cannot guarantee
+ // the pruned node a color, so we order the worklist by priority.
+ ArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_;
+
+ // A queue containing coalesce opportunities.
+ // We order the coalesce worklist by priority, since some coalesce opportunities (e.g., those
+ // inside of loops) are more important than others.
+ ArenaPriorityQueue<CoalesceOpportunity*,
+ decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(ColoringIteration);
+};
+
+static bool IsCoreInterval(LiveInterval* interval) {
+ return !Primitive::IsFloatingPointType(interval->GetType());
+}
+
+static size_t ComputeReservedArtMethodSlots(const CodeGenerator& codegen) {
+ return static_cast<size_t>(InstructionSetPointerSize(codegen.GetInstructionSet())) / kVRegSize;
+}
+
+RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& liveness,
+ bool iterative_move_coalescing)
+ : RegisterAllocator(allocator, codegen, liveness),
+ iterative_move_coalescing_(iterative_move_coalescing),
+ core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ temp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ physical_core_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ physical_fp_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ int_spill_slot_counter_(0),
+ double_spill_slot_counter_(0),
+ float_spill_slot_counter_(0),
+ long_spill_slot_counter_(0),
+ catch_phi_spill_slot_counter_(0),
+ reserved_art_method_slots_(ComputeReservedArtMethodSlots(*codegen)),
+ reserved_out_slots_(codegen->GetGraph()->GetMaximumNumberOfOutVRegs()),
+ number_of_globally_blocked_core_regs_(0),
+ number_of_globally_blocked_fp_regs_(0),
+ max_safepoint_live_core_regs_(0),
+ max_safepoint_live_fp_regs_(0) {
+ // Before we ask for blocked registers, set them up in the code generator.
+ codegen->SetupBlockedRegisters();
+
+ // Initialize physical core register live intervals and blocked registers.
+ // This includes globally blocked registers, such as the stack pointer.
+ physical_core_nodes_.resize(codegen_->GetNumberOfCoreRegisters(), nullptr);
+ for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
+ LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimInt);
+ physical_core_nodes_[i] =
+ new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_core_nodes_[i]->stage = NodeStage::kPrecolored;
+ core_intervals_.push_back(interval);
+ if (codegen_->IsBlockedCoreRegister(i)) {
+ ++number_of_globally_blocked_core_regs_;
+ interval->AddRange(0, liveness.GetMaxLifetimePosition());
+ }
+ }
+ // Initialize physical floating point register live intervals and blocked registers.
+ physical_fp_nodes_.resize(codegen_->GetNumberOfFloatingPointRegisters(), nullptr);
+ for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
+ LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimFloat);
+ physical_fp_nodes_[i] =
+ new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_fp_nodes_[i]->stage = NodeStage::kPrecolored;
+ fp_intervals_.push_back(interval);
+ if (codegen_->IsBlockedFloatingPointRegister(i)) {
+ ++number_of_globally_blocked_fp_regs_;
+ interval->AddRange(0, liveness.GetMaxLifetimePosition());
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::AllocateRegisters() {
+ // (1) Collect and prepare live intervals.
+ ProcessInstructions();
+
+ for (bool processing_core_regs : {true, false}) {
+ ArenaVector<LiveInterval*>& intervals = processing_core_regs
+ ? core_intervals_
+ : fp_intervals_;
+ size_t num_registers = processing_core_regs
+ ? codegen_->GetNumberOfCoreRegisters()
+ : codegen_->GetNumberOfFloatingPointRegisters();
+
+ size_t attempt = 0;
+ while (true) {
+ ++attempt;
+ DCHECK(attempt <= kMaxGraphColoringAttemptsDebug)
+ << "Exceeded debug max graph coloring register allocation attempts. "
+ << "This could indicate that the register allocator is not making forward progress, "
+ << "which could be caused by prioritizing the wrong live intervals. (Short intervals "
+ << "should be prioritized over long ones, because they cannot be split further.)";
+
+ // Many data structures are cleared between graph coloring attempts, so we reduce
+ // total memory usage by using a new arena allocator for each attempt.
+ ArenaAllocator coloring_attempt_allocator(allocator_->GetArenaPool());
+ ColoringIteration iteration(this,
+ &coloring_attempt_allocator,
+ processing_core_regs,
+ num_registers);
+
+ // (2) Build the interference graph. Also gather safepoints.
+ ArenaVector<InterferenceNode*> safepoints(
+ coloring_attempt_allocator.Adapter(kArenaAllocRegisterAllocator));
+ ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ ? physical_core_nodes_
+ : physical_fp_nodes_;
+ iteration.BuildInterferenceGraph(intervals, physical_nodes, &safepoints);
+
+ // (3) Add coalesce opportunities.
+ // If we have tried coloring the graph a suspiciously high number of times, give
+ // up on move coalescing, just in case the coalescing heuristics are not conservative.
+ // (This situation will be caught if DCHECKs are turned on.)
+ if (iterative_move_coalescing_ && attempt <= kMaxGraphColoringAttemptsDebug) {
+ iteration.FindCoalesceOpportunities();
+ }
+
+ // (4) Prune all uncolored nodes from interference graph.
+ iteration.PruneInterferenceGraph();
+
+ // (5) Color pruned nodes based on interferences.
+ bool successful = iteration.ColorInterferenceGraph();
+
+ // We manually clear coalesce opportunities for physical nodes,
+ // since they persist across coloring attempts.
+ for (InterferenceNode* node : physical_core_nodes_) {
+ node->ClearCoalesceOpportunities();
+ }
+ for (InterferenceNode* node : physical_fp_nodes_) {
+ node->ClearCoalesceOpportunities();
+ }
+
+ if (successful) {
+ // Compute the maximum number of live registers across safepoints.
+ // Notice that we do not count globally blocked registers, such as the stack pointer.
+ if (safepoints.size() > 0) {
+ size_t max_safepoint_live_regs = ComputeMaxSafepointLiveRegisters(safepoints);
+ if (processing_core_regs) {
+ max_safepoint_live_core_regs_ =
+ max_safepoint_live_regs - number_of_globally_blocked_core_regs_;
+ } else {
+            max_safepoint_live_fp_regs_ =
+ max_safepoint_live_regs - number_of_globally_blocked_fp_regs_;
+ }
+ }
+
+ // Tell the code generator which registers were allocated.
+ // We only look at prunable_nodes because we already told the code generator about
+ // fixed intervals while processing instructions. We also ignore the fixed intervals
+ // placed at the top of catch blocks.
+ for (InterferenceNode* node : iteration.GetPrunableNodes()) {
+ LiveInterval* interval = node->GetInterval();
+ if (interval->HasRegister()) {
+ Location low_reg = processing_core_regs
+ ? Location::RegisterLocation(interval->GetRegister())
+ : Location::FpuRegisterLocation(interval->GetRegister());
+ codegen_->AddAllocatedRegister(low_reg);
+ if (interval->HasHighInterval()) {
+ LiveInterval* high = interval->GetHighInterval();
+ DCHECK(high->HasRegister());
+ Location high_reg = processing_core_regs
+ ? Location::RegisterLocation(high->GetRegister())
+ : Location::FpuRegisterLocation(high->GetRegister());
+ codegen_->AddAllocatedRegister(high_reg);
+ }
+ } else {
+ DCHECK(!interval->HasHighInterval() || !interval->GetHighInterval()->HasRegister());
+ }
+ }
+
+ break;
+ }
+ } // while unsuccessful
+ } // for processing_core_instructions
+
+ // (6) Resolve locations and deconstruct SSA form.
+ RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ .Resolve(max_safepoint_live_core_regs_,
+ max_safepoint_live_fp_regs_,
+ reserved_art_method_slots_ + reserved_out_slots_,
+ int_spill_slot_counter_,
+ long_spill_slot_counter_,
+ float_spill_slot_counter_,
+ double_spill_slot_counter_,
+ catch_phi_spill_slot_counter_,
+ temp_intervals_);
+
+ if (kIsDebugBuild) {
+ Validate(/*log_fatal_on_failure*/ true);
+ }
+}
+
+bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) {
+ for (bool processing_core_regs : {true, false}) {
+ ArenaVector<LiveInterval*> intervals(
+ allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
+ HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+ LiveInterval* interval = instruction->GetLiveInterval();
+ if (interval != nullptr && IsCoreInterval(interval) == processing_core_regs) {
+ intervals.push_back(instruction->GetLiveInterval());
+ }
+ }
+
+ ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ ? physical_core_nodes_
+ : physical_fp_nodes_;
+ for (InterferenceNode* fixed : physical_nodes) {
+ LiveInterval* interval = fixed->GetInterval();
+ if (interval->GetFirstRange() != nullptr) {
+ // Ideally we would check fixed ranges as well, but currently there are times when
+ // two fixed intervals for the same register will overlap. For example, a fixed input
+      // and a fixed output may sometimes share the same register, in which case there will
+      // be two fixed intervals covering the same position.
+ }
+ }
+
+ for (LiveInterval* temp : temp_intervals_) {
+ if (IsCoreInterval(temp) == processing_core_regs) {
+ intervals.push_back(temp);
+ }
+ }
+
+ size_t spill_slots = int_spill_slot_counter_
+ + long_spill_slot_counter_
+ + float_spill_slot_counter_
+ + double_spill_slot_counter_
+ + catch_phi_spill_slot_counter_;
+ bool ok = ValidateIntervals(intervals,
+ spill_slots,
+ reserved_art_method_slots_ + reserved_out_slots_,
+ *codegen_,
+ allocator_,
+ processing_core_regs,
+ log_fatal_on_failure);
+ if (!ok) {
+ return false;
+ }
+ } // for processing_core_regs
+
+ return true;
+}
+
+void RegisterAllocatorGraphColor::ProcessInstructions() {
+ for (HLinearPostOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+
+ // Note that we currently depend on this ordering, since some helper
+ // code is designed for linear scan register allocation.
+ for (HBackwardInstructionIterator instr_it(block->GetInstructions());
+ !instr_it.Done();
+ instr_it.Advance()) {
+ ProcessInstruction(instr_it.Current());
+ }
+
+ for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ ProcessInstruction(phi_it.Current());
+ }
+
+ if (block->IsCatchBlock()
+ || (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
+ // By blocking all registers at the top of each catch block or irreducible loop, we force
+ // intervals belonging to the live-in set of the catch/header block to be spilled.
+ // TODO(ngeoffray): Phis in this block could be allocated in register.
+ size_t position = block->GetLifetimeStart();
+ BlockRegisters(position, position + 1);
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::ProcessInstruction(HInstruction* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations == nullptr) {
+ return;
+ }
+ if (locations->NeedsSafepoint() && codegen_->IsLeafMethod()) {
+ // We do this here because we do not want the suspend check to artificially
+ // create live registers.
+ DCHECK(instruction->IsSuspendCheckEntry());
+ DCHECK_EQ(locations->GetTempCount(), 0u);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ CheckForTempLiveIntervals(instruction);
+ CheckForSafepoint(instruction);
+ if (instruction->GetLocations()->WillCall()) {
+ // If a call will happen, create fixed intervals for caller-save registers.
+ // TODO: Note that it may be beneficial to later split intervals at this point,
+ // so that we allow last-minute moves from a caller-save register
+ // to a callee-save register.
+ BlockRegisters(instruction->GetLifetimePosition(),
+ instruction->GetLifetimePosition() + 1,
+ /*caller_save_only*/ true);
+ }
+ CheckForFixedInputs(instruction);
+
+ LiveInterval* interval = instruction->GetLiveInterval();
+ if (interval == nullptr) {
+ // Instructions lacking a valid output location do not have a live interval.
+ DCHECK(!locations->Out().IsValid());
+ return;
+ }
+
+ // Low intervals act as representatives for their corresponding high interval.
+ DCHECK(!interval->IsHighInterval());
+ if (codegen_->NeedsTwoRegisters(interval->GetType())) {
+ interval->AddHighInterval();
+ }
+ AddSafepointsFor(instruction);
+ CheckForFixedOutput(instruction);
+ AllocateSpillSlotForCatchPhi(instruction);
+
+ ArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
+ ? core_intervals_
+ : fp_intervals_;
+ if (interval->HasSpillSlot() || instruction->IsConstant()) {
+ // Note that if an interval already has a spill slot, then its value currently resides
+ // in the stack (e.g., parameters). Thus we do not have to allocate a register until its first
+ // register use. This is also true for constants, which can be materialized at any point.
+ size_t first_register_use = interval->FirstRegisterUse();
+ if (first_register_use != kNoLifetime) {
+ LiveInterval* split = SplitBetween(interval, interval->GetStart(), first_register_use - 1);
+ intervals.push_back(split);
+ } else {
+ // We won't allocate a register for this value.
+ }
+ } else {
+ intervals.push_back(interval);
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForFixedInputs(HInstruction* instruction) {
+ // We simply block physical registers where necessary.
+ // TODO: Ideally we would coalesce the physical register with the register
+ // allocated to the input value, but this can be tricky if, e.g., there
+ // could be multiple physical register uses of the same value at the
+ // same instruction. Furthermore, there's currently no distinction between
+ // fixed inputs to a call (which will be clobbered) and other fixed inputs (which
+ // may not be clobbered).
+ LocationSummary* locations = instruction->GetLocations();
+ size_t position = instruction->GetLifetimePosition();
+ for (size_t i = 0; i < locations->GetInputCount(); ++i) {
+ Location input = locations->InAt(i);
+ if (input.IsRegister() || input.IsFpuRegister()) {
+ BlockRegister(input, position, position + 1);
+ codegen_->AddAllocatedRegister(input);
+ } else if (input.IsPair()) {
+ BlockRegister(input.ToLow(), position, position + 1);
+ BlockRegister(input.ToHigh(), position, position + 1);
+ codegen_->AddAllocatedRegister(input.ToLow());
+ codegen_->AddAllocatedRegister(input.ToHigh());
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForFixedOutput(HInstruction* instruction) {
+ // If an instruction has a fixed output location, we give the live interval a register and then
+ // proactively split it just after the definition point to avoid creating too many interferences
+ // with a fixed node.
+ LiveInterval* interval = instruction->GetLiveInterval();
+ Location out = interval->GetDefinedBy()->GetLocations()->Out();
+ size_t position = instruction->GetLifetimePosition();
+ DCHECK_GE(interval->GetEnd() - position, 2u);
+
+ if (out.IsUnallocated() && out.GetPolicy() == Location::kSameAsFirstInput) {
+ out = instruction->GetLocations()->InAt(0);
+ }
+
+ if (out.IsRegister() || out.IsFpuRegister()) {
+ interval->SetRegister(out.reg());
+ codegen_->AddAllocatedRegister(out);
+ Split(interval, position + 1);
+ } else if (out.IsPair()) {
+ interval->SetRegister(out.low());
+ interval->GetHighInterval()->SetRegister(out.high());
+ codegen_->AddAllocatedRegister(out.ToLow());
+ codegen_->AddAllocatedRegister(out.ToHigh());
+ Split(interval, position + 1);
+ } else if (out.IsStackSlot() || out.IsDoubleStackSlot()) {
+ interval->SetSpillSlot(out.GetStackIndex());
+ } else {
+ DCHECK(out.IsUnallocated() || out.IsConstant());
+ }
+}
+
+void RegisterAllocatorGraphColor::AddSafepointsFor(HInstruction* instruction) {
+ LiveInterval* interval = instruction->GetLiveInterval();
+ for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
+ HInstruction* safepoint = safepoints_[safepoint_index - 1u];
+ size_t safepoint_position = safepoint->GetLifetimePosition();
+
+ // Check that safepoints_ is sorted by decreasing lifetime position.
+ DCHECK(safepoint_index == safepoints_.size() ||
+ safepoints_[safepoint_index]->GetLifetimePosition() < safepoint_position);
+
+ if (safepoint_position == interval->GetStart()) {
+ // The safepoint is for this instruction, so the location of the instruction
+ // does not need to be saved.
+ DCHECK_EQ(safepoint_index, safepoints_.size());
+ DCHECK_EQ(safepoint, instruction);
+ continue;
+ } else if (interval->IsDeadAt(safepoint_position)) {
+ break;
+ } else if (!interval->Covers(safepoint_position)) {
+ // Hole in the interval.
+ continue;
+ }
+ interval->AddSafepoint(safepoint);
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForTempLiveIntervals(HInstruction* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ size_t position = instruction->GetLifetimePosition();
+ for (size_t i = 0; i < locations->GetTempCount(); ++i) {
+ Location temp = locations->GetTemp(i);
+ if (temp.IsRegister() || temp.IsFpuRegister()) {
+ BlockRegister(temp, position, position + 1);
+ codegen_->AddAllocatedRegister(temp);
+ } else {
+ DCHECK(temp.IsUnallocated());
+ switch (temp.GetPolicy()) {
+ case Location::kRequiresRegister: {
+ LiveInterval* interval =
+ LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
+ interval->AddTempUse(instruction, i);
+ core_intervals_.push_back(interval);
+ temp_intervals_.push_back(interval);
+ break;
+ }
+
+ case Location::kRequiresFpuRegister: {
+ LiveInterval* interval =
+ LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
+ interval->AddTempUse(instruction, i);
+ fp_intervals_.push_back(interval);
+ temp_intervals_.push_back(interval);
+ if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
+ interval->AddHighInterval(/*is_temp*/ true);
+ temp_intervals_.push_back(interval->GetHighInterval());
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected policy for temporary location "
+ << temp.GetPolicy();
+ }
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForSafepoint(HInstruction* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ size_t position = instruction->GetLifetimePosition();
+
+ if (locations->NeedsSafepoint()) {
+ safepoints_.push_back(instruction);
+ if (locations->OnlyCallsOnSlowPath()) {
+ // We add a synthesized range at this position to record the live registers
+ // at this position. Ideally, we could just update the safepoints when locations
+ // are updated, but we currently need to know the full stack size before updating
+ // locations (because of parameters and the fact that we don't have a frame pointer).
+ // And knowing the full stack size requires knowing the maximum number of live
+ // registers at calls in slow paths.
+ // By adding the following interval in the algorithm, we can compute this
+ // maximum before updating locations.
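+ // Hypothetical example: an instruction that only calls on its slow path (e.g., a null check)
+ // gets a one-position synthesized interval here; after coloring, the registers that conflict
+ // with that interval are exactly the registers live at this safepoint
+ // (see ComputeMaxSafepointLiveRegisters()).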
+ LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
+ interval->AddRange(position, position + 1);
+ core_intervals_.push_back(interval);
+ fp_intervals_.push_back(interval);
+ }
+ }
+}
+
+LiveInterval* RegisterAllocatorGraphColor::TrySplit(LiveInterval* interval, size_t position) {
+ if (interval->GetStart() < position && position < interval->GetEnd()) {
+ return Split(interval, position);
+ } else {
+ return interval;
+ }
+}
+
+void RegisterAllocatorGraphColor::SplitAtRegisterUses(LiveInterval* interval) {
+ DCHECK(!interval->IsHighInterval());
+
+ // Split just after a register definition.
+ if (interval->IsParent() && interval->DefinitionRequiresRegister()) {
+ interval = TrySplit(interval, interval->GetStart() + 1);
+ }
+
+ UsePosition* use = interval->GetFirstUse();
+ while (use != nullptr && use->GetPosition() < interval->GetStart()) {
+ use = use->GetNext();
+ }
+
+ // Split around register uses.
+ size_t end = interval->GetEnd();
+ while (use != nullptr && use->GetPosition() <= end) {
+ if (use->RequiresRegister()) {
+ size_t position = use->GetPosition();
+ interval = TrySplit(interval, position - 1);
+ if (liveness_.GetInstructionFromPosition(position / 2)->IsControlFlow()) {
+ // If we are at the very end of a basic block, we cannot split right
+ // at the use. Split just after instead.
+ interval = TrySplit(interval, position + 1);
+ } else {
+ interval = TrySplit(interval, position);
+ }
+ }
+ use = use->GetNext();
+ }
+}
+
+void RegisterAllocatorGraphColor::AllocateSpillSlotForCatchPhi(HInstruction* instruction) {
+ if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
+ HPhi* phi = instruction->AsPhi();
+ LiveInterval* interval = phi->GetLiveInterval();
+
+ HInstruction* previous_phi = phi->GetPrevious();
+ DCHECK(previous_phi == nullptr ||
+ previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
+ << "Phis expected to be sorted by vreg number, "
+ << "so that equivalent phis are adjacent.";
+
+ if (phi->IsVRegEquivalentOf(previous_phi)) {
+ // Assign the same spill slot.
+ DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
+ interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
+ } else {
+ interval->SetSpillSlot(catch_phi_spill_slot_counter_);
+ catch_phi_spill_slot_counter_ += interval->NeedsTwoSpillSlots() ? 2 : 1;
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::BlockRegister(Location location,
+ size_t start,
+ size_t end) {
+ DCHECK(location.IsRegister() || location.IsFpuRegister());
+ int reg = location.reg();
+ LiveInterval* interval = location.IsRegister()
+ ? physical_core_nodes_[reg]->GetInterval()
+ : physical_fp_nodes_[reg]->GetInterval();
+ DCHECK(interval->GetRegister() == reg);
+ bool blocked_by_codegen = location.IsRegister()
+ ? codegen_->IsBlockedCoreRegister(reg)
+ : codegen_->IsBlockedFloatingPointRegister(reg);
+ if (blocked_by_codegen) {
+ // We've already blocked this register for the entire method. (And adding a
+ // range inside another range violates the preconditions of AddRange).
+ } else {
+ interval->AddRange(start, end);
+ }
+}
+
+void RegisterAllocatorGraphColor::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
+ for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
+ BlockRegister(Location::RegisterLocation(i), start, end);
+ }
+ }
+ for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
+ BlockRegister(Location::FpuRegisterLocation(i), start, end);
+ }
+ }
+}
+
+void ColoringIteration::AddPotentialInterference(InterferenceNode* from,
+ InterferenceNode* to,
+ bool guaranteed_not_interfering_yet,
+ bool both_directions) {
+ if (from->IsPrecolored()) {
+ // We save space by ignoring outgoing edges from fixed nodes.
+ } else if (to->GetInterval()->IsSlowPathSafepoint()) {
+ // Safepoint intervals are only there to count max live registers,
+ // so no need to give them incoming interference edges.
+ // This is also necessary for correctness, because we don't want nodes
+ // to remove themselves from safepoint adjacency sets when they're pruned.
+ } else if (to->IsPrecolored()) {
+ // It is important that only a single node represents a given fixed register in the
+ // interference graph. We retrieve that node here.
+ const ArenaVector<InterferenceNode*>& physical_nodes = to->GetInterval()->IsFloatingPoint()
+ ? register_allocator_->physical_fp_nodes_
+ : register_allocator_->physical_core_nodes_;
+ InterferenceNode* physical_node = physical_nodes[to->GetInterval()->GetRegister()];
+ from->AddInterference(physical_node, /*guaranteed_not_interfering_yet*/ false);
+ DCHECK_EQ(to->GetInterval()->GetRegister(), physical_node->GetInterval()->GetRegister());
+ DCHECK_EQ(to->GetAlias(), physical_node) << "Fixed nodes should alias the canonical fixed node";
+
+ // If a node interferes with a fixed pair node, the weight of the edge may
+ // be inaccurate after using the alias of the pair node, because the alias of the pair node
+ // is a single node.
+ // We could make special pair fixed nodes, but that ends up being too conservative because
+ // a node could then interfere with both {r1} and {r1,r2}, leading to a degree of
+ // three rather than two.
+ // Instead, we explicitly add an interference with the high node of the fixed pair node.
+ // TODO: This is too conservative at times for pair nodes, but the fact that fixed pair intervals
+ // can be unaligned on x86 complicates things.
+ if (to->IsPair()) {
+ InterferenceNode* high_node =
+ physical_nodes[to->GetInterval()->GetHighInterval()->GetRegister()];
+ DCHECK_EQ(to->GetInterval()->GetHighInterval()->GetRegister(),
+ high_node->GetInterval()->GetRegister());
+ from->AddInterference(high_node, /*guaranteed_not_interfering_yet*/ false);
+ }
+ } else {
+ // Standard interference between two uncolored nodes.
+ from->AddInterference(to, guaranteed_not_interfering_yet);
+ }
+
+ if (both_directions) {
+ AddPotentialInterference(to, from, guaranteed_not_interfering_yet, /*both_directions*/ false);
+ }
+}
+
+// Returns true if `in_node` represents an input interval of `out_node`, and the output interval
+// is allowed to have the same register as the input interval.
+// TODO: Ideally we should just produce correct intervals in liveness analysis.
+// We would need to refactor the current live interval layout to do so, which is
+// no small task.
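+// Hypothetical illustration: for an instruction whose LocationSummary allows the output to share
+// a register with its inputs, and whose input interval ends right after the definition point,
+// this returns true and no interference edge is added between the input and output nodes.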
+static bool CheckInputOutputCanOverlap(InterferenceNode* in_node, InterferenceNode* out_node) {
+ LiveInterval* output_interval = out_node->GetInterval();
+ HInstruction* defined_by = output_interval->GetDefinedBy();
+ if (defined_by == nullptr) {
+ // This must not be a definition point.
+ return false;
+ }
+
+ LocationSummary* locations = defined_by->GetLocations();
+ if (locations->OutputCanOverlapWithInputs()) {
+ // This instruction does not allow the output to reuse a register from an input.
+ return false;
+ }
+
+ LiveInterval* input_interval = in_node->GetInterval();
+ LiveInterval* next_sibling = input_interval->GetNextSibling();
+ size_t def_position = defined_by->GetLifetimePosition();
+ size_t use_position = def_position + 1;
+ if (next_sibling != nullptr && next_sibling->GetStart() == use_position) {
+ // The next sibling starts at the use position, so reusing the input register in the output
+ // would clobber the input before it's moved into the sibling interval location.
+ return false;
+ }
+
+ if (!input_interval->IsDeadAt(use_position) && input_interval->CoversSlow(use_position)) {
+ // The input interval is live after the use position.
+ return false;
+ }
+
+ HInputsRef inputs = defined_by->GetInputs();
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ if (inputs[i]->GetLiveInterval()->GetSiblingAt(def_position) == input_interval) {
+ DCHECK(input_interval->SameRegisterKind(*output_interval));
+ return true;
+ }
+ }
+
+ // The input interval was not an input for this instruction.
+ return false;
+}
+
+void ColoringIteration::BuildInterferenceGraph(
+ const ArenaVector<LiveInterval*>& intervals,
+ const ArenaVector<InterferenceNode*>& physical_nodes,
+ ArenaVector<InterferenceNode*>* safepoints) {
+ DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
+ // Build the interference graph efficiently by ordering range endpoints
+ // by position and doing a linear sweep to find interferences. (That is, we
+ // jump from endpoint to endpoint, maintaining a set of intervals live at each
+ // point. If two nodes are ever in the live set at the same time, then they
+ // interfere with each other.)
+ //
+ // We order by both position and (secondarily) by whether the endpoint
+ // begins or ends a range; we want to process range endings before range
+ // beginnings at the same position because they should not conflict.
+ //
+ // For simplicity, we create a tuple for each endpoint, and then sort the tuples.
+ // Tuple contents: (position, is_range_beginning, node).
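+ // Worked example (hypothetical ranges): intervals A = [2, 6) and B = [4, 10) produce the sorted
+ // endpoints (2, begin, A), (4, begin, B), (6, end, A), (10, end, B); A is still in the live set
+ // when B begins, so an interference edge between A and B is recorded.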
+ ArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
+ allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ // We reserve plenty of space to avoid excessive copying.
+ range_endpoints.reserve(4 * prunable_nodes_.size());
+
+ for (LiveInterval* parent : intervals) {
+ for (LiveInterval* sibling = parent; sibling != nullptr; sibling = sibling->GetNextSibling()) {
+ LiveRange* range = sibling->GetFirstRange();
+ if (range != nullptr) {
+ InterferenceNode* node = new (allocator_) InterferenceNode(
+ allocator_, sibling, register_allocator_->liveness_);
+ interval_node_map_.Insert(std::make_pair(sibling, node));
+
+ if (sibling->HasRegister()) {
+ // Fixed nodes should alias the canonical node for the corresponding register.
+ node->stage = NodeStage::kPrecolored;
+ InterferenceNode* physical_node = physical_nodes[sibling->GetRegister()];
+ node->SetAlias(physical_node);
+ DCHECK_EQ(node->GetInterval()->GetRegister(),
+ physical_node->GetInterval()->GetRegister());
+ } else if (sibling->IsSlowPathSafepoint()) {
+ // Safepoint intervals are synthesized to count max live registers.
+ // They will be processed separately after coloring.
+ node->stage = NodeStage::kSafepoint;
+ safepoints->push_back(node);
+ } else {
+ node->stage = NodeStage::kPrunable;
+ prunable_nodes_.push_back(node);
+ }
+
+ while (range != nullptr) {
+ range_endpoints.push_back(std::make_tuple(range->GetStart(), true, node));
+ range_endpoints.push_back(std::make_tuple(range->GetEnd(), false, node));
+ range = range->GetNext();
+ }
+ }
+ }
+ }
+
+ // Sort the endpoints.
+ // We explicitly ignore the third entry of each tuple (the node pointer) in order
+ // to maintain determinism.
+ std::sort(range_endpoints.begin(), range_endpoints.end(),
+ [] (const std::tuple<size_t, bool, InterferenceNode*>& lhs,
+ const std::tuple<size_t, bool, InterferenceNode*>& rhs) {
+ return std::tie(std::get<0>(lhs), std::get<1>(lhs))
+ < std::tie(std::get<0>(rhs), std::get<1>(rhs));
+ });
+
+ // Nodes live at the current position in the linear sweep.
+ ArenaVector<InterferenceNode*> live(
+ allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ // Linear sweep. When we encounter the beginning of a range, we add the corresponding node to the
+ // live set. When we encounter the end of a range, we remove the corresponding node
+ // from the live set. Nodes interfere if they are in the live set at the same time.
+ for (auto it = range_endpoints.begin(); it != range_endpoints.end(); ++it) {
+ bool is_range_beginning;
+ InterferenceNode* node;
+ size_t position;
+ // Extract information from the tuple, including the node this tuple represents.
+ std::tie(position, is_range_beginning, node) = *it;
+
+ if (is_range_beginning) {
+ bool guaranteed_not_interfering_yet = position == node->GetInterval()->GetStart();
+ for (InterferenceNode* conflicting : live) {
+ DCHECK_NE(node, conflicting);
+ if (CheckInputOutputCanOverlap(conflicting, node)) {
+ // We do not add an interference, because the instruction represented by `node` allows
+ // its output to share a register with an input, represented here by `conflicting`.
+ } else {
+ AddPotentialInterference(node, conflicting, guaranteed_not_interfering_yet);
+ }
+ }
+ DCHECK(std::find(live.begin(), live.end(), node) == live.end());
+ live.push_back(node);
+ } else {
+ // End of range.
+ auto live_it = std::find(live.begin(), live.end(), node);
+ DCHECK(live_it != live.end());
+ live.erase(live_it);
+ }
+ }
+ DCHECK(live.empty());
+}
+
+void ColoringIteration::CreateCoalesceOpportunity(InterferenceNode* a,
+ InterferenceNode* b,
+ CoalesceKind kind,
+ size_t position) {
+ DCHECK_EQ(a->IsPair(), b->IsPair())
+ << "Nodes of different memory widths should never be coalesced";
+ CoalesceOpportunity* opportunity =
+ new (allocator_) CoalesceOpportunity(a, b, kind, position, register_allocator_->liveness_);
+ a->AddCoalesceOpportunity(opportunity);
+ b->AddCoalesceOpportunity(opportunity);
+ coalesce_worklist_.push(opportunity);
+}
+
+// When looking for coalesce opportunities, we use the interval_node_map_ to find the node
+// corresponding to an interval. Note that not all intervals are in this map, notably the parents
+// of constants and stack arguments. (However, these intervals should not be involved in coalesce
+// opportunities anyway, because they're not going to be in registers.)
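+// Hypothetical example: for a phi, we pair the phi's node with the node of each input's sibling
+// that is live at the end of the corresponding predecessor block (the kPhi case below).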
+void ColoringIteration::FindCoalesceOpportunities() {
+ DCHECK(coalesce_worklist_.empty());
+
+ for (InterferenceNode* node : prunable_nodes_) {
+ LiveInterval* interval = node->GetInterval();
+
+ // Coalesce siblings.
+ LiveInterval* next_sibling = interval->GetNextSibling();
+ if (next_sibling != nullptr && interval->GetEnd() == next_sibling->GetStart()) {
+ auto it = interval_node_map_.Find(next_sibling);
+ if (it != interval_node_map_.end()) {
+ InterferenceNode* sibling_node = it->second;
+ CreateCoalesceOpportunity(node,
+ sibling_node,
+ CoalesceKind::kAdjacentSibling,
+ interval->GetEnd());
+ }
+ }
+
+ // Coalesce fixed outputs with this interval if this interval is an adjacent sibling.
+ LiveInterval* parent = interval->GetParent();
+ if (parent->HasRegister()
+ && parent->GetNextSibling() == interval
+ && parent->GetEnd() == interval->GetStart()) {
+ auto it = interval_node_map_.Find(parent);
+ if (it != interval_node_map_.end()) {
+ InterferenceNode* parent_node = it->second;
+ CreateCoalesceOpportunity(node,
+ parent_node,
+ CoalesceKind::kFixedOutputSibling,
+ parent->GetEnd());
+ }
+ }
+
+ // Try to prevent moves across blocks.
+ // Note that this does not lead to many succeeding coalesce attempts, so could be removed
+ // if found to add to compile time.
+ const SsaLivenessAnalysis& liveness = register_allocator_->liveness_;
+ if (interval->IsSplit() && liveness.IsAtBlockBoundary(interval->GetStart() / 2)) {
+ // If the start of this interval is at a block boundary, we look at the
+ // location of the interval in blocks preceding the block this interval
+ // starts at. This can avoid a move between the two blocks.
+ HBasicBlock* block = liveness.GetBlockFromPosition(interval->GetStart() / 2);
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ size_t position = predecessor->GetLifetimeEnd() - 1;
+ LiveInterval* existing = interval->GetParent()->GetSiblingAt(position);
+ if (existing != nullptr) {
+ auto it = interval_node_map_.Find(existing);
+ if (it != interval_node_map_.end()) {
+ InterferenceNode* existing_node = it->second;
+ CreateCoalesceOpportunity(node,
+ existing_node,
+ CoalesceKind::kNonlinearControlFlow,
+ position);
+ }
+ }
+ }
+ }
+
+ // Coalesce phi inputs with the corresponding output.
+ HInstruction* defined_by = interval->GetDefinedBy();
+ if (defined_by != nullptr && defined_by->IsPhi()) {
+ const ArenaVector<HBasicBlock*>& predecessors = defined_by->GetBlock()->GetPredecessors();
+ HInputsRef inputs = defined_by->GetInputs();
+
+ for (size_t i = 0, e = inputs.size(); i < e; ++i) {
+ // We want the sibling at the end of the appropriate predecessor block.
+ size_t position = predecessors[i]->GetLifetimeEnd() - 1;
+ LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(position);
+
+ auto it = interval_node_map_.Find(input_interval);
+ if (it != interval_node_map_.end()) {
+ InterferenceNode* input_node = it->second;
+ CreateCoalesceOpportunity(node, input_node, CoalesceKind::kPhi, position);
+ }
+ }
+ }
+
+ // Coalesce output with first input when policy is kSameAsFirstInput.
+ if (defined_by != nullptr) {
+ Location out = defined_by->GetLocations()->Out();
+ if (out.IsUnallocated() && out.GetPolicy() == Location::kSameAsFirstInput) {
+ LiveInterval* input_interval
+ = defined_by->InputAt(0)->GetLiveInterval()->GetSiblingAt(interval->GetStart() - 1);
+ // TODO: Could we consider lifetime holes here?
+ if (input_interval->GetEnd() == interval->GetStart()) {
+ auto it = interval_node_map_.Find(input_interval);
+ if (it != interval_node_map_.end()) {
+ InterferenceNode* input_node = it->second;
+ CreateCoalesceOpportunity(node,
+ input_node,
+ CoalesceKind::kFirstInput,
+ interval->GetStart());
+ }
+ }
+ }
+ }
+
+ // An interval that starts at an instruction (that is, one that is not split) may
+ // re-use the registers used by the inputs of that instruction, based on the
+ // location summary.
+ if (defined_by != nullptr) {
+ DCHECK(!interval->IsSplit());
+ LocationSummary* locations = defined_by->GetLocations();
+ if (!locations->OutputCanOverlapWithInputs()) {
+ HInputsRef inputs = defined_by->GetInputs();
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ size_t def_point = defined_by->GetLifetimePosition();
+ // TODO: Getting the sibling at the def_point might not be quite what we want
+ // for fixed inputs, since the use will be *at* the def_point rather than after.
+ LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(def_point);
+ if (input_interval != nullptr &&
+ input_interval->HasHighInterval() == interval->HasHighInterval()) {
+ auto it = interval_node_map_.Find(input_interval);
+ if (it != interval_node_map_.end()) {
+ InterferenceNode* input_node = it->second;
+ CreateCoalesceOpportunity(node,
+ input_node,
+ CoalesceKind::kAnyInput,
+ interval->GetStart());
+ }
+ }
+ }
+ }
+ }
+
+ // Try to prevent moves into fixed input locations.
+ UsePosition* use = interval->GetFirstUse();
+ for (; use != nullptr && use->GetPosition() <= interval->GetStart(); use = use->GetNext()) {
+ // Skip past uses before the start of this interval.
+ }
+ for (; use != nullptr && use->GetPosition() <= interval->GetEnd(); use = use->GetNext()) {
+ HInstruction* user = use->GetUser();
+ if (user == nullptr) {
+ // User may be null for certain intervals, such as temp intervals.
+ continue;
+ }
+ LocationSummary* locations = user->GetLocations();
+ Location input = locations->InAt(use->GetInputIndex());
+ if (input.IsRegister() || input.IsFpuRegister()) {
+ // TODO: Could try to handle pair interval too, but coalescing with fixed pair nodes
+ // is currently not supported.
+ InterferenceNode* fixed_node = input.IsRegister()
+ ? register_allocator_->physical_core_nodes_[input.reg()]
+ : register_allocator_->physical_fp_nodes_[input.reg()];
+ CreateCoalesceOpportunity(node,
+ fixed_node,
+ CoalesceKind::kFixedInput,
+ user->GetLifetimePosition());
+ }
+ }
+ } // for node in prunable_nodes
+}
+
+static bool IsLowDegreeNode(InterferenceNode* node, size_t num_regs) {
+ return node->GetOutDegree() < num_regs;
+}
+
+static bool IsHighDegreeNode(InterferenceNode* node, size_t num_regs) {
+ return !IsLowDegreeNode(node, num_regs);
+}
+
+void ColoringIteration::PruneInterferenceGraph() {
+ DCHECK(pruned_nodes_.empty()
+ && simplify_worklist_.empty()
+ && freeze_worklist_.empty()
+ && spill_worklist_.empty());
+ // When pruning the graph, we refer to nodes with degree less than num_regs as low degree nodes,
+ // and all others as high degree nodes. The distinction is important: low degree nodes are
+ // guaranteed a color, while high degree nodes are not.
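+ // For example (hypothetical): with num_regs_ = 4, a node with out-degree 3 can always be
+ // colored regardless of how its neighbors are colored, while a node with out-degree 4 may not.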
+
+ // Build worklists. Note that the coalesce worklist has already been
+ // filled by FindCoalesceOpportunities().
+ for (InterferenceNode* node : prunable_nodes_) {
+ DCHECK(!node->IsPrecolored()) << "Fixed nodes should never be pruned";
+ DCHECK(!node->GetInterval()->IsSlowPathSafepoint()) << "Safepoint nodes should never be pruned";
+ if (IsLowDegreeNode(node, num_regs_)) {
+ if (node->GetCoalesceOpportunities().empty()) {
+ // Simplify Worklist.
+ node->stage = NodeStage::kSimplifyWorklist;
+ simplify_worklist_.push_back(node);
+ } else {
+ // Freeze Worklist.
+ node->stage = NodeStage::kFreezeWorklist;
+ freeze_worklist_.push_back(node);
+ }
+ } else {
+ // Spill worklist.
+ node->stage = NodeStage::kSpillWorklist;
+ spill_worklist_.push(node);
+ }
+ }
+
+ // Prune graph.
+ // Note that we do not remove a node from its current worklist if it moves to another, so it may
+ // be in multiple worklists at once; the node's `stage` says which worklist it is really in.
+ while (true) {
+ if (!simplify_worklist_.empty()) {
+ // Prune low-degree nodes.
+ // TODO: pop_back() should work as well, but it didn't; we get a
+ // failed check while pruning. We should look into this.
+ InterferenceNode* node = simplify_worklist_.front();
+ simplify_worklist_.pop_front();
+ DCHECK_EQ(node->stage, NodeStage::kSimplifyWorklist) << "Cannot move from simplify list";
+ DCHECK_LT(node->GetOutDegree(), num_regs_) << "Nodes in simplify list should be low degree";
+ DCHECK(!node->IsMoveRelated()) << "Nodes in simplify list should not be move related";
+ PruneNode(node);
+ } else if (!coalesce_worklist_.empty()) {
+ // Coalesce.
+ CoalesceOpportunity* opportunity = coalesce_worklist_.top();
+ coalesce_worklist_.pop();
+ if (opportunity->stage == CoalesceStage::kWorklist) {
+ Coalesce(opportunity);
+ }
+ } else if (!freeze_worklist_.empty()) {
+ // Freeze moves and prune a low-degree move-related node.
+ InterferenceNode* node = freeze_worklist_.front();
+ freeze_worklist_.pop_front();
+ if (node->stage == NodeStage::kFreezeWorklist) {
+ DCHECK_LT(node->GetOutDegree(), num_regs_) << "Nodes in freeze list should be low degree";
+ DCHECK(node->IsMoveRelated()) << "Nodes in freeze list should be move related";
+ FreezeMoves(node);
+ PruneNode(node);
+ }
+ } else if (!spill_worklist_.empty()) {
+ // We spill the lowest-priority node, because pruning a node earlier
+ // gives it a higher chance of being spilled.
+ InterferenceNode* node = spill_worklist_.top();
+ spill_worklist_.pop();
+ if (node->stage == NodeStage::kSpillWorklist) {
+ DCHECK_GE(node->GetOutDegree(), num_regs_) << "Nodes in spill list should be high degree";
+ FreezeMoves(node);
+ PruneNode(node);
+ }
+ } else {
+ // Pruning complete.
+ break;
+ }
+ }
+ DCHECK_EQ(prunable_nodes_.size(), pruned_nodes_.size());
+}
+
+void ColoringIteration::EnableCoalesceOpportunities(InterferenceNode* node) {
+ for (CoalesceOpportunity* opportunity : node->GetCoalesceOpportunities()) {
+ if (opportunity->stage == CoalesceStage::kActive) {
+ opportunity->stage = CoalesceStage::kWorklist;
+ coalesce_worklist_.push(opportunity);
+ }
+ }
+}
+
+void ColoringIteration::PruneNode(InterferenceNode* node) {
+ DCHECK_NE(node->stage, NodeStage::kPruned);
+ DCHECK(!node->IsPrecolored());
+ node->stage = NodeStage::kPruned;
+ pruned_nodes_.push(node);
+
+ for (InterferenceNode* adj : node->GetAdjacentNodes()) {
+ DCHECK(!adj->GetInterval()->IsSlowPathSafepoint())
+ << "Nodes should never interfere with synthesized safepoint nodes";
+ DCHECK_NE(adj->stage, NodeStage::kPruned) << "Should be no interferences with pruned nodes";
+
+ if (adj->IsPrecolored()) {
+ // No effect on pre-colored nodes; they're never pruned.
+ } else {
+ // Remove the interference.
+ bool was_high_degree = IsHighDegreeNode(adj, num_regs_);
+ DCHECK(adj->ContainsInterference(node))
+ << "Missing reflexive interference from non-fixed node";
+ adj->RemoveInterference(node);
+
+ // Handle transitions from high degree to low degree.
+ if (was_high_degree && IsLowDegreeNode(adj, num_regs_)) {
+ EnableCoalesceOpportunities(adj);
+ for (InterferenceNode* adj_adj : adj->GetAdjacentNodes()) {
+ EnableCoalesceOpportunities(adj_adj);
+ }
+
+ DCHECK_EQ(adj->stage, NodeStage::kSpillWorklist);
+ if (adj->IsMoveRelated()) {
+ adj->stage = NodeStage::kFreezeWorklist;
+ freeze_worklist_.push_back(adj);
+ } else {
+ adj->stage = NodeStage::kSimplifyWorklist;
+ simplify_worklist_.push_back(adj);
+ }
+ }
+ }
+ }
+}
+
+void ColoringIteration::CheckTransitionFromFreezeWorklist(InterferenceNode* node) {
+ if (IsLowDegreeNode(node, num_regs_) && !node->IsMoveRelated()) {
+ DCHECK_EQ(node->stage, NodeStage::kFreezeWorklist);
+ node->stage = NodeStage::kSimplifyWorklist;
+ simplify_worklist_.push_back(node);
+ }
+}
+
+void ColoringIteration::FreezeMoves(InterferenceNode* node) {
+ for (CoalesceOpportunity* opportunity : node->GetCoalesceOpportunities()) {
+ if (opportunity->stage == CoalesceStage::kDefunct) {
+ // Constrained moves should remain constrained, since they will not be considered
+ // during last-chance coalescing.
+ } else {
+ opportunity->stage = CoalesceStage::kInactive;
+ }
+ InterferenceNode* other = opportunity->node_a->GetAlias() == node
+ ? opportunity->node_b->GetAlias()
+ : opportunity->node_a->GetAlias();
+ if (other != node && other->stage == NodeStage::kFreezeWorklist) {
+ DCHECK(IsLowDegreeNode(node, num_regs_));
+ CheckTransitionFromFreezeWorklist(other);
+ }
+ }
+}
+
+bool ColoringIteration::PrecoloredHeuristic(InterferenceNode* from,
+ InterferenceNode* into) {
+ if (!into->IsPrecolored()) {
+ // The uncolored heuristic will cover this case.
+ return false;
+ }
+ if (from->IsPair() || into->IsPair()) {
+ // TODO: Merging from a pair node is currently not supported, since fixed pair nodes
+ // are currently represented as two single fixed nodes in the graph, and `into` is
+ // only one of them. (We may lose the implicit connections to the second one in a merge.)
+ return false;
+ }
+
+ // If all adjacent nodes of `from` are "ok", then we can conservatively merge with `into`.
+ // Reasons an adjacent node `adj` can be "ok":
+ // (1) If `adj` is low degree, interference with `into` will not affect its existing
+ // colorable guarantee. (Notice that coalescing cannot increase its degree.)
+ // (2) If `adj` is pre-colored, it already interferes with `into`. See (3).
+ // (3) If there's already an interference with `into`, coalescing will not add interferences.
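+ // (This mirrors the conservative "George" coalescing test for pre-colored nodes described in
+ // the Appel reference cited in register_allocator_graph_color.h.)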
+ for (InterferenceNode* adj : from->GetAdjacentNodes()) {
+ if (IsLowDegreeNode(adj, num_regs_) || adj->IsPrecolored() || adj->ContainsInterference(into)) {
+ // Ok.
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool ColoringIteration::UncoloredHeuristic(InterferenceNode* from,
+ InterferenceNode* into) {
+ if (into->IsPrecolored()) {
+ // The pre-colored heuristic will handle this case.
+ return false;
+ }
+
+ // Arbitrary cap to improve compile time. Tests show that this has negligible effect
+ // on generated code.
+ if (from->GetOutDegree() + into->GetOutDegree() > 2 * num_regs_) {
+ return false;
+ }
+
+ // It's safe to coalesce two nodes if the resulting node has fewer than `num_regs` neighbors
+ // of high degree. (Low degree neighbors can be ignored, because they will eventually be
+ // pruned from the interference graph in the simplify stage.)
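+ // (This is essentially the conservative "Briggs" test. Worked example with hypothetical
+ // numbers: if num_regs_ = 4 and the merged node would keep high-degree neighbors of total
+ // edge weight 3, then 3 < 4 and coalescing is allowed; with weight 4 or more it is rejected.)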
+ size_t high_degree_interferences = 0;
+ for (InterferenceNode* adj : from->GetAdjacentNodes()) {
+ if (IsHighDegreeNode(adj, num_regs_)) {
+ high_degree_interferences += from->EdgeWeightWith(adj);
+ }
+ }
+ for (InterferenceNode* adj : into->GetAdjacentNodes()) {
+ if (IsHighDegreeNode(adj, num_regs_)) {
+ if (from->ContainsInterference(adj)) {
+ // We've already counted this adjacent node.
+ // Furthermore, its degree will decrease if coalescing succeeds. Thus, it's possible that
+ // we should not have counted it at all. (This extends the textbook Briggs coalescing test,
+ // but remains conservative.)
+ if (adj->GetOutDegree() - into->EdgeWeightWith(adj) < num_regs_) {
+ high_degree_interferences -= from->EdgeWeightWith(adj);
+ }
+ } else {
+ high_degree_interferences += into->EdgeWeightWith(adj);
+ }
+ }
+ }
+
+ return high_degree_interferences < num_regs_;
+}
+
+void ColoringIteration::Combine(InterferenceNode* from,
+ InterferenceNode* into) {
+ from->SetAlias(into);
+
+ // Add interferences.
+ for (InterferenceNode* adj : from->GetAdjacentNodes()) {
+ bool was_low_degree = IsLowDegreeNode(adj, num_regs_);
+ AddPotentialInterference(adj, into, /*guaranteed_not_interfering_yet*/ false);
+ if (was_low_degree && IsHighDegreeNode(adj, num_regs_)) {
+ // This is a (temporary) transition to a high degree node. Its degree will decrease again
+ // when we prune `from`, but it's best to be consistent about the current worklist.
+ adj->stage = NodeStage::kSpillWorklist;
+ spill_worklist_.push(adj);
+ }
+ }
+
+ // Add coalesce opportunities.
+ for (CoalesceOpportunity* opportunity : from->GetCoalesceOpportunities()) {
+ if (opportunity->stage != CoalesceStage::kDefunct) {
+ into->AddCoalesceOpportunity(opportunity);
+ }
+ }
+ EnableCoalesceOpportunities(from);
+
+ // Prune and update worklists.
+ PruneNode(from);
+ if (IsLowDegreeNode(into, num_regs_)) {
+ // Coalesce(...) takes care of checking for a transition to the simplify worklist.
+ DCHECK_EQ(into->stage, NodeStage::kFreezeWorklist);
+ } else if (into->stage == NodeStage::kFreezeWorklist) {
+ // This is a transition to a high degree node.
+ into->stage = NodeStage::kSpillWorklist;
+ spill_worklist_.push(into);
+ } else {
+ DCHECK(into->stage == NodeStage::kSpillWorklist || into->stage == NodeStage::kPrecolored);
+ }
+}
+
+void ColoringIteration::Coalesce(CoalesceOpportunity* opportunity) {
+ InterferenceNode* from = opportunity->node_a->GetAlias();
+ InterferenceNode* into = opportunity->node_b->GetAlias();
+ DCHECK_NE(from->stage, NodeStage::kPruned);
+ DCHECK_NE(into->stage, NodeStage::kPruned);
+
+ if (from->IsPrecolored()) {
+ // If we have one pre-colored node, make sure it's the `into` node.
+ std::swap(from, into);
+ }
+
+ if (from == into) {
+ // These nodes have already been coalesced.
+ opportunity->stage = CoalesceStage::kDefunct;
+ CheckTransitionFromFreezeWorklist(from);
+ } else if (from->IsPrecolored() || from->ContainsInterference(into)) {
+ // These nodes interfere.
+ opportunity->stage = CoalesceStage::kDefunct;
+ CheckTransitionFromFreezeWorklist(from);
+ CheckTransitionFromFreezeWorklist(into);
+ } else if (PrecoloredHeuristic(from, into)
+ || UncoloredHeuristic(from, into)) {
+ // We can coalesce these nodes.
+ opportunity->stage = CoalesceStage::kDefunct;
+ Combine(from, into);
+ CheckTransitionFromFreezeWorklist(into);
+ } else {
+ // We cannot coalesce, but we may be able to later.
+ opportunity->stage = CoalesceStage::kActive;
+ }
+}
+
+// Build a mask with a bit set for each register assigned to some
+// interval in `intervals`.
+template <typename Container>
+static std::bitset<kMaxNumRegs> BuildConflictMask(Container& intervals) {
+ std::bitset<kMaxNumRegs> conflict_mask;
+ for (InterferenceNode* adjacent : intervals) {
+ LiveInterval* conflicting = adjacent->GetInterval();
+ if (conflicting->HasRegister()) {
+ conflict_mask.set(conflicting->GetRegister());
+ if (conflicting->HasHighInterval()) {
+ DCHECK(conflicting->GetHighInterval()->HasRegister());
+ conflict_mask.set(conflicting->GetHighInterval()->GetRegister());
+ }
+ } else {
+ DCHECK(!conflicting->HasHighInterval()
+ || !conflicting->GetHighInterval()->HasRegister());
+ }
+ }
+ return conflict_mask;
+}
+
+bool RegisterAllocatorGraphColor::IsCallerSave(size_t reg, bool processing_core_regs) {
+ return processing_core_regs
+ ? !codegen_->IsCoreCalleeSaveRegister(reg)
+ : !codegen_->IsFloatingPointCalleeSaveRegister(reg);
+}
+
+static bool RegisterIsAligned(size_t reg) {
+ return reg % 2 == 0;
+}
+
+static size_t FindFirstZeroInConflictMask(std::bitset<kMaxNumRegs> conflict_mask) {
+ // We use CTZ (count trailing zeros) to quickly find the lowest 0 bit.
+ // Note that CTZ is undefined if all bits are 0, so we special-case it.
+ return conflict_mask.all() ? conflict_mask.size() : CTZ(~conflict_mask.to_ulong());
+}
+
+bool ColoringIteration::ColorInterferenceGraph() {
+ DCHECK_LE(num_regs_, kMaxNumRegs) << "kMaxNumRegs is too small";
+ ArenaVector<LiveInterval*> colored_intervals(
+ allocator_->Adapter(kArenaAllocRegisterAllocator));
+ bool successful = true;
+
+ while (!pruned_nodes_.empty()) {
+ InterferenceNode* node = pruned_nodes_.top();
+ pruned_nodes_.pop();
+ LiveInterval* interval = node->GetInterval();
+ size_t reg = 0;
+
+ InterferenceNode* alias = node->GetAlias();
+ if (alias != node) {
+ // This node was coalesced with another.
+ LiveInterval* alias_interval = alias->GetInterval();
+ if (alias_interval->HasRegister()) {
+ reg = alias_interval->GetRegister();
+ DCHECK(!BuildConflictMask(node->GetAdjacentNodes())[reg])
+ << "This node conflicts with the register it was coalesced with";
+ } else {
+ DCHECK(false) << node->GetOutDegree() << " " << alias->GetOutDegree() << " "
+ << "Move coalescing was not conservative, causing a node to be coalesced "
+ << "with another node that could not be colored";
+ if (interval->RequiresRegister()) {
+ successful = false;
+ }
+ }
+ } else {
+ // Search for free register(s).
+ std::bitset<kMaxNumRegs> conflict_mask = BuildConflictMask(node->GetAdjacentNodes());
+ if (interval->HasHighInterval()) {
+ // Note that the graph coloring allocator assumes that pair intervals are aligned here,
+ // excluding pre-colored pair intervals (which can currently be unaligned on x86). If we
+ // change the alignment requirements here, we will have to update the algorithm (e.g.,
+ // be more conservative about the weight of edges adjacent to pair nodes.)
+ while (reg < num_regs_ - 1 && (conflict_mask[reg] || conflict_mask[reg + 1])) {
+ reg += 2;
+ }
+
+ // Try to use a caller-save register first.
+ for (size_t i = 0; i < num_regs_ - 1; i += 2) {
+ bool low_caller_save = register_allocator_->IsCallerSave(i, processing_core_regs_);
+ bool high_caller_save = register_allocator_->IsCallerSave(i + 1, processing_core_regs_);
+ if (!conflict_mask[i] && !conflict_mask[i + 1]) {
+ if (low_caller_save && high_caller_save) {
+ reg = i;
+ break;
+ } else if (low_caller_save || high_caller_save) {
+ reg = i;
+ // Keep looking to try to get both parts in caller-save registers.
+ }
+ }
+ }
+ } else {
+ // Not a pair interval.
+ reg = FindFirstZeroInConflictMask(conflict_mask);
+
+ // Try to use caller-save registers first.
+ for (size_t i = 0; i < num_regs_; ++i) {
+ if (!conflict_mask[i] && register_allocator_->IsCallerSave(i, processing_core_regs_)) {
+ reg = i;
+ break;
+ }
+ }
+ }
+
+ // Last-chance coalescing.
+ for (CoalesceOpportunity* opportunity : node->GetCoalesceOpportunities()) {
+ if (opportunity->stage == CoalesceStage::kDefunct) {
+ continue;
+ }
+ LiveInterval* other_interval = opportunity->node_a->GetAlias() == node
+ ? opportunity->node_b->GetAlias()->GetInterval()
+ : opportunity->node_a->GetAlias()->GetInterval();
+ if (other_interval->HasRegister()) {
+ size_t coalesce_register = other_interval->GetRegister();
+ if (interval->HasHighInterval()) {
+ if (!conflict_mask[coalesce_register] &&
+ !conflict_mask[coalesce_register + 1] &&
+ RegisterIsAligned(coalesce_register)) {
+ reg = coalesce_register;
+ break;
+ }
+ } else if (!conflict_mask[coalesce_register]) {
+ reg = coalesce_register;
+ break;
+ }
+ }
+ }
+ }
+
+ if (reg < (interval->HasHighInterval() ? num_regs_ - 1 : num_regs_)) {
+ // Assign register.
+ DCHECK(!interval->HasRegister());
+ interval->SetRegister(reg);
+ colored_intervals.push_back(interval);
+ if (interval->HasHighInterval()) {
+ DCHECK(!interval->GetHighInterval()->HasRegister());
+ interval->GetHighInterval()->SetRegister(reg + 1);
+ colored_intervals.push_back(interval->GetHighInterval());
+ }
+ } else if (interval->RequiresRegister()) {
+ // The interference graph is too dense to color. Make it sparser by
+ // splitting this live interval.
+ successful = false;
+ register_allocator_->SplitAtRegisterUses(interval);
+ // We continue coloring, because there may be additional intervals that cannot
+ // be colored, and that we should split.
+ } else {
+ // Spill.
+ register_allocator_->AllocateSpillSlotFor(interval);
+ }
+ }
+
+ // If unsuccessful, reset all register assignments.
+ if (!successful) {
+ for (LiveInterval* interval : colored_intervals) {
+ interval->ClearRegister();
+ }
+ }
+
+ return successful;
+}
+
+size_t RegisterAllocatorGraphColor::ComputeMaxSafepointLiveRegisters(
+ const ArenaVector<InterferenceNode*>& safepoints) {
+ size_t max_safepoint_live_regs = 0;
+ for (InterferenceNode* safepoint : safepoints) {
+ DCHECK(safepoint->GetInterval()->IsSlowPathSafepoint());
+ std::bitset<kMaxNumRegs> conflict_mask = BuildConflictMask(safepoint->GetAdjacentNodes());
+ size_t live_regs = conflict_mask.count();
+ max_safepoint_live_regs = std::max(max_safepoint_live_regs, live_regs);
+ }
+ return max_safepoint_live_regs;
+}
+
+void RegisterAllocatorGraphColor::AllocateSpillSlotFor(LiveInterval* interval) {
+ LiveInterval* parent = interval->GetParent();
+ HInstruction* defined_by = parent->GetDefinedBy();
+ if (parent->HasSpillSlot()) {
+ // We already have a spill slot for this value that we can reuse.
+ } else if (defined_by->IsParameterValue()) {
+ // Parameters already have a stack slot.
+ parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
+ } else if (defined_by->IsCurrentMethod()) {
+ // The current method is always at spill slot 0.
+ parent->SetSpillSlot(0);
+ } else if (defined_by->IsConstant()) {
+ // Constants don't need a spill slot.
+ } else {
+ // Allocate a spill slot based on type.
+ size_t* spill_slot_counter;
+ switch (interval->GetType()) {
+ case Primitive::kPrimDouble:
+ spill_slot_counter = &double_spill_slot_counter_;
+ break;
+ case Primitive::kPrimLong:
+ spill_slot_counter = &long_spill_slot_counter_;
+ break;
+ case Primitive::kPrimFloat:
+ spill_slot_counter = &float_spill_slot_counter_;
+ break;
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimShort:
+ spill_slot_counter = &int_spill_slot_counter_;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
+ UNREACHABLE();
+ }
+
+ parent->SetSpillSlot(*spill_slot_counter);
+ *spill_slot_counter += parent->NeedsTwoSpillSlots() ? 2 : 1;
+ // TODO: Could color stack slots if we wanted to, even if
+ // it's just a trivial coloring. See the linear scan implementation,
+ // which simply reuses spill slots for values whose live intervals
+ // have already ended.
+ }
+}
+
+} // namespace art
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
new file mode 100644
index 0000000000..9dddcea685
--- /dev/null
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
+#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
+
+#include "arch/instruction_set.h"
+#include "base/arena_containers.h"
+#include "base/arena_object.h"
+#include "base/macros.h"
+#include "primitive.h"
+#include "register_allocator.h"
+
+namespace art {
+
+class CodeGenerator;
+class HBasicBlock;
+class HGraph;
+class HInstruction;
+class HParallelMove;
+class Location;
+class SsaLivenessAnalysis;
+class InterferenceNode;
+struct CoalesceOpportunity;
+enum class CoalesceKind;
+
+/**
+ * A graph coloring register allocator.
+ *
+ * The algorithm proceeds as follows:
+ * (1) Build an interference graph, where nodes represent live intervals, and edges represent
+ * interferences between two intervals. Coloring this graph with k colors is isomorphic to
+ * finding a valid register assignment with k registers.
+ * (2) To color the graph, first prune all nodes with degree less than k, since these nodes are
+ * guaranteed a color. (No matter how we color their adjacent nodes, we can give them a
+ * different color.) As we prune nodes from the graph, more nodes may drop below degree k,
+ * enabling further pruning. The key is to maintain the pruning order in a stack, so that we
+ * can color the nodes in the reverse order.
+ * When there are no more nodes with degree less than k, we start pruning alternate nodes based
+ * on heuristics. Since these nodes are not guaranteed a color, we are careful to
+ * prioritize nodes that require a register. We also prioritize short intervals, because
+ * short intervals cannot be split very much if coloring fails (see below). "Prioritizing"
+ * a node amounts to pruning it later, since it will have fewer interferences if we prune other
+ * nodes first.
+ * (3) We color nodes in the reverse order in which we pruned them. If we cannot assign
+ * a node a color, we do one of two things:
+ * - If the node requires a register, we consider the current coloring attempt a failure.
+ * However, we split the node's live interval in order to make the interference graph
+ * sparser, so that future coloring attempts may succeed.
+ * - If the node does not require a register, we simply assign it a location on the stack.
+ *
+ * If iterative move coalescing is enabled, the algorithm also attempts to conservatively
+ * combine nodes in the graph that would prefer to have the same color. (For example, the output
+ * of a phi instruction would prefer to have the same register as at least one of its inputs.)
+ * There are several additional steps involved with this:
+ * - We look for coalesce opportunities by examining each live interval, a step similar to that
+ * used by linear scan when looking for register hints.
+ * - When pruning the graph, we maintain a worklist of coalesce opportunities, as well as a worklist
+ * of low degree nodes that have associated coalesce opportunities. Only when we run out of
+ * coalesce opportunities do we start pruning coalesce-associated nodes.
+ * - When pruning a node, if any nodes transition from high degree to low degree, we add
+ * associated coalesce opportunities to the worklist, since these opportunities may now succeed.
+ * - Whether two nodes can be combined is decided by two different heuristics--one used when
+ * coalescing uncolored nodes, and one used for coalescing an uncolored node with a colored node.
+ * It is vital that we only combine two nodes if the node that remains is guaranteed to receive
+ * a color. This is because additional spilling is more costly than failing to coalesce.
+ * - Even if nodes are not coalesced while pruning, we keep the coalesce opportunities around
+ * to be used as last-chance register hints when coloring. If nothing else, we try to use
+ * caller-save registers before callee-save registers.
+ *
+ * A good reference for graph coloring register allocation is
+ * "Modern Compiler Implementation in Java" (Andrew W. Appel, 2nd Edition).
+ */
+class RegisterAllocatorGraphColor : public RegisterAllocator {
+ public:
+ RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ bool iterative_move_coalescing = true);
+ ~RegisterAllocatorGraphColor() OVERRIDE {}
+
+ void AllocateRegisters() OVERRIDE;
+
+ bool Validate(bool log_fatal_on_failure);
+
+ private:
+ // Collect all intervals and prepare for register allocation.
+ void ProcessInstructions();
+ void ProcessInstruction(HInstruction* instruction);
+
+ // If any inputs require specific registers, block those registers
+ // at the position of this instruction.
+ void CheckForFixedInputs(HInstruction* instruction);
+
+ // If the output of an instruction requires a specific register, split
+ // the interval and assign the register to the first part.
+ void CheckForFixedOutput(HInstruction* instruction);
+
+ // Add all applicable safepoints to a live interval.
+ // Currently depends on instruction processing order.
+ void AddSafepointsFor(HInstruction* instruction);
+
+ // Collect all live intervals associated with the temporary locations
+ // needed by an instruction.
+ void CheckForTempLiveIntervals(HInstruction* instruction);
+
+ // If a safepoint is needed, add a synthesized interval to later record
+ // the number of live registers at this point.
+ void CheckForSafepoint(HInstruction* instruction);
+
+ // Split an interval, but only if `position` is inside of `interval`.
+ // Return either the new interval, or the original interval if not split.
+ static LiveInterval* TrySplit(LiveInterval* interval, size_t position);
+
+ // To ensure every graph can be colored, split live intervals
+ // at their register defs and uses. This creates short intervals with low
+ // degree in the interference graph, which are prioritized during graph
+ // coloring.
+ void SplitAtRegisterUses(LiveInterval* interval);
+
+ // If the given instruction is a catch phi, give it a spill slot.
+ void AllocateSpillSlotForCatchPhi(HInstruction* instruction);
+
+ // Ensure that the given register cannot be allocated for a given range.
+ void BlockRegister(Location location, size_t start, size_t end);
+ void BlockRegisters(size_t start, size_t end, bool caller_save_only = false);
+
+ bool IsCallerSave(size_t reg, bool processing_core_regs);
+
+ // Return the maximum number of registers live at safepoints,
+ // based on the outgoing interference edges of safepoint nodes.
+ size_t ComputeMaxSafepointLiveRegisters(const ArenaVector<InterferenceNode*>& safepoints);
+
+ // Ensure the given interval has a stack location: allocate a spill slot for its parent unless
+ // it already has one (e.g., parameters, the current method) or does not need one (constants).
+ void AllocateSpillSlotFor(LiveInterval* interval);
+
+ // Whether iterative move coalescing should be performed. Iterative move coalescing
+ // improves code quality, but increases compile time.
+ const bool iterative_move_coalescing_;
+
+ // Live intervals, split by kind (core and floating point).
+ // These should not contain high intervals, as those are represented by
+ // the corresponding low interval throughout register allocation.
+ ArenaVector<LiveInterval*> core_intervals_;
+ ArenaVector<LiveInterval*> fp_intervals_;
+
+ // Intervals for temporaries, saved for special handling in the resolution phase.
+ ArenaVector<LiveInterval*> temp_intervals_;
+
+ // Safepoints, saved for special handling while processing instructions.
+ ArenaVector<HInstruction*> safepoints_;
+
+ // Interference nodes representing specific registers. These are "pre-colored" nodes
+ // in the interference graph.
+ ArenaVector<InterferenceNode*> physical_core_nodes_;
+ ArenaVector<InterferenceNode*> physical_fp_nodes_;
+
+ // Allocated stack slot counters.
+ size_t int_spill_slot_counter_;
+ size_t double_spill_slot_counter_;
+ size_t float_spill_slot_counter_;
+ size_t long_spill_slot_counter_;
+ size_t catch_phi_spill_slot_counter_;
+
+ // Number of stack slots needed for the pointer to the current method.
+ // This is 1 for 32-bit architectures, and 2 for 64-bit architectures.
+ const size_t reserved_art_method_slots_;
+
+ // Number of stack slots needed for outgoing arguments.
+ const size_t reserved_out_slots_;
+
+ // The number of core and floating point registers that are blocked globally, such as the stack pointer.
+ size_t number_of_globally_blocked_core_regs_;
+ size_t number_of_globally_blocked_fp_regs_;
+
+ // The maximum number of registers live at safepoints. Needed by the code generator.
+ size_t max_safepoint_live_core_regs_;
+ size_t max_safepoint_live_fp_regs_;
+
+ friend class ColoringIteration;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorGraphColor);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
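Note: the graph coloring allocator is not constructed directly by clients; it is selected through the RegisterAllocator factory. The snippet below is a minimal sketch of that flow, mirroring the Create() calls and Strategy values exercised in the test diff further down; the local names (`allocator`, `codegen`, `liveness`) are assumptions about the caller's setup, not part of this patch.

  // Sketch only: select the graph coloring strategy through the factory.
  RegisterAllocator* register_allocator =
      RegisterAllocator::Create(&allocator, &codegen, liveness,
                                RegisterAllocator::Strategy::kRegisterAllocatorGraphColor);
  register_allocator->AllocateRegisters();
  // With log_fatal_on_failure == false, failures are reported through the return value.
  CHECK(register_allocator->Validate(/* log_fatal_on_failure */ false));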
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index b6e4f92e42..1a643a0d1a 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -43,6 +43,7 @@ class RegisterAllocatorLinearScan : public RegisterAllocator {
RegisterAllocatorLinearScan(ArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
+ ~RegisterAllocatorLinearScan() OVERRIDE {}
void AllocateRegisters() OVERRIDE;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index cbb7b2f1c5..55ea99e592 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -31,12 +31,29 @@
namespace art {
+using Strategy = RegisterAllocator::Strategy;
+
// Note: the register allocator tests rely on the fact that constants have live
// intervals and registers get allocated to them.
-class RegisterAllocatorTest : public CommonCompilerTest {};
+class RegisterAllocatorTest : public CommonCompilerTest {
+ protected:
+ // These functions need to access private members of LocationSummary, so we declare them
+ // as members of RegisterAllocatorTest, which we make a friend class of LocationSummary.
+ static void SameAsFirstInputHint(Strategy strategy);
+ static void ExpectedInRegisterHint(Strategy strategy);
+};
+
+// This macro should list every register allocation strategy that needs to be tested.
+#define TEST_ALL_STRATEGIES(test_name)\
+TEST_F(RegisterAllocatorTest, test_name##_LinearScan) {\
+ test_name(Strategy::kRegisterAllocatorLinearScan);\
+}\
+TEST_F(RegisterAllocatorTest, test_name##_GraphColor) {\
+ test_name(Strategy::kRegisterAllocatorGraphColor);\
+}
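For readability, an invocation such as TEST_ALL_STRATEGIES(CFG1) below expands to a pair of gtest cases, roughly:

  TEST_F(RegisterAllocatorTest, CFG1_LinearScan) {
    CFG1(Strategy::kRegisterAllocatorLinearScan);
  }
  TEST_F(RegisterAllocatorTest, CFG1_GraphColor) {
    CFG1(Strategy::kRegisterAllocatorGraphColor);
  }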
-static bool Check(const uint16_t* data) {
+static bool Check(const uint16_t* data, Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HGraph* graph = CreateCFG(&allocator, data);
@@ -45,7 +62,8 @@ static bool Check(const uint16_t* data) {
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
return register_allocator->Validate(false);
}
@@ -143,7 +161,7 @@ TEST_F(RegisterAllocatorTest, ValidateIntervals) {
}
}
-TEST_F(RegisterAllocatorTest, CFG1) {
+static void CFG1(Strategy strategy) {
/*
* Test the following snippet:
* return 0;
@@ -160,10 +178,12 @@ TEST_F(RegisterAllocatorTest, CFG1) {
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ASSERT_TRUE(Check(data));
+ ASSERT_TRUE(Check(data, strategy));
}
-TEST_F(RegisterAllocatorTest, Loop1) {
+TEST_ALL_STRATEGIES(CFG1);
+
+static void Loop1(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -199,10 +219,12 @@ TEST_F(RegisterAllocatorTest, Loop1) {
Instruction::CONST_4 | 5 << 12 | 1 << 8,
Instruction::RETURN | 1 << 8);
- ASSERT_TRUE(Check(data));
+ ASSERT_TRUE(Check(data, strategy));
}
-TEST_F(RegisterAllocatorTest, Loop2) {
+TEST_ALL_STRATEGIES(Loop1);
+
+static void Loop2(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -248,10 +270,12 @@ TEST_F(RegisterAllocatorTest, Loop2) {
Instruction::ADD_INT, 1 << 8 | 0,
Instruction::RETURN | 1 << 8);
- ASSERT_TRUE(Check(data));
+ ASSERT_TRUE(Check(data, strategy));
}
-TEST_F(RegisterAllocatorTest, Loop3) {
+TEST_ALL_STRATEGIES(Loop2);
+
+static void Loop3(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0
@@ -296,7 +320,8 @@ TEST_F(RegisterAllocatorTest, Loop3) {
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
@@ -314,6 +339,8 @@ TEST_F(RegisterAllocatorTest, Loop3) {
ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
}
+TEST_ALL_STRATEGIES(Loop3);
+
TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -354,7 +381,7 @@ TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
}
-TEST_F(RegisterAllocatorTest, DeadPhi) {
+static void DeadPhi(Strategy strategy) {
/* Test for a dead loop phi taking as back-edge input a phi that also has
* this loop phi as input. Walking backwards in SsaDeadPhiElimination
* does not solve the problem because the loop phi will be visited last.
@@ -385,15 +412,19 @@ TEST_F(RegisterAllocatorTest, DeadPhi) {
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
}
+TEST_ALL_STRATEGIES(DeadPhi);
+
/**
* Test that the TryAllocateFreeReg method works in the presence of inactive intervals
* that share the same register. It should split the interval it is currently
* allocating for at the minimum lifetime position between the two inactive intervals.
+ * This test only applies to the linear scan allocator.
*/
TEST_F(RegisterAllocatorTest, FreeUntil) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
@@ -507,15 +538,15 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
graph->GetDexFile(),
dex_cache,
0);
-*input2 = new (allocator) HInstanceFieldGet(parameter,
- Primitive::kPrimInt,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- dex_cache,
- 0);
+ *input2 = new (allocator) HInstanceFieldGet(parameter,
+ Primitive::kPrimInt,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ dex_cache,
+ 0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
join->AddInstruction(new (allocator) HExit());
@@ -527,7 +558,7 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
return graph;
}
-TEST_F(RegisterAllocatorTest, PhiHint) {
+static void PhiHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HPhi *phi;
@@ -543,7 +574,7 @@ TEST_F(RegisterAllocatorTest, PhiHint) {
// Check that the register allocator is deterministic.
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0);
@@ -563,7 +594,7 @@ TEST_F(RegisterAllocatorTest, PhiHint) {
// the same register.
phi->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -583,7 +614,7 @@ TEST_F(RegisterAllocatorTest, PhiHint) {
// the same register.
input1->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -603,7 +634,7 @@ TEST_F(RegisterAllocatorTest, PhiHint) {
// the same register.
input2->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -612,6 +643,12 @@ TEST_F(RegisterAllocatorTest, PhiHint) {
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, PhiHint_LinearScan) {
+ PhiHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
HInstruction** field,
HInstruction** ret) {
@@ -650,7 +687,7 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
return graph;
}
-TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint) {
+void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *field, *ret;
@@ -664,7 +701,7 @@ TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint) {
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
@@ -684,13 +721,19 @@ TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint) {
ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2);
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint_LinearScan) {
+ ExpectedInRegisterHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
HInstruction** first_sub,
HInstruction** second_sub) {
@@ -720,7 +763,7 @@ static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
return graph;
}
-TEST_F(RegisterAllocatorTest, SameAsFirstInputHint) {
+void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *first_sub, *second_sub;
@@ -734,7 +777,7 @@ TEST_F(RegisterAllocatorTest, SameAsFirstInputHint) {
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the registers are the same.
@@ -757,7 +800,7 @@ TEST_F(RegisterAllocatorTest, SameAsFirstInputHint) {
ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
@@ -765,6 +808,12 @@ TEST_F(RegisterAllocatorTest, SameAsFirstInputHint) {
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, SameAsFirstInputHint_LinearScan) {
+ SameAsFirstInputHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
static HGraph* BuildDiv(ArenaAllocator* allocator,
HInstruction** div) {
HGraph* graph = CreateGraph(allocator);
@@ -791,7 +840,7 @@ static HGraph* BuildDiv(ArenaAllocator* allocator,
return graph;
}
-TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
+static void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *div;
@@ -805,7 +854,7 @@ TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// div on x86 requires its first input in eax and the output to be the same as the first input.
@@ -813,9 +862,16 @@ TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint_LinearScan) {
+ ExpectedExactInRegisterAndSameOutputHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
// Test a bug in the register allocator, where allocating a blocked
// register would lead to spilling an inactive interval at the wrong
// position.
+// This test only applies to the linear scan allocator.
TEST_F(RegisterAllocatorTest, SpillInactive) {
ArenaPool pool;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 7af4302884..a01e107e02 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -368,6 +368,27 @@ bool SsaLivenessAnalysis::UpdateLiveIn(const HBasicBlock& block) {
return live_in->UnionIfNotIn(live_out, kill);
}
+void LiveInterval::DumpWithContext(std::ostream& stream,
+ const CodeGenerator& codegen) const {
+ Dump(stream);
+ if (IsFixed()) {
+ stream << ", register:" << GetRegister() << "(";
+ if (IsFloatingPoint()) {
+ codegen.DumpFloatingPointRegister(stream, GetRegister());
+ } else {
+ codegen.DumpCoreRegister(stream, GetRegister());
+ }
+ stream << ")";
+ } else {
+ stream << ", spill slot:" << GetSpillSlot();
+ }
+ stream << ", requires_register:" << (GetDefinedBy() != nullptr && RequiresRegister());
+ if (GetParent()->GetDefinedBy() != nullptr) {
+ stream << ", defined_by:" << GetParent()->GetDefinedBy()->GetKind();
+ stream << "(" << GetParent()->GetDefinedBy()->GetLifetimePosition() << ")";
+ }
+}
+
static int RegisterOrLowRegister(Location location) {
return location.IsPair() ? location.low() : location.reg();
}
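DumpWithContext() is a debugging aid; the fragment below is a minimal usage sketch, assuming an `interval` and a `codegen` are in scope (e.g. inside an allocation pass) and that <sstream> and ART's logging header are included.

  // Sketch only: dump an interval together with its assigned register or
  // spill slot and the instruction that defines it.
  std::ostringstream stream;
  interval->DumpWithContext(stream, codegen);
  LOG(INFO) << "Interval: " << stream.str();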
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index dc98864d9b..92788fe6b8 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -150,9 +150,7 @@ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
if (GetIsEnvironment()) return false;
if (IsSynthesized()) return false;
Location location = GetUser()->GetLocations()->InAt(GetInputIndex());
- return location.IsUnallocated()
- && (location.GetPolicy() == Location::kRequiresRegister
- || location.GetPolicy() == Location::kRequiresFpuRegister);
+ return location.IsUnallocated() && location.RequiresRegisterKind();
}
private:
@@ -481,6 +479,10 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
return last_range_->GetEnd();
}
+ size_t GetLength() const {
+ return GetEnd() - GetStart();
+ }
+
size_t FirstRegisterUseAfter(size_t position) const {
if (is_temp_) {
return position == GetStart() ? position : kNoLifetime;
@@ -504,10 +506,18 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
return kNoLifetime;
}
+ // Returns the position of the first register use for this live interval,
+ // including a register definition if applicable.
size_t FirstRegisterUse() const {
return FirstRegisterUseAfter(GetStart());
}
+ // Whether the interval requires a register rather than a stack location.
+ // If needed for performance, this could be cached.
+ bool RequiresRegister() const {
+ return !HasRegister() && FirstRegisterUse() != kNoLifetime;
+ }
+
size_t FirstUseAfter(size_t position) const {
if (is_temp_) {
return position == GetStart() ? position : kNoLifetime;
@@ -693,6 +703,10 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
stream << " is_high: " << IsHighInterval();
}
+ // Same as Dump, but adds context such as the instruction defining this interval, and
+ // the register currently assigned to this interval.
+ void DumpWithContext(std::ostream& stream, const CodeGenerator& codegen) const;
+
LiveInterval* GetNextSibling() const { return next_sibling_; }
LiveInterval* GetLastSibling() {
LiveInterval* result = this;
@@ -871,6 +885,33 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
range_search_start_ = first_range_;
}
+ bool DefinitionRequiresRegister() const {
+ DCHECK(IsParent());
+ LocationSummary* locations = defined_by_->GetLocations();
+ Location location = locations->Out();
+ // This interval is the first interval of the instruction. If the output
+ // of the instruction requires a register, then this definition requires
+ // a register and we return true.
+ if (location.IsUnallocated()) {
+ if ((location.GetPolicy() == Location::kRequiresRegister)
+ || (location.GetPolicy() == Location::kSameAsFirstInput
+ && (locations->InAt(0).IsRegister()
+ || locations->InAt(0).IsRegisterPair()
+ || locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
+ return true;
+ } else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
+ || (location.GetPolicy() == Location::kSameAsFirstInput
+ && (locations->InAt(0).IsFpuRegister()
+ || locations->InAt(0).IsFpuRegisterPair()
+ || locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister))) {
+ return true;
+ }
+ } else if (location.IsRegister() || location.IsRegisterPair()) {
+ return true;
+ }
+ return false;
+ }
+
private:
LiveInterval(ArenaAllocator* allocator,
Primitive::Type type,
@@ -925,33 +966,6 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
return range;
}
- bool DefinitionRequiresRegister() const {
- DCHECK(IsParent());
- LocationSummary* locations = defined_by_->GetLocations();
- Location location = locations->Out();
- // This interval is the first interval of the instruction. If the output
- // of the instruction requires a register, we return the position of that instruction
- // as the first register use.
- if (location.IsUnallocated()) {
- if ((location.GetPolicy() == Location::kRequiresRegister)
- || (location.GetPolicy() == Location::kSameAsFirstInput
- && (locations->InAt(0).IsRegister()
- || locations->InAt(0).IsRegisterPair()
- || locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
- return true;
- } else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
- || (location.GetPolicy() == Location::kSameAsFirstInput
- && (locations->InAt(0).IsFpuRegister()
- || locations->InAt(0).IsFpuRegisterPair()
- || locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister))) {
- return true;
- }
- } else if (location.IsRegister() || location.IsRegisterPair()) {
- return true;
- }
- return false;
- }
-
bool IsDefiningPosition(size_t position) const {
return IsParent() && (position == GetStart());
}
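DefinitionRequiresRegister() is promoted from the private section to the public API, making it available to register allocation code outside LiveInterval. The helper below is an illustrative sketch of how a caller might combine it with the existing use queries; the function name is an assumption, and FirstRegisterUseAfter() is assumed to report a use located exactly at the queried position.

  // Sketch only: does `interval` need a register at `position`?
  static bool NeedsRegisterAt(LiveInterval* interval, size_t position) {
    if (interval->IsParent() && position == interval->GetStart()) {
      // Output constraint at the defining position.
      return interval->DefinitionRequiresRegister();
    }
    // Input constraint at a use position.
    return interval->FirstRegisterUseAfter(position) == position;
  }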
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 195159f61b..8aa315a7e3 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -69,8 +69,8 @@ class MemoryOperandVisitor : public HGraphVisitor {
};
X86MemoryOperandGeneration::X86MemoryOperandGeneration(HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen)
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
: HOptimization(graph, kX86MemoryOperandGenerationPassName, stats),
do_implicit_null_checks_(codegen->GetCompilerOptions().GetImplicitNullChecks()) {
}
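The behavior of the pass is unchanged; only the constructor parameter order changes so that the code generator comes before the stats pointer, presumably to line up with the other codegen-aware HOptimization constructors. An illustrative call site after the reorder, where `arena`, `graph`, `codegen`, and `stats` are assumed to come from the compiler driver:

  x86::X86MemoryOperandGeneration* memory_gen =
      new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
  memory_gen->Run();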
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index 7e886819bb..5f15d9f1e6 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -28,8 +28,8 @@ namespace x86 {
class X86MemoryOperandGeneration : public HOptimization {
public:
X86MemoryOperandGeneration(HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen);
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats);
void Run() OVERRIDE;