Stop converting from Location to ManagedRegister.
The Location object is now the source of truth: it knows
which kind of register (core, register pair, or FPU) it refers to.
Change-Id: I62401343d7479ecfb24b5ed161ec7829cda5a0b1
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e6fe067..fe4c3c3 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -186,20 +186,16 @@
Location loc = locations->InAt(i);
HInstruction* input = instruction->InputAt(i);
if (loc.IsUnallocated()) {
- if (loc.GetPolicy() == Location::kRequiresRegister) {
- loc = Location::RegisterLocation(
- AllocateFreeRegister(input->GetType(), blocked_registers_));
- } else if (loc.GetPolicy() == Location::kRequiresFpuRegister) {
- loc = Location::FpuRegisterLocation(
- AllocateFreeRegister(input->GetType(), blocked_registers_));
+ if ((loc.GetPolicy() == Location::kRequiresRegister)
+ || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
+ loc = AllocateFreeRegister(input->GetType(), blocked_registers_);
} else {
DCHECK_EQ(loc.GetPolicy(), Location::kAny);
HLoadLocal* load = input->AsLoadLocal();
if (load != nullptr) {
loc = GetStackLocation(load);
} else {
- loc = Location::RegisterLocation(
- AllocateFreeRegister(input->GetType(), blocked_registers_));
+ loc = AllocateFreeRegister(input->GetType(), blocked_registers_);
}
}
locations->SetInAt(i, loc);
@@ -213,8 +209,7 @@
DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
// TODO: Adjust handling of temps. We currently consider temps to use
// core registers. They may also use floating point registers at some point.
- loc = Location::RegisterLocation(static_cast<ManagedRegister>(
- AllocateFreeRegister(Primitive::kPrimInt, blocked_registers_)));
+ loc = AllocateFreeRegister(Primitive::kPrimInt, blocked_registers_);
locations->SetTempAt(i, loc);
}
}
@@ -223,12 +218,8 @@
switch (result_location.GetPolicy()) {
case Location::kAny:
case Location::kRequiresRegister:
- result_location = Location::RegisterLocation(
- AllocateFreeRegister(instruction->GetType(), blocked_registers_));
- break;
case Location::kRequiresFpuRegister:
- result_location = Location::FpuRegisterLocation(
- AllocateFreeRegister(instruction->GetType(), blocked_registers_));
+ result_location = AllocateFreeRegister(instruction->GetType(), blocked_registers_);
break;
case Location::kSameAsFirstInput:
result_location = locations->InAt(0);
@@ -465,7 +456,7 @@
}
case Location::kRegister : {
- int id = location.reg().RegId();
+ int id = location.reg();
stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
if (current->GetType() == Primitive::kPrimDouble
|| current->GetType() == Primitive::kPrimLong) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index c7623fe..74ad8e9 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -168,8 +168,8 @@
void AllocateRegistersLocally(HInstruction* instruction) const;
// Backend specific implementation for allocating a register.
- virtual ManagedRegister AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const = 0;
+ virtual Location AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const = 0;
// Raw implementation of allocating a register: loops over blocked_registers to find
// the first available register.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a68837e..d555a0d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -29,21 +29,17 @@
namespace art {
-arm::ArmManagedRegister Location::AsArm() const {
- return reg().AsArm();
-}
-
namespace arm {
+static SRegister FromDToLowS(DRegister reg) {
+ return static_cast<SRegister>(reg * 2);
+}
+
static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
static constexpr int kCurrentMethodStackOffset = 0;
-static Location ArmCoreLocation(Register reg) {
- return Location::RegisterLocation(ArmManagedRegister::FromCoreRegister(reg));
-}
-
static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -144,8 +140,8 @@
CodeGeneratorARM* arm_codegen = reinterpret_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->Move32(ArmCoreLocation(calling_convention.GetRegisterAt(0)), index_location_);
- arm_codegen->Move32(ArmCoreLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowArrayBounds).Int32Value();
__ ldr(LR, Address(TR, offset));
__ blx(LR);
@@ -226,8 +222,8 @@
return blocked_registers + kNumberOfCoreRegisters + kNumberOfSRegisters;
}
-ManagedRegister CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const {
switch (type) {
case Primitive::kPrimLong: {
bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
@@ -247,7 +243,7 @@
blocked_register_pairs[i] = true;
}
}
- return pair;
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
}
case Primitive::kPrimByte:
@@ -266,20 +262,20 @@
blocked_register_pairs[i] = true;
}
}
- return ArmManagedRegister::FromCoreRegister(static_cast<Register>(reg));
+ return Location::RegisterLocation(reg);
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
int reg = AllocateFreeRegisterInternal(GetBlockedDRegisters(blocked_registers), kNumberOfDRegisters);
- return ArmManagedRegister::FromDRegister(static_cast<DRegister>(reg));
+ return Location::FpuRegisterLocation(reg);
}
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
- return ManagedRegister::NoRegister();
+ return Location();
}
void CodeGeneratorARM::SetupBlockedRegisters(bool* blocked_registers) const {
@@ -400,7 +396,7 @@
case Primitive::kPrimNot: {
uint32_t index = gp_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
- return ArmCoreLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -411,8 +407,9 @@
uint32_t index = gp_index_;
gp_index_ += 2;
if (index + 1 < calling_convention.GetNumberOfRegisters()) {
- return Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(
- calling_convention.GetRegisterPairAt(index)));
+ ArmManagedRegister pair = ArmManagedRegister::FromRegisterPair(
+ calling_convention.GetRegisterPairAt(index));
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
return Location::QuickParameter(index);
} else {
@@ -433,31 +430,26 @@
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ Mov(destination.AsArm().AsCoreRegister(), source.AsArm().AsCoreRegister());
+ __ Mov(destination.As<Register>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovrs(destination.AsArm().AsCoreRegister(),
- source.AsArm().AsOverlappingDRegisterLow());
+ __ vmovrs(destination.As<Register>(), FromDToLowS(source.As<DRegister>()));
} else {
- __ ldr(destination.AsArm().AsCoreRegister(), Address(SP, source.GetStackIndex()));
+ __ ldr(destination.As<Register>(), Address(SP, source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ vmovsr(destination.AsArm().AsOverlappingDRegisterLow(),
- source.AsArm().AsCoreRegister());
+ __ vmovsr(FromDToLowS(destination.As<DRegister>()), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovs(destination.AsArm().AsOverlappingDRegisterLow(),
- source.AsArm().AsOverlappingDRegisterLow());
+ __ vmovs(FromDToLowS(destination.As<DRegister>()), FromDToLowS(source.As<DRegister>()));
} else {
- __ vldrs(destination.AsArm().AsOverlappingDRegisterLow(),
- Address(SP, source.GetStackIndex()));
+ __ vldrs(FromDToLowS(destination.As<DRegister>()), Address(SP, source.GetStackIndex()));
}
} else {
DCHECK(destination.IsStackSlot());
if (source.IsRegister()) {
- __ str(source.AsArm().AsCoreRegister(), Address(SP, destination.GetStackIndex()));
+ __ str(source.As<Register>(), Address(SP, destination.GetStackIndex()));
} else if (source.IsFpuRegister()) {
- __ vstrs(source.AsArm().AsOverlappingDRegisterLow(),
- Address(SP, destination.GetStackIndex()));
+ __ vstrs(FromDToLowS(source.As<DRegister>()), Address(SP, destination.GetStackIndex()));
} else {
DCHECK(source.IsStackSlot());
__ ldr(IP, Address(SP, source.GetStackIndex()));
@@ -470,41 +462,42 @@
if (source.Equals(destination)) {
return;
}
- if (destination.IsRegister()) {
- if (source.IsRegister()) {
- __ Mov(destination.AsArm().AsRegisterPairLow(), source.AsArm().AsRegisterPairLow());
- __ Mov(destination.AsArm().AsRegisterPairHigh(), source.AsArm().AsRegisterPairHigh());
+ if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
uint32_t argument_index = source.GetQuickParameterIndex();
InvokeDexCallingConvention calling_convention;
- __ Mov(destination.AsArm().AsRegisterPairLow(),
+ __ Mov(destination.AsRegisterPairLow<Register>(),
calling_convention.GetRegisterAt(argument_index));
- __ ldr(destination.AsArm().AsRegisterPairHigh(),
+ __ ldr(destination.AsRegisterPairHigh<Register>(),
Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
- if (destination.AsArm().AsRegisterPair() == R1_R2) {
+ if (destination.AsRegisterPairLow<Register>() == R1) {
+ DCHECK_EQ(destination.AsRegisterPairHigh<Register>(), R2);
__ ldr(R1, Address(SP, source.GetStackIndex()));
__ ldr(R2, Address(SP, source.GetHighStackIndex(kArmWordSize)));
} else {
- __ LoadFromOffset(kLoadWordPair, destination.AsArm().AsRegisterPairLow(),
+ __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
SP, source.GetStackIndex());
}
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
- __ vldrd(destination.AsArm().AsDRegister(), Address(SP, source.GetStackIndex()));
+ __ vldrd(destination.As<DRegister>(), Address(SP, source.GetStackIndex()));
} else {
LOG(FATAL) << "Unimplemented";
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = destination.GetQuickParameterIndex();
- if (source.IsRegister()) {
- __ Mov(calling_convention.GetRegisterAt(argument_index), source.AsArm().AsRegisterPairLow());
- __ str(source.AsArm().AsRegisterPairHigh(),
+ if (source.IsRegisterPair()) {
+ __ Mov(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
+ __ str(source.AsRegisterPairHigh<Register>(),
Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1)));
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
@@ -516,12 +509,13 @@
}
} else {
DCHECK(destination.IsDoubleStackSlot());
- if (source.IsRegister()) {
- if (source.AsArm().AsRegisterPair() == R1_R2) {
+ if (source.IsRegisterPair()) {
+ if (source.AsRegisterPairLow<Register>() == R1) {
+ DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
__ str(R1, Address(SP, destination.GetStackIndex()));
__ str(R2, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
} else {
- __ StoreToOffset(kStoreWordPair, source.AsArm().AsRegisterPairLow(),
+ __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
SP, destination.GetStackIndex());
}
} else if (source.IsQuickParameter()) {
@@ -533,7 +527,7 @@
Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
__ str(R0, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
} else if (source.IsFpuRegister()) {
- __ vstrd(source.AsArm().AsDRegister(), Address(SP, destination.GetStackIndex()));
+ __ vstrd(source.As<DRegister>(), Address(SP, destination.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
__ ldr(IP, Address(SP, source.GetStackIndex()));
@@ -553,7 +547,7 @@
if (instruction->AsIntConstant() != nullptr) {
int32_t value = instruction->AsIntConstant()->GetValue();
if (location.IsRegister()) {
- __ LoadImmediate(location.AsArm().AsCoreRegister(), value);
+ __ LoadImmediate(location.As<Register>(), value);
} else {
DCHECK(location.IsStackSlot());
__ LoadImmediate(IP, value);
@@ -561,9 +555,9 @@
}
} else if (instruction->AsLongConstant() != nullptr) {
int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ LoadImmediate(location.AsArm().AsRegisterPairLow(), Low32Bits(value));
- __ LoadImmediate(location.AsArm().AsRegisterPairHigh(), High32Bits(value));
+ if (location.IsRegisterPair()) {
+ __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
+ __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
} else {
DCHECK(location.IsDoubleStackSlot());
__ LoadImmediate(IP, Low32Bits(value));
@@ -667,7 +661,7 @@
if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
// Condition has been materialized, compare the output to 0
DCHECK(if_instr->GetLocations()->InAt(0).IsRegister());
- __ cmp(if_instr->GetLocations()->InAt(0).AsArm().AsCoreRegister(),
+ __ cmp(if_instr->GetLocations()->InAt(0).As<Register>(),
ShifterOperand(0));
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), NE);
} else {
@@ -675,18 +669,18 @@
// condition as the branch condition.
LocationSummary* locations = cond->GetLocations();
if (locations->InAt(1).IsRegister()) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ __ cmp(locations->InAt(0).As<Register>(),
+ ShifterOperand(locations->InAt(1).As<Register>()));
} else {
DCHECK(locations->InAt(1).IsConstant());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
ShifterOperand operand;
if (ShifterOperand::CanHoldArm(value, &operand)) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(value));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
} else {
Register temp = IP;
__ LoadImmediate(temp, value);
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(temp));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
}
}
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()),
@@ -714,24 +708,24 @@
LocationSummary* locations = comp->GetLocations();
if (locations->InAt(1).IsRegister()) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ __ cmp(locations->InAt(0).As<Register>(),
+ ShifterOperand(locations->InAt(1).As<Register>()));
} else {
DCHECK(locations->InAt(1).IsConstant());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
ShifterOperand operand;
if (ShifterOperand::CanHoldArm(value, &operand)) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(value));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
} else {
Register temp = IP;
__ LoadImmediate(temp, value);
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(temp));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
}
}
__ it(ARMCondition(comp->GetCondition()), kItElse);
- __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(1),
+ __ mov(locations->Out().As<Register>(), ShifterOperand(1),
ARMCondition(comp->GetCondition()));
- __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(0),
+ __ mov(locations->Out().As<Register>(), ShifterOperand(0),
ARMOppositeCondition(comp->GetCondition()));
}
@@ -864,13 +858,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- locations->SetInAt(0, ArmCoreLocation(R0));
+ locations->SetInAt(0, Location::RegisterLocation(R0));
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- locations->SetInAt(
- 0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetInAt(0, Location::RegisterPairLocation(R0, R1));
break;
default:
@@ -888,12 +881,13 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsArm().AsCoreRegister(), R0);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<Register>(), R0);
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsArm().AsRegisterPair(), R0_R1);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), R0);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), R1);
break;
default:
@@ -912,7 +906,7 @@
}
void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
invoke->GetIndexInDexCache() * kArmWordSize;
@@ -947,7 +941,7 @@
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(ArmCoreLocation(R0));
+ locations->AddTemp(Location::RegisterLocation(R0));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); i++) {
@@ -963,12 +957,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- locations->SetOut(ArmCoreLocation(R0));
+ locations->SetOut(Location::RegisterLocation(R0));
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- locations->SetOut(Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetOut(Location::RegisterPairLocation(R0, R1));
break;
case Primitive::kPrimVoid:
@@ -978,7 +972,7 @@
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -989,7 +983,7 @@
__ ldr(temp, Address(SP, receiver.GetStackIndex()));
__ ldr(temp, Address(temp, class_offset));
} else {
- __ ldr(temp, Address(receiver.AsArm().AsCoreRegister(), class_offset));
+ __ ldr(temp, Address(receiver.As<Register>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
@@ -1030,38 +1024,37 @@
void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
switch (add->GetResultType()) {
case Primitive::kPrimInt:
- if (locations->InAt(1).IsRegister()) {
- __ add(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ if (second.IsRegister()) {
+ __ add(out.As<Register>(), first.As<Register>(), ShifterOperand(second.As<Register>()));
} else {
- __ AddConstant(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
+ __ AddConstant(out.As<Register>(),
+ first.As<Register>(),
+ second.GetConstant()->AsIntConstant()->GetValue());
}
break;
case Primitive::kPrimLong:
- __ adds(locations->Out().AsArm().AsRegisterPairLow(),
- locations->InAt(0).AsArm().AsRegisterPairLow(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairLow()));
- __ adc(locations->Out().AsArm().AsRegisterPairHigh(),
- locations->InAt(0).AsArm().AsRegisterPairHigh(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairHigh()));
+ __ adds(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ adc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
break;
case Primitive::kPrimFloat:
- __ vadds(locations->Out().AsArm().AsOverlappingDRegisterLow(),
- locations->InAt(0).AsArm().AsOverlappingDRegisterLow(),
- locations->InAt(1).AsArm().AsOverlappingDRegisterLow());
+ __ vadds(FromDToLowS(out.As<DRegister>()),
+ FromDToLowS(first.As<DRegister>()),
+ FromDToLowS(second.As<DRegister>()));
break;
case Primitive::kPrimDouble:
- __ vaddd(locations->Out().AsArm().AsDRegister(),
- locations->InAt(0).AsArm().AsDRegister(),
- locations->InAt(1).AsArm().AsDRegister());
+ __ vaddd(out.As<DRegister>(), first.As<DRegister>(), second.As<DRegister>());
break;
default:
@@ -1099,24 +1092,24 @@
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
if (locations->InAt(1).IsRegister()) {
- __ sub(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ __ sub(locations->Out().As<Register>(),
+ locations->InAt(0).As<Register>(),
+ ShifterOperand(locations->InAt(1).As<Register>()));
} else {
- __ AddConstant(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
+ __ AddConstant(locations->Out().As<Register>(),
+ locations->InAt(0).As<Register>(),
-locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
}
break;
}
case Primitive::kPrimLong:
- __ subs(locations->Out().AsArm().AsRegisterPairLow(),
- locations->InAt(0).AsArm().AsRegisterPairLow(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairLow()));
- __ sbc(locations->Out().AsArm().AsRegisterPairHigh(),
- locations->InAt(0).AsArm().AsRegisterPairHigh(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairHigh()));
+ __ subs(locations->Out().AsRegisterPairLow<Register>(),
+ locations->InAt(0).AsRegisterPairLow<Register>(),
+ ShifterOperand(locations->InAt(1).AsRegisterPairLow<Register>()));
+ __ sbc(locations->Out().AsRegisterPairHigh<Register>(),
+ locations->InAt(0).AsRegisterPairHigh<Register>(),
+ ShifterOperand(locations->InAt(1).AsRegisterPairHigh<Register>()));
break;
case Primitive::kPrimBoolean:
@@ -1135,9 +1128,9 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(ArmCoreLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(ArmCoreLocation(calling_convention.GetRegisterAt(1)));
- locations->SetOut(ArmCoreLocation(R0));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(R0));
}
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
@@ -1178,8 +1171,8 @@
void InstructionCodeGeneratorARM::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
- __ eor(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(1));
+ __ eor(locations->Out().As<Register>(),
+ locations->InAt(0).As<Register>(), ShifterOperand(1));
}
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
@@ -1195,19 +1188,19 @@
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
- Register output = locations->Out().AsArm().AsCoreRegister();
- ArmManagedRegister left = locations->InAt(0).AsArm();
- ArmManagedRegister right = locations->InAt(1).AsArm();
+ Register output = locations->Out().As<Register>();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
Label less, greater, done;
- __ cmp(left.AsRegisterPairHigh(),
- ShifterOperand(right.AsRegisterPairHigh())); // Signed compare.
+ __ cmp(left.AsRegisterPairHigh<Register>(),
+ ShifterOperand(right.AsRegisterPairHigh<Register>())); // Signed compare.
__ b(&less, LT);
__ b(&greater, GT);
// Do LoadImmediate before any `cmp`, as LoadImmediate might affect
// the status flags.
__ LoadImmediate(output, 0);
- __ cmp(left.AsRegisterPairLow(),
- ShifterOperand(right.AsRegisterPairLow())); // Unsigned compare.
+ __ cmp(left.AsRegisterPairLow<Register>(),
+ ShifterOperand(right.AsRegisterPairLow<Register>())); // Unsigned compare.
__ b(&done, EQ);
__ b(&less, CC);
@@ -1255,40 +1248,40 @@
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- Register value = locations->InAt(1).AsArm().AsCoreRegister();
+ Register value = locations->InAt(1).As<Register>();
__ StoreToOffset(kStoreByte, value, obj, offset);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).AsArm().AsCoreRegister();
+ Register value = locations->InAt(1).As<Register>();
__ StoreToOffset(kStoreHalfword, value, obj, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).AsArm().AsCoreRegister();
+ Register value = locations->InAt(1).As<Register>();
__ StoreToOffset(kStoreWord, value, obj, offset);
if (field_type == Primitive::kPrimNot) {
- Register temp = locations->GetTemp(0).AsArm().AsCoreRegister();
- Register card = locations->GetTemp(1).AsArm().AsCoreRegister();
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
}
case Primitive::kPrimLong: {
- ArmManagedRegister value = locations->InAt(1).AsArm();
- __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), obj, offset);
+ Location value = locations->InAt(1);
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
break;
}
@@ -1310,45 +1303,45 @@
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadSignedByte, out, obj, offset);
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
break;
}
case Primitive::kPrimLong: {
// TODO: support volatile.
- ArmManagedRegister out = locations->Out().AsArm();
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), obj, offset);
+ Location out = locations->Out();
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
break;
}
@@ -1378,7 +1371,7 @@
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmp(obj.AsArm().AsCoreRegister(), ShifterOperand(0));
+ __ cmp(obj.As<Register>(), ShifterOperand(0));
__ b(slow_path->GetEntryLabel(), EQ);
} else {
DCHECK(obj.IsConstant()) << obj;
@@ -1398,18 +1391,18 @@
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ add(IP, obj, ShifterOperand(index.As<Register>()));
__ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
}
break;
@@ -1417,12 +1410,12 @@
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ LoadFromOffset(kLoadSignedByte, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ add(IP, obj, ShifterOperand(index.As<Register>()));
__ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
}
break;
@@ -1430,12 +1423,12 @@
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
__ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
}
break;
@@ -1443,12 +1436,12 @@
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
__ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
}
break;
@@ -1458,12 +1451,12 @@
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadFromOffset(kLoadWord, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_4));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
__ LoadFromOffset(kLoadWord, out, IP, data_offset);
}
break;
@@ -1471,13 +1464,13 @@
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- ArmManagedRegister out = locations->Out().AsArm();
+ Location out = locations->Out();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), obj, offset);
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_8));
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), IP, data_offset);
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_8));
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
}
@@ -1498,9 +1491,9 @@
instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
if (is_object) {
InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, ArmCoreLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, ArmCoreLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, ArmCoreLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
locations->SetInAt(
@@ -1511,7 +1504,7 @@
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
Primitive::Type value_type = instruction->GetComponentType();
@@ -1519,12 +1512,12 @@
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ Register value = locations->InAt(2).As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ StoreToOffset(kStoreByte, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ add(IP, obj, ShifterOperand(index.As<Register>()));
__ StoreToOffset(kStoreByte, value, IP, data_offset);
}
break;
@@ -1533,12 +1526,12 @@
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ Register value = locations->InAt(2).As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ StoreToOffset(kStoreHalfword, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
__ StoreToOffset(kStoreHalfword, value, IP, data_offset);
}
break;
@@ -1546,12 +1539,12 @@
case Primitive::kPrimInt: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ Register value = locations->InAt(2).As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ StoreToOffset(kStoreWord, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_4));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
__ StoreToOffset(kStoreWord, value, IP, data_offset);
}
break;
@@ -1568,13 +1561,13 @@
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- ArmManagedRegister value = locations->InAt(2).AsArm();
+ Location value = locations->InAt(2);
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), obj, offset);
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_8));
- __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), IP, data_offset);
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_8));
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
}
@@ -1598,8 +1591,8 @@
void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
}
@@ -1619,8 +1612,8 @@
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- Register index = locations->InAt(0).AsArm().AsCoreRegister();
- Register length = locations->InAt(1).AsArm().AsCoreRegister();
+ Register index = locations->InAt(0).As<Register>();
+ Register length = locations->InAt(1).As<Register>();
__ cmp(index, ShifterOperand(length));
__ b(slow_path->GetEntryLabel(), CS);
@@ -1696,15 +1689,15 @@
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ Mov(destination.AsArm().AsCoreRegister(), source.AsArm().AsCoreRegister());
+ __ Mov(destination.As<Register>(), source.As<Register>());
} else {
DCHECK(destination.IsStackSlot());
- __ StoreToOffset(kStoreWord, source.AsArm().AsCoreRegister(),
+ __ StoreToOffset(kStoreWord, source.As<Register>(),
SP, destination.GetStackIndex());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ LoadFromOffset(kLoadWord, destination.AsArm().AsCoreRegister(),
+ __ LoadFromOffset(kLoadWord, destination.As<Register>(),
SP, source.GetStackIndex());
} else {
DCHECK(destination.IsStackSlot());
@@ -1716,7 +1709,7 @@
DCHECK(source.GetConstant()->AsIntConstant() != nullptr);
int32_t value = source.GetConstant()->AsIntConstant()->GetValue();
if (destination.IsRegister()) {
- __ LoadImmediate(destination.AsArm().AsCoreRegister(), value);
+ __ LoadImmediate(destination.As<Register>(), value);
} else {
DCHECK(destination.IsStackSlot());
__ LoadImmediate(IP, value);
@@ -1748,15 +1741,15 @@
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- DCHECK_NE(source.AsArm().AsCoreRegister(), IP);
- DCHECK_NE(destination.AsArm().AsCoreRegister(), IP);
- __ Mov(IP, source.AsArm().AsCoreRegister());
- __ Mov(source.AsArm().AsCoreRegister(), destination.AsArm().AsCoreRegister());
- __ Mov(destination.AsArm().AsCoreRegister(), IP);
+ DCHECK_NE(source.As<Register>(), IP);
+ DCHECK_NE(destination.As<Register>(), IP);
+ __ Mov(IP, source.As<Register>());
+ __ Mov(source.As<Register>(), destination.As<Register>());
+ __ Mov(destination.As<Register>(), IP);
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange(source.AsArm().AsCoreRegister(), destination.GetStackIndex());
+ Exchange(source.As<Register>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange(destination.AsArm().AsCoreRegister(), source.GetStackIndex());
+ Exchange(destination.As<Register>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange(source.GetStackIndex(), destination.GetStackIndex());
} else {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b5de8ed..9da26e8 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -164,7 +164,7 @@
}
virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual ManagedRegister AllocateFreeRegister(
+ virtual Location AllocateFreeRegister(
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
virtual size_t GetNumberOfRegisters() const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ec82dd3..5f6d458 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -29,10 +29,6 @@
namespace art {
-x86::X86ManagedRegister Location::AsX86() const {
- return reg().AsX86();
-}
-
namespace x86 {
static constexpr bool kExplicitStackOverflowCheck = false;
@@ -40,10 +36,6 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static Location X86CpuLocation(Register reg) {
- return Location::RegisterLocation(X86ManagedRegister::FromCpuRegister(reg));
-}
-
static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -105,8 +97,8 @@
CodeGeneratorX86* x86_codegen = reinterpret_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->Move32(X86CpuLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x86_codegen->Move32(X86CpuLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -201,8 +193,7 @@
return blocked_registers + kNumberOfCpuRegisters;
}
-ManagedRegister CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type, bool* blocked_registers) const {
switch (type) {
case Primitive::kPrimLong: {
bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
@@ -222,7 +213,7 @@
blocked_register_pairs[i] = true;
}
}
- return pair;
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
}
case Primitive::kPrimByte:
@@ -242,21 +233,20 @@
blocked_register_pairs[i] = true;
}
}
- return X86ManagedRegister::FromCpuRegister(reg);
+ return Location::RegisterLocation(reg);
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
- XmmRegister reg = static_cast<XmmRegister>(AllocateFreeRegisterInternal(
+ return Location::FpuRegisterLocation(AllocateFreeRegisterInternal(
GetBlockedXmmRegisters(blocked_registers), kNumberOfXmmRegisters));
- return X86ManagedRegister::FromXmmRegister(reg);
}
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
- return ManagedRegister::NoRegister();
+ return Location();
}
void CodeGeneratorX86::SetupBlockedRegisters(bool* blocked_registers) const {
@@ -359,7 +349,7 @@
case Primitive::kPrimNot: {
uint32_t index = gp_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
- return X86CpuLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -370,8 +360,9 @@
uint32_t index = gp_index_;
gp_index_ += 2;
if (index + 1 < calling_convention.GetNumberOfRegisters()) {
- return Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(
- calling_convention.GetRegisterPairAt(index)));
+ X86ManagedRegister pair = X86ManagedRegister::FromRegisterPair(
+ calling_convention.GetRegisterPairAt(index));
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
return Location::QuickParameter(index);
} else {
@@ -392,28 +383,28 @@
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+ __ movl(destination.As<Register>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ movd(destination.AsX86().AsCpuRegister(), source.AsX86().AsXmmRegister());
+ __ movd(destination.As<Register>(), source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
- __ movl(destination.AsX86().AsCpuRegister(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.As<Register>(), Address(ESP, source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ movd(destination.AsX86().AsXmmRegister(), source.AsX86().AsCpuRegister());
+ __ movd(destination.As<XmmRegister>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ movaps(destination.AsX86().AsXmmRegister(), source.AsX86().AsXmmRegister());
+ __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
- __ movss(destination.AsX86().AsXmmRegister(), Address(ESP, source.GetStackIndex()));
+ __ movss(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
}
} else {
DCHECK(destination.IsStackSlot());
if (source.IsRegister()) {
- __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsCpuRegister());
+ __ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ movss(Address(ESP, destination.GetStackIndex()), source.AsX86().AsXmmRegister());
+ __ movss(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
__ pushl(Address(ESP, source.GetStackIndex()));
@@ -426,32 +417,32 @@
if (source.Equals(destination)) {
return;
}
- if (destination.IsRegister()) {
- if (source.IsRegister()) {
- __ movl(destination.AsX86().AsRegisterPairLow(), source.AsX86().AsRegisterPairLow());
- __ movl(destination.AsX86().AsRegisterPairHigh(), source.AsX86().AsRegisterPairHigh());
+ if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ movl(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ __ movl(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
uint32_t argument_index = source.GetQuickParameterIndex();
InvokeDexCallingConvention calling_convention;
- __ movl(destination.AsX86().AsRegisterPairLow(),
+ __ movl(destination.AsRegisterPairLow<Register>(),
calling_convention.GetRegisterAt(argument_index));
- __ movl(destination.AsX86().AsRegisterPairHigh(), Address(ESP,
+ __ movl(destination.AsRegisterPairHigh<Register>(), Address(ESP,
calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movl(destination.AsX86().AsRegisterPairLow(), Address(ESP, source.GetStackIndex()));
- __ movl(destination.AsX86().AsRegisterPairHigh(),
+ __ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.AsRegisterPairHigh<Register>(),
Address(ESP, source.GetHighStackIndex(kX86WordSize)));
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = destination.GetQuickParameterIndex();
if (source.IsRegister()) {
- __ movl(calling_convention.GetRegisterAt(argument_index), source.AsX86().AsRegisterPairLow());
+ __ movl(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)),
- source.AsX86().AsRegisterPairHigh());
+ source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
@@ -463,16 +454,16 @@
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
- __ movsd(destination.AsX86().AsXmmRegister(), Address(ESP, source.GetStackIndex()));
+ __ movsd(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
} else {
LOG(FATAL) << "Unimplemented";
}
} else {
DCHECK(destination.IsDoubleStackSlot());
- if (source.IsRegister()) {
- __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsRegisterPairLow());
+ if (source.IsRegisterPair()) {
+ __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
- source.AsX86().AsRegisterPairHigh());
+ source.AsRegisterPairHigh<Register>());
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = source.GetQuickParameterIndex();
@@ -481,7 +472,7 @@
DCHECK_EQ(calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize(),
static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize)));
} else if (source.IsFpuRegister()) {
- __ movsd(Address(ESP, destination.GetStackIndex()), source.AsX86().AsXmmRegister());
+ __ movsd(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
} else {
DCHECK(source.IsDoubleStackSlot());
__ pushl(Address(ESP, source.GetStackIndex()));
@@ -496,15 +487,15 @@
if (instruction->AsIntConstant() != nullptr) {
Immediate imm(instruction->AsIntConstant()->GetValue());
if (location.IsRegister()) {
- __ movl(location.AsX86().AsCpuRegister(), imm);
+ __ movl(location.As<Register>(), imm);
} else {
__ movl(Address(ESP, location.GetStackIndex()), imm);
}
} else if (instruction->AsLongConstant() != nullptr) {
int64_t value = instruction->AsLongConstant()->GetValue();
if (location.IsRegister()) {
- __ movl(location.AsX86().AsRegisterPairLow(), Immediate(Low32Bits(value)));
- __ movl(location.AsX86().AsRegisterPairHigh(), Immediate(High32Bits(value)));
+ __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
} else {
__ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
__ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
@@ -609,7 +600,7 @@
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
- __ cmpl(lhs.AsX86().AsCpuRegister(), Immediate(0));
+ __ cmpl(lhs.As<Register>(), Immediate(0));
} else {
__ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
}
@@ -620,13 +611,13 @@
Location rhs = cond->GetLocations()->InAt(1);
// LHS is guaranteed to be in a register (see LocationsBuilderX86::VisitCondition).
if (rhs.IsRegister()) {
- __ cmpl(lhs.AsX86().AsCpuRegister(), rhs.AsX86().AsCpuRegister());
+ __ cmpl(lhs.As<Register>(), rhs.As<Register>());
} else if (rhs.IsConstant()) {
HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(lhs.AsX86().AsCpuRegister(), imm);
+ __ cmpl(lhs.As<Register>(), imm);
} else {
- __ cmpl(lhs.AsX86().AsCpuRegister(), Address(ESP, rhs.GetStackIndex()));
+ __ cmpl(lhs.As<Register>(), Address(ESP, rhs.GetStackIndex()));
}
__ j(X86Condition(cond->AsCondition()->GetCondition()),
codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -693,18 +684,18 @@
void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
- Register reg = locations->Out().AsX86().AsCpuRegister();
+ Register reg = locations->Out().As<Register>();
// Clear register: setcc only sets the low byte.
__ xorl(reg, reg);
if (locations->InAt(1).IsRegister()) {
- __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->InAt(1).AsX86().AsCpuRegister());
+ __ cmpl(locations->InAt(0).As<Register>(),
+ locations->InAt(1).As<Register>());
} else if (locations->InAt(1).IsConstant()) {
HConstant* instruction = locations->InAt(1).GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(), imm);
+ __ cmpl(locations->InAt(0).As<Register>(), imm);
} else {
- __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
+ __ cmpl(locations->InAt(0).As<Register>(),
Address(ESP, locations->InAt(1).GetStackIndex()));
}
__ setb(X86Condition(comp->GetCondition()), reg);
@@ -797,18 +788,18 @@
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- locations->SetInAt(0, X86CpuLocation(EAX));
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
break;
case Primitive::kPrimLong:
locations->SetInAt(
- 0, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ 0, Location::RegisterPairLocation(EAX, EDX));
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(
- 0, Location::FpuRegisterLocation(X86ManagedRegister::FromXmmRegister(XMM0)));
+ 0, Location::FpuRegisterLocation(XMM0));
break;
default:
@@ -825,16 +816,17 @@
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsCpuRegister(), EAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<Register>(), EAX);
break;
case Primitive::kPrimLong:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsRegisterPair(), EAX_EDX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsXmmRegister(), XMM0);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<XmmRegister>(), XMM0);
break;
default:
@@ -850,7 +842,7 @@
}
void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
invoke->GetIndexInDexCache() * kX86WordSize;
@@ -882,7 +874,7 @@
void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(X86CpuLocation(EAX));
+ locations->AddTemp(Location::RegisterLocation(EAX));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); i++) {
@@ -897,11 +889,11 @@
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- locations->SetOut(X86CpuLocation(EAX));
+ locations->SetOut(Location::RegisterLocation(EAX));
break;
case Primitive::kPrimLong:
- locations->SetOut(Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
case Primitive::kPrimVoid:
@@ -909,7 +901,7 @@
case Primitive::kPrimDouble:
case Primitive::kPrimFloat:
- locations->SetOut(Location::FpuRegisterLocation(X86ManagedRegister::FromXmmRegister(XMM0)));
+ locations->SetOut(Location::FpuRegisterLocation(XMM0));
break;
}
@@ -917,7 +909,7 @@
}
void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -928,7 +920,7 @@
__ movl(temp, Address(ESP, receiver.GetStackIndex()));
__ movl(temp, Address(temp, class_offset));
} else {
- __ movl(temp, Address(receiver.AsX86().AsCpuRegister(), class_offset));
+ __ movl(temp, Address(receiver.As<Register>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -972,28 +964,30 @@
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.AsX86().AsCpuRegister(), locations->Out().AsX86().AsCpuRegister());
+ DCHECK_EQ(first.As<Register>(), locations->Out().As<Register>());
if (second.IsRegister()) {
- __ addl(first.AsX86().AsCpuRegister(), second.AsX86().AsCpuRegister());
+ __ addl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
HConstant* instruction = second.GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ addl(first.AsX86().AsCpuRegister(), imm);
+ __ addl(first.As<Register>(), imm);
} else {
- __ addl(first.AsX86().AsCpuRegister(), Address(ESP, second.GetStackIndex()));
+ __ addl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsX86().AsRegisterPair(),
- locations->Out().AsX86().AsRegisterPair());
+ DCHECK_EQ(first.AsRegisterPairLow<Register>(),
+ locations->Out().AsRegisterPairLow<Register>());
+ DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
+ locations->Out().AsRegisterPairHigh<Register>());
if (second.IsRegister()) {
- __ addl(first.AsX86().AsRegisterPairLow(), second.AsX86().AsRegisterPairLow());
- __ adcl(first.AsX86().AsRegisterPairHigh(), second.AsX86().AsRegisterPairHigh());
+ __ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
- __ addl(first.AsX86().AsRegisterPairLow(), Address(ESP, second.GetStackIndex()));
- __ adcl(first.AsX86().AsRegisterPairHigh(),
+ __ addl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ adcl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
break;
@@ -1001,18 +995,18 @@
case Primitive::kPrimFloat: {
if (second.IsFpuRegister()) {
- __ addss(first.AsX86().AsXmmRegister(), second.AsX86().AsXmmRegister());
+ __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addss(first.AsX86().AsXmmRegister(), Address(ESP, second.GetStackIndex()));
+ __ addss(first.As<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimDouble: {
if (second.IsFpuRegister()) {
- __ addsd(first.AsX86().AsXmmRegister(), second.AsX86().AsXmmRegister());
+ __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addsd(first.AsX86().AsXmmRegister(), Address(ESP, second.GetStackIndex()));
+ __ addsd(first.As<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
@@ -1048,37 +1042,41 @@
void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->Out().AsX86().AsCpuRegister());
- if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->InAt(1).AsX86().AsCpuRegister());
- } else if (locations->InAt(1).IsConstant()) {
- HConstant* instruction = locations->InAt(1).GetConstant();
+ DCHECK_EQ(first.As<Register>(),
+ locations->Out().As<Register>());
+ if (second.IsRegister()) {
+ __ subl(first.As<Register>(),
+ second.As<Register>());
+ } else if (second.IsConstant()) {
+ HConstant* instruction = second.GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(locations->InAt(0).AsX86().AsCpuRegister(), imm);
+ __ subl(first.As<Register>(), imm);
} else {
- __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
- Address(ESP, locations->InAt(1).GetStackIndex()));
+ __ subl(first.As<Register>(),
+ Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- DCHECK_EQ(locations->InAt(0).AsX86().AsRegisterPair(),
- locations->Out().AsX86().AsRegisterPair());
- if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
- locations->InAt(1).AsX86().AsRegisterPairLow());
- __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
- locations->InAt(1).AsX86().AsRegisterPairHigh());
+ DCHECK_EQ(first.AsRegisterPairLow<Register>(),
+ locations->Out().AsRegisterPairLow<Register>());
+ DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
+ locations->Out().AsRegisterPairHigh<Register>());
+ if (second.IsRegister()) {
+ __ subl(first.AsRegisterPairLow<Register>(),
+ second.AsRegisterPairLow<Register>());
+ __ sbbl(first.AsRegisterPairHigh<Register>(),
+ second.AsRegisterPairHigh<Register>());
} else {
- __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
- Address(ESP, locations->InAt(1).GetStackIndex()));
- __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
- Address(ESP, locations->InAt(1).GetHighStackIndex(kX86WordSize)));
+ __ subl(first.AsRegisterPairLow<Register>(),
+ Address(ESP, second.GetStackIndex()));
+ __ sbbl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
break;
}
@@ -1098,10 +1096,10 @@
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
- locations->SetOut(X86CpuLocation(EAX));
+ locations->SetOut(Location::RegisterLocation(EAX));
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(X86CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(X86CpuLocation(calling_convention.GetRegisterAt(1)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
@@ -1141,8 +1139,8 @@
void InstructionCodeGeneratorX86::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location out = locations->Out();
- DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(), out.AsX86().AsCpuRegister());
- __ xorl(out.AsX86().AsCpuRegister(), Immediate(1));
+ DCHECK_EQ(locations->InAt(0).As<Register>(), out.As<Register>());
+ __ xorl(out.As<Register>(), Immediate(1));
}
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
@@ -1159,22 +1157,23 @@
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
Label less, greater, done;
- Register output = locations->Out().AsX86().AsCpuRegister();
- X86ManagedRegister left = locations->InAt(0).AsX86();
+ Register output = locations->Out().As<Register>();
+ Location left = locations->InAt(0);
Location right = locations->InAt(1);
if (right.IsRegister()) {
- __ cmpl(left.AsRegisterPairHigh(), right.AsX86().AsRegisterPairHigh());
+ __ cmpl(left.AsRegisterPairHigh<Register>(), right.AsRegisterPairHigh<Register>());
} else {
DCHECK(right.IsDoubleStackSlot());
- __ cmpl(left.AsRegisterPairHigh(), Address(ESP, right.GetHighStackIndex(kX86WordSize)));
+ __ cmpl(left.AsRegisterPairHigh<Register>(),
+ Address(ESP, right.GetHighStackIndex(kX86WordSize)));
}
__ j(kLess, &less); // Signed compare.
__ j(kGreater, &greater); // Signed compare.
- if (right.IsRegister()) {
- __ cmpl(left.AsRegisterPairLow(), right.AsX86().AsRegisterPairLow());
+ if (right.IsRegisterPair()) {
+ __ cmpl(left.AsRegisterPairLow<Register>(), right.AsRegisterPairLow<Register>());
} else {
DCHECK(right.IsDoubleStackSlot());
- __ cmpl(left.AsRegisterPairLow(), Address(ESP, right.GetStackIndex()));
+ __ cmpl(left.AsRegisterPairLow<Register>(), Address(ESP, right.GetStackIndex()));
}
__ movl(output, Immediate(0));
__ j(kEqual, &done);
@@ -1221,7 +1220,7 @@
bool dies_at_entry = !is_object_type && !is_byte_type;
if (is_byte_type) {
// Ensure the value is in a byte register.
- locations->SetInAt(1, X86CpuLocation(EAX), dies_at_entry);
+ locations->SetInAt(1, Location::RegisterLocation(EAX), dies_at_entry);
} else {
locations->SetInAt(1, Location::RequiresRegister(), dies_at_entry);
}
@@ -1229,48 +1228,48 @@
if (is_object_type) {
locations->AddTemp(Location::RequiresRegister());
// Ensure the card is in a byte register.
- locations->AddTemp(X86CpuLocation(ECX));
+ locations->AddTemp(Location::RegisterLocation(ECX));
}
}
void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- ByteRegister value = locations->InAt(1).AsX86().AsByteRegister();
+ ByteRegister value = locations->InAt(1).As<ByteRegister>();
__ movb(Address(obj, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).AsX86().AsCpuRegister();
+ Register value = locations->InAt(1).As<Register>();
__ movw(Address(obj, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).AsX86().AsCpuRegister();
+ Register value = locations->InAt(1).As<Register>();
__ movl(Address(obj, offset), value);
if (field_type == Primitive::kPrimNot) {
- Register temp = locations->GetTemp(0).AsX86().AsCpuRegister();
- Register card = locations->GetTemp(1).AsX86().AsCpuRegister();
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
}
case Primitive::kPrimLong: {
- X86ManagedRegister value = locations->InAt(1).AsX86();
- __ movl(Address(obj, offset), value.AsRegisterPairLow());
- __ movl(Address(obj, kX86WordSize + offset), value.AsRegisterPairHigh());
+ Location value = locations->InAt(1);
+ __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
+ __ movl(Address(obj, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
break;
}
@@ -1304,46 +1303,45 @@
void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movzxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movsxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movsxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movzxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movl(out, Address(obj, offset));
break;
}
case Primitive::kPrimLong: {
// TODO: support volatile.
- X86ManagedRegister out = locations->Out().AsX86();
- __ movl(out.AsRegisterPairLow(), Address(obj, offset));
- __ movl(out.AsRegisterPairHigh(), Address(obj, kX86WordSize + offset));
+ __ movl(locations->Out().AsRegisterPairLow<Register>(), Address(obj, offset));
+ __ movl(locations->Out().AsRegisterPairHigh<Register>(), Address(obj, kX86WordSize + offset));
break;
}
@@ -1373,7 +1371,7 @@
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.AsX86().AsCpuRegister(), Immediate(0));
+ __ cmpl(obj.As<Register>(), Immediate(0));
} else if (obj.IsStackSlot()) {
__ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
} else {
@@ -1396,54 +1394,54 @@
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movzxb(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset));
+ __ movzxb(out, Address(obj, index.As<Register>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movsxb(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset));
+ __ movsxb(out, Address(obj, index.As<Register>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movsxw(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset));
+ __ movsxw(out, Address(obj, index.As<Register>(), TIMES_2, data_offset));
}
break;
}
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movzxw(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset));
+ __ movzxw(out, Address(obj, index.As<Register>(), TIMES_2, data_offset));
}
break;
}
@@ -1451,28 +1449,28 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
} else {
- __ movl(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset));
+ __ movl(out, Address(obj, index.As<Register>(), TIMES_4, data_offset));
}
break;
}
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- X86ManagedRegister out = locations->Out().AsX86();
+ Location out = locations->Out();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ movl(out.AsRegisterPairLow(), Address(obj, offset));
- __ movl(out.AsRegisterPairHigh(), Address(obj, offset + kX86WordSize));
+ __ movl(out.AsRegisterPairLow<Register>(), Address(obj, offset));
+ __ movl(out.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
} else {
- __ movl(out.AsRegisterPairLow(),
- Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset));
- __ movl(out.AsRegisterPairHigh(),
- Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize));
+ __ movl(out.AsRegisterPairLow<Register>(),
+ Address(obj, index.As<Register>(), TIMES_8, data_offset));
+ __ movl(out.AsRegisterPairHigh<Register>(),
+ Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize));
}
break;
}
@@ -1494,9 +1492,9 @@
if (value_type == Primitive::kPrimNot) {
InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, X86CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, X86CpuLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, X86CpuLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
bool is_byte_type = (value_type == Primitive::kPrimBoolean)
|| (value_type == Primitive::kPrimByte);
@@ -1510,7 +1508,7 @@
if (is_byte_type) {
// Ensure the value is in a byte register.
locations->SetInAt(2, Location::ByteRegisterOrConstant(
- X86ManagedRegister::FromCpuRegister(EAX), instruction->InputAt(2)), dies_at_entry);
+ EAX, instruction->InputAt(2)), dies_at_entry);
} else {
locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)), dies_at_entry);
}
@@ -1519,7 +1517,7 @@
void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
@@ -1531,17 +1529,17 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.AsX86().AsByteRegister());
+ __ movb(Address(obj, offset), value.As<ByteRegister>());
} else {
__ movb(Address(obj, offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movb(Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset),
- value.AsX86().AsByteRegister());
+ __ movb(Address(obj, index.As<Register>(), TIMES_1, data_offset),
+ value.As<ByteRegister>());
} else {
- __ movb(Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset),
+ __ movb(Address(obj, index.As<Register>(), TIMES_1, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1554,17 +1552,17 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.AsX86().AsCpuRegister());
+ __ movw(Address(obj, offset), value.As<Register>());
} else {
__ movw(Address(obj, offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movw(Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset),
- value.AsX86().AsCpuRegister());
+ __ movw(Address(obj, index.As<Register>(), TIMES_2, data_offset),
+ value.As<Register>());
} else {
- __ movw(Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset),
+ __ movw(Address(obj, index.As<Register>(), TIMES_2, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1576,16 +1574,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.AsX86().AsCpuRegister());
+ __ movl(Address(obj, offset), value.As<Register>());
} else {
__ movl(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset),
- value.AsX86().AsCpuRegister());
+ __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
+ value.As<Register>());
} else {
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset),
+ __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1603,25 +1601,27 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.AsX86().AsRegisterPairLow());
- __ movl(Address(obj, offset + kX86WordSize), value.AsX86().AsRegisterPairHigh());
+ if (value.IsRegisterPair()) {
+ __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
+ __ movl(Address(obj, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
} else {
+ DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
__ movl(Address(obj, offset), Immediate(Low32Bits(val)));
__ movl(Address(obj, offset + kX86WordSize), Immediate(High32Bits(val)));
}
} else {
- if (value.IsRegister()) {
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset),
- value.AsX86().AsRegisterPairLow());
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize),
- value.AsX86().AsRegisterPairHigh());
+ if (value.IsRegisterPair()) {
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset),
+ value.AsRegisterPairLow<Register>());
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize),
+ value.AsRegisterPairHigh<Register>());
} else {
+ DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset),
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset),
Immediate(Low32Bits(val)));
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize),
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize),
Immediate(High32Bits(val)));
}
}
@@ -1647,8 +1647,8 @@
void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
+ Register out = locations->Out().As<Register>();
__ movl(out, Address(obj, offset));
}
@@ -1668,8 +1668,8 @@
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- Register index = locations->InAt(0).AsX86().AsCpuRegister();
- Register length = locations->InAt(1).AsX86().AsCpuRegister();
+ Register index = locations->InAt(0).As<Register>();
+ Register length = locations->InAt(1).As<Register>();
__ cmpl(index, length);
__ j(kAboveEqual, slow_path->GetEntryLabel());
@@ -1744,14 +1744,14 @@
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+ __ movl(destination.As<Register>(), source.As<Register>());
} else {
DCHECK(destination.IsStackSlot());
- __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsCpuRegister());
+ __ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.As<Register>(), Address(ESP, source.GetStackIndex()));
} else {
DCHECK(destination.IsStackSlot());
MoveMemoryToMemory(destination.GetStackIndex(),
@@ -1761,7 +1761,7 @@
HIntConstant* instruction = source.GetConstant()->AsIntConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
if (destination.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), imm);
+ __ movl(destination.As<Register>(), imm);
} else {
__ movl(Address(ESP, destination.GetStackIndex()), imm);
}
@@ -1803,11 +1803,11 @@
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- __ xchgl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+ __ xchgl(destination.As<Register>(), source.As<Register>());
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange(source.AsX86().AsCpuRegister(), destination.GetStackIndex());
+ Exchange(source.As<Register>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange(destination.AsX86().AsCpuRegister(), source.GetStackIndex());
+ Exchange(destination.As<Register>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange(destination.GetStackIndex(), source.GetStackIndex());
} else {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 3e2ca90..c520164 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -167,7 +167,7 @@
virtual size_t GetNumberOfRegisters() const OVERRIDE;
virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual ManagedRegister AllocateFreeRegister(
+ virtual Location AllocateFreeRegister(
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f5437a1..393eb1a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -30,10 +30,6 @@
namespace art {
-x86_64::X86_64ManagedRegister Location::AsX86_64() const {
- return reg().AsX86_64();
-}
-
namespace x86_64 {
static constexpr bool kExplicitStackOverflowCheck = false;
@@ -44,10 +40,6 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static Location X86_64CpuLocation(Register reg) {
- return Location::RegisterLocation(X86_64ManagedRegister::FromCpuRegister(reg));
-}
-
static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -144,8 +136,8 @@
CodeGeneratorX86_64* x64_codegen = reinterpret_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->Move(X86_64CpuLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x64_codegen->Move(X86_64CpuLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ x64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -208,8 +200,8 @@
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-ManagedRegister CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const {
switch (type) {
case Primitive::kPrimLong:
case Primitive::kPrimByte:
@@ -219,21 +211,21 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
size_t reg = AllocateFreeRegisterInternal(blocked_registers, kNumberOfCpuRegisters);
- return X86_64ManagedRegister::FromCpuRegister(static_cast<Register>(reg));
+ return Location::RegisterLocation(reg);
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
size_t reg = AllocateFreeRegisterInternal(
blocked_registers + kNumberOfCpuRegisters, kNumberOfFloatRegisters);
- return X86_64ManagedRegister::FromXmmRegister(static_cast<FloatRegister>(reg));
+ return Location::FpuRegisterLocation(reg);
}
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
- return ManagedRegister::NoRegister();
+ return Location();
}
void CodeGeneratorX86_64::SetupBlockedRegisters(bool* blocked_registers) const {
@@ -331,37 +323,37 @@
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ movq(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsCpuRegister());
+ __ movq(destination.As<CpuRegister>(), source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
- __ movd(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsXmmRegister());
+ __ movd(destination.As<CpuRegister>(), source.As<XmmRegister>());
} else if (source.IsStackSlot()) {
- __ movl(destination.AsX86_64().AsCpuRegister(),
+ __ movl(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movq(destination.AsX86_64().AsCpuRegister(),
+ __ movq(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ movd(destination.AsX86_64().AsXmmRegister(), source.AsX86_64().AsCpuRegister());
+ __ movd(destination.As<XmmRegister>(), source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
- __ movaps(destination.AsX86_64().AsXmmRegister(), source.AsX86_64().AsXmmRegister());
+ __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
} else if (source.IsStackSlot()) {
- __ movss(destination.AsX86_64().AsXmmRegister(),
+ __ movss(destination.As<XmmRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movsd(destination.AsX86_64().AsXmmRegister(),
+ __ movsd(destination.As<XmmRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
}
} else if (destination.IsStackSlot()) {
if (source.IsRegister()) {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
__ movss(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsXmmRegister());
+ source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
__ movl(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -371,10 +363,10 @@
DCHECK(destination.IsDoubleStackSlot());
if (source.IsRegister()) {
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
__ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsXmmRegister());
+ source.As<XmmRegister>());
} else {
DCHECK(source.IsDoubleStackSlot());
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -389,14 +381,14 @@
if (instruction->AsIntConstant() != nullptr) {
Immediate imm(instruction->AsIntConstant()->GetValue());
if (location.IsRegister()) {
- __ movl(location.AsX86_64().AsCpuRegister(), imm);
+ __ movl(location.As<CpuRegister>(), imm);
} else {
__ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
}
} else if (instruction->AsLongConstant() != nullptr) {
int64_t value = instruction->AsLongConstant()->GetValue();
if (location.IsRegister()) {
- __ movq(location.AsX86_64().AsCpuRegister(), Immediate(value));
+ __ movq(location.As<CpuRegister>(), Immediate(value));
} else {
__ movq(CpuRegister(TMP), Immediate(value));
__ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
@@ -497,7 +489,7 @@
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), Immediate(0));
+ __ cmpl(lhs.As<CpuRegister>(), Immediate(0));
} else {
__ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0));
}
@@ -507,12 +499,12 @@
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
if (rhs.IsRegister()) {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), rhs.AsX86_64().AsCpuRegister());
+ __ cmpl(lhs.As<CpuRegister>(), rhs.As<CpuRegister>());
} else if (rhs.IsConstant()) {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(),
+ __ cmpl(lhs.As<CpuRegister>(),
Immediate(rhs.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ __ cmpl(lhs.As<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
__ j(X86_64Condition(cond->AsCondition()->GetCondition()),
codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -578,17 +570,17 @@
void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
- CpuRegister reg = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister reg = locations->Out().As<CpuRegister>();
// Clear register: setcc only sets the low byte.
__ xorq(reg, reg);
if (locations->InAt(1).IsRegister()) {
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
} else if (locations->InAt(1).IsConstant()) {
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
Immediate(locations->InAt(1).GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
}
__ setcc(X86_64Condition(comp->GetCondition()), reg);
@@ -656,22 +648,23 @@
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong:
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
break;
default:
LOG(FATAL) << "Unimplemented compare type " << compare->InputAt(0)->GetType();
}
- __ movl(locations->Out().AsX86_64().AsCpuRegister(), Immediate(0));
+ CpuRegister output = locations->Out().As<CpuRegister>();
+ __ movl(output, Immediate(0));
__ j(kEqual, &done);
__ j(kGreater, &greater);
- __ movl(locations->Out().AsX86_64().AsCpuRegister(), Immediate(-1));
+ __ movl(output, Immediate(-1));
__ jmp(&done);
__ Bind(&greater);
- __ movl(locations->Out().AsX86_64().AsCpuRegister(), Immediate(1));
+ __ movl(output, Immediate(1));
__ Bind(&done);
}
@@ -714,13 +707,13 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- locations->SetInAt(0, X86_64CpuLocation(RAX));
+ locations->SetInAt(0, Location::RegisterLocation(RAX));
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0,
- Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ Location::FpuRegisterLocation(XMM0));
break;
default:
@@ -738,12 +731,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86_64().AsCpuRegister().AsRegister(), RAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<CpuRegister>().AsRegister(), RAX);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86_64().AsXmmRegister().AsFloatRegister(),
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<XmmRegister>().AsFloatRegister(),
XMM0);
break;
@@ -766,7 +759,7 @@
uint32_t index = gp_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
- return X86_64CpuLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1));
}
@@ -777,7 +770,7 @@
stack_index_ += 2;
if (index < calling_convention.GetNumberOfRegisters()) {
gp_index_ += 1;
- return X86_64CpuLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
gp_index_ += 2;
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2));
@@ -788,8 +781,7 @@
uint32_t index = fp_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
- return Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(
- calling_convention.GetFpuRegisterAt(index)));
+ return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1));
}
@@ -799,8 +791,7 @@
uint32_t index = fp_index_++;
stack_index_ += 2;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
- return Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(
- calling_convention.GetFpuRegisterAt(index)));
+ return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2));
}
@@ -818,7 +809,7 @@
}
void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
invoke->GetIndexInDexCache() * heap_reference_size;
@@ -850,7 +841,7 @@
void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(X86_64CpuLocation(RDI));
+ locations->AddTemp(Location::RegisterLocation(RDI));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); i++) {
@@ -866,7 +857,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- locations->SetOut(X86_64CpuLocation(RAX));
+ locations->SetOut(Location::RegisterLocation(RAX));
break;
case Primitive::kPrimVoid:
@@ -874,14 +865,13 @@
case Primitive::kPrimDouble:
case Primitive::kPrimFloat:
- locations->SetOut(
- Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ locations->SetOut(Location::FpuRegisterLocation(XMM0));
break;
}
}
void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -892,7 +882,7 @@
__ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
__ movq(temp, Address(temp, class_offset));
} else {
- __ movq(temp, Address(receiver.AsX86_64().AsCpuRegister(), class_offset));
+ __ movq(temp, Address(receiver.As<CpuRegister>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -943,28 +933,28 @@
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ addl(first.AsX86_64().AsCpuRegister(), second.AsX86_64().AsCpuRegister());
+ __ addl(first.As<CpuRegister>(), second.As<CpuRegister>());
} else if (second.IsConstant()) {
HConstant* instruction = second.GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ addl(first.AsX86_64().AsCpuRegister(), imm);
+ __ addl(first.As<CpuRegister>(), imm);
} else {
- __ addl(first.AsX86_64().AsCpuRegister(),
+ __ addl(first.As<CpuRegister>(),
Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ addq(first.AsX86_64().AsCpuRegister(), second.AsX86_64().AsCpuRegister());
+ __ addq(first.As<CpuRegister>(), second.As<CpuRegister>());
break;
}
case Primitive::kPrimFloat: {
if (second.IsFpuRegister()) {
- __ addss(first.AsX86_64().AsXmmRegister(), second.AsX86_64().AsXmmRegister());
+ __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addss(first.AsX86_64().AsXmmRegister(),
+ __ addss(first.As<XmmRegister>(),
Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
@@ -972,10 +962,9 @@
case Primitive::kPrimDouble: {
if (second.IsFpuRegister()) {
- __ addsd(first.AsX86_64().AsXmmRegister(), second.AsX86_64().AsXmmRegister());
+ __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addsd(first.AsX86_64().AsXmmRegister(),
- Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ addsd(first.As<XmmRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
@@ -1016,26 +1005,26 @@
void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
- DCHECK_EQ(locations->InAt(0).AsX86_64().AsCpuRegister().AsRegister(),
- locations->Out().AsX86_64().AsCpuRegister().AsRegister());
+ DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
+ locations->Out().As<CpuRegister>().AsRegister());
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ subl(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
} else if (locations->InAt(1).IsConstant()) {
HConstant* instruction = locations->InAt(1).GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(), imm);
+ __ subl(locations->InAt(0).As<CpuRegister>(), imm);
} else {
- __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ __ subl(locations->InAt(0).As<CpuRegister>(),
Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ subq(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ subq(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
break;
}
@@ -1055,9 +1044,9 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(X86_64CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(X86_64CpuLocation(calling_convention.GetRegisterAt(1)));
- locations->SetOut(X86_64CpuLocation(RAX));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(RAX));
}
void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) {
@@ -1097,9 +1086,9 @@
void InstructionCodeGeneratorX86_64::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
- DCHECK_EQ(locations->InAt(0).AsX86_64().AsCpuRegister().AsRegister(),
- locations->Out().AsX86_64().AsCpuRegister().AsRegister());
- __ xorq(locations->Out().AsX86_64().AsCpuRegister(), Immediate(1));
+ DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
+ locations->Out().As<CpuRegister>().AsRegister());
+ __ xorq(locations->Out().As<CpuRegister>(), Immediate(1));
}
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
@@ -1132,8 +1121,8 @@
void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister value = locations->InAt(1).AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
Primitive::Type field_type = instruction->GetFieldType();
@@ -1154,8 +1143,8 @@
case Primitive::kPrimNot: {
__ movl(Address(obj, offset), value);
if (field_type == Primitive::kPrimNot) {
- CpuRegister temp = locations->GetTemp(0).AsX86_64().AsCpuRegister();
- CpuRegister card = locations->GetTemp(1).AsX86_64().AsCpuRegister();
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
@@ -1184,8 +1173,8 @@
void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister out = locations->Out().As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
switch (instruction->GetType()) {
@@ -1246,7 +1235,7 @@
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.AsX86_64().AsCpuRegister(), Immediate(0));
+ __ cmpl(obj.As<CpuRegister>(), Immediate(0));
} else if (obj.IsStackSlot()) {
__ cmpl(Address(CpuRegister(RSP), obj.GetStackIndex()), Immediate(0));
} else {
@@ -1269,54 +1258,54 @@
void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movzxb(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset));
+ __ movzxb(out, Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movsxb(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset));
+ __ movsxb(out, Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movsxw(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset));
+ __ movsxw(out, Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset));
}
break;
}
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movzxw(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset));
+ __ movzxw(out, Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset));
}
break;
}
@@ -1325,24 +1314,24 @@
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
} else {
- __ movl(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset));
+ __ movl(out, Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset));
}
break;
}
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movq(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
} else {
- __ movq(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_8, data_offset));
+ __ movq(out, Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset));
}
break;
}
@@ -1363,9 +1352,9 @@
instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
if (is_object) {
InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, X86_64CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, X86_64CpuLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, X86_64CpuLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
locations->SetInAt(
@@ -1381,7 +1370,7 @@
void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
@@ -1393,16 +1382,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movb(Address(obj, offset), value.As<CpuRegister>());
} else {
__ movb(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movb(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movb(Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset),
+ value.As<CpuRegister>());
} else {
- __ movb(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset),
+ __ movb(Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1415,16 +1404,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movw(Address(obj, offset), value.As<CpuRegister>());
} else {
__ movw(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movw(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
+ value.As<CpuRegister>());
} else {
- __ movw(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset),
+ __ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1436,16 +1425,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movl(Address(obj, offset), value.As<CpuRegister>());
} else {
__ movl(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movl(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
+ value.As<CpuRegister>());
} else {
- __ movl(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset),
+ __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1464,11 +1453,11 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
DCHECK(value.IsRegister());
- __ movq(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movq(Address(obj, offset), value.As<CpuRegister>());
} else {
DCHECK(value.IsRegister());
- __ movq(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_8, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movq(Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset),
+ value.As<CpuRegister>());
}
break;
}
@@ -1492,8 +1481,8 @@
void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movl(out, Address(obj, offset));
}
@@ -1513,8 +1502,8 @@
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- CpuRegister index = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister length = locations->InAt(1).AsX86_64().AsCpuRegister();
+ CpuRegister index = locations->InAt(0).As<CpuRegister>();
+ CpuRegister length = locations->InAt(1).As<CpuRegister>();
__ cmpl(index, length);
__ j(kAboveEqual, slow_path->GetEntryLabel());
@@ -1596,18 +1585,18 @@
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ movq(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsCpuRegister());
+ __ movq(destination.As<CpuRegister>(), source.As<CpuRegister>());
} else if (destination.IsStackSlot()) {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
} else {
DCHECK(destination.IsDoubleStackSlot());
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ movl(destination.AsX86_64().AsX86_64().AsCpuRegister(),
+ __ movl(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(destination.IsStackSlot());
@@ -1616,7 +1605,7 @@
}
} else if (source.IsDoubleStackSlot()) {
if (destination.IsRegister()) {
- __ movq(destination.AsX86_64().AsX86_64().AsCpuRegister(),
+ __ movq(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(destination.IsDoubleStackSlot());
@@ -1628,14 +1617,14 @@
if (constant->IsIntConstant()) {
Immediate imm(constant->AsIntConstant()->GetValue());
if (destination.IsRegister()) {
- __ movl(destination.AsX86_64().AsCpuRegister(), imm);
+ __ movl(destination.As<CpuRegister>(), imm);
} else {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
}
} else if (constant->IsLongConstant()) {
int64_t value = constant->AsLongConstant()->GetValue();
if (destination.IsRegister()) {
- __ movq(destination.AsX86_64().AsCpuRegister(), Immediate(value));
+ __ movq(destination.As<CpuRegister>(), Immediate(value));
} else {
__ movq(CpuRegister(TMP), Immediate(value));
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
@@ -1692,17 +1681,17 @@
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- __ xchgq(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsCpuRegister());
+ __ xchgq(destination.As<CpuRegister>(), source.As<CpuRegister>());
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange32(source.AsX86_64().AsCpuRegister(), destination.GetStackIndex());
+ Exchange32(source.As<CpuRegister>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange32(destination.AsX86_64().AsCpuRegister(), source.GetStackIndex());
+ Exchange32(destination.As<CpuRegister>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange32(destination.GetStackIndex(), source.GetStackIndex());
} else if (source.IsRegister() && destination.IsDoubleStackSlot()) {
- Exchange64(source.AsX86_64().AsCpuRegister(), destination.GetStackIndex());
+ Exchange64(source.As<CpuRegister>(), destination.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsRegister()) {
- Exchange64(destination.AsX86_64().AsCpuRegister(), source.GetStackIndex());
+ Exchange64(destination.As<CpuRegister>(), source.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
Exchange64(destination.GetStackIndex(), source.GetStackIndex());
} else {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index c81f785..bdaf15f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -186,7 +186,7 @@
}
virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual ManagedRegister AllocateFreeRegister(
+ virtual Location AllocateFreeRegister(
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 686a0b0..b4eb89d 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -123,9 +123,9 @@
void DumpLocation(Location location, Primitive::Type type) {
if (location.IsRegister()) {
if (type == Primitive::kPrimDouble || type == Primitive::kPrimFloat) {
- codegen_.DumpFloatingPointRegister(output_, location.reg().RegId());
+ codegen_.DumpFloatingPointRegister(output_, location.reg());
} else {
- codegen_.DumpCoreRegister(output_, location.reg().RegId());
+ codegen_.DumpCoreRegister(output_, location.reg());
}
} else if (location.IsConstant()) {
output_ << "constant";
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 7b09241..1637484 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -55,7 +55,7 @@
: Location::RequiresRegister();
}
-Location Location::ByteRegisterOrConstant(ManagedRegister reg, HInstruction* instruction) {
+Location Location::ByteRegisterOrConstant(int reg, HInstruction* instruction) {
return instruction->IsConstant()
? Location::ConstantLocation(instruction->AsConstant())
: Location::RegisterLocation(reg);
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 5f85b6a..ac44a42 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -21,7 +21,6 @@
#include "base/bit_vector.h"
#include "utils/allocation.h"
#include "utils/growable_array.h"
-#include "utils/managed_register.h"
namespace art {
@@ -45,21 +44,26 @@
kRegister = 4, // Core register.
// We do not use the value 5 because it conflicts with kLocationConstantMask.
- kDoNotUse = 5,
+ kDoNotUse5 = 5,
kFpuRegister = 6, // Floating point processor.
+ kRegisterPair = 7,
+
// On 32bits architectures, quick can pass a long where the
// low bits are in the last parameter register, and the high
// bits are in a stack slot. The kQuickParameter kind is for
// handling this special case.
- kQuickParameter = 7,
+ kQuickParameter = 8,
+
+ // We do not use the value 9 because it conflicts with kLocationConstantMask.
+ kDoNotUse9 = 9,
// Unallocated location represents a location that is not fixed and can be
// allocated by a register allocator. Each unallocated location has
// a policy that specifies what kind of location is suitable. Payload
// contains register allocation policy.
- kUnallocated = 8,
+ kUnallocated = 10,
};
Location() : value_(kInvalid) {
@@ -71,6 +75,7 @@
COMPILE_ASSERT((kRegister & kLocationConstantMask) != kConstant, TagError);
COMPILE_ASSERT((kQuickParameter & kLocationConstantMask) != kConstant, TagError);
COMPILE_ASSERT((kFpuRegister & kLocationConstantMask) != kConstant, TagError);
+ COMPILE_ASSERT((kRegisterPair & kLocationConstantMask) != kConstant, TagError);
COMPILE_ASSERT((kConstant & kLocationConstantMask) == kConstant, TagError);
DCHECK(!IsValid());
@@ -111,12 +116,16 @@
}
// Register locations.
- static Location RegisterLocation(ManagedRegister reg) {
- return Location(kRegister, reg.RegId());
+ static Location RegisterLocation(int reg) {
+ return Location(kRegister, reg);
}
- static Location FpuRegisterLocation(ManagedRegister reg) {
- return Location(kFpuRegister, reg.RegId());
+ static Location FpuRegisterLocation(int reg) {
+ return Location(kFpuRegister, reg);
+ }
+
+ static Location RegisterPairLocation(int low, int high) {
+ return Location(kRegisterPair, low << 16 | high);
}
bool IsRegister() const {
@@ -127,15 +136,36 @@
return GetKind() == kFpuRegister;
}
- ManagedRegister reg() const {
- DCHECK(IsRegister() || IsFpuRegister());
- return static_cast<ManagedRegister>(GetPayload());
+ bool IsRegisterPair() const {
+ return GetKind() == kRegisterPair;
}
- static uword EncodeStackIndex(intptr_t stack_index) {
+ int reg() const {
+ DCHECK(IsRegister() || IsFpuRegister());
+ return GetPayload();
+ }
+
+ template <typename T>
+ T As() const {
+ return static_cast<T>(reg());
+ }
+
+ template <typename T>
+ T AsRegisterPairLow() const {
+ DCHECK(IsRegisterPair());
+ return static_cast<T>(GetPayload() >> 16);
+ }
+
+ template <typename T>
+ T AsRegisterPairHigh() const {
+ DCHECK(IsRegisterPair());
+ return static_cast<T>(GetPayload() & 0xFFFF);
+ }
+
+ static uintptr_t EncodeStackIndex(intptr_t stack_index) {
DCHECK(-kStackIndexBias <= stack_index);
DCHECK(stack_index < kStackIndexBias);
- return static_cast<uword>(kStackIndexBias + stack_index);
+ return static_cast<uintptr_t>(kStackIndexBias + stack_index);
}
static Location StackSlot(intptr_t stack_index) {
@@ -187,10 +217,6 @@
return GetKind() == kQuickParameter;
}
- arm::ArmManagedRegister AsArm() const;
- x86::X86ManagedRegister AsX86() const;
- x86_64::X86_64ManagedRegister AsX86_64() const;
-
Kind GetKind() const {
return IsConstant() ? kConstant : KindField::Decode(value_);
}
@@ -209,7 +235,9 @@
case kUnallocated: return "U";
case kConstant: return "C";
case kFpuRegister: return "F";
- case kDoNotUse:
+ case kRegisterPair: return "RP";
+ case kDoNotUse5: // fall-through
+ case kDoNotUse9:
LOG(FATAL) << "Should not use this location kind";
}
UNREACHABLE();
@@ -246,7 +274,7 @@
}
static Location RegisterOrConstant(HInstruction* instruction);
- static Location ByteRegisterOrConstant(ManagedRegister reg, HInstruction* instruction);
+ static Location ByteRegisterOrConstant(int reg, HInstruction* instruction);
// The location of the first input to the instruction will be
// used to replace this unallocated location.
@@ -299,8 +327,12 @@
RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
void Add(Location loc) {
- // TODO: floating point registers.
- core_registers_ |= (1 << loc.reg().RegId());
+ if (loc.IsRegister()) {
+ core_registers_ |= (1 << loc.reg());
+ } else {
+ DCHECK(loc.IsFpuRegister());
+ floating_point_registers_ |= (1 << loc.reg());
+ }
}
bool ContainsCoreRegister(uint32_t id) {
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index cadd3c5..c71d93e 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -170,8 +170,7 @@
DCHECK_NE(blocked, if_scratch);
int scratch = -1;
for (int reg = 0; reg < register_count; ++reg) {
- if ((blocked != reg) &&
- IsScratchLocation(Location::RegisterLocation(ManagedRegister(reg)))) {
+ if ((blocked != reg) && IsScratchLocation(Location::RegisterLocation(reg))) {
scratch = reg;
break;
}
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 863e107..2bdcc61 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -32,9 +32,9 @@
message_ << " ";
}
message_ << "("
- << move->GetSource().reg().RegId()
+ << move->GetSource().reg()
<< " -> "
- << move->GetDestination().reg().RegId()
+ << move->GetDestination().reg()
<< ")";
}
@@ -44,9 +44,9 @@
message_ << " ";
}
message_ << "("
- << move->GetSource().reg().RegId()
+ << move->GetSource().reg()
<< " <-> "
- << move->GetDestination().reg().RegId()
+ << move->GetDestination().reg()
<< ")";
}
@@ -70,8 +70,8 @@
HParallelMove* moves = new (allocator) HParallelMove(allocator);
for (size_t i = 0; i < number_of_moves; ++i) {
moves->AddMove(new (allocator) MoveOperands(
- Location::RegisterLocation(ManagedRegister(operands[i][0])),
- Location::RegisterLocation(ManagedRegister(operands[i][1])),
+ Location::RegisterLocation(operands[i][0]),
+ Location::RegisterLocation(operands[i][1]),
nullptr));
}
return moves;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 3ee1afe..a9d159e 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -95,7 +95,7 @@
size_t start,
size_t end,
Primitive::Type type) {
- int reg = location.reg().RegId();
+ int reg = location.reg();
LiveInterval* interval = physical_register_intervals_.Get(reg);
if (interval == nullptr) {
interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
@@ -187,7 +187,7 @@
if (locations->WillCall()) {
// Block all registers.
for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
- BlockRegister(Location::RegisterLocation(ManagedRegister(i)),
+ BlockRegister(Location::RegisterLocation(i),
position,
position + 1,
Primitive::kPrimInt);
@@ -216,7 +216,7 @@
if (output.IsRegister()) {
// Shift the interval's start by one to account for the blocked register.
current->SetFrom(position + 1);
- current->SetRegister(output.reg().RegId());
+ current->SetRegister(output.reg());
BlockRegister(output, position, position + 1, instruction->GetType());
} else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
current->SetSpillSlot(output.GetStackIndex());
@@ -884,7 +884,7 @@
if (current->HasSpillSlot() && current->HasRegister()) {
// We spill eagerly, so move must be at definition.
InsertMoveAfter(interval->GetDefinedBy(),
- Location::RegisterLocation(ManagedRegister(interval->GetRegister())),
+ Location::RegisterLocation(interval->GetRegister()),
interval->NeedsTwoSpillSlots()
? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
: Location::StackSlot(interval->GetParent()->GetSpillSlot()));
@@ -938,7 +938,7 @@
case Location::kRegister: {
locations->AddLiveRegister(source);
if (current->GetType() == Primitive::kPrimNot) {
- locations->SetRegisterBit(source.reg().RegId());
+ locations->SetRegisterBit(source.reg());
}
break;
}
@@ -1106,7 +1106,7 @@
}
LocationSummary* locations = at->GetLocations();
locations->SetTempAt(
- temp_index++, Location::RegisterLocation(ManagedRegister(temp->GetRegister())));
+ temp_index++, Location::RegisterLocation(temp->GetRegister()));
}
}
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index b7d56e6..7517a6b 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -25,7 +25,6 @@
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "utils/arena_allocator.h"
-#include "utils/managed_register.h"
#include "gtest/gtest.h"
@@ -525,7 +524,7 @@
// Set the phi to a specific register, and check that the inputs get allocated
// the same register.
- phi->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ phi->GetLocations()->SetOut(Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -542,7 +541,7 @@
// Set input1 to a specific register, and check that the phi and other input get allocated
// the same register.
- input1->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ input1->GetLocations()->SetOut(Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -559,7 +558,7 @@
// Set input2 to a specific register, and check that the phi and other input get allocated
// the same register.
- input2->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ input2->GetLocations()->SetOut(Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -620,7 +619,7 @@
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
- ret->GetLocations()->SetInAt(0, Location::RegisterLocation(ManagedRegister(2)));
+ ret->GetLocations()->SetInAt(0, Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -682,7 +681,7 @@
liveness.Analyze();
// check that both adds get the same register.
- first_add->InputAt(0)->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ first_add->InputAt(0)->GetLocations()->SetOut(Location::RegisterLocation(2));
ASSERT_EQ(first_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
ASSERT_EQ(second_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 1de90b4..f0edc64 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -319,8 +319,8 @@
if (user->IsPhi()) {
// If the phi has a register, try to use the same.
Location phi_location = user->GetLiveInterval()->ToLocation();
- if (phi_location.IsRegister() && free_until[phi_location.reg().RegId()] >= use_position) {
- return phi_location.reg().RegId();
+ if (phi_location.IsRegister() && free_until[phi_location.reg()] >= use_position) {
+ return phi_location.reg();
}
const GrowableArray<HBasicBlock*>& predecessors = user->GetBlock()->GetPredecessors();
// If the instruction dies at the phi assignment, we can try having the
@@ -333,8 +333,8 @@
HInstruction* input = user->InputAt(i);
Location location = input->GetLiveInterval()->GetLocationAt(
predecessors.Get(i)->GetLifetimeEnd() - 1);
- if (location.IsRegister() && free_until[location.reg().RegId()] >= use_position) {
- return location.reg().RegId();
+ if (location.IsRegister() && free_until[location.reg()] >= use_position) {
+ return location.reg();
}
}
}
@@ -345,8 +345,8 @@
// We use the user's lifetime position - 1 (and not `use_position`) because the
// register is blocked at the beginning of the user.
size_t position = user->GetLifetimePosition() - 1;
- if (expected.IsRegister() && free_until[expected.reg().RegId()] >= position) {
- return expected.reg().RegId();
+ if (expected.IsRegister() && free_until[expected.reg()] >= position) {
+ return expected.reg();
}
}
}
@@ -369,7 +369,7 @@
// be reused.
Location input_location = input_interval.ToLocation();
if (input_location.IsRegister()) {
- return input_location.reg().RegId();
+ return input_location.reg();
}
}
}
@@ -385,7 +385,7 @@
// be reused.
Location location = input_interval.ToLocation();
if (location.IsRegister()) {
- return location.reg().RegId();
+ return location.reg();
}
}
}
@@ -399,7 +399,7 @@
Location LiveInterval::ToLocation() const {
if (HasRegister()) {
- return Location::RegisterLocation(ManagedRegister(GetRegister()));
+ return Location::RegisterLocation(GetRegister());
} else {
HInstruction* defined_by = GetParent()->GetDefinedBy();
if (defined_by->IsConstant()) {