MIPS32: MoveLocation refactoring
Move32 and Move64 are removed, so MoveLocation now handles all cases.
The reason for this is the 128-bit (SIMDStackSlot, VectorRegister)
moves, which will be added in a follow-up patch.
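
As a rough sketch only (the IsSIMDStackSlot() check and the exact code
below are assumptions about that follow-up, not part of this change),
the unified MoveLocation could then grow a 128-bit branch such as:

    } else if (destination.IsSIMDStackSlot()) {
      DCHECK(source.IsSIMDStackSlot())
          << "Cannot move from " << source << " to " << destination;
      // Copy 16 bytes word by word through TMP, mirroring the double
      // stack slot case (MSA loads/stores could be used instead when a
      // VectorRegister is involved).
      for (int32_t i = 0; i < 16; i += kMipsWordSize) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + i);
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex() + i);
      }
    }
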
Test: mma test-art-host-gtest
Test: ./testrunner.py --optimizing --target in QEMU
Change-Id: I93496e74874f77337b11b2265aa4b470bc7c6ce2
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b39d412..6828fed 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -950,7 +950,9 @@
this);
CheckEntrypointTypes<
kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
- mips_codegen->Move32(out_, calling_convention.GetReturnLocation(Primitive::kPrimNot));
+ mips_codegen->MoveLocation(out_,
+ calling_convention.GetReturnLocation(Primitive::kPrimNot),
+ Primitive::kPrimNot);
RestoreLiveRegisters(codegen, locations);
__ B(GetExitLabel());
@@ -1013,13 +1015,17 @@
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- mips_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
+ mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ root_,
+ Primitive::kPrimNot);
mips_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
- mips_codegen->Move32(out_, calling_convention.GetReturnLocation(Primitive::kPrimNot));
+ mips_codegen->MoveLocation(out_,
+ calling_convention.GetReturnLocation(Primitive::kPrimNot),
+ Primitive::kPrimNot);
RestoreLiveRegisters(codegen, locations);
__ B(GetExitLabel());
@@ -1407,106 +1413,92 @@
__ Bind(GetLabelOf(block));
}
-void CodeGeneratorMIPS::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
- if (src.Equals(dst)) {
- return;
- }
-
- if (src.IsConstant()) {
- MoveConstant(dst, src.GetConstant());
- } else {
- if (Primitive::Is64BitType(dst_type)) {
- Move64(dst, src);
- } else {
- Move32(dst, src);
- }
- }
-}
-
-void CodeGeneratorMIPS::Move32(Location destination, Location source) {
+void CodeGeneratorMIPS::MoveLocation(Location destination,
+ Location source,
+ Primitive::Type dst_type) {
if (source.Equals(destination)) {
return;
}
- if (destination.IsRegister()) {
- if (source.IsRegister()) {
- __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
- } else if (source.IsFpuRegister()) {
- __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
- } else {
- DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ if (source.IsConstant()) {
+ MoveConstant(destination, source.GetConstant());
+ } else {
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
+ } else if (source.IsFpuRegister()) {
+ __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
__ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
- }
- } else if (destination.IsFpuRegister()) {
- if (source.IsRegister()) {
- __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
- } else if (source.IsFpuRegister()) {
- __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ }
+ } else if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
+ __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ } else if (source.IsFpuRegister()) {
+ Register dst_high = destination.AsRegisterPairHigh<Register>();
+ Register dst_low = destination.AsRegisterPairLow<Register>();
+ FRegister src = source.AsFpuRegister<FRegister>();
+ __ Mfc1(dst_low, src);
+ __ MoveFromFpuHigh(dst_high, src);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ int32_t off = source.GetStackIndex();
+ Register r = destination.AsRegisterPairLow<Register>();
+ __ LoadFromOffset(kLoadDoubleword, r, SP, off);
+ }
+ } else if (destination.IsFpuRegister()) {
+ if (source.IsRegister()) {
+ DCHECK(!Primitive::Is64BitType(dst_type));
+ __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
+ } else if (source.IsRegisterPair()) {
+ DCHECK(Primitive::Is64BitType(dst_type));
+ FRegister dst = destination.AsFpuRegister<FRegister>();
+ Register src_high = source.AsRegisterPairHigh<Register>();
+ Register src_low = source.AsRegisterPairLow<Register>();
+ __ Mtc1(src_low, dst);
+ __ MoveToFpuHigh(src_high, dst);
+ } else if (source.IsFpuRegister()) {
+ if (Primitive::Is64BitType(dst_type)) {
+ __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK_EQ(dst_type, Primitive::kPrimFloat);
+ __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ }
+ } else if (source.IsDoubleStackSlot()) {
+ DCHECK(Primitive::Is64BitType(dst_type));
+ __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ } else {
+ DCHECK(!Primitive::Is64BitType(dst_type));
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ }
+ } else if (destination.IsDoubleStackSlot()) {
+ int32_t dst_offset = destination.GetStackIndex();
+ if (source.IsRegisterPair()) {
+ __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, dst_offset);
+ } else if (source.IsFpuRegister()) {
+ __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
+ __ StoreToOffset(kStoreWord, TMP, SP, dst_offset + 4);
+ }
} else {
- DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
- __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
- }
- } else {
- DCHECK(destination.IsStackSlot()) << destination;
- if (source.IsRegister()) {
- __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
- } else if (source.IsFpuRegister()) {
- __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
- } else {
- DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
- __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
- __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
- }
- }
-}
-
-void CodeGeneratorMIPS::Move64(Location destination, Location source) {
- if (source.Equals(destination)) {
- return;
- }
-
- if (destination.IsRegisterPair()) {
- if (source.IsRegisterPair()) {
- __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
- __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
- } else if (source.IsFpuRegister()) {
- Register dst_high = destination.AsRegisterPairHigh<Register>();
- Register dst_low = destination.AsRegisterPairLow<Register>();
- FRegister src = source.AsFpuRegister<FRegister>();
- __ Mfc1(dst_low, src);
- __ MoveFromFpuHigh(dst_high, src);
- } else {
- DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
- int32_t off = source.GetStackIndex();
- Register r = destination.AsRegisterPairLow<Register>();
- __ LoadFromOffset(kLoadDoubleword, r, SP, off);
- }
- } else if (destination.IsFpuRegister()) {
- if (source.IsRegisterPair()) {
- FRegister dst = destination.AsFpuRegister<FRegister>();
- Register src_high = source.AsRegisterPairHigh<Register>();
- Register src_low = source.AsRegisterPairLow<Register>();
- __ Mtc1(src_low, dst);
- __ MoveToFpuHigh(src_high, dst);
- } else if (source.IsFpuRegister()) {
- __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
- } else {
- DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
- __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
- }
- } else {
- DCHECK(destination.IsDoubleStackSlot()) << destination;
- int32_t off = destination.GetStackIndex();
- if (source.IsRegisterPair()) {
- __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, off);
- } else if (source.IsFpuRegister()) {
- __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, off);
- } else {
- DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
- __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
- __ StoreToOffset(kStoreWord, TMP, SP, off);
- __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
- __ StoreToOffset(kStoreWord, TMP, SP, off + 4);
+ DCHECK(destination.IsStackSlot()) << destination;
+ int32_t dst_offset = destination.GetStackIndex();
+ if (source.IsRegister()) {
+ __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, dst_offset);
+ } else if (source.IsFpuRegister()) {
+ __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
+ }
}
}
}
@@ -2285,7 +2277,7 @@
Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
if (use_imm) {
if (shift_value == 0) {
- codegen_->Move64(locations->Out(), locations->InAt(0));
+ codegen_->MoveLocation(locations->Out(), locations->InAt(0), type);
} else if (shift_value < kMipsBitsPerWord) {
if (has_ins_rotr) {
if (instr->IsShl()) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index e72e838d..4d73ad1 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -368,8 +368,6 @@
void Bind(HBasicBlock* block) OVERRIDE;
- void Move32(Location destination, Location source);
- void Move64(Location destination, Location source);
void MoveConstant(Location location, HConstant* c);
size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }