 compiler/dex/mir_optimization.cc            |   4
 compiler/dex/quick/mir_to_lir.cc            |  17
 compiler/optimizing/code_generator_arm64.cc | 669
 compiler/optimizing/code_generator_arm64.h  |  26
 runtime/arch/x86/quick_entrypoints_x86.S    |   3
 5 files changed, 471 insertions(+), 248 deletions(-)
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index b35d51c46e..a0ad2133be 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -771,11 +771,11 @@ void MIRGraph::CombineBlocks(class BasicBlock* bb) {
if ((df_attributes & DF_IFIELD) != 0) {
// Combine only if fast, otherwise weird things can happen.
const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(throw_insn);
- ok = (df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet();
+ ok = (df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut();
} else if ((df_attributes & DF_SFIELD) != 0) {
// Combine only if fast, otherwise weird things can happen.
const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(throw_insn);
- bool fast = ((df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet());
+ bool fast = ((df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut());
// Don't combine if the SGET/SPUT can call <clinit>().
bool clinit = !field_info.IsClassInitialized() &&
(throw_insn->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0;
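Note: the swap above is the whole fix. DF_DA marks a MIR that defines vA, and the field access that writes vA is the get, so the DF_DA branch must test FastGet() and the other branch FastPut(). A standalone model of the corrected selection (the DF_DA bit value and the FieldInfo type are stand-ins for this sketch, not the ART definitions):

    #include <cstdint>

    constexpr uint64_t DF_DA = UINT64_C(1) << 0;  // assumed bit: "defines vA"

    struct FieldInfo {  // stand-in for Mir{I,S}FieldLoweringInfo
      bool fast_get, fast_put;
      bool FastGet() const { return fast_get; }
      bool FastPut() const { return fast_put; }
    };

    // DF_DA set   => the MIR defines vA => it is a get => check FastGet().
    // DF_DA clear => the MIR uses vA    => it is a put => check FastPut().
    bool IsFastFieldAccess(uint64_t df_attributes, const FieldInfo& info) {
      return ((df_attributes & DF_DA) != 0) ? info.FastGet() : info.FastPut();
    }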
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index ccaa167d6a..92ef70db7e 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -417,10 +417,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
RegLocation rl_result = mir_graph_->GetBadLoc();
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int opt_flags = mir->optimization_flags;
- uint32_t vB = mir->dalvikInsn.vB;
- uint32_t vC = mir->dalvikInsn.vC;
+ const Instruction::Code opcode = mir->dalvikInsn.opcode;
+ const int opt_flags = mir->optimization_flags;
+ const uint32_t vB = mir->dalvikInsn.vB;
+ const uint32_t vC = mir->dalvikInsn.vC;
DCHECK(CheckCorePoolSanity()) << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " @ 0x:"
<< std::hex << current_dalvik_offset_;
@@ -572,7 +572,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenThrow(rl_src[0]);
break;
- case Instruction::ARRAY_LENGTH:
+ case Instruction::ARRAY_LENGTH: {
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kRefReg);
@@ -582,7 +582,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
MarkPossibleNullPointerException(opt_flags);
StoreValue(rl_dest, rl_result);
break;
-
+ }
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO:
GenConstString(vB, rl_dest);
@@ -666,8 +666,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
}
break;
- }
-
+ }
case Instruction::IF_EQZ:
case Instruction::IF_NEZ:
case Instruction::IF_LTZ:
@@ -693,7 +692,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenCompareZeroAndBranch(opcode, rl_src[0], taken);
}
break;
- }
+ }
case Instruction::AGET_WIDE:
GenArrayGet(opt_flags, k64, rl_src[0], rl_src[1], rl_dest, 3);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f9cf7d87af..4dc836f412 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -38,15 +38,20 @@ namespace art {
namespace arm64 {
-static bool IsFPType(Primitive::Type type) {
- return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
-}
-
// TODO: clean-up some of the constant definitions.
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
namespace {
+
+bool IsFPType(Primitive::Type type) {
+ return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
+}
+
+bool Is64BitType(Primitive::Type type) {
+ return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
+}
+
// Convenience helpers to ease conversion to and from VIXL operands.
int VIXLRegCodeFromART(int code) {
@@ -101,6 +106,28 @@ Register InputRegisterAt(HInstruction* instr, int input_index) {
instr->InputAt(input_index)->GetType());
}
+FPRegister DRegisterFrom(Location location) {
+ return FPRegister::DRegFromCode(location.reg());
+}
+
+FPRegister SRegisterFrom(Location location) {
+ return FPRegister::SRegFromCode(location.reg());
+}
+
+FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(IsFPType(type));
+ return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
+}
+
+FPRegister OutputFPRegister(HInstruction* instr) {
+ return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+ return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
int64_t Int64ConstantFrom(Location location) {
HConstant* instr = location.GetConstant();
return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
@@ -138,6 +165,10 @@ Location LocationFrom(const Register& reg) {
return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
}
+Location LocationFrom(const FPRegister& fpreg) {
+ return Location::FpuRegisterLocation(fpreg.code());
+}
+
} // namespace
inline Condition ARM64Condition(IfCondition cond) {
@@ -154,6 +185,22 @@ inline Condition ARM64Condition(IfCondition cond) {
return nv; // Unreachable.
}
+Location ARM64ReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
+ // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
+ // but we use the exact registers for clarity.
+ if (return_type == Primitive::kPrimFloat) {
+ return LocationFrom(s0);
+ } else if (return_type == Primitive::kPrimDouble) {
+ return LocationFrom(d0);
+ } else if (return_type == Primitive::kPrimLong) {
+ return LocationFrom(x0);
+ } else {
+ return LocationFrom(w0);
+ }
+}
+
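Note: this helper encodes the AArch64 procedure-call standard's result rule (integer and reference results in r0, floating-point results in v0, sized as w0/x0 and s0/d0), and both calling-convention classes in this patch now delegate their GetReturnLocation to it. A usage sketch; the assertions hold even though, as the comment says, the w/x and s/d variants produce the same Location:

    // Usage sketch: the return location depends only on the return type.
    DCHECK(ARM64ReturnLocation(Primitive::kPrimInt).Equals(LocationFrom(w0)));
    DCHECK(ARM64ReturnLocation(Primitive::kPrimLong).Equals(LocationFrom(x0)));
    DCHECK(ARM64ReturnLocation(Primitive::kPrimFloat).Equals(LocationFrom(s0)));
    DCHECK(ARM64ReturnLocation(Primitive::kPrimDouble).Equals(LocationFrom(d0)));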
static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -177,11 +224,7 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegi
};
Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
- DCHECK_NE(return_type, Primitive::kPrimVoid);
- if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented return type " << return_type;
- }
- return LocationFrom(x0);
+ return ARM64ReturnLocation(return_type);
}
#define __ reinterpret_cast<Arm64Assembler*>(codegen->GetAssembler())->vixl_masm_->
@@ -289,35 +332,25 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
LOG(FATAL) << "Unreachable type " << type;
}
- if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented type " << type;
+ if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
+ next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
+ } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
+ next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
}
- if (gp_index_ < calling_convention.GetNumberOfRegisters()) {
- next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_));
- if (type == Primitive::kPrimLong) {
- // Double stack slot reserved on the stack.
- stack_index_++;
- }
- } else { // Stack.
- if (type == Primitive::kPrimLong) {
- next_location = Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_));
- // Double stack slot reserved on the stack.
- stack_index_++;
- } else {
- next_location = Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_));
- }
- }
- // Move to the next register/stack slot.
- gp_index_++;
- stack_index_++;
+ // Space on the stack is reserved for all arguments.
+ stack_index_ += Is64BitType(type) ? 2 : 1;
return next_location;
}
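Note: the rewritten walk keeps independent cursors for the two register files (fp_index_ and gp_index_), falls back to the stack only when the matching file is exhausted, and always advances stack_index_ so that every argument owns a stack home, two slots for 64-bit types. A standalone sketch of the same walk (the register counts are assumptions for illustration, not the ART constants):

    constexpr int kGpRegs = 8, kFpRegs = 8;  // assumed counts for the sketch

    // Returns where the next argument of the given kind lands, advancing
    // the cursors exactly as GetNextLocation above does.
    const char* NextLocation(bool is_fp, bool is_64bit,
                             int* gp, int* fp, int* stack) {
      const char* where;
      if (is_fp && *fp < kFpRegs) {
        where = "FP register"; ++*fp;
      } else if (!is_fp && *gp < kGpRegs) {
        where = "core register"; ++*gp;
      } else {
        where = "stack slot";
      }
      *stack += is_64bit ? 2 : 1;  // a slot is reserved for every argument
      return where;
    }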
CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
: CodeGenerator(graph,
kNumberOfAllocatableRegisters,
- kNumberOfAllocatableFloatingPointRegisters,
+ kNumberOfAllocatableFPRegisters,
kNumberOfAllocatableRegisterPairs),
block_labels_(nullptr),
location_builder_(graph, this),
@@ -359,35 +392,6 @@ void CodeGeneratorARM64::Bind(HBasicBlock* block) {
__ Bind(GetLabelOf(block));
}
-void CodeGeneratorARM64::MoveHelper(Location destination,
- Location source,
- Primitive::Type type) {
- if (source.Equals(destination)) {
- return;
- }
- if (destination.IsRegister()) {
- Register dst = RegisterFrom(destination, type);
- if (source.IsRegister()) {
- Register src = RegisterFrom(source, type);
- DCHECK(dst.IsSameSizeAndType(src));
- __ Mov(dst, src);
- } else {
- DCHECK(dst.Is64Bits() || !source.IsDoubleStackSlot());
- __ Ldr(dst, StackOperandFrom(source));
- }
- } else {
- DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
- if (source.IsRegister()) {
- __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
- } else {
- UseScratchRegisterScope temps(assembler_.vixl_masm_);
- Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
- __ Ldr(temp, StackOperandFrom(source));
- __ Str(temp, StackOperandFrom(destination));
- }
- }
-}
-
void CodeGeneratorARM64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
@@ -397,6 +401,7 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
}
Primitive::Type type = instruction->GetType();
+ DCHECK_NE(type, Primitive::kPrimVoid);
if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
@@ -418,20 +423,10 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
MoveHelper(location, temp_location, type);
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- switch (type) {
- case Primitive::kPrimNot:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- MoveHelper(location, Location::StackSlot(stack_slot), type);
- break;
- case Primitive::kPrimLong:
- MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
- break;
- default:
- LOG(FATAL) << "Unimplemented type" << type;
+ if (Is64BitType(type)) {
+ MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
+ } else {
+ MoveHelper(location, Location::StackSlot(stack_slot), type);
}
} else {
@@ -446,24 +441,25 @@ size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
+
switch (type) {
case Primitive::kPrimNot:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
case Primitive::kPrimLong:
- return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented type " << type;
- break;
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
case Primitive::kPrimVoid:
- default:
LOG(FATAL) << "Unexpected type " << type;
}
+
LOG(FATAL) << "Unreachable";
return Location::NoLocation();
}
@@ -487,13 +483,19 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
// xSuspend (Suspend counter)
// lr
// sp is not part of the allocatable registers, so we don't need to block it.
+ // TODO: Avoid blocking callee-saved registers, and instead preserve them
+ // where necessary.
CPURegList reserved_core_registers = vixl_reserved_core_registers;
reserved_core_registers.Combine(runtime_reserved_core_registers);
- // TODO: See if we should instead allow allocating but preserve those if used.
reserved_core_registers.Combine(quick_callee_saved_registers);
while (!reserved_core_registers.IsEmpty()) {
blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
}
+ CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
+ reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
+ while (!reserved_fp_registers.IsEmpty()) {
+ blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
+ }
}
Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
@@ -501,17 +503,13 @@ Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
LOG(FATAL) << "Unreachable type " << type;
}
- if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented support for floating-point";
- }
-
- ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfXRegisters);
- DCHECK_NE(reg, -1);
- blocked_core_registers_[reg] = true;
-
if (IsFPType(type)) {
+ ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
+ DCHECK_NE(reg, -1);
return Location::FpuRegisterLocation(reg);
} else {
+ ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
+ DCHECK_NE(reg, -1);
return Location::RegisterLocation(reg);
}
}
@@ -524,8 +522,107 @@ void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg
stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}
+void CodeGeneratorARM64::MoveHelper(Location destination,
+ Location source,
+ Primitive::Type type) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ Register dst = RegisterFrom(destination, type);
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ } else {
+ __ Mov(dst, OperandFrom(source, type));
+ }
+ } else if (destination.IsFpuRegister()) {
+ FPRegister dst = FPRegisterFrom(destination, type);
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ } else if (source.IsFpuRegister()) {
+ __ Fmov(dst, FPRegisterFrom(source, type));
+ } else {
+ HConstant* cst = source.GetConstant();
+ if (cst->IsFloatConstant()) {
+ __ Fmov(dst, cst->AsFloatConstant()->GetValue());
+ } else {
+ DCHECK(cst->IsDoubleConstant());
+ __ Fmov(dst, cst->AsDoubleConstant()->GetValue());
+ }
+ }
+ } else {
+ DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
+ if (source.IsRegister()) {
+ __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
+ } else if (source.IsFpuRegister()) {
+ __ Str(FPRegisterFrom(source, type), StackOperandFrom(destination));
+ } else {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
+ __ Ldr(temp, StackOperandFrom(source));
+ __ Str(temp, StackOperandFrom(destination));
+ }
+ }
+}
+
+void CodeGeneratorARM64::Load(Primitive::Type type,
+ vixl::Register dst,
+ const vixl::MemOperand& src) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ __ Ldrb(dst, src);
+ break;
+ case Primitive::kPrimByte:
+ __ Ldrsb(dst, src);
+ break;
+ case Primitive::kPrimShort:
+ __ Ldrsh(dst, src);
+ break;
+ case Primitive::kPrimChar:
+ __ Ldrh(dst, src);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ DCHECK(dst.Is64Bits() == (type == Primitive::kPrimLong));
+ __ Ldr(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+}
+
+void CodeGeneratorARM64::Store(Primitive::Type type,
+ vixl::Register rt,
+ const vixl::MemOperand& dst) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ __ Strb(rt, dst);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ __ Strh(rt, dst);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ DCHECK(rt.Is64Bits() == (type == Primitive::kPrimLong));
+ __ Str(rt, dst);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+}
+
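Note: Load and Store centralize the choice of access width and extension per Java type: Ldrb (zero-extend) for boolean, Ldrsb (sign-extend) for byte, Ldrh for char, Ldrsh for short, and plain Ldr for int, references and long, with the DCHECK tying register width to kPrimLong; stores need no extension, so Store collapses to Strb/Strh/Str by width. Floating-point types are fatal here because both helpers take a core vixl::Register. A hypothetical FP overload (an assumption, not part of this patch) would need no extension variants at all:

    // Hypothetical FP overload (assumption, not in the patch): FP accesses
    // have no sign/zero-extension flavours, so a plain Ldr suffices and the
    // access width follows from the S/D register itself.
    void CodeGeneratorARM64::Load(Primitive::Type type,
                                  vixl::FPRegister dst,
                                  const vixl::MemOperand& src) {
      DCHECK(IsFPType(type));
      DCHECK(dst.Is64Bits() == (type == Primitive::kPrimDouble));
      __ Ldr(dst, src);
    }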
#undef __
-#define __ assembler_->vixl_masm_->
+#define __ GetAssembler()->vixl_masm_->
InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
CodeGeneratorARM64* codegen)
@@ -534,19 +631,12 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
codegen_(codegen) {}
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- M(ArrayGet) \
- M(ArraySet) \
M(ClinitCheck) \
- M(DoubleConstant) \
- M(Div) \
M(DivZeroCheck) \
- M(FloatConstant) \
M(InvokeInterface) \
M(LoadClass) \
M(LoadException) \
M(LoadString) \
- M(Neg) \
- M(NewArray) \
M(ParallelMove) \
M(StaticFieldGet) \
M(StaticFieldSet) \
@@ -583,20 +673,21 @@ void LocationsBuilderARM64::HandleAddSub(HBinaryOperation* instr) {
Primitive::Type type = instr->GetResultType();
switch (type) {
case Primitive::kPrimInt:
- case Primitive::kPrimLong: {
+ case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
- }
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
break;
+
default:
- LOG(FATAL) << "Unimplemented " << instr->DebugName() << " type " << type;
+ LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
}
}
@@ -604,28 +695,34 @@ void InstructionCodeGeneratorARM64::HandleAddSub(HBinaryOperation* instr) {
DCHECK(instr->IsAdd() || instr->IsSub());
Primitive::Type type = instr->GetType();
- Register dst = OutputRegister(instr);
- Register lhs = InputRegisterAt(instr, 0);
- Operand rhs = InputOperandAt(instr, 1);
switch (type) {
case Primitive::kPrimInt:
- case Primitive::kPrimLong:
+ case Primitive::kPrimLong: {
+ Register dst = OutputRegister(instr);
+ Register lhs = InputRegisterAt(instr, 0);
+ Operand rhs = InputOperandAt(instr, 1);
if (instr->IsAdd()) {
__ Add(dst, lhs, rhs);
} else {
__ Sub(dst, lhs, rhs);
}
break;
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected add/sub type " << type;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FPRegister dst = OutputFPRegister(instr);
+ FPRegister lhs = InputFPRegisterAt(instr, 0);
+ FPRegister rhs = InputFPRegisterAt(instr, 1);
+ if (instr->IsAdd()) {
+ __ Fadd(dst, lhs, rhs);
+ } else {
+ __ Fsub(dst, lhs, rhs);
+ }
break;
+ }
default:
- LOG(FATAL) << "Unimplemented add/sub type " << type;
+ LOG(FATAL) << "Unexpected add/sub type " << type;
}
}
@@ -637,6 +734,37 @@ void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
HandleAddSub(instruction);
}
+void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type type = instruction->GetType();
+ Register obj = InputRegisterAt(instruction, 0);
+ Register out = OutputRegister(instruction);
+ Location index = locations->InAt(1);
+ size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
+ MemOperand source(obj);
+ UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
+
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
+ source = MemOperand(obj, offset);
+ } else {
+ Register temp = temps.AcquireSameSizeAs(obj);
+ Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
+ __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
+ source = MemOperand(temp, offset);
+ }
+
+ codegen_->Load(type, out, source);
+}
+
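Note: element addresses are formed as obj + DataOffset(component_size) + (index << ComponentSizeShift(type)). With a constant index the whole displacement folds into the MemOperand immediate; with a register index the scaled index is first added into a scratch register, since an AArch64 load cannot combine a register offset with an immediate one. A worked example as comments, assuming an int[] whose data starts 12 bytes into the object (the real value comes from mirror::Array::DataOffset()):

    // Loading a[3] from an int[] (shift = 2), assumed data offset of 12:
    //   constant index: offset = 12 + (3 << 2) = 24
    //     ldr out, [obj, #24]
    //   register index i:
    //     add temp, obj, i, lsl #2   (scaled index into the scratch register)
    //     ldr out, [temp, #12]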
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
@@ -648,6 +776,53 @@ void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction)
HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
}
+void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool is_object = value_type == Primitive::kPrimNot;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (is_object) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ if (value_type == Primitive::kPrimNot) {
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAputObject).Int32Value()));
+ __ Blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+ } else {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = InputRegisterAt(instruction, 0);
+ Register value = InputRegisterAt(instruction, 2);
+ Location index = locations->InAt(1);
+ size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
+ MemOperand destination(obj);
+ UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
+
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
+ destination = MemOperand(obj, offset);
+ } else {
+ Register temp = temps.AcquireSameSizeAs(obj);
+ Register index_reg = InputRegisterAt(instruction, 1);
+ __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
+ destination = MemOperand(temp, offset);
+ }
+
+ codegen_->Store(value_type, value, destination);
+ }
+}
+
void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -715,6 +890,58 @@ void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef FOR_EACH_CONDITION_INSTRUCTION
+void LocationsBuilderARM64::VisitDiv(HDiv* div) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
+ Primitive::Type type = div->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
+ UNUSED(constant);
+ // Will be generated at use site.
+}
+
void LocationsBuilderARM64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
@@ -727,6 +954,17 @@ void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
}
}
+void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
+ UNUSED(constant);
+ // Will be generated at use site.
+}
+
void LocationsBuilderARM64::VisitGoto(HGoto* got) {
got->SetLocations(nullptr);
}
@@ -793,44 +1031,9 @@ void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
- Primitive::Type res_type = instruction->GetType();
- Register res = OutputRegister(instruction);
- Register obj = InputRegisterAt(instruction, 0);
- uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-
- switch (res_type) {
- case Primitive::kPrimBoolean: {
- __ Ldrb(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimByte: {
- __ Ldrsb(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimShort: {
- __ Ldrsh(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimChar: {
- __ Ldrh(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimLong: { // TODO: support volatile.
- DCHECK(res.IsX() == (res_type == Primitive::kPrimLong));
- __ Ldr(res, MemOperand(obj, offset));
- break;
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register res_type " << res_type;
- break;
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable res_type " << res_type;
- }
+ MemOperand field = MemOperand(InputRegisterAt(instruction, 0),
+ instruction->GetFieldOffset().Uint32Value());
+ codegen_->Load(instruction->GetType(), OutputRegister(instruction), field);
}
void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
@@ -840,43 +1043,12 @@ void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- Register obj = InputRegisterAt(instruction, 0);
+ Primitive::Type field_type = instruction->GetFieldType();
Register value = InputRegisterAt(instruction, 1);
- Primitive::Type field_type = instruction->InputAt(1)->GetType();
- uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-
- switch (field_type) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte: {
- __ Strb(value, MemOperand(obj, offset));
- break;
- }
-
- case Primitive::kPrimShort:
- case Primitive::kPrimChar: {
- __ Strh(value, MemOperand(obj, offset));
- break;
- }
-
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimLong: {
- DCHECK(value.IsX() == (field_type == Primitive::kPrimLong));
- __ Str(value, MemOperand(obj, offset));
-
- if (field_type == Primitive::kPrimNot) {
- codegen_->MarkGCCard(obj, value);
- }
- break;
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- break;
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << field_type;
+ Register obj = InputRegisterAt(instruction, 0);
+ codegen_->Store(field_type, value, MemOperand(obj, instruction->GetFieldOffset().Uint32Value()));
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(obj, value);
}
}
@@ -932,11 +1104,13 @@ void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = method;
__ Ldr(temp, MemOperand(sp, kCurrentMethodStackOffset));
// temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, MemOperand(temp.X(), mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ __ Ldr(temp, MemOperand(temp.X(),
+ mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
// temp = temp[index_in_cache];
__ Ldr(temp, MemOperand(temp.X(), index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, MemOperand(temp.X(), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ Ldr(lr, MemOperand(temp.X(),
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
// lr();
__ Blr(lr);
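Note: the dispatch above is three dependent loads followed by an indirect call, one load per comment line. A self-contained model (the struct layout is simplified for illustration; the real offsets come from mirror::ArtMethod):

    // Simplified model of the three-load static-invoke dispatch.
    struct ArtMethodSketch {
      ArtMethodSketch** dex_cache_resolved_methods_;
      void (*entry_point_from_quick_compiled_code_)();
    };

    void InvokeStaticSketch(ArtMethodSketch* caller, unsigned index_in_cache) {
      ArtMethodSketch** cache = caller->dex_cache_resolved_methods_;    // load 1
      ArtMethodSketch* callee = cache[index_in_cache];                  // load 2
      void (*entry)() = callee->entry_point_from_quick_compiled_code_;  // load 3
      entry();                                                          // Blr lr
    }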
@@ -1012,7 +1186,9 @@ void LocationsBuilderARM64::VisitMul(HMul* mul) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
break;
default:
@@ -1029,7 +1205,7 @@ void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
break;
default:
@@ -1037,6 +1213,71 @@ void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
}
}
+void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(LocationFrom(x0));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(2)));
+}
+
+void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ InvokeRuntimeCallingConvention calling_convention;
+ Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
+ DCHECK(type_index.Is(w0));
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
+ DCHECK(current_method.Is(w1));
+ __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+ __ Mov(type_index, instruction->GetTypeIndex());
+ int32_t quick_entrypoint_offset =
+ QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocArrayWithAccessCheck).Int32Value();
+ __ Ldr(lr, MemOperand(tr, quick_entrypoint_offset));
+ __ Blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
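Note: VisitNewArray uses the backend's single pattern for runtime calls, also visible in VisitArraySet and VisitNewInstance: marshal arguments into the runtime calling convention (here w0 = type index, w1 = current method, with the length input already placed by the locations builder), load the Quick entrypoint out of the table reachable from the thread register tr, Blr, then RecordPcInfo so a stack map exists at the return address. A sketch of the table lookup; the thread layout here is an assumption for illustration:

    // Sketch: a Quick runtime call is an indexed load off the Thread pointer
    // followed by an indirect call (simplified, assumed layout).
    struct ThreadSketch {
      void* quick_entrypoints_[128];  // assumed table size
    };

    using AllocArrayFn = void* (*)(unsigned type_idx, void* method, int length);

    void* AllocArraySketch(ThreadSketch* tr, int entrypoint_index,
                           unsigned type_idx, void* method, int length) {
      auto fn =
          reinterpret_cast<AllocArrayFn>(tr->quick_entrypoints_[entrypoint_index]);
      return fn(type_idx, method, length);  // corresponds to Blr lr
    }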
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -1054,7 +1295,9 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
DCHECK(current_method.Is(w1));
__ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
__ Mov(type_index, instruction->GetTypeIndex());
- __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value()));
+ int32_t quick_entrypoint_offset =
+ QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value();
+ __ Ldr(lr, MemOperand(tr, quick_entrypoint_offset));
__ Blr(lr);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
DCHECK(!codegen_->IsLeafMethod());
@@ -1138,35 +1381,11 @@ void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
Primitive::Type return_type = instruction->InputAt(0)->GetType();
-
- if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented return type " << return_type;
- }
-
- locations->SetInAt(0, LocationFrom(x0));
+ locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
- if (kIsDebugBuild) {
- Primitive::Type type = instruction->InputAt(0)->GetType();
- switch (type) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- DCHECK(InputRegisterAt(instruction, 0).Is(w0));
- break;
-
- case Primitive::kPrimLong:
- DCHECK(InputRegisterAt(instruction, 0).Is(x0));
- break;
-
- default:
- LOG(FATAL) << "Unimplemented return type " << type;
- }
- }
+ UNUSED(instruction);
codegen_->GenerateFrameExit();
__ Br(lr);
}
@@ -1185,16 +1404,18 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
Primitive::Type field_type = store->InputAt(1)->GetType();
switch (field_type) {
+ case Primitive::kPrimNot:
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimNot:
+ case Primitive::kPrimFloat:
locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
break;
case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
break;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4a41000e8d..f2ead21e15 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -45,11 +45,14 @@ const vixl::Register wSuspend = vixl::w19; // Suspend Register
const vixl::Register xSuspend = vixl::x19;
const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
+const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
const vixl::CPURegList runtime_reserved_core_registers(tr, xSuspend, vixl::lr);
const vixl::CPURegList quick_callee_saved_registers(vixl::CPURegister::kRegister,
vixl::kXRegSize,
kArm64CalleeSaveRefSpills);
+Location ARM64ReturnLocation(Primitive::Type return_type);
+
class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
public:
InvokeDexCallingConvention()
@@ -59,11 +62,7 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl
kParameterFPRegistersLength) {}
Location GetReturnLocation(Primitive::Type return_type) {
- DCHECK_NE(return_type, Primitive::kPrimVoid);
- if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented return type " << return_type;
- }
- return Location::RegisterLocation(X0);
+ return ARM64ReturnLocation(return_type);
}
@@ -73,7 +72,7 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl
class InvokeDexCallingConventionVisitor {
public:
- InvokeDexCallingConventionVisitor() : gp_index_(0), stack_index_(0) {}
+ InvokeDexCallingConventionVisitor() : gp_index_(0), fp_index_(0), stack_index_(0) {}
Location GetNextLocation(Primitive::Type type);
Location GetReturnLocation(Primitive::Type return_type) {
@@ -84,6 +83,8 @@ class InvokeDexCallingConventionVisitor {
InvokeDexCallingConvention calling_convention;
// The current index for core registers.
uint32_t gp_index_;
+ // The current index for floating-point registers.
+ uint32_t fp_index_;
// The current stack index.
uint32_t stack_index_;
@@ -204,10 +205,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
// (xzr, wzr), or make for poor allocatable registers (sp alignment
// requirements, etc.). This also facilitates our task as all other registers
// can easily be mapped to or from their type and index or code.
- static const int kNumberOfAllocatableCoreRegisters = vixl::kNumberOfRegisters - 1;
- static const int kNumberOfAllocatableFloatingPointRegisters = vixl::kNumberOfFPRegisters;
- static const int kNumberOfAllocatableRegisters =
- kNumberOfAllocatableCoreRegisters + kNumberOfAllocatableFloatingPointRegisters;
+ static const int kNumberOfAllocatableRegisters = vixl::kNumberOfRegisters - 1;
+ static const int kNumberOfAllocatableFPRegisters = vixl::kNumberOfFPRegisters;
static constexpr int kNumberOfAllocatableRegisterPairs = 0;
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
@@ -217,8 +216,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
return InstructionSet::kArm64;
}
- void MoveHelper(Location destination, Location source, Primitive::Type type);
-
void Initialize() OVERRIDE {
HGraph* graph = GetGraph();
int length = graph->GetBlocks().Size();
@@ -228,6 +225,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
}
}
+ // Code generation helpers.
+ void MoveHelper(Location destination, Location source, Primitive::Type type);
+ void Load(Primitive::Type type, vixl::Register dst, const vixl::MemOperand& src);
+ void Store(Primitive::Type type, vixl::Register rt, const vixl::MemOperand& dst);
+
private:
// Labels for each block that will be compiled.
vixl::Label* block_labels_;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 69527cea54..0109a7c553 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -917,9 +917,10 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
// Call artSet64InstanceFromCode with 4 word size arguments and the referrer.
DEFINE_FUNCTION art_quick_set64_instance
+ movd %ebx, %xmm0 // stash ebx: it carries one half of the 64-bit new value
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ movd %xmm0, %ebx // restore it: the frame setup above clobbers ebx
// Outgoing argument set up
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()