ART: optimizing compiler: initial support for ARM64.
The ARM64 port uses VIXL for code generation, deferring to it work such
as label binding, branch resolution, register-type coherency checking,
and immediate-value handling.
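
As an illustration, the backend visitors emit code through the VIXL
macro-assembler; a simplified sketch (not the exact lowering of any one
instruction):

    __ Cmp(lhs, rhs);
    __ B(cond, true_target);  // VIXL resolves the branch once the label binds.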
Change-Id: I0a44508c0c991f472a63e67b3469cdd878fe1a68
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
Signed-off-by: Alexandre Rames <alexandre.rames@arm.com>
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 25b23a2..172c96c 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -88,6 +88,7 @@
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_arm.cc \
+ optimizing/code_generator_arm64.cc \
optimizing/code_generator_x86.cc \
optimizing/code_generator_x86_64.cc \
optimizing/constant_folding.cc \
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d5cd490..c4286a4 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -17,6 +17,7 @@
#include "code_generator.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
@@ -281,6 +282,9 @@
case kThumb2: {
return new (allocator) arm::CodeGeneratorARM(graph);
}
+ case kArm64: {
+ return new (allocator) arm64::CodeGeneratorARM64(graph);
+ }
case kMips:
return nullptr;
case kX86: {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
new file mode 100644
index 0000000..79528ac
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm64.h"
+
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/class.h"
+#include "thread.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+
+using namespace vixl; // NOLINT(build/namespaces)
+
+#ifdef __
+#error "ARM64 Codegen VIXL macro-assembler macro already defined."
+#endif
+
+namespace art {
+
+namespace arm64 {
+
+static bool IsFPType(Primitive::Type type) {
+ return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
+}
+
+// TODO: clean-up some of the constant definitions.
+static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
+static constexpr int kCurrentMethodStackOffset = 0;
+
+namespace {
+// Convenience helpers to ease conversion to and from VIXL operands.
+
+int VIXLRegCodeFromART(int code) {
+ // TODO: static check?
+ DCHECK_EQ(SP, 31);
+ DCHECK_EQ(WSP, 31);
+ DCHECK_EQ(XZR, 32);
+ DCHECK_EQ(WZR, 32);
+ if (code == SP) {
+ return vixl::kSPRegInternalCode;
+ }
+ if (code == XZR) {
+ return vixl::kZeroRegCode;
+ }
+ return code;
+}
+
+int ARTRegCodeFromVIXL(int code) {
+ // TODO: static check?
+ DCHECK_EQ(SP, 31);
+ DCHECK_EQ(WSP, 31);
+ DCHECK_EQ(XZR, 32);
+ DCHECK_EQ(WZR, 32);
+ if (code == vixl::kSPRegInternalCode) {
+ return SP;
+ }
+ if (code == vixl::kZeroRegCode) {
+ return XZR;
+ }
+ return code;
+}
+
+Register XRegisterFrom(Location location) {
+ return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register WRegisterFrom(Location location) {
+ return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register RegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
+ return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
+}
+
+Register OutputRegister(HInstruction* instr) {
+ return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+Register InputRegisterAt(HInstruction* instr, int input_index) {
+ return RegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+int64_t Int64ConstantFrom(Location location) {
+ HConstant* instr = location.GetConstant();
+ return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
+ : instr->AsLongConstant()->GetValue();
+}
+
+Operand OperandFrom(Location location, Primitive::Type type) {
+ if (location.IsRegister()) {
+ return Operand(RegisterFrom(location, type));
+ } else {
+ return Operand(Int64ConstantFrom(location));
+ }
+}
+
+Operand InputOperandAt(HInstruction* instr, int input_index) {
+ return OperandFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+MemOperand StackOperandFrom(Location location) {
+ return MemOperand(sp, location.GetStackIndex());
+}
+
+MemOperand HeapOperand(const Register& base, Offset offset) {
+  // A heap reference must be 32 bits, so it fits in a W register.
+ DCHECK(base.IsW());
+ return MemOperand(base.X(), offset.SizeValue());
+}
+
+MemOperand HeapOperandFrom(Location location, Primitive::Type type, Offset offset) {
+ return HeapOperand(RegisterFrom(location, type), offset);
+}
+
+Location LocationFrom(const Register& reg) {
+ return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
+}
+
+} // namespace
+
+inline Condition ARM64Condition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ case kCondLT: return lt;
+ case kCondLE: return le;
+ case kCondGT: return gt;
+ case kCondGE: return ge;
+ default:
+ LOG(FATAL) << "Unknown if condition";
+ }
+ return nv; // Unreachable.
+}
+
+static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static const FPRegister kRuntimeParameterFpuRegisters[] = { };
+static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
+ public:
+ static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+ return LocationFrom(x0);
+}
+
+#define __ reinterpret_cast<Arm64Assembler*>(codegen->GetAssembler())->vixl_masm_->
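+// With this macro, `__ Bind(label)` expands to
+// `reinterpret_cast<Arm64Assembler*>(codegen->GetAssembler())->vixl_masm_->Bind(label)`.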
+
+class SlowPathCodeARM64 : public SlowPathCode {
+ public:
+ SlowPathCodeARM64() : entry_label_(), exit_label_() {}
+
+ vixl::Label* GetEntryLabel() { return &entry_label_; }
+ vixl::Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ vixl::Label entry_label_;
+ vixl::Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
+};
+
+class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
+ Location index_location,
+ Location length_location)
+ : instruction_(instruction),
+ index_location_(index_location),
+ length_location_(length_location) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = reinterpret_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(0)),
+ index_location_, Primitive::kPrimInt);
+ arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(1)),
+ length_location_, Primitive::kPrimInt);
+ size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowArrayBounds).SizeValue();
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HBoundsCheck* const instruction_;
+ const Location index_location_;
+ const Location length_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
+};
+
+class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowNullPointer).Int32Value();
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
+};
+
+class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
+ HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pTestSuspend).SizeValue();
+ __ Bind(GetEntryLabel());
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ __ B(GetReturnLabel());
+ }
+
+ vixl::Label* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ vixl::Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
+};
+
+#undef __
+
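+// Assigns the location of the next parameter under the dex calling
+// convention. For example, a (long, int) parameter list is assigned x1 for
+// the long and w2 for the int, while stack_index_ advances by three vreg
+// slots (two for the long, one for the int).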
+Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented type " << type;
+ }
+
+ if (gp_index_ < calling_convention.GetNumberOfRegisters()) {
+ next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_));
+ if (type == Primitive::kPrimLong) {
+ // Double stack slot reserved on the stack.
+ stack_index_++;
+ }
+ } else { // Stack.
+ if (type == Primitive::kPrimLong) {
+ next_location = Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_));
+ // Double stack slot reserved on the stack.
+ stack_index_++;
+ } else {
+ next_location = Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_));
+ }
+ }
+ // Move to the next register/stack slot.
+ gp_index_++;
+ stack_index_++;
+ return next_location;
+}
+
+CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
+ : CodeGenerator(graph,
+ kNumberOfAllocatableRegisters,
+ kNumberOfAllocatableFloatingPointRegisters,
+ kNumberOfAllocatableRegisterPairs),
+ block_labels_(nullptr),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this) {}
+
+#define __ reinterpret_cast<Arm64Assembler*>(GetAssembler())->vixl_masm_->
+
+void CodeGeneratorARM64::GenerateFrameEntry() {
+ // TODO: Add proper support for the stack overflow check.
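+  // In the meantime, probe the stack: loading from (sp - reserved bytes)
+  // faults on the guard page if the frame would overflow, and the fault is
+  // attributed via the PC recorded below.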
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = temps.AcquireX();
+ __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Ldr(temp, MemOperand(temp, 0));
+ RecordPcInfo(nullptr, 0);
+
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ int frame_size = GetFrameSize();
+ core_spill_mask_ |= preserved_regs.list();
+
+ __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
+ __ PokeCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+
+ // Stack layout:
+  //   sp[frame_size - 8]        : lr.
+  //   ...                       : other preserved registers.
+  //   sp[frame_size - regs_size]: first preserved register.
+  //   ...                       : reserved frame space.
+  //   sp[0]                     : current method.
+}
+
+void CodeGeneratorARM64::GenerateFrameExit() {
+ int frame_size = GetFrameSize();
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ __ PeekCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+ __ Drop(frame_size);
+}
+
+void CodeGeneratorARM64::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorARM64::MoveHelper(Location destination,
+ Location source,
+ Primitive::Type type) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ Register dst = RegisterFrom(destination, type);
+ if (source.IsRegister()) {
+ Register src = RegisterFrom(source, type);
+ DCHECK(dst.IsSameSizeAndType(src));
+ __ Mov(dst, src);
+ } else {
+ DCHECK(dst.Is64Bits() || !source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ }
+ } else {
+ DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
+ if (source.IsRegister()) {
+ __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
+ } else {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
+ __ Ldr(temp, StackOperandFrom(source));
+ __ Str(temp, StackOperandFrom(destination));
+ }
+ }
+}
+
+void CodeGeneratorARM64::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ Primitive::Type type = instruction->GetType();
+
+ if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
+ int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
+ : instruction->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ Register dst = RegisterFrom(location, type);
+ DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
+ (instruction->IsLongConstant() && dst.Is64Bits()));
+ __ Mov(dst, value);
+ } else {
+ DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
+ __ Mov(temp, value);
+ __ Str(temp, StackOperandFrom(location));
+ }
+
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ MoveHelper(location, Location::StackSlot(stack_slot), type);
+ break;
+ case Primitive::kPrimLong:
+ MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
+ break;
+ default:
+        LOG(FATAL) << "Unimplemented type " << type;
+ }
+
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ MoveHelper(location, locations->Out(), type);
+ }
+}
+
+size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
+ return GetFramePreservedRegistersSize();
+}
+
+Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+ case Primitive::kPrimLong:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented type " << type;
+ break;
+ case Primitive::kPrimVoid:
+ default:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register card = temps.AcquireX();
+ Register temp = temps.AcquireX();
+ vixl::Label done;
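+  // Null stores need no write barrier. Otherwise mark the card covering
+  // `object`: the card table base register also serves as the dirty-card
+  // value, so storing its low byte at base + (object >> kCardShift) marks
+  // the card dirty.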
+ __ Cbz(value, &done);
+ __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
+ __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Strb(card, MemOperand(card, temp));
+ __ Bind(&done);
+}
+
+void CodeGeneratorARM64::SetupBlockedRegisters() const {
+ // Block reserved registers:
+ // ip0 (VIXL temporary)
+ // ip1 (VIXL temporary)
+ // xSuspend (Suspend counter)
+ // lr
+ // sp is not part of the allocatable registers, so we don't need to block it.
+ CPURegList reserved_core_registers = vixl_reserved_core_registers;
+ reserved_core_registers.Combine(runtime_reserved_core_registers);
+ // TODO: See if we should instead allow allocating but preserve those if used.
+ reserved_core_registers.Combine(quick_callee_saved_registers);
+ while (!reserved_core_registers.IsEmpty()) {
+ blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
+ }
+}
+
+Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented support for floating-point";
+ }
+
+ ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfXRegisters);
+ DCHECK_NE(reg, -1);
+ blocked_core_registers_[reg] = true;
+
+ if (IsFPType(type)) {
+ return Location::FpuRegisterLocation(reg);
+ } else {
+ return Location::RegisterLocation(reg);
+ }
+}
+
+void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
+}
+
+void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
+}
+
+#undef __
+#define __ assembler_->vixl_masm_->
+
+InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
+ CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(ArrayGet) \
+ M(ArraySet) \
+ M(DoubleConstant) \
+ M(FloatConstant) \
+ M(Mul) \
+ M(Neg) \
+ M(NewArray) \
+ M(ParallelMove)
+
+#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
+
+enum UnimplementedInstructionBreakCode {
+#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
+#undef ENUM_UNIMPLEMENTED_INSTRUCTION
+};
+
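+// The generated visitors below emit `brk` with a per-instruction immediate,
+// so reaching an unimplemented instruction traps with an identifiable code.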
+#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
+ } \
+ void LocationsBuilderARM64::Visit##name(H##name* instr) { \
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
+ locations->SetOut(Location::Any()); \
+ }
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
+#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
+
+#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
+
+void LocationsBuilderARM64::HandleAddSub(HBinaryOperation* instr) {
+ DCHECK(instr->IsAdd() || instr->IsSub());
+ DCHECK_EQ(instr->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented " << instr->DebugName() << " type " << type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::HandleAddSub(HBinaryOperation* instr) {
+ DCHECK(instr->IsAdd() || instr->IsSub());
+
+ Primitive::Type type = instr->GetType();
+ Register dst = OutputRegister(instr);
+ Register lhs = InputRegisterAt(instr, 0);
+ Operand rhs = InputOperandAt(instr, 1);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ if (instr->IsAdd()) {
+ __ Add(dst, lhs, rhs);
+ } else {
+ __ Sub(dst, lhs, rhs);
+ }
+ break;
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected add/sub type " << type;
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented add/sub type " << type;
+ }
+}
+
+void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
+ HandleAddSub(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
+ HandleAddSub(instruction);
+}
+
+void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
+ __ Ldr(OutputRegister(instruction),
+ HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
+}
+
+void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitCompare(HCompare* instruction) {
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ DCHECK_EQ(in_type, Primitive::kPrimLong);
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ vixl::Label done;
+ Register result = OutputRegister(instruction);
+ Register left = InputRegisterAt(instruction, 0);
+ Operand right = InputOperandAt(instruction, 1);
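+      // result = 0 if left == right, -1 if left < right, +1 otherwise.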
+ __ Subs(result, left, right);
+ __ B(eq, &done);
+ __ Mov(result, 1);
+ __ Cneg(result, result, le);
+ __ Bind(&done);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register lhs = InputRegisterAt(instruction, 0);
+ Operand rhs = InputOperandAt(instruction, 1);
+ Register res = RegisterFrom(locations->Out(), instruction->GetType());
+ Condition cond = ARM64Condition(instruction->GetCondition());
+
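+  // Materialize the condition into `res`: 1 if it holds, 0 otherwise, by
+  // selecting against the zero register on the inverted condition.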
+ __ Cmp(lhs, rhs);
+ __ Csel(res, vixl::Assembler::AppropriateZeroRegFor(res), Operand(1), InvertCondition(cond));
+}
+
+#define FOR_EACH_CONDITION_INSTRUCTION(M) \
+ M(Equal) \
+ M(NotEqual) \
+ M(LessThan) \
+ M(LessThanOrEqual) \
+ M(GreaterThan) \
+ M(GreaterThanOrEqual)
+#define DEFINE_CONDITION_VISITORS(Name) \
+void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
+void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
+FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
+#undef FOR_EACH_CONDITION_INSTRUCTION
+
+void LocationsBuilderARM64::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ if (kIsDebugBuild) {
+ down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
+ __ Brk(0); // TODO: Introduce special markers for such code locations.
+ }
+}
+
+void LocationsBuilderARM64::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
+ HBasicBlock* successor = got->GetSuccessor();
+  // TODO: Support suspend check emission.
+ if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ if (cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ HCondition* condition = cond->AsCondition();
+ vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+
+ // TODO: Support constant condition input in VisitIf.
+
+ if (condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = if_instr->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
+
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ Register lhs = InputRegisterAt(condition, 0);
+ Operand rhs = InputOperandAt(condition, 1);
+ Condition cond = ARM64Condition(condition->GetCondition());
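+    // Comparisons against an immediate zero lower to cbz/cbnz; everything
+    // else uses cmp + b.cond.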
+ if ((cond == eq || cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ if (cond == eq) {
+ __ Cbz(lhs, true_target);
+ } else {
+ __ Cbnz(lhs, true_target);
+ }
+ } else {
+ __ Cmp(lhs, rhs);
+ __ B(cond, true_target);
+ }
+ }
+
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ Primitive::Type res_type = instruction->GetType();
+ Register res = OutputRegister(instruction);
+ Register obj = InputRegisterAt(instruction, 0);
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
+ switch (res_type) {
+ case Primitive::kPrimBoolean: {
+ __ Ldrb(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimByte: {
+ __ Ldrsb(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimShort: {
+ __ Ldrsh(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimChar: {
+ __ Ldrh(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong: { // TODO: support volatile.
+ DCHECK(res.IsX() == (res_type == Primitive::kPrimLong));
+ __ Ldr(res, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+      LOG(FATAL) << "Unimplemented register type " << res_type;
+ break;
+
+ case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << res_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ Register obj = InputRegisterAt(instruction, 0);
+ Register value = InputRegisterAt(instruction, 1);
+ Primitive::Type field_type = instruction->InputAt(1)->GetType();
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ __ Strb(value, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ __ Strh(value, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong: {
+ DCHECK(value.IsX() == (field_type == Primitive::kPrimLong));
+ __ Str(value, MemOperand(obj, offset));
+
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(obj, value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << field_type;
+ break;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << field_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
+ locations->AddTemp(LocationFrom(x0));
+
+ InvokeDexCallingConventionVisitor calling_convention_visitor;
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
+ }
+
+ Primitive::Type return_type = invoke->GetType();
+ if (return_type != Primitive::kPrimVoid) {
+ locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+  // Make sure that ArtMethod* is passed in W0 as per the calling convention.
+ DCHECK(temp.Is(w0));
+ size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
+ invoke->GetIndexInDexCache() * kHeapRefSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ __ Ldr(temp, MemOperand(sp, kCurrentMethodStackOffset));
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp, MemOperand(temp.X(), mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp, MemOperand(temp.X(), index_in_cache));
+ // lr = temp->entry_point_from_quick_compiled_code_;
+ __ Ldr(lr, MemOperand(temp.X(), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ // lr();
+ __ Blr(lr);
+
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ Offset class_offset = mirror::Object::ClassOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset();
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp.W(), MemOperand(temp, class_offset.SizeValue()));
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, Primitive::kPrimNot,
+ class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ Ldr(temp.W(), MemOperand(temp, method_offset));
+ // lr = temp->GetEntryPoint();
+ __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
+ // lr();
+ __ Blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderARM64::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
+ DCHECK(type_index.Is(w0));
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
+ DCHECK(current_method.Is(w1));
+ __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+ __ Mov(type_index, instruction->GetTypeIndex());
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value()));
+ __ Blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM64::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
+ switch (instruction->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), Operand(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << instruction->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location obj = locations->InAt(0);
+ if (obj.IsRegister()) {
+ __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
+ } else {
+ DCHECK(obj.IsConstant()) << obj;
+ DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ __ B(slow_path->GetEntryLabel());
+ }
+}
+
+void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
+ // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type return_type = instruction->InputAt(0)->GetType();
+
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+
+ locations->SetInAt(0, LocationFrom(x0));
+}
+
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
+ if (kIsDebugBuild) {
+ Primitive::Type type = instruction->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ DCHECK(InputRegisterAt(instruction, 0).Is(w0));
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK(InputRegisterAt(instruction, 0).Is(x0));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented return type " << type;
+ }
+ }
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ instruction->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
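+  // Nothing to do: the locations builder set the input location to the
+  // local's stack slot, so the value is stored there directly.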
+}
+
+void LocationsBuilderARM64::VisitSub(HSub* instruction) {
+ HandleAddSub(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
+ HandleAddSub(instruction);
+}
+
+void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
+ instruction, locations->InAt(0), locations->InAt(1));
+ codegen_->AddSlowPath(slow_path);
+
+ __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
+ __ B(slow_path->GetEntryLabel(), hs);
+}
+
+void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ // TODO: Improve support for suspend checks.
+ SuspendCheckSlowPathARM64* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, nullptr);
+ codegen_->AddSlowPath(slow_path);
+
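+  // Decrement the suspend counter and take the slow path once it reaches
+  // zero (or below).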
+ __ Subs(wSuspend, wSuspend, 1);
+ __ B(slow_path->GetEntryLabel(), le);
+ __ Bind(slow_path->GetReturnLabel());
+}
+
+void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+} // namespace arm64
+} // namespace art
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
new file mode 100644
index 0000000..a4003ff
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+
+#include "code_generator.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "arch/arm64/quick_method_frame_info_arm64.h"
+
+namespace art {
+namespace arm64 {
+
+class CodeGeneratorARM64;
+
+static constexpr size_t kArm64WordSize = 8;
+static const vixl::Register kParameterCoreRegisters[] = {
+ vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
+};
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+static const vixl::FPRegister kParameterFPRegisters[] = {
+ vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7
+};
+static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
+
+const vixl::Register tr = vixl::x18; // Thread Register
+const vixl::Register wSuspend = vixl::w19; // Suspend Register
+const vixl::Register xSuspend = vixl::x19;
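+// Note: wSuspend and xSuspend are the W and X views of the same physical
+// register (x19).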
+
+const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
+const vixl::CPURegList runtime_reserved_core_registers(tr, xSuspend, vixl::lr);
+const vixl::CPURegList quick_callee_saved_registers(vixl::CPURegister::kRegister,
+ vixl::kXRegSize,
+ kArm64CalleeSaveRefSpills);
+
+class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFPRegisters,
+ kParameterFPRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+ return Location::RegisterLocation(X0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitor() : gp_index_(0), stack_index_(0) {}
+
+ Location GetNextLocation(Primitive::Type type);
+ Location GetReturnLocation(Primitive::Type return_type) {
+ return calling_convention.GetReturnLocation(return_type);
+ }
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+ // The current index for core registers.
+ uint32_t gp_index_;
+ // The current stack index.
+ uint32_t stack_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
+class InstructionCodeGeneratorARM64 : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ virtual void Visit##name(H##name* instr);
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void LoadCurrentMethod(XRegister reg);
+
+ Arm64Assembler* GetAssembler() const { return assembler_; }
+
+ private:
+ void HandleAddSub(HBinaryOperation* instr);
+
+ Arm64Assembler* const assembler_;
+ CodeGeneratorARM64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64);
+};
+
+class LocationsBuilderARM64 : public HGraphVisitor {
+ public:
+ explicit LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ virtual void Visit##name(H##name* instr);
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ void HandleAddSub(HBinaryOperation* instr);
+ void HandleInvoke(HInvoke* instr);
+
+ CodeGeneratorARM64* const codegen_;
+ InvokeDexCallingConventionVisitor parameter_visitor_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
+};
+
+class CodeGeneratorARM64 : public CodeGenerator {
+ public:
+ explicit CodeGeneratorARM64(HGraph* graph);
+ virtual ~CodeGeneratorARM64() { }
+
+ virtual void GenerateFrameEntry() OVERRIDE;
+ virtual void GenerateFrameExit() OVERRIDE;
+
+ static const vixl::CPURegList& GetFramePreservedRegisters() {
+ static const vixl::CPURegList frame_preserved_regs =
+ vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize, vixl::lr.Bit());
+ return frame_preserved_regs;
+ }
+ static int GetFramePreservedRegistersSize() {
+ return GetFramePreservedRegisters().TotalSizeInBytes();
+ }
+
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
+
+ vixl::Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_ + block->GetBlockId();
+ }
+
+ virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+
+ virtual size_t GetWordSize() const OVERRIDE {
+ return kArm64WordSize;
+ }
+
+ virtual size_t FrameEntrySpillSize() const OVERRIDE;
+
+ virtual HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ virtual Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+
+ // Emit a write barrier.
+ void MarkGCCard(vixl::Register object, vixl::Register value);
+
+ // Register allocation.
+
+ virtual void SetupBlockedRegisters() const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
+ return 0;
+ }
+
+ virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
+ return 0;
+ }
+
+ // The number of registers that can be allocated. The register allocator may
+ // decide to reserve and not use a few of them.
+ // We do not consider registers sp, xzr, wzr. They are either not allocatable
+ // (xzr, wzr), or make for poor allocatable registers (sp alignment
+ // requirements, etc.). This also facilitates our task as all other registers
+  // can easily be mapped to or from their type and index or code.
+ static const int kNumberOfAllocatableCoreRegisters = vixl::kNumberOfRegisters - 1;
+ static const int kNumberOfAllocatableFloatingPointRegisters = vixl::kNumberOfFPRegisters;
+ static const int kNumberOfAllocatableRegisters =
+ kNumberOfAllocatableCoreRegisters + kNumberOfAllocatableFloatingPointRegisters;
+ static constexpr int kNumberOfAllocatableRegisterPairs = 0;
+
+ virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ return InstructionSet::kArm64;
+ }
+
+ void MoveHelper(Location destination, Location source, Primitive::Type type);
+
+ virtual void Initialize() OVERRIDE {
+ HGraph* graph = GetGraph();
+ int length = graph->GetBlocks().Size();
+ block_labels_ = graph->GetArena()->AllocArray<vixl::Label>(length);
+ for (int i = 0; i < length; ++i) {
+ new(block_labels_ + i) vixl::Label();
+ }
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ vixl::Label* block_labels_;
+
+ LocationsBuilderARM64 location_builder_;
+ InstructionCodeGeneratorARM64 instruction_visitor_;
+ Arm64Assembler assembler_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 29ad3de..34d8bfe 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -18,6 +18,7 @@
#include "builder.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "common_compiler_test.h"
@@ -93,6 +94,12 @@
if (kRuntimeISA == kX86_64) {
Run(allocator, codegenX86_64, has_result, expected);
}
+
+ arm64::CodeGeneratorARM64 codegenARM64(graph);
+ codegenARM64.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kArm64) {
+ Run(allocator, codegenARM64, has_result, expected);
+ }
}
static void RunCodeOptimized(CodeGenerator* codegen,
@@ -395,10 +402,16 @@
TestCode(data, true, 12); \
}
+#if !defined(__aarch64__)
MUL_TEST(INT, MulInt);
MUL_TEST(LONG, MulLong);
+#endif
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnMulIntLit8) {
+#else
TEST(CodegenTest, ReturnMulIntLit8) {
+#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
@@ -407,7 +420,11 @@
TestCode(data, true, 12);
}
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnMulIntLit16) {
+#else
TEST(CodegenTest, ReturnMulIntLit16) {
+#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index dce8e6d..80e9cdb 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -215,7 +215,10 @@
}
// Do not attempt to compile on architectures we do not support.
- if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
+ if (instruction_set != kArm64 &&
+ instruction_set != kThumb2 &&
+ instruction_set != kX86 &&
+ instruction_set != kX86_64) {
return nullptr;
}
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index c144991..1b1d121 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -219,12 +219,13 @@
void AddConstant(XRegister rd, int32_t value, vixl::Condition cond = vixl::al);
void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);
- // Vixl assembler.
- vixl::MacroAssembler* const vixl_masm_;
-
// List of exception blocks to generate at the end of the code cache.
std::vector<Arm64Exception*> exception_blocks_;
+ public:
+ // Vixl assembler.
+ vixl::MacroAssembler* const vixl_masm_;
+
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
};
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 91b8d8ab..2b0c94c 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -118,6 +118,7 @@
friend class arm::ArmAssembler;
friend class arm::Arm32Assembler;
friend class arm::Thumb2Assembler;
+ friend class arm64::Arm64Assembler;
friend class mips::MipsAssembler;
friend class x86::X86Assembler;
friend class x86_64::X86_64Assembler;
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 427e0b1..9279ce3 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -290,6 +290,44 @@
TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
+# Known broken tests for the arm64 optimizing compiler backend.
+TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
+ 003-omnibus-opcodes \
+ 006-args \
+ 011-array-copy \
+ 018-stack-overflow \
+ 036-finalizer \
+ 044-proxy \
+ 070-nio-buffer \
+ 072-precise-gc \
+ 082-inline-execute \
+ 083-compiler-regressions \
+ 093-serialization \
+ 096-array-copy-concurrent-gc \
+ 100-reflect2 \
+ 106-exceptions2 \
+ 107-int-math2 \
+ 121-modifiers \
+ 122-npe \
+ 123-compiler-regressions-mt \
+ 405-optimizing-long-allocator \
+ 407-arrays \
+ 410-floats \
+ 411-optimizing-arith \
+ 412-new-array \
+ 413-regalloc-regression \
+ 700-LoadArgRegs \
+ 800-smali
+
+ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
+endif
+
+TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
+
+
# Clear variables ahead of appending to them when defining tests.
$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
$(foreach target, $(TARGET_TYPES), \