Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                    25
-rw-r--r--  compiler/optimizing/code_generator.cc              4
-rw-r--r--  compiler/optimizing/code_generator_arm.cc         91
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     1205
-rw-r--r--  compiler/optimizing/code_generator_arm64.h       236
-rw-r--r--  compiler/optimizing/code_generator_x86.cc         92
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc      87
-rw-r--r--  compiler/optimizing/codegen_test.cc              163
-rw-r--r--  compiler/optimizing/graph_visualizer.h             2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc      2
-rw-r--r--  compiler/optimizing/nodes.h                       14
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc         5
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h        1
13 files changed, 1799 insertions(+), 128 deletions(-)
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 4d575cbdcf..79cbd0ee21 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -748,6 +748,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::NOT_INT: {
+ Unop_12x<HNot>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -778,6 +783,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::SUB_FLOAT: {
+ Binop_23x<HSub>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::SUB_DOUBLE: {
+ Binop_23x<HSub>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
case Instruction::ADD_INT_2ADDR: {
Binop_12x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -828,6 +843,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::SUB_FLOAT_2ADDR: {
+ Binop_12x<HSub>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::SUB_DOUBLE_2ADDR: {
+ Binop_12x<HSub>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
case Instruction::MUL_INT_2ADDR: {
Binop_12x<HMul>(instruction, Primitive::kPrimInt);
break;
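
The Unop_12x/Binop_23x/Binop_12x helpers used above are small templates that read the dex registers named by the instruction format (12x: vA, vB; 23x: vA, vB, vC), build the corresponding HIR node, and store the result back into vA. A rough sketch of the 23x case, with member names inferred from context rather than quoted from the source:

    template <typename T>
    void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type type) {
      HInstruction* first = LoadLocal(instruction.VRegB(), type);   // vB
      HInstruction* second = LoadLocal(instruction.VRegC(), type);  // vC
      current_block_->AddInstruction(new (arena_) T(type, first, second));
      UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
    }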
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d5cd490d13..c4286a401b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -17,6 +17,7 @@
#include "code_generator.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
@@ -281,6 +282,9 @@ CodeGenerator* CodeGenerator::Create(ArenaAllocator* allocator,
case kThumb2: {
return new (allocator) arm::CodeGeneratorARM(graph);
}
+ case kArm64: {
+ return new (allocator) arm64::CodeGeneratorARM64(graph);
+ }
case kMips:
return nullptr;
case kX86: {
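
Create() returns nullptr for instruction sets without a backend (kMips above), so callers must treat a null code generator as "cannot compile". A hypothetical caller-side sketch:

    CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, instruction_set);
    if (codegen == nullptr) {
      return nullptr;  // Bail out so a fallback compiler can take the method.
    }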
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7ed802ec7b..7b00d2f523 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1157,53 +1157,60 @@ void LocationsBuilderARM::VisitSub(HSub* sub) {
locations->SetOut(Location::RequiresRegister(), output_overlaps);
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
break;
-
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- if (locations->InAt(1).IsRegister()) {
- __ sub(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(),
- ShifterOperand(locations->InAt(1).As<Register>()));
+ if (second.IsRegister()) {
+ __ sub(out.As<Register>(), first.As<Register>(), ShifterOperand(second.As<Register>()));
} else {
- __ AddConstant(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(),
- -locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
+ __ AddConstant(out.As<Register>(),
+ first.As<Register>(),
+ -second.GetConstant()->AsIntConstant()->GetValue());
}
break;
}
- case Primitive::kPrimLong:
- __ subs(locations->Out().AsRegisterPairLow<Register>(),
- locations->InAt(0).AsRegisterPairLow<Register>(),
- ShifterOperand(locations->InAt(1).AsRegisterPairLow<Register>()));
- __ sbc(locations->Out().AsRegisterPairHigh<Register>(),
- locations->InAt(0).AsRegisterPairHigh<Register>(),
- ShifterOperand(locations->InAt(1).AsRegisterPairHigh<Register>()));
+ case Primitive::kPrimLong: {
+ __ subs(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ sbc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
break;
+ }
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ vsubs(FromDToLowS(out.As<DRegister>()),
+ FromDToLowS(first.As<DRegister>()),
+ FromDToLowS(second.As<DRegister>()));
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ vsubd(out.As<DRegister>(), first.As<DRegister>(), second.As<DRegister>());
+ break;
+ }
+
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
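
The new float path emits vsubs on S registers derived from the allocated D registers. FromDToLowS leans on VFP register aliasing, where s(2n) is the low half of d(n); a sketch of the helper this code assumes (defined elsewhere in the ARM backend):

    inline SRegister FromDToLowS(DRegister reg) {
      return static_cast<SRegister>(reg * 2);  // low S register of d(reg)
    }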
@@ -1351,17 +1358,33 @@ void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instructi
// Nothing to do, the parameter is already at its location.
}
-void LocationsBuilderARM::VisitNot(HNot* instruction) {
+void LocationsBuilderARM::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
-void InstructionCodeGeneratorARM::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- __ eor(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(), ShifterOperand(1));
+void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ eor(out.As<Register>(), in.As<Register>(), ShifterOperand(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ mvn(out.As<Register>(), ShifterOperand(in.As<Register>()));
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << not_->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
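
The reworked VisitNot separates the two meanings HNot now carries: boolean negation keeps the eor with 1, while the new NOT_INT bitwise complement uses mvn. Reference semantics, for clarity:

    int32_t NotBoolean(int32_t b) { return b ^ 1; }  // eor out, in, #1
    int32_t NotInt(int32_t v)     { return ~v; }     // mvn out, in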
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
new file mode 100644
index 0000000000..79528ac128
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm64.h"
+
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/class.h"
+#include "thread.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+
+
+using namespace vixl; // NOLINT(build/namespaces)
+
+#ifdef __
+#error "ARM64 Codegen VIXL macro-assembler macro already defined."
+#endif
+
+
+namespace art {
+
+namespace arm64 {
+
+static bool IsFPType(Primitive::Type type) {
+ return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
+}
+
+// TODO: clean-up some of the constant definitions.
+static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
+static constexpr int kCurrentMethodStackOffset = 0;
+
+namespace {
+// Convenience helpers to ease conversion to and from VIXL operands.
+
+int VIXLRegCodeFromART(int code) {
+ // TODO: static check?
+ DCHECK_EQ(SP, 31);
+ DCHECK_EQ(WSP, 31);
+ DCHECK_EQ(XZR, 32);
+ DCHECK_EQ(WZR, 32);
+ if (code == SP) {
+ return vixl::kSPRegInternalCode;
+ }
+ if (code == XZR) {
+ return vixl::kZeroRegCode;
+ }
+ return code;
+}
+
+int ARTRegCodeFromVIXL(int code) {
+ // TODO: static check?
+ DCHECK_EQ(SP, 31);
+ DCHECK_EQ(WSP, 31);
+ DCHECK_EQ(XZR, 32);
+ DCHECK_EQ(WZR, 32);
+ if (code == vixl::kSPRegInternalCode) {
+ return SP;
+ }
+ if (code == vixl::kZeroRegCode) {
+ return XZR;
+ }
+ return code;
+}
+
+Register XRegisterFrom(Location location) {
+ return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register WRegisterFrom(Location location) {
+ return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register RegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
+ return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
+}
+
+Register OutputRegister(HInstruction* instr) {
+ return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+Register InputRegisterAt(HInstruction* instr, int input_index) {
+ return RegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+int64_t Int64ConstantFrom(Location location) {
+ HConstant* instr = location.GetConstant();
+ return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
+ : instr->AsLongConstant()->GetValue();
+}
+
+Operand OperandFrom(Location location, Primitive::Type type) {
+ if (location.IsRegister()) {
+ return Operand(RegisterFrom(location, type));
+ } else {
+ return Operand(Int64ConstantFrom(location));
+ }
+}
+
+Operand InputOperandAt(HInstruction* instr, int input_index) {
+ return OperandFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+MemOperand StackOperandFrom(Location location) {
+ return MemOperand(sp, location.GetStackIndex());
+}
+
+MemOperand HeapOperand(const Register& base, Offset offset) {
+ // A heap reference must be 32 bits, so it fits in a W register.
+ DCHECK(base.IsW());
+ return MemOperand(base.X(), offset.SizeValue());
+}
+
+MemOperand HeapOperandFrom(Location location, Primitive::Type type, Offset offset) {
+ return HeapOperand(RegisterFrom(location, type), offset);
+}
+
+Location LocationFrom(const Register& reg) {
+ return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
+}
+
+} // namespace
+
+inline Condition ARM64Condition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ case kCondLT: return lt;
+ case kCondLE: return le;
+ case kCondGT: return gt;
+ case kCondGE: return ge;
+ default:
+ LOG(FATAL) << "Unknown if condition";
+ }
+ return nv; // Unreachable.
+}
+
+static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static const FPRegister kRuntimeParameterFpuRegisters[] = { };
+static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
+ public:
+ static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+ return LocationFrom(x0);
+}
+
+#define __ reinterpret_cast<Arm64Assembler*>(codegen->GetAssembler())->vixl_masm_->
+
+class SlowPathCodeARM64 : public SlowPathCode {
+ public:
+ SlowPathCodeARM64() : entry_label_(), exit_label_() {}
+
+ vixl::Label* GetEntryLabel() { return &entry_label_; }
+ vixl::Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ vixl::Label entry_label_;
+ vixl::Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
+};
+
+class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
+ Location index_location,
+ Location length_location)
+ : instruction_(instruction),
+ index_location_(index_location),
+ length_location_(length_location) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = reinterpret_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(0)),
+ index_location_, Primitive::kPrimInt);
+ arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(1)),
+ length_location_, Primitive::kPrimInt);
+ size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowArrayBounds).SizeValue();
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HBoundsCheck* const instruction_;
+ const Location index_location_;
+ const Location length_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
+};
+
+class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowNullPointer).Int32Value();
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
+};
+
+class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
+ HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pTestSuspend).SizeValue();
+ __ Bind(GetEntryLabel());
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ __ B(GetReturnLabel());
+ }
+
+ vixl::Label* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ vixl::Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
+};
+
+#undef __
+
+Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented type " << type;
+ }
+
+ if (gp_index_ < calling_convention.GetNumberOfRegisters()) {
+ next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_));
+ if (type == Primitive::kPrimLong) {
+ // Double stack slot reserved on the stack.
+ stack_index_++;
+ }
+ } else { // Stack.
+ if (type == Primitive::kPrimLong) {
+ next_location = Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_));
+ // Double stack slot reserved on the stack.
+ stack_index_++;
+ } else {
+ next_location = Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_));
+ }
+ }
+ // Move to the next register/stack slot.
+ gp_index_++;
+ stack_index_++;
+ return next_location;
+}
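+
+// Worked example (hypothetical signature (int, long, int)): the arguments land
+// in w1, x2 and w3; gp_index_ advances by one per argument while stack_index_
+// advances by two for the long, so a later stack-passed argument would start
+// on a slot of the right size.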
+
+CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
+ : CodeGenerator(graph,
+ kNumberOfAllocatableRegisters,
+ kNumberOfAllocatableFloatingPointRegisters,
+ kNumberOfAllocatableRegisterPairs),
+ block_labels_(nullptr),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this) {}
+
+#define __ reinterpret_cast<Arm64Assembler*>(GetAssembler())->vixl_masm_->
+
+void CodeGeneratorARM64::GenerateFrameEntry() {
+ // TODO: Add proper support for the stack overflow check.
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = temps.AcquireX();
+ __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Ldr(temp, MemOperand(temp, 0));
+ RecordPcInfo(nullptr, 0);
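+ // The load above probes the far end of the reserved stack region; hitting
+ // the guard page faults, and the PC recorded here lets the runtime turn that
+ // fault into a StackOverflowError (assumption: ART's implicit stack checks).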
+
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ int frame_size = GetFrameSize();
+ core_spill_mask_ |= preserved_regs.list();
+
+ __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
+ __ PokeCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+
+ // Stack layout:
+ // sp[frame_size - 8] : lr.
+ // ... : other preserved registers.
+ // sp[frame_size - regs_size]: first preserved register.
+ // ... : reserved frame space.
+ // sp[0] : context pointer.
+}
+
+void CodeGeneratorARM64::GenerateFrameExit() {
+ int frame_size = GetFrameSize();
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ __ PeekCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+ __ Drop(frame_size);
+}
+
+void CodeGeneratorARM64::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorARM64::MoveHelper(Location destination,
+ Location source,
+ Primitive::Type type) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ Register dst = RegisterFrom(destination, type);
+ if (source.IsRegister()) {
+ Register src = RegisterFrom(source, type);
+ DCHECK(dst.IsSameSizeAndType(src));
+ __ Mov(dst, src);
+ } else {
+ DCHECK(dst.Is64Bits() || !source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ }
+ } else {
+ DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
+ if (source.IsRegister()) {
+ __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
+ } else {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
+ __ Ldr(temp, StackOperandFrom(source));
+ __ Str(temp, StackOperandFrom(destination));
+ }
+ }
+}
+
+void CodeGeneratorARM64::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ Primitive::Type type = instruction->GetType();
+
+ if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
+ int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
+ : instruction->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ Register dst = RegisterFrom(location, type);
+ DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
+ (instruction->IsLongConstant() && dst.Is64Bits()));
+ __ Mov(dst, value);
+ } else {
+ DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
+ __ Mov(temp, value);
+ __ Str(temp, StackOperandFrom(location));
+ }
+
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ MoveHelper(location, Location::StackSlot(stack_slot), type);
+ break;
+ case Primitive::kPrimLong:
+ MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented type" << type;
+ }
+
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ MoveHelper(location, locations->Out(), type);
+ }
+}
+
+size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
+ return GetFramePreservedRegistersSize();
+}
+
+Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+ case Primitive::kPrimLong:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented type " << type;
+ break;
+ case Primitive::kPrimVoid:
+ default:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register card = temps.AcquireX();
+ Register temp = temps.AcquireX();
+ vixl::Label done;
+ __ Cbz(value, &done);
+ __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
+ __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
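+ // Strb stores the low byte of the card-table base itself; the biased base is
+ // chosen so that this byte equals the dirty-card value (ART's card-marking
+ // trick, stated here as an assumption).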
+ __ Strb(card, MemOperand(card, temp));
+ __ Bind(&done);
+}
+
+void CodeGeneratorARM64::SetupBlockedRegisters() const {
+ // Block reserved registers:
+ // ip0 (VIXL temporary)
+ // ip1 (VIXL temporary)
+ // xSuspend (Suspend counter)
+ // lr
+ // sp is not part of the allocatable registers, so we don't need to block it.
+ CPURegList reserved_core_registers = vixl_reserved_core_registers;
+ reserved_core_registers.Combine(runtime_reserved_core_registers);
+ // TODO: See if we should instead allow allocating but preserve those if used.
+ reserved_core_registers.Combine(quick_callee_saved_registers);
+ while (!reserved_core_registers.IsEmpty()) {
+ blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
+ }
+}
+
+Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented support for floating-point";
+ }
+
+ ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfXRegisters);
+ DCHECK_NE(reg, -1);
+ blocked_core_registers_[reg] = true;
+
+ if (IsFPType(type)) {
+ return Location::FpuRegisterLocation(reg);
+ } else {
+ return Location::RegisterLocation(reg);
+ }
+}
+
+void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
+}
+
+void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
+}
+
+#undef __
+#define __ assembler_->vixl_masm_->
+
+InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
+ CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(ArrayGet) \
+ M(ArraySet) \
+ M(DoubleConstant) \
+ M(FloatConstant) \
+ M(Mul) \
+ M(Neg) \
+ M(NewArray) \
+ M(ParallelMove)
+
+#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
+
+enum UnimplementedInstructionBreakCode {
+#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
+#undef ENUM_UNIMPLEMENTED_INSTRUCTION
+};
+
+#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
+ } \
+ void LocationsBuilderARM64::Visit##name(H##name* instr) { \
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
+ locations->SetOut(Location::Any()); \
+ }
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
+#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
+
+#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
+
+void LocationsBuilderARM64::HandleAddSub(HBinaryOperation* instr) {
+ DCHECK(instr->IsAdd() || instr->IsSub());
+ DCHECK_EQ(instr->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented " << instr->DebugName() << " type " << type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::HandleAddSub(HBinaryOperation* instr) {
+ DCHECK(instr->IsAdd() || instr->IsSub());
+
+ Primitive::Type type = instr->GetType();
+ Register dst = OutputRegister(instr);
+ Register lhs = InputRegisterAt(instr, 0);
+ Operand rhs = InputOperandAt(instr, 1);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ if (instr->IsAdd()) {
+ __ Add(dst, lhs, rhs);
+ } else {
+ __ Sub(dst, lhs, rhs);
+ }
+ break;
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected add/sub type " << type;
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented add/sub type " << type;
+ }
+}
+
+void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
+ HandleAddSub(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
+ HandleAddSub(instruction);
+}
+
+void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
+ __ Ldr(OutputRegister(instruction),
+ HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
+}
+
+void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitCompare(HCompare* instruction) {
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ DCHECK_EQ(in_type, Primitive::kPrimLong);
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ vixl::Label done;
+ Register result = OutputRegister(instruction);
+ Register left = InputRegisterAt(instruction, 0);
+ Operand right = InputOperandAt(instruction, 1);
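+ // Produce the -1/0/1 compare contract: Subs leaves 0 on equality; otherwise
+ // materialize 1 and negate it when left < right (le together with ne
+ // implies lt).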
+ __ Subs(result.X(), left, right);
+ __ B(eq, &done);
+ __ Mov(result, 1);
+ __ Cneg(result, result, le);
+ __ Bind(&done);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register lhs = InputRegisterAt(instruction, 0);
+ Operand rhs = InputOperandAt(instruction, 1);
+ Register res = RegisterFrom(locations->Out(), instruction->GetType());
+ Condition cond = ARM64Condition(instruction->GetCondition());
+
+ __ Cmp(lhs, rhs);
+ __ Csel(res, vixl::Assembler::AppropriateZeroRegFor(res), Operand(1), InvertCondition(cond));
+}
+
+#define FOR_EACH_CONDITION_INSTRUCTION(M) \
+ M(Equal) \
+ M(NotEqual) \
+ M(LessThan) \
+ M(LessThanOrEqual) \
+ M(GreaterThan) \
+ M(GreaterThanOrEqual)
+#define DEFINE_CONDITION_VISITORS(Name) \
+void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
+void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
+FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
+#undef FOR_EACH_CONDITION_INSTRUCTION
+
+void LocationsBuilderARM64::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ if (kIsDebugBuild) {
+ down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
+ __ Brk(0); // TODO: Introduce special markers for such code locations.
+ }
+}
+
+void LocationsBuilderARM64::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
+ HBasicBlock* successor = got->GetSuccessor();
+ // TODO: Support for suspend checks emission.
+ if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ if (cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ HCondition* condition = cond->AsCondition();
+ vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+
+ // TODO: Support constant condition input in VisitIf.
+
+ if (condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = if_instr->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
+
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ Register lhs = InputRegisterAt(condition, 0);
+ Operand rhs = InputOperandAt(condition, 1);
+ Condition cond = ARM64Condition(condition->GetCondition());
+ if ((cond == eq || cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ if (cond == eq) {
+ __ Cbz(lhs, true_target);
+ } else {
+ __ Cbnz(lhs, true_target);
+ }
+ } else {
+ __ Cmp(lhs, rhs);
+ __ B(cond, true_target);
+ }
+ }
+
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ Primitive::Type res_type = instruction->GetType();
+ Register res = OutputRegister(instruction);
+ Register obj = InputRegisterAt(instruction, 0);
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
+ switch (res_type) {
+ case Primitive::kPrimBoolean: {
+ __ Ldrb(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimByte: {
+ __ Ldrsb(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimShort: {
+ __ Ldrsh(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimChar: {
+ __ Ldrh(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong: { // TODO: support volatile.
+ DCHECK(res.IsX() == (res_type == Primitive::kPrimLong));
+ __ Ldr(res, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register res_type " << res_type;
+ break;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable res_type " << res_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ Register obj = InputRegisterAt(instruction, 0);
+ Register value = InputRegisterAt(instruction, 1);
+ Primitive::Type field_type = instruction->InputAt(1)->GetType();
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ __ Strb(value, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ __ Strh(value, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong: {
+ DCHECK(value.IsX() == (field_type == Primitive::kPrimLong));
+ __ Str(value, MemOperand(obj, offset));
+
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(obj, value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << field_type;
+ break;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << field_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
+ locations->AddTemp(LocationFrom(x0));
+
+ InvokeDexCallingConventionVisitor calling_convention_visitor;
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
+ }
+
+ Primitive::Type return_type = invoke->GetType();
+ if (return_type != Primitive::kPrimVoid) {
+ locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ // Make sure that ArtMethod* is passed in W0 as per the calling convention
+ DCHECK(temp.Is(w0));
+ size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
+ invoke->GetIndexInDexCache() * kHeapRefSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ __ Ldr(temp, MemOperand(sp, kCurrentMethodStackOffset));
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp, MemOperand(temp.X(), mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp, MemOperand(temp.X(), index_in_cache));
+ // lr = temp->entry_point_from_quick_compiled_code_;
+ __ Ldr(lr, MemOperand(temp.X(), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ // lr();
+ __ Blr(lr);
+
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ Offset class_offset = mirror::Object::ClassOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset();
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp.W(), MemOperand(temp, class_offset.SizeValue()));
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, Primitive::kPrimNot,
+ class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ Ldr(temp.W(), MemOperand(temp, method_offset));
+ // lr = temp->GetEntryPoint();
+ __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
+ // lr();
+ __ Blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderARM64::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
+ DCHECK(type_index.Is(w0));
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
+ DCHECK(current_method.Is(w1));
+ __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+ __ Mov(type_index, instruction->GetTypeIndex());
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value()));
+ __ Blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM64::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
+ switch (instruction->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), Operand(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << instruction->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location obj = locations->InAt(0);
+ if (obj.IsRegister()) {
+ __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
+ } else {
+ DCHECK(obj.IsConstant()) << obj;
+ DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ __ B(slow_path->GetEntryLabel());
+ }
+}
+
+void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
+ // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type return_type = instruction->InputAt(0)->GetType();
+
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+
+ locations->SetInAt(0, LocationFrom(x0));
+}
+
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
+ if (kIsDebugBuild) {
+ Primitive::Type type = instruction->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ DCHECK(InputRegisterAt(instruction, 0).Is(w0));
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK(InputRegisterAt(instruction, 0).Is(x0));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented return type " << type;
+ }
+ }
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ instruction->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
+}
+
+void LocationsBuilderARM64::VisitSub(HSub* instruction) {
+ HandleAddSub(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
+ HandleAddSub(instruction);
+}
+
+void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
+ instruction, locations->InAt(0), locations->InAt(1));
+ codegen_->AddSlowPath(slow_path);
+
+ __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
+ __ B(slow_path->GetEntryLabel(), hs);
+}
+
+void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ // TODO: Improve support for suspend checks.
+ SuspendCheckSlowPathARM64* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, nullptr);
+ codegen_->AddSlowPath(slow_path);
+
+ __ Subs(wSuspend, wSuspend, 1);
+ __ B(slow_path->GetEntryLabel(), le);
+ __ Bind(slow_path->GetReturnLabel());
+}
+
+void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+} // namespace arm64
+} // namespace art
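
VisitSuspendCheck above decrements the dedicated suspend register and takes the slow path once the counter drops to zero. A rough model of the protocol (the countdown semantics are inferred from the Subs/le pair, so treat it as an assumption):

    // Model of the Subs/B.le/Bind sequence emitted by VisitSuspendCheck.
    void SuspendCheckModel(int32_t& suspend_counter) {
      if (--suspend_counter <= 0) {
        // Slow path: load pTestSuspend from the thread, Blr to it, record the
        // PC, then branch back to the code after the check.
      }
    }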
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
new file mode 100644
index 0000000000..a4003ffea5
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+
+#include "code_generator.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "arch/arm64/quick_method_frame_info_arm64.h"
+
+namespace art {
+namespace arm64 {
+
+class CodeGeneratorARM64;
+
+static constexpr size_t kArm64WordSize = 8;
+static const vixl::Register kParameterCoreRegisters[] = {
+ vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
+};
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+static const vixl::FPRegister kParameterFPRegisters[] = {
+ vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7
+};
+static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
+
+const vixl::Register tr = vixl::x18; // Thread Register
+const vixl::Register wSuspend = vixl::w19; // Suspend Register
+const vixl::Register xSuspend = vixl::x19;
+
+const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
+const vixl::CPURegList runtime_reserved_core_registers(tr, xSuspend, vixl::lr);
+const vixl::CPURegList quick_callee_saved_registers(vixl::CPURegister::kRegister,
+ vixl::kXRegSize,
+ kArm64CalleeSaveRefSpills);
+
+class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFPRegisters,
+ kParameterFPRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+ return Location::RegisterLocation(X0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitor() : gp_index_(0), stack_index_(0) {}
+
+ Location GetNextLocation(Primitive::Type type);
+ Location GetReturnLocation(Primitive::Type return_type) {
+ return calling_convention.GetReturnLocation(return_type);
+ }
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+ // The current index for core registers.
+ uint32_t gp_index_;
+ // The current stack index.
+ uint32_t stack_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
+class InstructionCodeGeneratorARM64 : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ virtual void Visit##name(H##name* instr);
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void LoadCurrentMethod(XRegister reg);
+
+ Arm64Assembler* GetAssembler() const { return assembler_; }
+
+ private:
+ void HandleAddSub(HBinaryOperation* instr);
+
+ Arm64Assembler* const assembler_;
+ CodeGeneratorARM64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64);
+};
+
+class LocationsBuilderARM64 : public HGraphVisitor {
+ public:
+ explicit LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ virtual void Visit##name(H##name* instr);
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ void HandleAddSub(HBinaryOperation* instr);
+ void HandleInvoke(HInvoke* instr);
+
+ CodeGeneratorARM64* const codegen_;
+ InvokeDexCallingConventionVisitor parameter_visitor_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
+};
+
+class CodeGeneratorARM64 : public CodeGenerator {
+ public:
+ explicit CodeGeneratorARM64(HGraph* graph);
+ virtual ~CodeGeneratorARM64() { }
+
+ virtual void GenerateFrameEntry() OVERRIDE;
+ virtual void GenerateFrameExit() OVERRIDE;
+
+ static const vixl::CPURegList& GetFramePreservedRegisters() {
+ static const vixl::CPURegList frame_preserved_regs =
+ vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize, vixl::lr.Bit());
+ return frame_preserved_regs;
+ }
+ static int GetFramePreservedRegistersSize() {
+ return GetFramePreservedRegisters().TotalSizeInBytes();
+ }
+
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
+
+ vixl::Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_ + block->GetBlockId();
+ }
+
+ virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+
+ virtual size_t GetWordSize() const OVERRIDE {
+ return kArm64WordSize;
+ }
+
+ virtual size_t FrameEntrySpillSize() const OVERRIDE;
+
+ virtual HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ virtual Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+
+ // Emit a write barrier.
+ void MarkGCCard(vixl::Register object, vixl::Register value);
+
+ // Register allocation.
+
+ virtual void SetupBlockedRegisters() const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
+ return 0;
+ }
+
+ virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
+ return 0;
+ }
+
+ // The number of registers that can be allocated. The register allocator may
+ // decide to reserve and not use a few of them.
+ // We do not consider registers sp, xzr, wzr. They are either not allocatable
+ // (xzr, wzr), or make for poor allocatable registers (sp alignment
+ // requirements, etc.). This also facilitates our task as all other registers
+ // can easily be mapped to or from their type and index or code.
+ static const int kNumberOfAllocatableCoreRegisters = vixl::kNumberOfRegisters - 1;
+ static const int kNumberOfAllocatableFloatingPointRegisters = vixl::kNumberOfFPRegisters;
+ static const int kNumberOfAllocatableRegisters =
+ kNumberOfAllocatableCoreRegisters + kNumberOfAllocatableFloatingPointRegisters;
+ static constexpr int kNumberOfAllocatableRegisterPairs = 0;
+
+ virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ return InstructionSet::kArm64;
+ }
+
+ void MoveHelper(Location destination, Location source, Primitive::Type type);
+
+ virtual void Initialize() OVERRIDE {
+ HGraph* graph = GetGraph();
+ int length = graph->GetBlocks().Size();
+ block_labels_ = graph->GetArena()->AllocArray<vixl::Label>(length);
+ for (int i = 0; i < length; ++i) {
+ new(block_labels_ + i) vixl::Label();
+ }
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ vixl::Label* block_labels_;
+
+ LocationsBuilderARM64 location_builder_;
+ InstructionCodeGeneratorARM64 instruction_visitor_;
+ Arm64Assembler assembler_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
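
Initialize() above placement-news each vixl::Label because AllocArray returns raw, uninitialized arena storage, and vixl::Label has a non-trivial constructor that must run before the label can be bound or branched to. The pattern in isolation (allocator details assumed):

    vixl::Label* labels = arena->AllocArray<vixl::Label>(count);  // raw memory
    for (int i = 0; i < count; ++i) {
      new (labels + i) vixl::Label();  // construct in place; the arena owns the storage
    }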
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 5f01265b85..61f0750c5c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1056,16 +1056,13 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
-
+ DCHECK(first.Equals(locations->Out()));
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.As<Register>(), locations->Out().As<Register>());
if (second.IsRegister()) {
__ addl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ addl(first.As<Register>(), imm);
+ __ addl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
__ addl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
@@ -1073,10 +1070,6 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsRegisterPairLow<Register>(),
- locations->Out().AsRegisterPairLow<Register>());
- DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
- locations->Out().AsRegisterPairHigh<Register>());
if (second.IsRegister()) {
__ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
@@ -1122,16 +1115,16 @@ void LocationsBuilderX86::VisitSub(HSub* sub) {
locations->SetOut(Location::SameAsFirstInput());
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1139,52 +1132,43 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.As<Register>(),
- locations->Out().As<Register>());
if (second.IsRegister()) {
- __ subl(first.As<Register>(),
- second.As<Register>());
+ __ subl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(first.As<Register>(), imm);
+ __ subl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ subl(first.As<Register>(),
- Address(ESP, second.GetStackIndex()));
+ __ subl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsRegisterPairLow<Register>(),
- locations->Out().AsRegisterPairLow<Register>());
- DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
- locations->Out().AsRegisterPairHigh<Register>());
if (second.IsRegister()) {
- __ subl(first.AsRegisterPairLow<Register>(),
- second.AsRegisterPairLow<Register>());
- __ sbbl(first.AsRegisterPairHigh<Register>(),
- second.AsRegisterPairHigh<Register>());
+ __ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
- __ subl(first.AsRegisterPairLow<Register>(),
- Address(ESP, second.GetStackIndex()));
+ __ subl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
__ sbbl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1356,18 +1340,34 @@ void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
}
-void LocationsBuilderX86::VisitNot(HNot* instruction) {
+void LocationsBuilderX86::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-void InstructionCodeGeneratorX86::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
+void InstructionCodeGeneratorX86::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
Location out = locations->Out();
DCHECK_EQ(locations->InAt(0).As<Register>(), out.As<Register>());
- __ xorl(out.As<Register>(), Immediate(1));
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ xorl(out.As<Register>(), Immediate(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ notl(out.As<Register>());
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << not_->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
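The widened VisitNot above is why HNot now carries a result type: a boolean not and an integer not lower to different x86 instructions. A small plain-C++ sketch of the distinction (not codegen code):

#include <cassert>
#include <cstdint>

int main() {
  // kPrimBoolean inputs are encoded as 0 or 1, so boolean negation is
  // xorl(out, Immediate(1))...
  int32_t b = 1;
  assert((b ^ 1) == 0);
  // ...while kPrimInt negation is one's complement, notl(out).
  int32_t x = 5;
  assert(~x == -6);
  // The two lowerings are not interchangeable for general integers.
  assert((x ^ 1) != ~x);
  return 0;
}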
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 38a40dc7de..4a05b89892 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1047,19 +1047,17 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
-
DCHECK(first.Equals(locations->Out()));
+
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
__ addl(first.As<CpuRegister>(), second.As<CpuRegister>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
__ addl(first.As<CpuRegister>(), imm);
} else {
- __ addl(first.As<CpuRegister>(),
- Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ addl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
@@ -1100,53 +1098,52 @@ void LocationsBuilderX86_64::VisitSub(HSub* sub) {
locations->SetOut(Location::SameAsFirstInput());
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
-
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
- DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
- locations->Out().As<CpuRegister>().AsRegister());
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
- } else if (locations->InAt(1).IsConstant()) {
- HConstant* instruction = locations->InAt(1).GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(locations->InAt(0).As<CpuRegister>(), imm);
+ if (second.IsRegister()) {
+ __ subl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ __ subl(first.As<CpuRegister>(), imm);
} else {
- __ subl(locations->InAt(0).As<CpuRegister>(),
- Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
+ __ subl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ subq(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
+ __ subq(first.As<CpuRegister>(), second.As<CpuRegister>());
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1276,18 +1273,34 @@ void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instru
// Nothing to do, the parameter is already at its location.
}
-void LocationsBuilderX86_64::VisitNot(HNot* instruction) {
+void LocationsBuilderX86_64::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-void InstructionCodeGeneratorX86_64::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
+void InstructionCodeGeneratorX86_64::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
locations->Out().As<CpuRegister>().AsRegister());
- __ xorq(locations->Out().As<CpuRegister>(), Immediate(1));
+ Location out = locations->Out();
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ xorq(out.As<CpuRegister>(), Immediate(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ notl(out.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << not_->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
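The first.Equals(locations->Out()) DCHECKs introduced in VisitAdd and VisitSub reflect the two-address nature of the x86 arithmetic instructions, which is also why the location builders pin the output with Location::SameAsFirstInput(). A sketch of that constraint in plain C++:

#include <cassert>
#include <cstdint>

// subl/subq/subss/subsd overwrite their left operand, so the register
// allocator must give the output the same register as the first input.
int32_t SubLikeX86(int32_t first, int32_t second) {
  first -= second;  // subl second, first: "first" is consumed and redefined.
  return first;     // "out" and the first input share a location.
}

int main() {
  assert(SubLikeX86(7, 2) == 5);
  return 0;
}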
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index af4cf73867..03951e29dd 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -16,8 +16,10 @@
#include <functional>
+#include "base/macros.h"
#include "builder.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "common_compiler_test.h"
@@ -93,6 +95,12 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, int32_t expected) {
if (kRuntimeISA == kX86_64) {
Run(allocator, codegenX86_64, has_result, expected);
}
+
+ arm64::CodeGeneratorARM64 codegenARM64(graph);
+ codegenARM64.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kArm64) {
+ Run(allocator, codegenARM64, has_result, expected);
+ }
}
static void RunCodeOptimized(CodeGenerator* codegen,
@@ -134,8 +142,8 @@ static void TestCode(const uint16_t* data, bool has_result = false, int32_t expe
HGraphBuilder builder(&arena);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
- // Remove suspend checks, they cannot be executed in this context.
ASSERT_NE(graph, nullptr);
+ // Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCodeBaseline(graph, has_result, expected);
}
@@ -260,6 +268,31 @@ TEST(CodegenTest, ReturnIf2) {
TestCode(data, true, 0);
}
+// Exercise the bit-wise (one's complement) not-int instruction.
+#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
+TEST(CodegenTest, TEST_NAME) { \
+ const int32_t input = INPUT; \
+ const uint16_t input_lo = input & 0x0000FFFF; \
+ const uint16_t input_hi = input >> 16; \
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
+ Instruction::CONST | 0 << 8, input_lo, input_hi, \
+    Instruction::NOT_INT | 1 << 8 | 0 << 12, \
+ Instruction::RETURN | 1 << 8); \
+ \
+ TestCode(data, true, EXPECTED_OUTPUT); \
+}
+
+NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
+NOT_INT_TEST(ReturnNotIntMinus1, -1, 0)
+NOT_INT_TEST(ReturnNotInt0, 0, -1)
+NOT_INT_TEST(ReturnNotInt1, 1, -2)
+NOT_INT_TEST(ReturnNotIntINT_MIN, -2147483648, 2147483647) // (2^31) - 1
+NOT_INT_TEST(ReturnNotIntINT_MINPlus1, -2147483647, 2147483646) // (2^31) - 2
+NOT_INT_TEST(ReturnNotIntINT_MAXMinus1, 2147483646, -2147483647) // -(2^31 - 1)
+NOT_INT_TEST(ReturnNotIntINT_MAX, 2147483647, -2147483648) // -(2^31)
+
+#undef NOT_INT_TEST
+
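For reference, a standalone sketch of the macro's operand encoding: the 32-bit CONST immediate travels as two 16-bit code units, low halfword first. The constants below are taken from the ReturnNotIntINT_MINPlus1 instantiation:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t input = -2147483647;  // INT_MIN + 1
  const uint16_t input_lo = input & 0x0000FFFF;                  // low code unit
  const uint16_t input_hi = static_cast<uint32_t>(input) >> 16;  // high code unit
  // The interpreter/codegen reassembles the halves into the original value.
  const int32_t rebuilt = static_cast<int32_t>(
      (static_cast<uint32_t>(input_hi) << 16) | input_lo);
  assert(rebuilt == input);
  assert(~input == 2147483646);  // The expected NOT_INT result above.
  return 0;
}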
TEST(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
@@ -370,12 +403,16 @@ TEST(CodegenTest, NonMaterializedCondition) {
TestCode(data, true, 12); \
}
+#if !defined(__aarch64__)
MUL_TEST(INT, MulInt);
MUL_TEST(LONG, MulLong);
-// MUL_TEST(FLOAT, Float);
-// MUL_TEST(DOUBLE, Double);
+#endif
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnMulIntLit8) {
+#else
TEST(CodegenTest, ReturnMulIntLit8) {
+#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
@@ -384,7 +421,11 @@ TEST(CodegenTest, ReturnMulIntLit8) {
TestCode(data, true, 12);
}
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnMulIntLit16) {
+#else
TEST(CodegenTest, ReturnMulIntLit16) {
+#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
@@ -393,5 +434,121 @@ TEST(CodegenTest, ReturnMulIntLit16) {
TestCode(data, true, 12);
}
+TEST(CodegenTest, MaterializedCondition1) {
+  // Check that conditions are materialized correctly. A materialized
+  // condition should yield `1` if it evaluates to true, and `0` otherwise.
+ // We force the materialization of comparisons for different combinations of
+ // inputs and check the results.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = new (&allocator) HGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+ HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(code_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(code_block);
+ code_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant cst_lhs(lhs[i]);
+ code_block->AddInstruction(&cst_lhs);
+ HIntConstant cst_rhs(rhs[i]);
+ code_block->AddInstruction(&cst_rhs);
+ HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ code_block->AddInstruction(&cmp_lt);
+ HReturn ret(&cmp_lt);
+ code_block->AddInstruction(&ret);
+
+ auto hook_before_codegen = [](HGraph* graph) {
+ HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+
+ RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+}
+
+TEST(CodegenTest, MaterializedCondition2) {
+ // Check that HIf correctly interprets a materialized condition.
+ // We force the materialization of comparisons for different combinations of
+  // inputs. An HIf takes the materialized condition as input and returns a
+ // value that we verify.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = new (&allocator) HGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_block);
+ HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_true_block);
+ HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_false_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(if_block);
+ if_block->AddSuccessor(if_true_block);
+ if_block->AddSuccessor(if_false_block);
+ if_true_block->AddSuccessor(exit_block);
+ if_false_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant cst_lhs(lhs[i]);
+ if_block->AddInstruction(&cst_lhs);
+ HIntConstant cst_rhs(rhs[i]);
+ if_block->AddInstruction(&cst_rhs);
+ HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ if_block->AddInstruction(&cmp_lt);
+ // We insert a temporary to separate the HIf from the HLessThan and force
+ // the materialization of the condition.
+ HTemporary force_materialization(0);
+ if_block->AddInstruction(&force_materialization);
+ HIf if_lt(&cmp_lt);
+ if_block->AddInstruction(&if_lt);
+
+ HIntConstant cst_lt(1);
+ if_true_block->AddInstruction(&cst_lt);
+ HReturn ret_lt(&cst_lt);
+ if_true_block->AddInstruction(&ret_lt);
+ HIntConstant cst_ge(0);
+ if_false_block->AddInstruction(&cst_ge);
+ HReturn ret_ge(&cst_ge);
+ if_false_block->AddInstruction(&ret_ge);
+
+ auto hook_before_codegen = [](HGraph* graph) {
+ HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+
+ RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+}
} // namespace art
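Both MaterializedCondition tests rely on the contract that a materialized condition is exactly 0 or 1. A plain-C++ sketch of that contract over the same input pairs used by the tests:

#include <cassert>

// A materialized condition must produce exactly 1 for true and 0 for
// false, so a later consumer (such as an HIf) can re-test it against zero.
int MaterializeLessThan(int lhs, int rhs) {
  return lhs < rhs ? 1 : 0;  // e.g. cmp + setcc on x86, cset on arm64.
}

int main() {
  int lhs[] = {1, 2, -1, 2, 0xabc};
  int rhs[] = {2, 1, 2, -1, 0xabc};
  for (unsigned i = 0; i < sizeof(lhs) / sizeof(lhs[0]); ++i) {
    int cond = MaterializeLessThan(lhs[i], rhs[i]);
    assert(cond == 0 || cond == 1);
    assert((cond != 0) == (lhs[i] < rhs[i]));  // What an HIf would observe.
  }
  return 0;
}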
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 05984a080e..4d8bec2422 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
#define ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
+#include <ostream>
+
#include "base/value_object.h"
namespace art {
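The new <ostream> include is needed because the visualizer header works with std::ostream; a declaration that only mentions stream references could get away with <iosfwd>, but inline definitions need the full header. A minimal illustration (Pretty is a made-up type for the sketch):

#include <iostream>
#include <ostream>

struct Pretty {
  int value;
};

// Defining operator<< inline requires the complete std::ostream type.
inline std::ostream& operator<<(std::ostream& os, const Pretty& p) {
  return os << "Pretty(" << p.value << ")";
}

int main() {
  Pretty p{42};
  std::cout << p << "\n";
  return 0;
}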
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2d9e35c3b6..29eabe7e29 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -50,7 +50,7 @@ void InstructionSimplifier::VisitEqual(HEqual* equal) {
// Replace (bool_value == 0) with !bool_value
DCHECK_EQ(input2->AsIntConstant()->GetValue(), 0);
equal->GetBlock()->ReplaceAndRemoveInstructionWith(
- equal, new (GetGraph()->GetArena()) HNot(input1));
+ equal, new (GetGraph()->GetArena()) HNot(Primitive::kPrimBoolean, input1));
}
}
}
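The extra Primitive::kPrimBoolean argument keeps this rewrite sound: for a boolean b in {0, 1}, (b == 0) and boolean-not(b) agree, and tagging the HNot as boolean lets backends lower it as b ^ 1 rather than ~b. A one-line check of the equivalence:

#include <cassert>

int main() {
  for (int b = 0; b <= 1; ++b) {
    assert((b == 0 ? 1 : 0) == (b ^ 1));  // (bool == 0) <=> boolean not.
  }
  return 0;
}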
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9b7ff88b68..7adb84008a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -485,7 +485,7 @@ class HBasicBlock : public ArenaObject {
M(Local, Instruction) \
M(LongConstant, Constant) \
M(NewInstance, Instruction) \
- M(Not, Instruction) \
+ M(Not, UnaryOperation) \
M(ParameterValue, Instruction) \
M(ParallelMove, Instruction) \
M(Phi, Instruction) \
@@ -1708,15 +1708,17 @@ class HParameterValue : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HParameterValue);
};
-class HNot : public HExpression<1> {
+class HNot : public HUnaryOperation {
public:
- explicit HNot(HInstruction* input) : HExpression(Primitive::kPrimBoolean, SideEffects::None()) {
- SetRawInputAt(0, input);
- }
+ explicit HNot(Primitive::Type result_type, HInstruction* input)
+ : HUnaryOperation(result_type, input) {}
virtual bool CanBeMoved() const { return true; }
virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
+ virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
+
DECLARE_INSTRUCTION(Not);
private:
@@ -2123,7 +2125,7 @@ class HGraphVisitor : public ValueObject {
#undef DECLARE_VISIT_INSTRUCTION
private:
- HGraph* graph_;
+ HGraph* const graph_;
DISALLOW_COPY_AND_ASSIGN(HGraphVisitor);
};
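Moving HNot onto HUnaryOperation gives it the Evaluate() overloads shown above, which is what lets constant folding collapse HNot(constant) without backend involvement. A sketch of that contract (NotOp is a stand-in for illustration, not the ART class):

#include <cassert>
#include <cstdint>

// A unary operation exposes 32- and 64-bit evaluators so a folding pass
// can replace op(constant) with a constant of the matching width.
struct NotOp {
  int32_t Evaluate(int32_t x) const { return ~x; }
  int64_t Evaluate(int64_t x) const { return ~x; }
};

int main() {
  NotOp op;
  assert(op.Evaluate(INT32_C(0)) == -1);
  assert(op.Evaluate(INT64_C(-1)) == 0);
  return 0;
}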
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index dce8e6d78b..80e9cdb16f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -215,7 +215,10 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
}
// Do not attempt to compile on architectures we do not support.
- if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
+ if (instruction_set != kArm64 &&
+ instruction_set != kThumb2 &&
+ instruction_set != kX86 &&
+ instruction_set != kX86_64) {
return nullptr;
}
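A sketch of the reshaped early-out, with a stand-in enum (ART's real InstructionSet is defined elsewhere; only the four accepted values are taken from the change):

#include <cassert>

enum InstructionSet { kNone, kArm, kArm64, kThumb2, kMips, kX86, kX86_64 };

// arm64 joins thumb2/x86/x86_64; everything else still bails out early.
bool CanTryCompile(InstructionSet isa) {
  return isa == kArm64 || isa == kThumb2 || isa == kX86 || isa == kX86_64;
}

int main() {
  assert(CanTryCompile(kArm64));  // Newly accepted by this change.
  assert(!CanTryCompile(kMips));  // Still returns nullptr early.
  return 0;
}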
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 8f718480b3..7dda4f61d5 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -32,6 +32,7 @@ class BlockInfo : public ArenaObject {
live_in_(allocator, number_of_ssa_values, false),
live_out_(allocator, number_of_ssa_values, false),
kill_(allocator, number_of_ssa_values, false) {
+ UNUSED(block_);
live_in_.ClearAllBits();
live_out_.ClearAllBits();
kill_.ClearAllBits();
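The UNUSED(block_) call keeps the otherwise-unread private field from tripping unused-field warnings until the liveness code reads it. A sketch of the idiom; the macro definition below is an assumption for the sketch, since ART's real one lives in base/macros.h and may differ:

// Assumed definition, for illustration only.
#define UNUSED(x) ((void)(x))

class BlockInfo {
 public:
  explicit BlockInfo(int block) : block_(block) {
    UNUSED(block_);  // Marks the field as used until real consumers exist.
  }
 private:
  int block_;
};

int main() {
  BlockInfo info(42);
  UNUSED(info);
  return 0;
}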