Move assembler out of runtime into compiler/utils.
Other directory-layout cleanup. There is still work to separate quick
and portable in some files (e.g. argument visitor, proxy).
Change-Id: If8fecffda8ba5c4c47a035f0c622c538c6b58351
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
new file mode 100644
index 0000000..0778cd3
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -0,0 +1,1895 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_arm.h"
+
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils.h"
+
+namespace art {
+namespace arm {
+
+// Instruction encoding bits.
+enum {
+ H = 1 << 5, // halfword (or byte)
+ L = 1 << 20, // load (or store)
+ S = 1 << 20, // set condition code (or leave unchanged)
+ W = 1 << 21, // writeback base register (or leave unchanged)
+ A = 1 << 21, // accumulate in multiply instruction (or not)
+ B = 1 << 22, // unsigned byte (or word)
+ N = 1 << 22, // long (or short)
+ U = 1 << 23, // positive (or negative) offset/index
+ P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
+ I = 1 << 25, // immediate shifter operand (or not)
+
+ B0 = 1,
+ B1 = 1 << 1,
+ B2 = 1 << 2,
+ B3 = 1 << 3,
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B10 = 1 << 10,
+ B11 = 1 << 11,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B17 = 1 << 17,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+
+ // Instruction bit masks.
+ RdMask = 15 << 12, // in str instruction
+ CondMask = 15 << 28,
+ CoprocessorMask = 15 << 8,
+ OpCodeMask = 15 << 21, // in data-processing instructions
+ Imm24Mask = (1 << 24) - 1,
+ Off12Mask = (1 << 12) - 1,
+
+ // ldrex/strex register field encodings.
+ kLdExRnShift = 16,
+ kLdExRtShift = 12,
+ kStrExRnShift = 16,
+ kStrExRdShift = 12,
+ kStrExRtShift = 0,
+};
+
+
+// Human-readable names for the core registers, indexed by register number.
+static const char* kRegisterNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "fp", "ip", "sp", "lr", "pc"
+};
+// Prints a core register by name, or "Register[n]" when out of range.
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= R0 && rhs <= PC) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+
+// Prints a single-precision VFP register as "sN", or "SRegister[n]" when
+// out of range.
+std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
+ if (rhs >= S0 && rhs < kNumberOfSRegisters) {
+ os << "s" << static_cast<int>(rhs);
+ } else {
+ os << "SRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+
+// Prints a double-precision VFP register as "dN", or "DRegister[n]" when
+// out of range.
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+ if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+ os << "d" << static_cast<int>(rhs);
+ } else {
+ os << "DRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+
+// Condition-code mnemonics, indexed by Condition value (EQ..AL).
+static const char* kConditionNames[] = {
+ "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
+ "LE", "AL",
+};
+std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
+ if (rhs >= EQ && rhs <= AL) {
+ os << kConditionNames[rhs];
+ } else {
+ os << "Condition[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+// Appends one 32-bit instruction word to the assembly buffer.
+void ArmAssembler::Emit(int32_t value) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<int32_t>(value);
+}
+
+
+// Encodes a type 0/1 (data-processing) instruction:
+// cond | type | opcode | S bit | Rn | Rd | shifter operand.
+void ArmAssembler::EmitType01(Condition cond,
+ int type,
+ Opcode opcode,
+ int set_cc,
+ Register rn,
+ Register rd,
+ ShifterOperand so) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ type << kTypeShift |
+ static_cast<int32_t>(opcode) << kOpcodeShift |
+ set_cc << kSShift |
+ static_cast<int32_t>(rn) << kRnShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ so.encoding();
+ Emit(encoding);
+}
+
+
+// Encodes a type 5 (branch / branch-with-link) instruction; the 24-bit
+// branch offset is folded in by EncodeBranchOffset.
+void ArmAssembler::EmitType5(Condition cond, int offset, bool link) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ 5 << kTypeShift |
+ (link ? 1 : 0) << kLinkShift;
+ Emit(ArmAssembler::EncodeBranchOffset(offset, encoding));
+}
+
+
+// Encodes a single-register word or byte load/store (addressing mode 2).
+void ArmAssembler::EmitMemOp(Condition cond,
+ bool load,
+ bool byte,
+ Register rd,
+ Address ad) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 |
+ (load ? L : 0) |
+ (byte ? B : 0) |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ ad.encoding();
+ Emit(encoding);
+}
+
+
+// Encodes an addressing-mode-3 load/store (halfword, signed byte/halfword,
+// doubleword); 'mode' carries the L/H/S variant bits supplied by the caller.
+void ArmAssembler::EmitMemOpAddressMode3(Condition cond,
+ int32_t mode,
+ Register rd,
+ Address ad) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B22 |
+ mode |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ ad.encoding3();
+ Emit(encoding);
+}
+
+
+// Encodes a block transfer (ldm/stm) of the register set 'regs' relative
+// to 'base', using block addressing mode 'am'.
+void ArmAssembler::EmitMultiMemOp(Condition cond,
+ BlockAddressMode am,
+ bool load,
+ Register base,
+ RegList regs) {
+ CHECK_NE(base, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 |
+ am |
+ (load ? L : 0) |
+ (static_cast<int32_t>(base) << kRnShift) |
+ regs;
+ Emit(encoding);
+}
+
+
+// Encodes a MOV whose operand is a register shifted by an immediate
+// (shifter operand must be the immediate-shift form, type 1).
+void ArmAssembler::EmitShiftImmediate(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so) {
+ CHECK_NE(cond, kNoCondition);
+ CHECK_EQ(so.type(), 1U);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ static_cast<int32_t>(MOV) << kOpcodeShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ so.encoding() << kShiftImmShift |
+ static_cast<int32_t>(opcode) << kShiftShift |
+ static_cast<int32_t>(rm);
+ Emit(encoding);
+}
+
+
+// Encodes a MOV whose operand is a register shifted by a register
+// (shifter operand must be the register form, type 0).
+void ArmAssembler::EmitShiftRegister(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so) {
+ CHECK_NE(cond, kNoCondition);
+ CHECK_EQ(so.type(), 0U);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ static_cast<int32_t>(MOV) << kOpcodeShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ so.encoding() << kShiftRegisterShift |
+ static_cast<int32_t>(opcode) << kShiftShift |
+ B4 |
+ static_cast<int32_t>(rm);
+ Emit(encoding);
+}
+
+
+// Emits a (conditional) branch to 'label'. Branches to an unbound label
+// are chained together through the instruction's offset field and patched
+// later in Bind().
+void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) {
+ if (label->IsBound()) {
+ EmitType5(cond, label->Position() - buffer_.Size(), link);
+ } else {
+ int position = buffer_.Size();
+ // Use the offset field of the branch instruction for linking the sites.
+ EmitType5(cond, label->position_, link);
+ label->LinkTo(position);
+ }
+}
+
+// Data-processing instructions. The 0/1 literal passed to EmitType01 is the
+// S bit: the 's'-suffixed variants and the compare/test instructions update
+// the condition flags; the others leave them unchanged.
+void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), AND, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::add(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
+}
+
+
+// The test/compare instructions have no destination; R0 fills the unused
+// Rd field in the encoding.
+void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) {
+ CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker.
+ EmitType01(cond, so.type(), TST, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) {
+ CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker.
+ EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::orr(Register rd, Register rn,
+ ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::orrs(Register rd, Register rn,
+ ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
+}
+
+
+// MOV and MVN take no first source operand; R0 fills the unused Rn field.
+void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
+}
+
+
+void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
+}
+
+
+// clz: count leading zeros of rm into rd. Neither register may be PC.
+void ArmAssembler::clz(Register rd, Register rm, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_NE(rd, PC);
+ CHECK_NE(rm, PC);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 | B22 | B21 | (0xf << 16) |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ (0xf << 8) | B4 | static_cast<int32_t>(rm);
+ Emit(encoding);
+}
+
+
+// movw: loads a 16-bit immediate into the low half of rd, zeroing the top
+// half. The immediate is split into a 4-bit and a 12-bit encoding field.
+void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ B25 | B24 | ((imm16 >> 12) << 16) |
+ static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+ Emit(encoding);
+}
+
+
+// movt: loads a 16-bit immediate into the high half of rd; used after movw
+// to materialize a full 32-bit constant (see LoadImmediate below).
+void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ B25 | B24 | B22 | ((imm16 >> 12) << 16) |
+ static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+ Emit(encoding);
+}
+
+
+// Common encoding for the multiply family. Note the operand-mapping
+// comments at each call site: the assembler-level operands are permuted
+// into the rn/rd/rs/rm encoding fields.
+void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode,
+ Register rd, Register rn,
+ Register rm, Register rs) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(rs, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = opcode |
+ (static_cast<int32_t>(cond) << kConditionShift) |
+ (static_cast<int32_t>(rn) << kRnShift) |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ (static_cast<int32_t>(rs) << kRsShift) |
+ B7 | B4 |
+ (static_cast<int32_t>(rm) << kRmShift);
+ Emit(encoding);
+}
+
+
+// rd = rn * rm.
+void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+ // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
+ EmitMulOp(cond, 0, R0, rd, rn, rm);
+}
+
+
+// rd = rn * rm + ra (multiply-accumulate).
+void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra,
+ Condition cond) {
+ // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+ EmitMulOp(cond, B21, ra, rd, rn, rm);
+}
+
+
+// rd = ra - rn * rm (multiply-subtract).
+void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra,
+ Condition cond) {
+ // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+ EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
+}
+
+
+// rd_hi:rd_lo = rn * rm (64-bit unsigned multiply).
+void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn,
+ Register rm, Condition cond) {
+ // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
+ EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
+}
+
+
+// Single-register loads and stores. Word/byte forms use addressing mode 2
+// (EmitMemOp); halfword, signed and doubleword forms use addressing mode 3.
+void ArmAssembler::ldr(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, true, false, rd, ad);
+}
+
+
+void ArmAssembler::str(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, false, false, rd, ad);
+}
+
+
+void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, true, true, rd, ad);
+}
+
+
+void ArmAssembler::strb(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, false, true, rd, ad);
+}
+
+
+void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::strh(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
+}
+
+
+// Doubleword load: rd must be even; the pair rd, rd+1 is transferred.
+void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) {
+ CHECK_EQ(rd % 2, 0);
+ EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
+}
+
+
+// Doubleword store: rd must be even; the pair rd, rd+1 is transferred.
+void ArmAssembler::strd(Register rd, Address ad, Condition cond) {
+ CHECK_EQ(rd % 2, 0);
+ EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
+}
+
+
+// Block load of the register set 'regs' from memory at 'base'.
+void ArmAssembler::ldm(BlockAddressMode am,
+ Register base,
+ RegList regs,
+ Condition cond) {
+ EmitMultiMemOp(cond, am, true, base, regs);
+}
+
+
+// Block store of the register set 'regs' to memory at 'base'.
+void ArmAssembler::stm(BlockAddressMode am,
+ Register base,
+ RegList regs,
+ Condition cond) {
+ EmitMultiMemOp(cond, am, false, base, regs);
+}
+
+
+// Load-exclusive: rt = [rn], marking the address in the exclusive monitor.
+// Pairs with strex for atomic read-modify-write sequences.
+void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) {
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 |
+ B23 |
+ L |
+ (static_cast<int32_t>(rn) << kLdExRnShift) |
+ (static_cast<int32_t>(rt) << kLdExRtShift) |
+ B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
+ Emit(encoding);
+}
+
+
+// Store-exclusive: [rn] = rt; rd receives the success status (0 on
+// success, 1 if the exclusive monitor was lost — per the ARM ARM).
+void ArmAssembler::strex(Register rd,
+ Register rt,
+ Register rn,
+ Condition cond) {
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 |
+ B23 |
+ (static_cast<int32_t>(rn) << kStrExRnShift) |
+ (static_cast<int32_t>(rd) << kStrExRdShift) |
+ B11 | B10 | B9 | B8 | B7 | B4 |
+ (static_cast<int32_t>(rt) << kStrExRtShift);
+ Emit(encoding);
+}
+
+
+// Clears the local exclusive monitor; uses the unconditional (special
+// condition) encoding.
+void ArmAssembler::clrex() {
+ int32_t encoding = (kSpecialCondition << kConditionShift) |
+ B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
+ Emit(encoding);
+}
+
+
+// No-operation (conditional form).
+void ArmAssembler::nop(Condition cond) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B25 | B24 | B21 | (0xf << 12);
+ Emit(encoding);
+}
+
+
+// Register transfers between the core and VFP register files. S-register
+// numbers are split across an index field (>> 1) and an odd/even bit (& 1)
+// in the encoding; D registers split across a 4-bit field and a top bit.
+
+// Core register rt -> single-precision sn.
+void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) {
+ CHECK_NE(sn, kNoSRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 |
+ ((static_cast<int32_t>(sn) >> 1)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+ Emit(encoding);
+}
+
+
+// Single-precision sn -> core register rt.
+void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) {
+ CHECK_NE(sn, kNoSRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B20 |
+ ((static_cast<int32_t>(sn) >> 1)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+ Emit(encoding);
+}
+
+
+// Core register pair rt, rt2 -> adjacent singles sm, sm+1 (hence sm may
+// not be S31).
+void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2,
+ Condition cond) {
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(sm, S31);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+// Adjacent singles sm, sm+1 -> core register pair rt, rt2 (rt != rt2).
+void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm,
+ Condition cond) {
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(sm, S31);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(rt, rt2);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 | B20 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+// Core register pair rt, rt2 -> double-precision dm.
+void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2,
+ Condition cond) {
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+ ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+// Double-precision dm -> core register pair rt, rt2 (rt != rt2).
+void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm,
+ Condition cond) {
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(rt, rt2);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 | B20 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+ ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+// Single-precision load from memory.
+void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) {
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 | B20 |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ B11 | B9 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+// Single-precision store to memory; PC-based addresses are rejected.
+void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) {
+ CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ B11 | B9 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+// Double-precision load from memory.
+void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) {
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 | B20 |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ B11 | B9 | B8 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+// Double-precision store to memory; PC-based addresses are rejected.
+void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) {
+ CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ B11 | B9 | B8 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+// Encodes a three-operand single-precision VFP data-processing
+// instruction; each S-register number is split into an index field and an
+// odd/even bit.
+void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode,
+ SRegister sd, SRegister sn, SRegister sm) {
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(sn, kNoSRegister);
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | opcode |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sn) >> 1)*B16) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ ((static_cast<int32_t>(sn) & 1)*B7) |
+ ((static_cast<int32_t>(sm) & 1)*B5) |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+// Encodes a three-operand double-precision VFP data-processing
+// instruction; each D-register number is split into a 4-bit field and a
+// top bit.
+void ArmAssembler::EmitVFPddd(Condition cond, int32_t opcode,
+ DRegister dd, DRegister dn, DRegister dm) {
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(dn, kNoDRegister);
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | B8 | opcode |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dn) & 0xf)*B16) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ ((static_cast<int32_t>(dn) >> 4)*B7) |
+ ((static_cast<int32_t>(dm) >> 4)*B5) |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+// Register-to-register copies.
+void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
+}
+
+
+// Attempts to materialize s_imm with a vmov-immediate. Returns false when
+// the value is not representable as an 8-bit VFP immediate (bit pattern
+// checked below); the caller must then load it some other way.
+bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) {
+ uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
+ if (((imm32 & ((1 << 19) - 1)) == 0) &&
+ ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
+ (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
+ uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
+ ((imm32 >> 19) & ((1 << 6) -1));
+ EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
+ sd, S0, S0);
+ return true;
+ }
+ return false;
+}
+
+
+// Double-precision analogue of the immediate vmovs above.
+bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) {
+ uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
+ if (((imm64 & ((1LL << 48) - 1)) == 0) &&
+ ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
+ (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
+ uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
+ ((imm64 >> 48) & ((1 << 6) -1));
+ EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
+ dd, D0, D0);
+ return true;
+ }
+ return false;
+}
+
+
+// VFP arithmetic, single-/double-precision pairs. The opcode bit patterns
+// follow the VFP data-processing encodings; unary ops pass S0/D0 in the
+// unused first source slot.
+void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B21 | B20, sd, sn, sm);
+}
+
+
+void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B21 | B20, dd, dn, dm);
+}
+
+
+void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B21, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B21, dd, dn, dm);
+}
+
+
+// Multiply-accumulate / multiply-subtract.
+void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, 0, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, 0, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B23, sd, sn, sm);
+}
+
+
+void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B23, dd, dn, dm);
+}
+
+
+// Unary ops: absolute value, negation, square root.
+void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
+}
+
+void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
+}
+
+
+// Encodes a mixed-precision VFP op with a single-precision destination and
+// double-precision source.
+void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode,
+ SRegister sd, DRegister dm) {
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | opcode |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ ((static_cast<int32_t>(dm) >> 4)*B5) |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+// Encodes a mixed-precision VFP op with a double-precision destination and
+// single-precision source.
+void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode,
+ DRegister dd, SRegister sm) {
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | opcode |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ ((static_cast<int32_t>(sm) & 1)*B5) |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+// vcvt* : conversions between single/double precision and signed ('i') /
+// unsigned ('u') integer forms held in S registers.
+void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
+ EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
+ EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
+ EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
+ EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
+ EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
+ EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
+}
+
+
+// vcmp* : comparisons (the 'z' forms compare against zero); the result is
+// read back into the core flags with vmstat below.
+void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vcmpsz(SRegister sd, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
+}
+
+
+void ArmAssembler::vcmpdz(DRegister dd, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
+}
+
+
+// Copies the FPSCR condition flags into the core APSR flags.
+void ArmAssembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
+ (static_cast<int32_t>(PC)*B12) |
+ B11 | B9 | B4;
+ Emit(encoding);
+}
+
+
+// Supervisor call with a 24-bit immediate payload.
+void ArmAssembler::svc(uint32_t imm24) {
+ CHECK(IsUint(24, imm24)) << imm24;
+ int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
+ Emit(encoding);
+}
+
+
+// Breakpoint with a 16-bit immediate, split across two encoding fields.
+void ArmAssembler::bkpt(uint16_t imm16) {
+ int32_t encoding = (AL << kConditionShift) | B24 | B21 |
+ ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
+ Emit(encoding);
+}
+
+
+// Conditional branch to label.
+void ArmAssembler::b(Label* label, Condition cond) {
+ EmitBranch(cond, label, false);
+}
+
+
+// Conditional branch-with-link (call) to label.
+void ArmAssembler::bl(Label* label, Condition cond) {
+ EmitBranch(cond, label, true);
+}
+
+
+// Branch-with-link-and-exchange to the address in rm.
+void ArmAssembler::blx(Register rm, Condition cond) {
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 | B21 | (0xfff << 8) | B5 | B4 |
+ (static_cast<int32_t>(rm) << kRmShift);
+ Emit(encoding);
+}
+
+// Branch-and-exchange to the address in rm.
+void ArmAssembler::bx(Register rm, Condition cond) {
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 | B21 | (0xfff << 8) | B4 |
+ (static_cast<int32_t>(rm) << kRmShift);
+ Emit(encoding);
+}
+
+// Emits the reserved 'tst pc' marker (see tst() above), then a branch to
+// 'label' that is skipped over by an unconditional branch, so the handler
+// branch is reachable by scanning but never executed inline.
+void ArmAssembler::MarkExceptionHandler(Label* label) {
+ EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
+ Label l;
+ b(&l);
+ EmitBranch(AL, label, false);
+ Bind(&l);
+}
+
+
+// Binds 'label' to the current buffer position, walking the chain of
+// linked branch sites (threaded through each branch's offset field by
+// EmitBranch) and patching each one to the bound position.
+void ArmAssembler::Bind(Label* label) {
+ CHECK(!label->IsBound());
+ int bound_pc = buffer_.Size();
+ while (label->IsLinked()) {
+ int32_t position = label->Position();
+ int32_t next = buffer_.Load<int32_t>(position);
+ int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next);
+ buffer_.Store<int32_t>(position, encoded);
+ label->position_ = ArmAssembler::DecodeBranchOffset(next);
+ }
+ label->BindTo(bound_pc);
+}
+
+
+// Embeds an arbitrary 32-bit value as a sequence of tst instructions,
+// low byte first; continuation bytes use the VS condition and the final
+// byte uses MI as a terminator.
+void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) {
+ // TODO: Consider using movw ip, <16 bits>.
+ while (!IsUint(8, data)) {
+ tst(R0, ShifterOperand(data & 0xFF), VS);
+ data >>= 8;
+ }
+ tst(R0, ShifterOperand(data), MI);
+}
+
+
+// Folds a byte-offset branch target into the 24-bit offset field of
+// 'inst'. The offset is stored in words and adjusted for the 8-byte
+// PC-read-ahead of the ARM pipeline.
+int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
+ // The offset is off by 8 due to the way the ARM CPUs read PC.
+ offset -= 8;
+ CHECK_ALIGNED(offset, 4);
+ CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+
+ // Properly preserve only the bits supported in the instruction.
+ offset >>= 2;
+ offset &= kBranchOffsetMask;
+ return (inst & ~kBranchOffsetMask) | offset;
+}
+
+
+// Inverse of EncodeBranchOffset: the << 8 >> 6 pair sign-extends the
+// 24-bit field and multiplies by 4 in one step.
+int ArmAssembler::DecodeBranchOffset(int32_t inst) {
+ // Sign-extend, left-shift by 2, then add 8.
+ return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
+}
+
+void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) {
+ AddConstant(rd, rd, value, cond);
+}
+
+
+// Materializes rd = rn + value using the shortest available sequence:
+// a single add/sub when the constant (or its negation) fits a rotated
+// immediate, otherwise mvn into IP plus add/sub, otherwise a full
+// movw/movt pair into IP.  Clobbers IP on the long paths, so rn must not
+// be IP then.
+void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value,
+ Condition cond) {
+ if (value == 0) {
+ if (rd != rn) {
+ mov(rd, ShifterOperand(rn), cond);
+ }
+ return;
+ }
+ // We prefer to select the shorter code sequence rather than selecting add for
+ // positive values and sub for negative ones, which would slightly improve
+ // the readability of generated code for some constants.
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ add(rd, rn, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+ sub(rd, rn, shifter_op, cond);
+ } else {
+ CHECK(rn != IP);
+ // ~value encodable: build |value| with a single mvn into IP.
+ if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ add(rd, rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ sub(rd, rn, ShifterOperand(IP), cond);
+ } else {
+ // Worst case: two-instruction 32-bit constant into IP.
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ add(rd, rn, ShifterOperand(IP), cond);
+ }
+ }
+}
+
+
+// Flag-setting twin of AddConstant: rd = rn + value via adds/subs so the
+// condition flags reflect the result.  No value == 0 fast path -- the
+// flag-setting side effect must still be produced.
+void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
+ Condition cond) {
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ adds(rd, rn, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+ subs(rd, rn, shifter_op, cond);
+ } else {
+ CHECK(rn != IP);
+ if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ adds(rd, rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ subs(rd, rn, ShifterOperand(IP), cond);
+ } else {
+ // Worst case: materialize the full 32-bit constant in IP first.
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ adds(rd, rn, ShifterOperand(IP), cond);
+ }
+ }
+}
+
+
+// Loads the 32-bit constant |value| into |rd| with the fewest instructions:
+// a single mov or mvn when a rotated-immediate encoding exists, otherwise a
+// movw (plus movt when the high half is non-zero).
+void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
+  ShifterOperand so;
+  if (ShifterOperand::CanHold(value, &so)) {
+    mov(rd, so, cond);
+    return;
+  }
+  if (ShifterOperand::CanHold(~value, &so)) {
+    mvn(rd, so, cond);
+    return;
+  }
+  movw(rd, Low16Bits(value), cond);
+  const uint16_t hi = High16Bits(value);
+  if (hi != 0) {
+    movt(rd, hi, cond);
+  }
+}
+
+
+// Returns whether |offset| fits directly into the load encoding used for
+// |type|, i.e. whether LoadFromOffset can emit a single instruction.
+bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
+  switch (type) {
+    case kLoadUnsignedByte:
+    case kLoadWord:
+      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
+    case kLoadSignedByte:
+    case kLoadSignedHalfword:
+    case kLoadUnsignedHalfword:
+    case kLoadWordPair:
+      return IsAbsoluteUint(8, offset);   // Addressing mode 3.
+    case kLoadSWord:
+    case kLoadDWord:
+      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      return false;
+  }
+}
+
+
+// Returns whether |offset| fits directly into the store encoding used for
+// |type|, i.e. whether StoreToOffset can emit a single instruction.
+bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
+  switch (type) {
+    case kStoreByte:
+    case kStoreWord:
+      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
+    case kStoreHalfword:
+    case kStoreWordPair:
+      return IsAbsoluteUint(8, offset);   // Addressing mode 3.
+    case kStoreSWord:
+    case kStoreDWord:
+      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      return false;
+  }
+}
+
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset.
+// Loads |reg| (size/signedness per |type|) from [base, #offset].  When the
+// offset does not fit the addressing mode, the effective address is first
+// computed into IP, so |base| must not be IP in that case.
+void ArmAssembler::LoadFromOffset(LoadOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldLoadOffset(type, offset)) {
+ CHECK(base != IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldLoadOffset(type, offset));
+ switch (type) {
+ case kLoadSignedByte:
+ ldrsb(reg, Address(base, offset), cond);
+ break;
+ case kLoadUnsignedByte:
+ ldrb(reg, Address(base, offset), cond);
+ break;
+ case kLoadSignedHalfword:
+ ldrsh(reg, Address(base, offset), cond);
+ break;
+ case kLoadUnsignedHalfword:
+ ldrh(reg, Address(base, offset), cond);
+ break;
+ case kLoadWord:
+ ldr(reg, Address(base, offset), cond);
+ break;
+ case kLoadWordPair:
+ ldrd(reg, Address(base, offset), cond);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+// Loads single-precision VFP register |reg| from [base, #offset], spilling
+// an out-of-range offset into IP first (so |base| must not be IP then).
+void ArmAssembler::LoadSFromOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
+ vldrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+// Loads double-precision VFP register |reg| from [base, #offset], spilling
+// an out-of-range offset into IP first (so |base| must not be IP then).
+void ArmAssembler::LoadDFromOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
+ vldrd(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset.
+// Stores |reg| (width per |type|) to [base, #offset].  When the offset does
+// not fit the addressing mode the address is computed into IP, so neither
+// |reg| nor |base| may be IP in that case.
+void ArmAssembler::StoreToOffset(StoreOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldStoreOffset(type, offset)) {
+ CHECK(reg != IP);
+ CHECK(base != IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldStoreOffset(type, offset));
+ switch (type) {
+ case kStoreByte:
+ strb(reg, Address(base, offset), cond);
+ break;
+ case kStoreHalfword:
+ strh(reg, Address(base, offset), cond);
+ break;
+ case kStoreWord:
+ str(reg, Address(base, offset), cond);
+ break;
+ case kStoreWordPair:
+ strd(reg, Address(base, offset), cond);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
+// Stores single-precision VFP register |reg| to [base, #offset], spilling an
+// out-of-range offset into IP first (so |base| must not be IP then).
+void ArmAssembler::StoreSToOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
+ vstrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
+// Stores double-precision VFP register |reg| to [base, #offset], spilling an
+// out-of-range offset into IP first (so |base| must not be IP then).
+void ArmAssembler::StoreDToOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
+ vstrd(reg, Address(base, offset), cond);
+}
+
+// Pushes |rd|: pre-indexed store with writeback, str rd, [sp, #-kRegisterSize]!.
+void ArmAssembler::Push(Register rd, Condition cond) {
+ str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
+}
+
+// Pops into |rd|: post-indexed load with writeback, ldr rd, [sp], #kRegisterSize.
+void ArmAssembler::Pop(Register rd, Condition cond) {
+ ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
+}
+
+// Pushes all registers in |regs| with a single stmdb sp!, {...}.
+void ArmAssembler::PushList(RegList regs, Condition cond) {
+ stm(DB_W, SP, regs, cond);
+}
+
+// Pops all registers in |regs| with a single ldmia sp!, {...}.
+void ArmAssembler::PopList(RegList regs, Condition cond) {
+ ldm(IA_W, SP, regs, cond);
+}
+
+// Register move that elides the instruction entirely when rd == rm.
+void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
+ if (rd != rm) {
+ mov(rd, ShifterOperand(rm), cond);
+ }
+}
+
+// Logical shift left by immediate: rd = rm << shift_imm (1..31).
+void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Do not use Lsl if no shift is wanted.
+ mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
+}
+
+// Logical shift right by immediate: rd = rm >> shift_imm (1..32); a shift
+// of 32 is encoded as 0 per the instruction encoding.
+void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Do not use Lsr if no shift is wanted.
+ if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
+ mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
+}
+
+// Arithmetic shift right by immediate: rd = rm >> shift_imm (1..32); a shift
+// of 32 is encoded as 0 per the instruction encoding.
+void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Do not use Asr if no shift is wanted.
+ if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
+ mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
+}
+
+// Rotate right by immediate: rd = rm ror shift_imm (1..31).  ROR #0 encodes
+// RRX, hence the check directing callers to Rrx() instead.
+void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Use Rrx instruction.
+ mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
+}
+
+// Rotate right with extend (one-bit rotate through carry): encoded as ROR #0.
+void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
+ mov(rd, ShifterOperand(rm, ROR, 0), cond);
+}
+
+// Emits the managed-code method prologue: pushes LR plus |callee_save_regs|,
+// grows the stack to |frame_size| (which must account for the pushed
+// registers and the Method* slot), stores the Method* from R0 at [sp, #0],
+// and spills the incoming |entry_spills| registers above the new frame.
+void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
+
+ // Push callee saves and link register.
+ RegList push_list = 1 << LR;
+ size_t pushed_values = 1;
+ for (size_t i = 0; i < callee_save_regs.size(); i++) {
+ Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+ push_list |= 1 << reg;
+ pushed_values++;
+ }
+ PushList(push_list);
+
+ // Increase frame to required size.
+ CHECK_GT(frame_size, pushed_values * kPointerSize); // Must be at least space to push Method*
+ size_t adjust = frame_size - (pushed_values * kPointerSize);
+ IncreaseFrameSize(adjust);
+
+ // Write out Method*.
+ StoreToOffset(kStoreWord, R0, SP, 0);
+
+ // Write out entry spills.  Offsets land just above this frame -- presumably
+ // the method's incoming argument area; TODO confirm against the calling
+ // convention.
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
+ StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
+ }
+}
+
+// Emits the managed-code method epilogue: shrinks the stack down to the
+// saved registers, then pops |callee_save_regs| and PC (returning directly
+// by restoring the LR pushed in BuildFrame into PC).
+void ArmAssembler::RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ // Compute callee saves to pop and PC
+ RegList pop_list = 1 << PC;
+ size_t pop_values = 1;
+ for (size_t i = 0; i < callee_save_regs.size(); i++) {
+ Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+ pop_list |= 1 << reg;
+ pop_values++;
+ }
+
+ // Decrease frame to start of callee saves
+ CHECK_GT(frame_size, pop_values * kPointerSize);
+ size_t adjust = frame_size - (pop_values * kPointerSize);
+ DecreaseFrameSize(adjust);
+
+ // Pop callee saves and PC
+ PopList(pop_list);
+}
+
+// Grows the frame by |adjust| bytes (sp -= adjust).  Note -adjust wraps the
+// unsigned size_t; the wrapped value converts to the intended negative
+// int32_t, which assumes adjust fits in 31 bits.
+void ArmAssembler::IncreaseFrameSize(size_t adjust) {
+ AddConstant(SP, -adjust);
+}
+
+// Shrinks the frame by |adjust| bytes (sp += adjust).
+void ArmAssembler::DecreaseFrameSize(size_t adjust) {
+ AddConstant(SP, adjust);
+}
+
+// Stores |msrc| into the current frame at |dest|, dispatching on the kind of
+// managed register: core (4 bytes), core pair (8 bytes, low word first),
+// or single/double VFP register.  A no-register source requires size 0.
+void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+ ArmManagedRegister src = msrc.AsArm();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+ StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
+ SP, dest.Int32Value() + 4);
+ } else if (src.IsSRegister()) {
+ StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ }
+}
+
+// Stores an object reference held in core register |msrc| to frame slot |dest|.
+void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+// Stores a raw pointer held in core register |msrc| to frame slot |dest|.
+void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+// Stores |msrc| at |dest| and copies the word at frame slot |in_off| to
+// |dest| + 4, forming an 8-byte value spanning the two slots; |mscratch| is
+// clobbered for the copy.
+void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+ FrameOffset in_off, ManagedRegister mscratch) {
+ ArmManagedRegister src = msrc.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+}
+
+// Copies a reference word between frame slots via |mscratch| (clobbered).
+void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+// Loads a reference from [base, #offs] into |mdest|.  Both registers must be
+// core registers.
+void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+                           MemberOffset offs) {
+  ArmManagedRegister dst = mdest.AsArm();
+  // Fix: the original check tested dst.IsCoreRegister() twice; the second
+  // conjunct was evidently meant to validate the base register as well.
+  CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
+  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
+                 base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+// Loads a reference from frame slot |src| into core register |mdest|.
+void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
+}
+
+// Loads a raw pointer from [base, #offs] into |mdest|.  Both registers must
+// be core registers.
+void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+                              Offset offs) {
+  ArmManagedRegister dst = mdest.AsArm();
+  // Fix: the original check tested dst.IsCoreRegister() twice; the second
+  // conjunct was evidently meant to validate the base register as well.
+  CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
+  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
+                 base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+// Stores the 32-bit constant |imm| to frame slot |dest|, materializing it in
+// |mscratch| (clobbered).
+void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+// Stores the 32-bit constant |imm| at offset |dest| from the thread register
+// TR, materializing it in |mscratch| (clobbered).
+void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
+}
+
+// Shared helper for the Load() overloads: loads |m_dst| from
+// [src_register, #src_offset], dispatching on the destination register kind
+// (core word, core pair loaded low word first, or single/double VFP).
+// A no-register destination requires size 0.
+static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
+ Register src_register, int32_t src_offset, size_t size) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+ } else if (dst.IsSRegister()) {
+ assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ }
+}
+
+// Loads |m_dst| from the current frame's slot at |src|.
+void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+  EmitLoad(this, m_dst, SP, src.Int32Value(), size);
+}
+
+// Loads |m_dst| from offset |src| relative to the thread register TR.
+void ArmAssembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) {
+  EmitLoad(this, m_dst, TR, src.Int32Value(), size);
+}
+
+// Loads a raw pointer from offset |offs| off the thread register TR into the
+// core register |m_dst|.
+void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
+}
+
+// Copies a pointer-sized word from thread-local storage (TR + thr_offs) to
+// frame slot |fr_offs| via |mscratch| (clobbered).
+void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset thr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+}
+
+// Copies a pointer-sized word from frame slot |fr_offs| to thread-local
+// storage (TR + thr_offs) via |mscratch| (clobbered).
+void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+}
+
+// Computes the address SP + fr_offs into |mscratch| and stores it at
+// TR + thr_offs (i.e. records a stack address in thread-local storage).
+void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+}
+
+// Records the current SP at TR + thr_offs in thread-local storage.
+void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+ StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
+// Unused on ARM: sub-word loads already sign-extend, so reaching this is fatal.
+void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+// Unused on ARM: sub-word loads already zero-extend, so reaching this is fatal.
+void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+// Register-to-register move between managed registers of matching kind
+// (core, S, D, or core pair).  No-op when source and destination are equal.
+void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ ArmManagedRegister src = m_src.AsArm();
+ if (!dst.Equals(src)) {
+ if (dst.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+ } else if (dst.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ vmovd(dst.AsDRegister(), src.AsDRegister());
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ vmovs(dst.AsSRegister(), src.AsSRegister());
+ } else {
+ CHECK(dst.IsRegisterPair()) << dst;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second
+ if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ } else {
+ // src high aliases dst low: move the high half first.
+ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ }
+ }
+ }
+}
+
+// Copies |size| bytes (4 or 8) between frame slots, one word at a time
+// through the core scratch register (clobbered).
+void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister()) << scratch;
+  CHECK(size == 4 || size == 8) << size;
+  const Register tmp = scratch.AsCoreRegister();
+  for (size_t word = 0; word < size; word += 4) {
+    LoadFromOffset(kLoadWord, tmp, SP, src.Int32Value() + word);
+    StoreToOffset(kStoreWord, tmp, SP, dest.Int32Value() + word);
+  }
+}
+
+// Copies one word from [src_base + src_offset] to frame slot |dest| via the
+// core scratch register (clobbered).  Only 4-byte copies are supported.
+void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+// Copies one word from frame slot |src| to [dest_base + dest_offset] via the
+// core scratch register (clobbered).  Only 4-byte copies are supported.
+void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+// Unimplemented overload (frame slot from indirect frame base); aborts if hit.
+void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+// Copies one word from [src + src_offset] to [dest + dest_offset] via the
+// core scratch register (clobbered).  Only 4-byte copies are supported.
+void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ CHECK_EQ(size, 4u);
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+// Unimplemented overload (offset off both frame slots); aborts if hit.
+void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
+ ManagedRegister /*scratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+
+// Emits a full memory barrier (no-op on non-SMP builds).  The mechanism is
+// chosen at compile time: a dmb instruction, a CP15 barrier via mcr, or a
+// call to the kernel's kuser memory-barrier helper.  The scratch register
+// must be R12 since the fallback paths clobber it.
+void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
+ CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
+#if ANDROID_SMP != 0
+#if defined(__ARM_HAVE_DMB)
+ int32_t encoding = 0xf57ff05f; // dmb
+ Emit(encoding);
+#elif defined(__ARM_HAVE_LDREX_STREX)
+ LoadImmediate(R12, 0);
+ int32_t encoding = 0xee07cfba; // mcr p15, 0, r12, c7, c10, 5
+ Emit(encoding);
+#else
+ LoadImmediate(R12, 0xffff0fa0); // kuser_memory_barrier
+ blx(R12);
+#endif
+#endif
+}
+
+// Computes a Stack Indirect Reference Table (SIRT) entry address into
+// |mout_reg|: SP + sirt_offset, or 0 when |null_allowed| and the reference
+// held at that slot is null.  When |min_reg| is NoRegister the reference is
+// reloaded from the SIRT slot itself.
+void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
+ FrameOffset sirt_offset,
+ ManagedRegister min_reg, bool null_allowed) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
+ // the address in the SIRT holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+ SP, sirt_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ // Compare first, then conditionally clear / compute the address.
+ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ }
+ AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ } else {
+ AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ }
+}
+
+// Like the register variant above, but writes the computed SIRT entry value
+// to frame slot |out_off|; |mscratch| is clobbered.
+void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
+ FrameOffset sirt_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
+ sirt_offset.Int32Value());
+ // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
+ // the address in the SIRT holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ } else {
+ AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ }
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+// Dereferences a SIRT entry: out_reg = (in_reg == 0) ? 0 : *in_reg.
+void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+                                         ManagedRegister min_reg) {
+  ArmManagedRegister out_reg = mout_reg.AsArm();
+  ArmManagedRegister in_reg = min_reg.AsArm();
+  CHECK(out_reg.IsCoreRegister()) << out_reg;
+  CHECK(in_reg.IsCoreRegister()) << in_reg;
+  // Fix: compare BEFORE the EQ-conditional clear.  The original emitted the
+  // conditional LoadImmediate first, so it observed stale flags from earlier
+  // code (contrast with CreateSirtEntry, which compares first).  Also drops
+  // an unused local Label.
+  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+  if (!out_reg.Equals(in_reg)) {
+    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+  }
+  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+                 in_reg.AsCoreRegister(), 0, NE);
+}
+
+// Reference validation hook; intentionally a no-op for now.
+void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+// Reference validation hook (frame-slot variant); intentionally a no-op for now.
+void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+// Indirect call through [base + offset]: loads the target into |mscratch|
+// and blx's to it.
+void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister base = mbase.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ base.AsCoreRegister(), offset.Int32Value());
+ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call
+}
+
+// Doubly-indirect call: loads the object pointer from frame slot |base|,
+// then the target from [object + offset], and blx's to it.
+void ArmAssembler::Call(FrameOffset base, Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, base.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ scratch.AsCoreRegister(), offset.Int32Value());
+ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call
+}
+
+// Unimplemented thread-entrypoint call variant; aborts if hit.
+void ArmAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+// Copies the thread register TR into |tr|.
+void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
+ mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
+}
+
+// Stores the thread register TR to frame slot |offset| (no scratch needed).
+void ArmAssembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister /*scratch*/) {
+ StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
+}
+
+// Emits a pending-exception check: loads the thread's exception field and
+// branches to an out-of-line slow path when non-null.  The slow path object
+// is handed to buffer_.EnqueueSlowPath, which presumably takes ownership and
+// emits it at the end of the buffer -- TODO confirm.
+void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
+ buffer_.EnqueueSlowPath(slow);
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ TR, Thread::ExceptionOffset().Int32Value());
+ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ b(slow->Entry(), NE);
+}
+
+// Emits the out-of-line exception-delivery stub enqueued by ExceptionPoll:
+// unwinds any extra stack adjustment, moves the pending exception (held in
+// the scratch register) into R0, and tail-calls the pDeliverException
+// entrypoint, which does not return.
+void ArmExceptionSlowPath::Emit(Assembler* sasm) {
+ ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
+ // Pass exception object as argument
+ // Don't care about preserving R0 as this call won't return
+ __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
+ // Set up call to Thread::Current()->pDeliverException
+ __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException));
+ __ blx(R12);
+ // Call never returns
+ __ bkpt(0);
+#undef __
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
new file mode 100644
index 0000000..757a8a2
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm.h
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
+
+#include <vector>
+
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace arm {
+
+// Encodes Addressing Mode 1 - Data-processing operands defined in Section 5.1.
+// Encodes Addressing Mode 1 - Data-processing operands defined in Section 5.1.
+class ShifterOperand {
+ public:
+  // Data-processing operands - Uninitialized.  type_ == -1 (wrapped) marks
+  // the operand invalid until another constructor or CanHold fills it in;
+  // encoding_ is zeroed so the object never carries indeterminate bits.
+  ShifterOperand() : type_(-1), encoding_(0) {}
+
+  // Data-processing operands - Immediate (unrotated 8-bit value).
+  explicit ShifterOperand(uint32_t immediate) {
+    CHECK(immediate < (1 << kImmed8Bits));
+    type_ = 1;
+    encoding_ = immediate;
+  }
+
+  // Data-processing operands - Rotated immediate (immed8 rotated right by
+  // 2 * rotate bits).
+  ShifterOperand(uint32_t rotate, uint32_t immed8) {
+    CHECK((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
+    type_ = 1;
+    encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
+  }
+
+  // Data-processing operands - Register
+  explicit ShifterOperand(Register rm) {
+    type_ = 0;
+    encoding_ = static_cast<uint32_t>(rm);
+  }
+
+  // Data-processing operands - Logical shift/rotate by immediate
+  ShifterOperand(Register rm, Shift shift, uint32_t shift_imm) {
+    CHECK(shift_imm < (1 << kShiftImmBits));
+    type_ = 0;
+    encoding_ = shift_imm << kShiftImmShift |
+                static_cast<uint32_t>(shift) << kShiftShift |
+                static_cast<uint32_t>(rm);
+  }
+
+  // Data-processing operands - Logical shift/rotate by register
+  ShifterOperand(Register rm, Shift shift, Register rs) {
+    type_ = 0;
+    encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
+                static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
+                static_cast<uint32_t>(rm);
+  }
+
+  // Returns true (and fills |shifter_op|) when |immediate| is expressible as
+  // an 8-bit value rotated right by an even amount, the encodable form for
+  // ARM data-processing immediates.
+  static bool CanHold(uint32_t immediate, ShifterOperand* shifter_op) {
+    // Avoid the more expensive test for frequent small immediate values.
+    if (immediate < (1 << kImmed8Bits)) {
+      shifter_op->type_ = 1;
+      shifter_op->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
+      return true;
+    }
+    // Note that immediate must be unsigned for the test to work correctly.
+    // Fix: start at rot = 1.  rot == 0 is fully covered by the fast path
+    // above (a zero rotation can only encode values < 256), and with rot == 0
+    // the expression below would shift right by 32 bits, which is undefined
+    // behavior in C++.
+    for (int rot = 1; rot < 16; rot++) {
+      uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
+      if (imm8 < (1 << kImmed8Bits)) {
+        shifter_op->type_ = 1;
+        shifter_op->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
+        return true;
+      }
+    }
+    return false;
+  }
+
+ private:
+  // An operand is valid once it has been given a register (0) or
+  // immediate (1) type.
+  bool is_valid() const { return (type_ == 0) || (type_ == 1); }
+
+  uint32_t type() const {
+    CHECK(is_valid());
+    return type_;
+  }
+
+  uint32_t encoding() const {
+    CHECK(is_valid());
+    return encoding_;
+  }
+
+  uint32_t type_;  // Encodes the type field (bits 27-25) in the instruction.
+  uint32_t encoding_;
+
+  friend class ArmAssembler;
+#ifdef SOURCE_ASSEMBLER_SUPPORT
+  friend class BinaryAssembler;
+#endif
+};
+
+
+// Width and signedness of a load; selects the addressing mode used by
+// LoadFromOffset / Address::CanHoldLoadOffset.
+enum LoadOperandType {
+ kLoadSignedByte,
+ kLoadUnsignedByte,
+ kLoadSignedHalfword,
+ kLoadUnsignedHalfword,
+ kLoadWord,
+ kLoadWordPair,
+ kLoadSWord,
+ kLoadDWord
+};
+
+
+// Width of a store; selects the addressing mode used by StoreToOffset /
+// Address::CanHoldStoreOffset.
+enum StoreOperandType {
+ kStoreByte,
+ kStoreHalfword,
+ kStoreWord,
+ kStoreWordPair,
+ kStoreSWord,
+ kStoreDWord
+};
+
+
+// Load/store multiple addressing mode.  The three values OR'd together are
+// the P (pre/post), U (up/down) and W (writeback) bits, shifted to bit 21.
+enum BlockAddressMode {
+ // bit encoding P U W
+ DA = (0|0|0) << 21, // decrement after
+ IA = (0|4|0) << 21, // increment after
+ DB = (8|0|0) << 21, // decrement before
+ IB = (8|4|0) << 21, // increment before
+ DA_W = (0|0|1) << 21, // decrement after with writeback to base
+ IA_W = (0|4|1) << 21, // increment after with writeback to base
+ DB_W = (8|0|1) << 21, // decrement before with writeback to base
+ IB_W = (8|4|1) << 21 // increment before with writeback to base
+};
+
+
+// A base-register-plus-offset memory operand, stored pre-encoded in the
+// addressing-mode-2 instruction format and re-encoded on demand for
+// addressing mode 3 and VFP loads/stores.
+class Address {
+ public:
+ // Memory operand addressing mode
+ enum Mode {
+ // bit encoding P U W
+ Offset = (8|4|0) << 21, // offset (w/o writeback to base)
+ PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
+ PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
+ NegOffset = (8|0|0) << 21, // negative offset (w/o writeback to base)
+ NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
+ NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
+ };
+
+ // Builds [rn, #offset] in the given mode; a negative offset is stored as
+ // its magnitude with the U (add/subtract) bit flipped.
+ explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
+ CHECK(IsAbsoluteUint(12, offset));
+ if (offset < 0) {
+ encoding_ = (am ^ (1 << kUShift)) | -offset; // Flip U to adjust sign.
+ } else {
+ encoding_ = am | offset;
+ }
+ encoding_ |= static_cast<uint32_t>(rn) << kRnShift;
+ }
+
+ static bool CanHoldLoadOffset(LoadOperandType type, int offset);
+ static bool CanHoldStoreOffset(StoreOperandType type, int offset);
+
+ private:
+ // Raw addressing-mode-2 encoding (12-bit immediate offset).
+ uint32_t encoding() const { return encoding_; }
+
+ // Encoding for addressing mode 3.
+ // Splits the 8-bit offset into the immedH/immedL nibbles at bits 8-11/0-3.
+ uint32_t encoding3() const {
+ const uint32_t offset_mask = (1 << 12) - 1;
+ uint32_t offset = encoding_ & offset_mask;
+ CHECK_LT(offset, 256u);
+ return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
+ }
+
+ // Encoding for vfp load/store addressing: offset stored in words, U bit at
+ // bit 23 for positive offsets.
+ uint32_t vencoding() const {
+ const uint32_t offset_mask = (1 << 12) - 1;
+ uint32_t offset = encoding_ & offset_mask;
+ CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
+ // NOTE(review): the offset is encoded >> 2 (word granularity), which
+ // implies 4-byte alignment, but this only enforces 2 -- confirm intent.
+ CHECK_ALIGNED(offset, 2);
+ int mode = encoding_ & ((8|4|1) << 21);
+ CHECK((mode == Offset) || (mode == NegOffset));
+ uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
+ if (mode == Offset) {
+ vencoding |= 1 << 23;
+ }
+ return vencoding;
+ }
+
+ uint32_t encoding_;
+
+ friend class ArmAssembler;
+};
+
+
+class ArmAssembler : public Assembler {
+ public:
+ ArmAssembler() {}
+ virtual ~ArmAssembler() {}
+
+ // Data-processing instructions.
+ void and_(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void eor(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void sub(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void subs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void rsb(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void rsbs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void add(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void adds(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void adc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void sbc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void rsc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void tst(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void teq(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void cmp(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void cmn(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void orr(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void orrs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void mov(Register rd, ShifterOperand so, Condition cond = AL);
+ void movs(Register rd, ShifterOperand so, Condition cond = AL);
+
+ void bic(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void mvn(Register rd, ShifterOperand so, Condition cond = AL);
+ void mvns(Register rd, ShifterOperand so, Condition cond = AL);
+
+ // Miscellaneous data-processing instructions.
+ void clz(Register rd, Register rm, Condition cond = AL);
+ void movw(Register rd, uint16_t imm16, Condition cond = AL);
+ void movt(Register rd, uint16_t imm16, Condition cond = AL);
+
+ // Multiply instructions.
+ void mul(Register rd, Register rn, Register rm, Condition cond = AL);
+ void mla(Register rd, Register rn, Register rm, Register ra,
+ Condition cond = AL);
+ void mls(Register rd, Register rn, Register rm, Register ra,
+ Condition cond = AL);
+ void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
+ Condition cond = AL);
+
+ // Load/store instructions.
+ void ldr(Register rd, Address ad, Condition cond = AL);
+ void str(Register rd, Address ad, Condition cond = AL);
+
+ void ldrb(Register rd, Address ad, Condition cond = AL);
+ void strb(Register rd, Address ad, Condition cond = AL);
+
+ void ldrh(Register rd, Address ad, Condition cond = AL);
+ void strh(Register rd, Address ad, Condition cond = AL);
+
+ void ldrsb(Register rd, Address ad, Condition cond = AL);
+ void ldrsh(Register rd, Address ad, Condition cond = AL);
+
+ void ldrd(Register rd, Address ad, Condition cond = AL);
+ void strd(Register rd, Address ad, Condition cond = AL);
+
+ void ldm(BlockAddressMode am, Register base,
+ RegList regs, Condition cond = AL);
+ void stm(BlockAddressMode am, Register base,
+ RegList regs, Condition cond = AL);
+
+ void ldrex(Register rd, Register rn, Condition cond = AL);
+ void strex(Register rd, Register rt, Register rn, Condition cond = AL);
+
+ // Miscellaneous instructions.
+ void clrex();
+ void nop(Condition cond = AL);
+
+ // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
+ void bkpt(uint16_t imm16);
+ void svc(uint32_t imm24);
+
+ // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
+ void vmovsr(SRegister sn, Register rt, Condition cond = AL);
+ void vmovrs(Register rt, SRegister sn, Condition cond = AL);
+ void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
+ void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
+ void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
+ void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
+ void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
+ void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
+
+ // Returns false if the immediate cannot be encoded.
+ bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
+ bool vmovd(DRegister dd, double d_imm, Condition cond = AL);
+
+ void vldrs(SRegister sd, Address ad, Condition cond = AL);
+ void vstrs(SRegister sd, Address ad, Condition cond = AL);
+ void vldrd(DRegister dd, Address ad, Condition cond = AL);
+ void vstrd(DRegister dd, Address ad, Condition cond = AL);
+
+ void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+
+ void vabss(SRegister sd, SRegister sm, Condition cond = AL);
+ void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
+ void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
+ void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
+ void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
+ void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);
+
+ void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
+ void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
+ void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
+ void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
+ void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
+ void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);
+
+ void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
+ void vcmpsz(SRegister sd, Condition cond = AL);
+ void vcmpdz(DRegister dd, Condition cond = AL);
+ void vmstat(Condition cond = AL); // VMRS APSR_nzcv, FPSCR
+
+ // Branch instructions.
+ void b(Label* label, Condition cond = AL);
+ void bl(Label* label, Condition cond = AL);
+ void blx(Register rm, Condition cond = AL);
+ void bx(Register rm, Condition cond = AL);
+
+ // Macros.
+ // Add signed constant value to rd. May clobber IP.
+ void AddConstant(Register rd, int32_t value, Condition cond = AL);
+ void AddConstant(Register rd, Register rn, int32_t value,
+ Condition cond = AL);
+ void AddConstantSetFlags(Register rd, Register rn, int32_t value,
+ Condition cond = AL);
+ void AddConstantWithCarry(Register rd, Register rn, int32_t value,
+ Condition cond = AL);
+
+ // Load and Store. May clobber IP.
+ void LoadImmediate(Register rd, int32_t value, Condition cond = AL);
+ void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
+ void LoadDImmediate(DRegister dd, double value,
+ Register scratch, Condition cond = AL);
+ void MarkExceptionHandler(Label* label);
+ void LoadFromOffset(LoadOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void StoreToOffset(StoreOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void LoadSFromOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void StoreSToOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void LoadDFromOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void StoreDToOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+
+ void Push(Register rd, Condition cond = AL);
+ void Pop(Register rd, Condition cond = AL);
+
+ void PushList(RegList regs, Condition cond = AL);
+ void PopList(RegList regs, Condition cond = AL);
+
+ void Mov(Register rd, Register rm, Condition cond = AL);
+
+ // Convenience shift instructions. Use mov instruction with shifter operand
+ // for variants setting the status flags or using a register shift count.
+ void Lsl(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Lsr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Asr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Ror(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Rrx(Register rd, Register rm, Condition cond = AL);
+
+ // Encode a signed constant in tst instructions, only affecting the flags.
+ void EncodeUint32InTstInstructions(uint32_t data);
+ // ... and decode from a pc pointing to the start of encoding instructions.
+ static uint32_t DecodeUint32FromTstInstructions(uword pc);
+ static bool IsInstructionForExceptionHandling(uword pc);
+
+ // Emit data (e.g. encoded instruction or immediate) to the
+ // instruction stream.
+ void Emit(int32_t value);
+ void Bind(Label* label);
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills);
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs);
+
+ virtual void IncreaseFrameSize(size_t adjust);
+ virtual void DecreaseFrameSize(size_t adjust);
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+ virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+ virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+ FrameOffset in_off, ManagedRegister scratch);
+
+ // Load routines
+ virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+ virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+ MemberOffset offs);
+
+ virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+ Offset offs);
+
+ virtual void LoadRawPtrFromThread(ManagedRegister dest,
+ ThreadOffset offs);
+
+ // Copying routines
+ virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister scratch);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void MemoryBarrier(ManagedRegister scratch);
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr);
+ virtual void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch);
+
+ // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the SIRT entry to see if the value is
+ // NULL.
+ virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ ManagedRegister in_reg, bool null_allowed);
+
+ // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed.
+ virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ ManagedRegister scratch, bool null_allowed);
+
+ // src holds a SIRT entry (Object**) load this into dst
+ virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ ManagedRegister src);
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+ virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(FrameOffset base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to a ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ private:
+ void EmitType01(Condition cond,
+ int type,
+ Opcode opcode,
+ int set_cc,
+ Register rn,
+ Register rd,
+ ShifterOperand so);
+
+ void EmitType5(Condition cond, int offset, bool link);
+
+ void EmitMemOp(Condition cond,
+ bool load,
+ bool byte,
+ Register rd,
+ Address ad);
+
+ void EmitMemOpAddressMode3(Condition cond,
+ int32_t mode,
+ Register rd,
+ Address ad);
+
+ void EmitMultiMemOp(Condition cond,
+ BlockAddressMode am,
+ bool load,
+ Register base,
+ RegList regs);
+
+ void EmitShiftImmediate(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so);
+
+ void EmitShiftRegister(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so);
+
+ void EmitMulOp(Condition cond,
+ int32_t opcode,
+ Register rd,
+ Register rn,
+ Register rm,
+ Register rs);
+
+ void EmitVFPsss(Condition cond,
+ int32_t opcode,
+ SRegister sd,
+ SRegister sn,
+ SRegister sm);
+
+ void EmitVFPddd(Condition cond,
+ int32_t opcode,
+ DRegister dd,
+ DRegister dn,
+ DRegister dm);
+
+ void EmitVFPsd(Condition cond,
+ int32_t opcode,
+ SRegister sd,
+ DRegister dm);
+
+ void EmitVFPds(Condition cond,
+ int32_t opcode,
+ DRegister dd,
+ SRegister sm);
+
+ void EmitBranch(Condition cond, Label* label, bool link);
+ static int32_t EncodeBranchOffset(int offset, int32_t inst);
+ static int DecodeBranchOffset(int32_t inst);
+ int32_t EncodeTstOffset(int offset, int32_t inst);
+ int DecodeTstOffset(int32_t inst);
+
+  // qsort-style three-way comparator ordering registers by register number.
+ static int RegisterCompare(const Register* reg1, const Register* reg2) {
+ return *reg1 - *reg2;
+ }
+};
+
+// Slowpath entered when Thread::Current()->exception_ is non-null
+class ArmExceptionSlowPath : public SlowPath {
+ public:
+ explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {
+ }
+ virtual void Emit(Assembler *sp_asm);
+ private:
+ const ArmManagedRegister scratch_;
+ const size_t stack_adjust_;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
new file mode 100644
index 0000000..cc795b1
--- /dev/null
+++ b/compiler/utils/arm/constants_arm.h
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
+#define ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+
+#include "arch/arm/registers_arm.h"
+#include "base/casts.h"
+#include "base/logging.h"
+#include "globals.h"
+
+namespace art {
+namespace arm {
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate ARM instructions.
+//
+// Section references in the code refer to the "ARM Architecture Reference
+// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+//
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+
+
+// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at
+// a time, so that compile time optimizations can be applied.
+// Warning: VFPv3-D32 is untested.
+#define VFPv3_D16
+#if defined(VFPv3_D16) == defined(VFPv3_D32)
+#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time."
+#endif
+
+
+enum ScaleFactor {
+ TIMES_1 = 0,
+ TIMES_2 = 1,
+ TIMES_4 = 2,
+ TIMES_8 = 3
+};
+
+// Values for double-precision floating point registers.
+enum DRegister {
+ D0 = 0,
+ D1 = 1,
+ D2 = 2,
+ D3 = 3,
+ D4 = 4,
+ D5 = 5,
+ D6 = 6,
+ D7 = 7,
+ D8 = 8,
+ D9 = 9,
+ D10 = 10,
+ D11 = 11,
+ D12 = 12,
+ D13 = 13,
+ D14 = 14,
+ D15 = 15,
+#ifdef VFPv3_D16
+ kNumberOfDRegisters = 16,
+#else
+ D16 = 16,
+ D17 = 17,
+ D18 = 18,
+ D19 = 19,
+ D20 = 20,
+ D21 = 21,
+ D22 = 22,
+ D23 = 23,
+ D24 = 24,
+ D25 = 25,
+ D26 = 26,
+ D27 = 27,
+ D28 = 28,
+ D29 = 29,
+ D30 = 30,
+ D31 = 31,
+ kNumberOfDRegisters = 32,
+#endif
+ kNumberOfOverlappingDRegisters = 16,
+ kNoDRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
+
+
+// Values for the condition field as defined in section A3.2.
+enum Condition {
+ kNoCondition = -1,
+ EQ = 0, // equal
+ NE = 1, // not equal
+ CS = 2, // carry set/unsigned higher or same
+ CC = 3, // carry clear/unsigned lower
+ MI = 4, // minus/negative
+ PL = 5, // plus/positive or zero
+ VS = 6, // overflow
+ VC = 7, // no overflow
+ HI = 8, // unsigned higher
+ LS = 9, // unsigned lower or same
+ GE = 10, // signed greater than or equal
+ LT = 11, // signed less than
+ GT = 12, // signed greater than
+ LE = 13, // signed less than or equal
+ AL = 14, // always (unconditional)
+ kSpecialCondition = 15, // special condition (refer to section A3.2.1)
+ kMaxCondition = 16,
+};
+std::ostream& operator<<(std::ostream& os, const Condition& rhs);
+
+
+// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
+// as defined in section A3.4
+enum Opcode {
+ kNoOperand = -1,
+ AND = 0, // Logical AND
+ EOR = 1, // Logical Exclusive OR
+ SUB = 2, // Subtract
+ RSB = 3, // Reverse Subtract
+ ADD = 4, // Add
+ ADC = 5, // Add with Carry
+ SBC = 6, // Subtract with Carry
+ RSC = 7, // Reverse Subtract with Carry
+ TST = 8, // Test
+ TEQ = 9, // Test Equivalence
+ CMP = 10, // Compare
+ CMN = 11, // Compare Negated
+ ORR = 12, // Logical (inclusive) OR
+ MOV = 13, // Move
+ BIC = 14, // Bit Clear
+ MVN = 15, // Move Not
+ kMaxOperand = 16
+};
+
+
+// Shifter types for Data-processing operands as defined in section A5.1.2.
+enum Shift {
+ kNoShift = -1,
+ LSL = 0, // Logical shift left
+ LSR = 1, // Logical shift right
+ ASR = 2, // Arithmetic shift right
+ ROR = 3, // Rotate right
+ kMaxShift = 4
+};
+
+
+// Constants used for the decoding or encoding of the individual fields of
+// instructions. Based on the "Figure 3-1 ARM instruction set summary".
+enum InstructionFields {
+ kConditionShift = 28,
+ kConditionBits = 4,
+ kTypeShift = 25,
+ kTypeBits = 3,
+ kLinkShift = 24,
+ kLinkBits = 1,
+ kUShift = 23,
+ kUBits = 1,
+ kOpcodeShift = 21,
+ kOpcodeBits = 4,
+ kSShift = 20,
+ kSBits = 1,
+ kRnShift = 16,
+ kRnBits = 4,
+ kRdShift = 12,
+ kRdBits = 4,
+ kRsShift = 8,
+ kRsBits = 4,
+ kRmShift = 0,
+ kRmBits = 4,
+
+ // Immediate instruction fields encoding.
+ kRotateShift = 8,
+ kRotateBits = 4,
+ kImmed8Shift = 0,
+ kImmed8Bits = 8,
+
+ // Shift instruction register fields encodings.
+ kShiftImmShift = 7,
+ kShiftRegisterShift = 8,
+ kShiftImmBits = 5,
+ kShiftShift = 5,
+ kShiftBits = 2,
+
+ // Load/store instruction offset field encoding.
+ kOffset12Shift = 0,
+ kOffset12Bits = 12,
+ kOffset12Mask = 0x00000fff,
+
+ // Mul instruction register fields encodings.
+ kMulRdShift = 16,
+ kMulRdBits = 4,
+ kMulRnShift = 12,
+ kMulRnBits = 4,
+
+ kBranchOffsetMask = 0x00ffffff
+};
+
+
+// Size (in bytes) of registers.
+const int kRegisterSize = 4;
+
+// List of registers used in load/store multiple.
+typedef uint16_t RegList;
+
+// The class Instr enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+// Instr* instr = Instr::At(ptr);
+// int type = instr->TypeField();
+// return ((type == 0) || (type == 1)) && instr->HasS();
+// }
+//
+class Instr {
+ public:
+ enum {
+ kInstrSize = 4,
+ kInstrSizeLog2 = 2,
+ kPCReadOffset = 8
+ };
+
+ bool IsBreakPoint() {
+ return IsBkpt();
+ }
+
+ // Get the raw instruction bits.
+ inline int32_t InstructionBits() const {
+ return *reinterpret_cast<const int32_t*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(int32_t value) {
+ *reinterpret_cast<int32_t*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const {
+ return (InstructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int Bits(int shift, int count) const {
+ return (InstructionBits() >> shift) & ((1 << count) - 1);
+ }
+
+
+ // Accessors for the different named fields used in the ARM encoding.
+ // The naming of these accessor corresponds to figure A3-1.
+ // Generally applicable fields
+ inline Condition ConditionField() const {
+ return static_cast<Condition>(Bits(kConditionShift, kConditionBits));
+ }
+ inline int TypeField() const { return Bits(kTypeShift, kTypeBits); }
+
+ inline Register RnField() const { return static_cast<Register>(
+ Bits(kRnShift, kRnBits)); }
+ inline Register RdField() const { return static_cast<Register>(
+ Bits(kRdShift, kRdBits)); }
+
+ // Fields used in Data processing instructions
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(Bits(kOpcodeShift, kOpcodeBits));
+ }
+ inline int SField() const { return Bits(kSShift, kSBits); }
+ // with register
+ inline Register RmField() const {
+ return static_cast<Register>(Bits(kRmShift, kRmBits));
+ }
+ inline Shift ShiftField() const { return static_cast<Shift>(
+ Bits(kShiftShift, kShiftBits)); }
+ inline int RegShiftField() const { return Bit(4); }
+ inline Register RsField() const {
+ return static_cast<Register>(Bits(kRsShift, kRsBits));
+ }
+ inline int ShiftAmountField() const { return Bits(kShiftImmShift,
+ kShiftImmBits); }
+ // with immediate
+ inline int RotateField() const { return Bits(kRotateShift, kRotateBits); }
+ inline int Immed8Field() const { return Bits(kImmed8Shift, kImmed8Bits); }
+
+ // Fields used in Load/Store instructions
+ inline int PUField() const { return Bits(23, 2); }
+ inline int BField() const { return Bit(22); }
+ inline int WField() const { return Bit(21); }
+ inline int LField() const { return Bit(20); }
+ // with register uses same fields as Data processing instructions above
+ // with immediate
+ inline int Offset12Field() const { return Bits(kOffset12Shift,
+ kOffset12Bits); }
+ // multiple
+ inline int RlistField() const { return Bits(0, 16); }
+ // extra loads and stores
+ inline int SignField() const { return Bit(6); }
+ inline int HField() const { return Bit(5); }
+ inline int ImmedHField() const { return Bits(8, 4); }
+ inline int ImmedLField() const { return Bits(0, 4); }
+
+ // Fields used in Branch instructions
+ inline int LinkField() const { return Bits(kLinkShift, kLinkBits); }
+ inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
+
+ // Fields used in Supervisor Call instructions
+ inline uint32_t SvcField() const { return Bits(0, 24); }
+
+ // Field used in Breakpoint instruction
+ inline uint16_t BkptField() const {
+ return ((Bits(8, 12) << 4) | Bits(0, 4));
+ }
+
+ // Field used in 16-bit immediate move instructions
+ inline uint16_t MovwField() const {
+ return ((Bits(16, 4) << 12) | Bits(0, 12));
+ }
+
+ // Field used in VFP float immediate move instruction
+ inline float ImmFloatField() const {
+ uint32_t imm32 = (Bit(19) << 31) | (((1 << 5) - Bit(18)) << 25) |
+ (Bits(16, 2) << 23) | (Bits(0, 4) << 19);
+ return bit_cast<float, uint32_t>(imm32);
+ }
+
+ // Field used in VFP double immediate move instruction
+ inline double ImmDoubleField() const {
+ uint64_t imm64 = (Bit(19)*(1LL << 63)) | (((1LL << 8) - Bit(18)) << 54) |
+ (Bits(16, 2)*(1LL << 52)) | (Bits(0, 4)*(1LL << 48));
+ return bit_cast<double, uint64_t>(imm64);
+ }
+
+ // Test for data processing instructions of type 0 or 1.
+ // See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition",
+ // section A5.1 "ARM instruction set encoding".
+ inline bool IsDataProcessing() const {
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
+ return ((Bits(20, 5) & 0x19) != 0x10) &&
+ ((Bit(25) == 1) || // Data processing immediate.
+ (Bit(4) == 0) || // Data processing register.
+ (Bit(7) == 0)); // Data processing register-shifted register.
+ }
+
+ // Tests for special encodings of type 0 instructions (extra loads and stores,
+ // as well as multiplications, synchronization primitives, and miscellaneous).
+ // Can only be called for a type 0 or 1 instruction.
+ inline bool IsMiscellaneous() const {
+ CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
+ return ((Bit(25) == 0) && ((Bits(20, 5) & 0x19) == 0x10) && (Bit(7) == 0));
+ }
+ inline bool IsMultiplyOrSyncPrimitive() const {
+ CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
+ return ((Bit(25) == 0) && (Bits(4, 4) == 9));
+ }
+
+ // Test for Supervisor Call instruction.
+ inline bool IsSvc() const {
+ return ((InstructionBits() & 0xff000000) == 0xef000000);
+ }
+
+ // Test for Breakpoint instruction.
+ inline bool IsBkpt() const {
+ return ((InstructionBits() & 0xfff000f0) == 0xe1200070);
+ }
+
+ // VFP register fields.
+ inline SRegister SnField() const {
+ return static_cast<SRegister>((Bits(kRnShift, kRnBits) << 1) + Bit(7));
+ }
+ inline SRegister SdField() const {
+ return static_cast<SRegister>((Bits(kRdShift, kRdBits) << 1) + Bit(22));
+ }
+ inline SRegister SmField() const {
+ return static_cast<SRegister>((Bits(kRmShift, kRmBits) << 1) + Bit(5));
+ }
+ inline DRegister DnField() const {
+ return static_cast<DRegister>(Bits(kRnShift, kRnBits) + (Bit(7) << 4));
+ }
+ inline DRegister DdField() const {
+ return static_cast<DRegister>(Bits(kRdShift, kRdBits) + (Bit(22) << 4));
+ }
+ inline DRegister DmField() const {
+ return static_cast<DRegister>(Bits(kRmShift, kRmBits) + (Bit(5) << 4));
+ }
+
+ // Test for VFP data processing or single transfer instructions of type 7.
+ inline bool IsVFPDataProcessingOrSingleTransfer() const {
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(TypeField(), 7);
+ return ((Bit(24) == 0) && (Bits(9, 3) == 5));
+ // Bit(4) == 0: Data Processing
+ // Bit(4) == 1: 8, 16, or 32-bit Transfer between ARM Core and VFP
+ }
+
+ // Test for VFP 64-bit transfer instructions of type 6.
+ inline bool IsVFPDoubleTransfer() const {
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(TypeField(), 6);
+ return ((Bits(21, 4) == 2) && (Bits(9, 3) == 5) &&
+ ((Bits(4, 4) & 0xd) == 1));
+ }
+
+ // Test for VFP load and store instructions of type 6.
+ inline bool IsVFPLoadStore() const {
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(TypeField(), 6);
+ return ((Bits(20, 5) & 0x12) == 0x10) && (Bits(9, 3) == 5);
+ }
+
+ // Special accessors that test for existence of a value.
+ inline bool HasS() const { return SField() == 1; }
+ inline bool HasB() const { return BField() == 1; }
+ inline bool HasW() const { return WField() == 1; }
+ inline bool HasL() const { return LField() == 1; }
+ inline bool HasSign() const { return SignField() == 1; }
+ inline bool HasH() const { return HField() == 1; }
+ inline bool HasLink() const { return LinkField() == 1; }
+
+ // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instr.
+ // Use the At(pc) function to create references to Instr.
+ static Instr* At(uword pc) { return reinterpret_cast<Instr*>(pc); }
+ Instr* Next() { return this + kInstrSize; }
+
+ private:
+ // We need to prevent the creation of instances of class Instr.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
diff --git a/compiler/utils/arm/managed_register_arm.cc b/compiler/utils/arm/managed_register_arm.cc
new file mode 100644
index 0000000..57c2305
--- /dev/null
+++ b/compiler/utils/arm/managed_register_arm.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_arm.h"
+
+#include "globals.h"
+
+namespace art {
+namespace arm {
+
+// We need all registers for caching of locals.
+// Register R9 .. R15 are reserved.
+// NOTE(review): these "available" counts are not referenced elsewhere in this
+// file — confirm external use before removing.
+static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1;
+static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters;
+static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
+static const int kNumberOfAvailableOverlappingDRegisters =
+    kNumberOfOverlappingDRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Returns true if this managed-register overlaps the other managed-register.
+bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const {
+  // NoRegister overlaps nothing, not even another NoRegister.
+  if (IsNoRegister() || other.IsNoRegister()) {
+    return false;
+  }
+  if (Equals(other)) {
+    return true;
+  }
+  if (IsRegisterPair()) {
+    // A pair overlaps whatever either of its halves overlaps.
+    const ArmManagedRegister pair_lo = FromCoreRegister(AsRegisterPairLow());
+    const ArmManagedRegister pair_hi = FromCoreRegister(AsRegisterPairHigh());
+    return pair_lo.Overlaps(other) || pair_hi.Overlaps(other);
+  }
+  if (IsOverlappingDRegister()) {
+    // A D register in D0..D15 aliases two S registers.
+    if (other.IsDRegister()) {
+      return Equals(other);
+    }
+    if (!other.IsSRegister()) {
+      return false;
+    }
+    const SRegister other_sreg = other.AsSRegister();
+    return (other_sreg == AsOverlappingDRegisterLow()) ||
+           (other_sreg == AsOverlappingDRegisterHigh());
+  }
+  // 'this' is a simple register; mirror the test when only 'other' is a
+  // composite (pair or overlapping D) register.
+  return (other.IsRegisterPair() || other.IsOverlappingDRegister())
+      ? other.Overlaps(*this)
+      : false;
+}
+
+
+// Maps a register pair or an overlapping D register to the allocation id of
+// its low half: an S-register alloc id for overlapping D registers, a core
+// Register alloc id for pairs.
+int ArmManagedRegister::AllocIdLow() const {
+  CHECK(IsOverlappingDRegister() || IsRegisterPair());
+  const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+  int low;
+  if (r < kNumberOfOverlappingDRegIds) {
+    CHECK(IsOverlappingDRegister());
+    low = (r * 2) + kNumberOfCoreRegIds;  // Return a SRegister.
+  } else {
+    CHECK(IsRegisterPair());
+    low = (r - kNumberOfDRegIds) * 2;  // Return a Register.
+    if (low > 6) {
+      // We didn't get a pair at or below R6_R7; this must be the Dalvik
+      // special case R1_R2, whose low register is R1.
+      low = 1;
+    }
+  }
+  return low;
+}
+
+
+// The high half's allocation id is always adjacent to the low half's; this
+// holds for both S-register halves of a D register and the core registers of
+// a pair (including the Dalvik R1_R2 case).
+int ArmManagedRegister::AllocIdHigh() const {
+  return AllocIdLow() + 1;
+}
+
+
+// Writes a human-readable description of this managed register to 'os'.
+void ArmManagedRegister::Print(std::ostream& os) const {
+  if (!IsValidManagedRegister()) {
+    os << "No Register";
+    return;
+  }
+  // The kind predicates below are mutually exclusive for a valid register.
+  if (IsCoreRegister()) {
+    os << "Core: " << static_cast<int>(AsCoreRegister());
+  } else if (IsRegisterPair()) {
+    os << "Pair: " << static_cast<int>(AsRegisterPairLow()) << ", "
+       << static_cast<int>(AsRegisterPairHigh());
+  } else if (IsSRegister()) {
+    os << "SRegister: " << static_cast<int>(AsSRegister());
+  } else if (IsDRegister()) {
+    os << "DRegister: " << static_cast<int>(AsDRegister());
+  } else {
+    os << "??: " << RegId();  // Unreachable for a valid id; defensive only.
+  }
+}
+
+// Streams a human-readable description of the managed register.
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg) {
+  reg.Print(os);
+  return os;
+}
+
+// Streams a register pair by converting it to a managed register first.
+std::ostream& operator<<(std::ostream& os, const RegisterPair& r) {
+  os << ArmManagedRegister::FromRegisterPair(r);
+  return os;
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
new file mode 100644
index 0000000..a496c87
--- /dev/null
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "utils/managed_register.h"
+
+namespace art {
+namespace arm {
+
+// Values for register pairs.
+enum RegisterPair {
+  R0_R1 = 0,  // Even-aligned pairs R0..R7.
+  R2_R3 = 1,
+  R4_R5 = 2,
+  R6_R7 = 3,
+  R1_R2 = 4,  // Dalvik style passing
+  kNumberOfRegisterPairs = 5,
+  kNoRegisterPair = -1,  // Sentinel: no pair.
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+// Register-id counts per kind. The *RegIds constants size the combined
+// register-id space; the *AllocIds constants size the allocator's id space
+// (see the two maps documented below).
+const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
+
+const int kNumberOfSRegIds = kNumberOfSRegisters;
+const int kNumberOfSAllocIds = kNumberOfSRegisters;
+
+const int kNumberOfDRegIds = kNumberOfDRegisters;
+const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
+// Only non-overlapping D registers get their own allocation ids; overlapping
+// ones are allocated through their S-register halves.
+const int kNumberOfDAllocIds = kNumberOfDRegIds - kNumberOfOverlappingDRegIds;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfSRegIds +
+    kNumberOfDRegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds =
+    kNumberOfCoreAllocIds + kNumberOfSAllocIds + kNumberOfDAllocIds;
+
+// Register ids map:
+// [0..R[ core registers (enum Register)
+// [R..S[ single precision VFP registers (enum SRegister)
+// [S..D[ double precision VFP registers (enum DRegister)
+// [D..P[ core register pairs (enum RegisterPair)
+// where
+// R = kNumberOfCoreRegIds
+// S = R + kNumberOfSRegIds
+// D = S + kNumberOfDRegIds
+// P = D + kNumberOfRegisterPairs
+
+// Allocation ids map:
+// [0..R[ core registers (enum Register)
+// [R..S[ single precision VFP registers (enum SRegister)
+// [S..N[ non-overlapping double precision VFP registers (16-31 in enum
+// DRegister, VFPv3-D32 only)
+// where
+// R = kNumberOfCoreAllocIds
+// S = R + kNumberOfSAllocIds
+// N = S + kNumberOfDAllocIds
+
+
+// An instance of class 'ManagedRegister' represents a single ARM register or a
+// pair of core ARM registers (enum RegisterPair). A single register is either a
+// core register (enum Register), a VFP single precision register
+// (enum SRegister), or a VFP double precision register (enum DRegister).
+// 'ManagedRegister::NoRegister()' returns an invalid ManagedRegister.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class ArmManagedRegister : public ManagedRegister {
+ public:
+  // Accessors: each CHECKs that the register is of the requested kind and
+  // translates the flat register id back into the corresponding enum value.
+  Register AsCoreRegister() const {
+    CHECK(IsCoreRegister());
+    return static_cast<Register>(id_);
+  }
+
+  SRegister AsSRegister() const {
+    CHECK(IsSRegister());
+    return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
+  }
+
+  DRegister AsDRegister() const {
+    CHECK(IsDRegister());
+    return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
+  }
+
+  // Low (even) S register aliased by an overlapping D register (D0..D15).
+  SRegister AsOverlappingDRegisterLow() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<SRegister>(d_reg * 2);
+  }
+
+  // High (odd) S register aliased by an overlapping D register.
+  SRegister AsOverlappingDRegisterHigh() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<SRegister>(d_reg * 2 + 1);
+  }
+
+  RegisterPair AsRegisterPair() const {
+    CHECK(IsRegisterPair());
+    Register reg_low = AsRegisterPairLow();
+    if (reg_low == R1) {
+      // R1 is the low half only of the Dalvik-style R1_R2 pair.
+      return R1_R2;
+    } else {
+      return static_cast<RegisterPair>(reg_low / 2);
+    }
+  }
+
+  Register AsRegisterPairLow() const {
+    CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows to use AllocIdLow().
+    return FromRegId(AllocIdLow()).AsCoreRegister();
+  }
+
+  Register AsRegisterPairHigh() const {
+    CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows to use AllocIdHigh().
+    return FromRegId(AllocIdHigh()).AsCoreRegister();
+  }
+
+  // Kind predicates: each tests which segment of the register-id map
+  // (documented above) id_ falls into. All CHECK the id is valid first.
+  bool IsCoreRegister() const {
+    CHECK(IsValidManagedRegister());
+    return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
+  }
+
+  bool IsSRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - kNumberOfCoreRegIds;
+    return (0 <= test) && (test < kNumberOfSRegIds);
+  }
+
+  bool IsDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+    return (0 <= test) && (test < kNumberOfDRegIds);
+  }
+
+  // Returns true if this DRegister overlaps SRegisters.
+  bool IsOverlappingDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+    return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
+  }
+
+  bool IsRegisterPair() const {
+    CHECK(IsValidManagedRegister());
+    const int test =
+        id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds + kNumberOfDRegIds);
+    return (0 <= test) && (test < kNumberOfPairRegIds);
+  }
+
+  // Returns true when both registers are of the same kind (core, S, D or
+  // pair); both registers must be valid.
+  bool IsSameType(ArmManagedRegister test) const {
+    CHECK(IsValidManagedRegister() && test.IsValidManagedRegister());
+    return
+      (IsCoreRegister() && test.IsCoreRegister()) ||
+      (IsSRegister() && test.IsSRegister()) ||
+      (IsDRegister() && test.IsDRegister()) ||
+      (IsRegisterPair() && test.IsRegisterPair());
+  }
+
+
+  // Returns true if the two managed-registers ('this' and 'other') overlap.
+  // Either managed-register may be the NoRegister. If both are the NoRegister
+  // then false is returned.
+  bool Overlaps(const ArmManagedRegister& other) const;
+
+  // Writes a human-readable description of this register to 'os'.
+  void Print(std::ostream& os) const;
+
+  // Factories: wrap an enum value as a managed register by mapping it into
+  // the flat register-id space. Each CHECKs against the kind's sentinel.
+  static ArmManagedRegister FromCoreRegister(Register r) {
+    CHECK_NE(r, kNoRegister);
+    return FromRegId(r);
+  }
+
+  static ArmManagedRegister FromSRegister(SRegister r) {
+    CHECK_NE(r, kNoSRegister);
+    return FromRegId(r + kNumberOfCoreRegIds);
+  }
+
+  static ArmManagedRegister FromDRegister(DRegister r) {
+    CHECK_NE(r, kNoDRegister);
+    return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfSRegIds));
+  }
+
+  static ArmManagedRegister FromRegisterPair(RegisterPair r) {
+    CHECK_NE(r, kNoRegisterPair);
+    return FromRegId(r + (kNumberOfCoreRegIds +
+                          kNumberOfSRegIds + kNumberOfDRegIds));
+  }
+
+  // Return a RegisterPair consisting of Register r_low and r_low + 1.
+  static ArmManagedRegister FromCoreRegisterPair(Register r_low) {
+    if (r_low != R1) {  // not the dalvik special case
+      CHECK_NE(r_low, kNoRegister);
+      CHECK_EQ(0, (r_low % 2));  // Pairs other than R1_R2 start even-aligned.
+      const int r = r_low / 2;
+      CHECK_LT(r, kNumberOfPairRegIds);
+      return FromRegisterPair(static_cast<RegisterPair>(r));
+    } else {
+      return FromRegisterPair(R1_R2);
+    }
+  }
+
+  // Return a DRegister overlapping SRegister r_low and r_low + 1.
+  static ArmManagedRegister FromSRegisterPair(SRegister r_low) {
+    CHECK_NE(r_low, kNoSRegister);
+    CHECK_EQ(0, (r_low % 2));  // Only even S registers start a D register.
+    const int r = r_low / 2;
+    CHECK_LT(r, kNumberOfOverlappingDRegIds);
+    return FromDRegister(static_cast<DRegister>(r));
+  }
+
+ private:
+  bool IsValidManagedRegister() const {
+    return (0 <= id_) && (id_ < kNumberOfRegIds);
+  }
+
+  // Raw flat register id; the register must not be NoRegister.
+  int RegId() const {
+    CHECK(!IsNoRegister());
+    return id_;
+  }
+
+  // Allocation id for registers that have exactly one allocation id: core, S,
+  // and non-overlapping D registers (see the allocation-ids map above).
+  int AllocId() const {
+    CHECK(IsValidManagedRegister() &&
+          !IsOverlappingDRegister() && !IsRegisterPair());
+    int r = id_;
+    if ((kNumberOfDAllocIds > 0) && IsDRegister()) {  // VFPv3-D32 only.
+      r -= kNumberOfOverlappingDRegIds;
+    }
+    CHECK_LT(r, kNumberOfAllocIds);
+    return r;
+  }
+
+  // Allocation ids of the low/high halves of a pair or overlapping D
+  // register; defined in managed_register_arm.cc.
+  int AllocIdLow() const;
+  int AllocIdHigh() const;
+
+  friend class ManagedRegister;
+
+  explicit ArmManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+  static ArmManagedRegister FromRegId(int reg_id) {
+    ArmManagedRegister reg(reg_id);
+    CHECK(reg.IsValidManagedRegister());
+    return reg;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg);
+
+} // namespace arm
+
+// Down-casts a generic ManagedRegister to its ARM-specific view. The id must
+// be either the no-register sentinel or a valid ARM register id.
+inline arm::ArmManagedRegister ManagedRegister::AsArm() const {
+  arm::ArmManagedRegister reg(id_);
+  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+  return reg;
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
diff --git a/compiler/utils/arm/managed_register_arm_test.cc b/compiler/utils/arm/managed_register_arm_test.cc
new file mode 100644
index 0000000..f5d4cc0
--- /dev/null
+++ b/compiler/utils/arm/managed_register_arm_test.cc
@@ -0,0 +1,767 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_arm.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace arm {
+
+// The no-register sentinel must classify as no register and overlap nothing,
+// not even itself.
+TEST(ArmManagedRegister, NoRegister) {
+  ArmManagedRegister reg = ManagedRegister::NoRegister().AsArm();
+  EXPECT_TRUE(reg.IsNoRegister());
+  EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+// A core register must classify as a core register only, and round-trip
+// through AsCoreRegister(). Exercises low (R0, R1), middle (R8) and the
+// highest (R15/PC) core register.
+TEST(ArmManagedRegister, CoreRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  // This check was missing only in the R0 section; added for consistency
+  // with the sibling sections below.
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R0, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R1, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R8);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R8, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R15, reg.AsCoreRegister());
+}
+
+
+// A single-precision VFP register must classify as an S register only, and
+// round-trip through AsSRegister(). Exercises both even and odd S registers
+// across the full S0..S31 range.
+TEST(ArmManagedRegister, SRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S0, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S1, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S3);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S3, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S15, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S30);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S30, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S31, reg.AsSRegister());
+}
+
+
+// A double-precision VFP register must classify as a D register; D0..D15
+// additionally classify as "overlapping" and alias the S-register pair
+// (2*d, 2*d+1), matching FromSRegisterPair. D16..D31 (VFPv3-D32 only) have
+// no S-register aliases.
+TEST(ArmManagedRegister, DRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D0, reg.AsDRegister());
+  EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S1, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S0)));
+
+  reg = ArmManagedRegister::FromDRegister(D1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D1, reg.AsDRegister());
+  EXPECT_EQ(S2, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S3, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S2)));
+
+  reg = ArmManagedRegister::FromDRegister(D6);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D6, reg.AsDRegister());
+  EXPECT_EQ(S12, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S13, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S12)));
+
+  reg = ArmManagedRegister::FromDRegister(D14);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D14, reg.AsDRegister());
+  EXPECT_EQ(S28, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S29, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S28)));
+
+  reg = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D15, reg.AsDRegister());
+  EXPECT_EQ(S30, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S31, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S30)));
+
+#ifdef VFPv3_D32
+  // D16..D31 exist only on VFPv3-D32 hardware and never overlap S registers.
+  reg = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D16, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D18);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D18, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D30);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D30, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D31, reg.AsDRegister());
+#endif  // VFPv3_D32
+}
+
+
+// A register pair must classify as a pair only, expose its low/high halves,
+// and match the pair built by FromCoreRegisterPair from its low register
+// (including the Dalvik-style R1_R2 special case).
+TEST(ArmManagedRegister, Pair) {
+  ArmManagedRegister reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R0_R1, reg.AsRegisterPair());
+  EXPECT_EQ(R0, reg.AsRegisterPairLow());
+  EXPECT_EQ(R1, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R0)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R1_R2);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R1_R2, reg.AsRegisterPair());
+  EXPECT_EQ(R1, reg.AsRegisterPairLow());
+  EXPECT_EQ(R2, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R1)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R2_R3);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R2_R3, reg.AsRegisterPair());
+  EXPECT_EQ(R2, reg.AsRegisterPairLow());
+  EXPECT_EQ(R3, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R2)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R4_R5, reg.AsRegisterPair());
+  EXPECT_EQ(R4, reg.AsRegisterPairLow());
+  EXPECT_EQ(R5, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R4)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R6_R7);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R6_R7, reg.AsRegisterPair());
+  EXPECT_EQ(R6, reg.AsRegisterPairLow());
+  EXPECT_EQ(R7, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R6)));
+}
+
+
+// Equals must be an exact identity: a register equals only a register of the
+// same kind with the same id, never a different kind (even an aliasing one,
+// e.g. D0 vs S0).
+TEST(ArmManagedRegister, Equals) {
+  ManagedRegister no_reg = ManagedRegister::NoRegister();
+  EXPECT_TRUE(no_reg.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R0 = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R1 = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R8 = ArmManagedRegister::FromCoreRegister(R8);
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S0 = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(reg_S0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S1 = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg_S1.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S31 = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg_S31.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D0 = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg_D0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D15 = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg_D15.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+#ifdef VFPv3_D32
+  ArmManagedRegister reg_D16 = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg_D16.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D30 = ArmManagedRegister::FromDRegister(D30);
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(reg_D30.Equals(ArmManagedRegister::FromDRegister(D30)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  // BUG FIX: this previously constructed reg_D31 from D30, which contradicts
+  // the assertions below (!Equals(D30) and Equals(D31)).
+  ArmManagedRegister reg_D31 = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D30)));
+  EXPECT_TRUE(reg_D31.Equals(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+#endif  // VFPv3_D32
+
+  ArmManagedRegister reg_R0R1 = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R2_R3)));
+
+  ArmManagedRegister reg_R4R5 = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+
+  ArmManagedRegister reg_R6R7 = ArmManagedRegister::FromRegisterPair(R6_R7);
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+  EXPECT_TRUE(reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+}
+
+
+// Verify ArmManagedRegister::Overlaps across all register kinds:
+//  - every register overlaps itself;
+//  - S and D registers alias: S[2n]/S[2n+1] share storage with D[n]
+//    (exercised below via S0/S1 <-> D0, S15 <-> D7, S30/S31 <-> D15);
+//  - D16-D31 (VFPv3_D32 only) have no S-register aliases;
+//  - a register pair overlaps exactly its two core registers.
+// EXPECT_FALSE is used instead of EXPECT_TRUE(!...) so that a failure
+// message shows the actual predicate being checked.
+TEST(ArmManagedRegister, Overlaps) {
+  // Core register R0: overlaps itself and the pair R0_R1 that contains it.
+  ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // Core register R1: also overlaps the pair R0_R1 (its high half).
+  reg = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // Core register R7: member of no probed pair, so it only overlaps itself.
+  reg = ArmManagedRegister::FromCoreRegister(R7);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // S0: low half of D0, so it overlaps itself and D0.
+  reg = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // S1: high half of D0, so it overlaps itself and D0 (but not S0).
+  reg = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // S15: high half of D7.
+  reg = ArmManagedRegister::FromSRegister(S15);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // S31: high half of D15.
+  reg = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // D0: overlaps both of its S-register halves, S0 and S1.
+  reg = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // D7: overlaps S15 (its high half; S14 is not probed here).
+  reg = ArmManagedRegister::FromDRegister(D7);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // D15: overlaps both of its S-register halves, S30 and S31.
+  reg = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+#ifdef VFPv3_D32
+  // D16 and D31 exist only with VFPv3-D32 and have no S-register aliases:
+  // each overlaps only itself.
+  reg = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+#endif  // VFPv3_D32
+
+  // Pair R0_R1: overlaps exactly its two core registers and itself.
+  reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  // Pair R4_R5: disjoint from every probe except itself (R4/R5 not probed
+  // individually here).
+  reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_FALSE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+}
+
+} // namespace arm
+} // namespace art