Diffstat (limited to 'compiler/utils')
-rw-r--r--  compiler/utils/arm/assembler_arm.h                     33
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc                1725
-rw-r--r--  compiler/utils/arm/assembler_arm32.h                  411
-rw-r--r--  compiler/utils/arm/assembler_arm32_test.cc            941
-rw-r--r--  compiler/utils/arm/assembler_arm_shared.h              53
-rw-r--r--  compiler/utils/arm/assembler_arm_vixl.cc              382
-rw-r--r--  compiler/utils/arm/assembler_arm_vixl.h               124
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm.cc           4
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.cc    599
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.h     225
-rw-r--r--  compiler/utils/arm/managed_register_arm.h              29
-rw-r--r--  compiler/utils/assembler.cc                             1
-rw-r--r--  compiler/utils/assembler_thumb_test.cc                194
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc   196
-rw-r--r--  compiler/utils/jni_macro_assembler.cc                   4
-rw-r--r--  compiler/utils/label.h                                  2
-rw-r--r--  compiler/utils/mips/assembler_mips.cc                 138
-rw-r--r--  compiler/utils/mips/assembler_mips.h                  200
-rw-r--r--  compiler/utils/x86/assembler_x86.cc                    17
-rw-r--r--  compiler/utils/x86/assembler_x86.h                      3
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc               36
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc              19
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h                3
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc         42
24 files changed, 2135 insertions, 3246 deletions
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index c52a5a94f4..3084e6e2b6 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -28,6 +28,7 @@
#include "base/stl_util.h"
#include "base/value_object.h"
#include "constants_arm.h"
+#include "utils/arm/assembler_arm_shared.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"
@@ -36,7 +37,6 @@
namespace art {
namespace arm {
-class Arm32Assembler;
class Thumb2Assembler;
// Assembler literal is a value embedded in code, retrieved using a PC-relative load.
@@ -208,7 +208,6 @@ class ShifterOperand {
uint32_t rotate_;
uint32_t immed_;
- friend class Arm32Assembler;
friend class Thumb2Assembler;
#ifdef SOURCE_ASSEMBLER_SUPPORT
@@ -216,29 +215,6 @@ class ShifterOperand {
#endif
};
-
-enum LoadOperandType {
- kLoadSignedByte,
- kLoadUnsignedByte,
- kLoadSignedHalfword,
- kLoadUnsignedHalfword,
- kLoadWord,
- kLoadWordPair,
- kLoadSWord,
- kLoadDWord
-};
-
-
-enum StoreOperandType {
- kStoreByte,
- kStoreHalfword,
- kStoreWord,
- kStoreWordPair,
- kStoreSWord,
- kStoreDWord
-};
-
-
// Load/store multiple addressing mode.
enum BlockAddressMode {
// bit encoding P U W
@@ -419,13 +395,6 @@ enum ItState {
kItE = kItElse
};
-// Set condition codes request.
-enum SetCc {
- kCcDontCare, // Allows prioritizing 16-bit instructions on Thumb2 whether they set CCs or not.
- kCcSet,
- kCcKeep,
-};
-
constexpr uint32_t kNoItCondition = 3;
constexpr uint32_t kInvalidModifiedImmediate = -1;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
deleted file mode 100644
index b8eb60c387..0000000000
--- a/compiler/utils/arm/assembler_arm32.cc
+++ /dev/null
@@ -1,1725 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_arm32.h"
-
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "offsets.h"
-#include "thread.h"
-
-namespace art {
-namespace arm {
-
-bool Arm32Assembler::ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op) {
- // Avoid the more expensive test for frequent small immediate values.
- if (immediate < (1 << kImmed8Bits)) {
- shifter_op->type_ = ShifterOperand::kImmediate;
- shifter_op->is_rotate_ = true;
- shifter_op->rotate_ = 0;
- shifter_op->immed_ = immediate;
- return true;
- }
- // Note that immediate must be unsigned for the test to work correctly.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
- if (imm8 < (1 << kImmed8Bits)) {
- shifter_op->type_ = ShifterOperand::kImmediate;
- shifter_op->is_rotate_ = true;
- shifter_op->rotate_ = rot;
- shifter_op->immed_ = imm8;
- return true;
- }
- }
- return false;
-}
-
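The loop in ShifterOperandCanHoldArm32 above implements ARM's modified-immediate rule: a 32-bit operand is encodable only if rotating it left by some even amount collapses it into eight bits. A standalone illustration of one encodable value (a sketch, independent of the assembler classes):

    #include <cassert>
    #include <cstdint>

    int main() {
      // 0xFF000000 rotated left by 8 bit positions (rot = 4, i.e. 2 * rot)
      // collapses to 0xFF, so it is encoded as imm8 = 0xFF with rotate
      // field 4; the CPU reverses this as 0xFF ROR 8 == 0xFF000000.
      uint32_t immediate = 0xFF000000u;
      int rot = 4;
      uint32_t imm8 = (immediate << 2 * rot) | (immediate >> (32 - 2 * rot));
      assert(imm8 == 0xFFu);
      return 0;
    }
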
-bool Arm32Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
- ShifterOperand shifter_op;
- return ShifterOperandCanHoldArm32(immediate, &shifter_op);
-}
-
-bool Arm32Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
- Register rn ATTRIBUTE_UNUSED,
- Opcode opcode ATTRIBUTE_UNUSED,
- uint32_t immediate,
- SetCc set_cc ATTRIBUTE_UNUSED,
- ShifterOperand* shifter_op) {
- return ShifterOperandCanHoldArm32(immediate, shifter_op);
-}
-
-void Arm32Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), AND, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::eor(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), EOR, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::sub(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), SUB, set_cc, rn, rd, so);
-}
-
-void Arm32Assembler::rsb(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), RSB, set_cc, rn, rd, so);
-}
-
-void Arm32Assembler::add(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), ADD, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::adc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), ADC, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::sbc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), SBC, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::rsc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), RSC, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::tst(Register rn, const ShifterOperand& so, Condition cond) {
- CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker.
- EmitType01(cond, so.type(), TST, kCcSet, rn, R0, so);
-}
-
-
-void Arm32Assembler::teq(Register rn, const ShifterOperand& so, Condition cond) {
- CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker.
- EmitType01(cond, so.type(), TEQ, kCcSet, rn, R0, so);
-}
-
-
-void Arm32Assembler::cmp(Register rn, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), CMP, kCcSet, rn, R0, so);
-}
-
-
-void Arm32Assembler::cmn(Register rn, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), CMN, kCcSet, rn, R0, so);
-}
-
-
-void Arm32Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), ORR, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::orn(Register rd ATTRIBUTE_UNUSED,
- Register rn ATTRIBUTE_UNUSED,
- const ShifterOperand& so ATTRIBUTE_UNUSED,
- Condition cond ATTRIBUTE_UNUSED,
- SetCc set_cc ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "orn is not supported on ARM32";
-}
-
-
-void Arm32Assembler::mov(Register rd, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), MOV, set_cc, R0, rd, so);
-}
-
-
-void Arm32Assembler::bic(Register rd, Register rn, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), BIC, set_cc, rn, rd, so);
-}
-
-
-void Arm32Assembler::mvn(Register rd, const ShifterOperand& so,
- Condition cond, SetCc set_cc) {
- EmitType01(cond, so.type(), MVN, set_cc, R0, rd, so);
-}
-
-
-void Arm32Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
- // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
- EmitMulOp(cond, 0, R0, rd, rn, rm);
-}
-
-
-void Arm32Assembler::mla(Register rd, Register rn, Register rm, Register ra,
- Condition cond) {
- // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
- EmitMulOp(cond, B21, ra, rd, rn, rm);
-}
-
-
-void Arm32Assembler::mls(Register rd, Register rn, Register rm, Register ra,
- Condition cond) {
- // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
- EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
-}
-
-
-void Arm32Assembler::smull(Register rd_lo, Register rd_hi, Register rn,
- Register rm, Condition cond) {
- // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
- EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
-}
-
-
-void Arm32Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
- Register rm, Condition cond) {
- // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
- EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
-}
-
-
-void Arm32Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = B26 | B25 | B24 | B20 |
- B15 | B14 | B13 | B12 |
- (static_cast<int32_t>(cond) << kConditionShift) |
- (static_cast<int32_t>(rn) << 0) |
- (static_cast<int32_t>(rd) << 16) |
- (static_cast<int32_t>(rm) << 8) |
- B4;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = B26 | B25 | B24 | B21 | B20 |
- B15 | B14 | B13 | B12 |
- (static_cast<int32_t>(cond) << kConditionShift) |
- (static_cast<int32_t>(rn) << 0) |
- (static_cast<int32_t>(rd) << 16) |
- (static_cast<int32_t>(rm) << 8) |
- B4;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- CHECK_LE(lsb, 31U);
- CHECK(1U <= width && width <= 32U) << width;
- uint32_t widthminus1 = width - 1;
-
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B26 | B25 | B24 | B23 | B21 |
- (widthminus1 << 16) |
- (static_cast<uint32_t>(rd) << 12) |
- (lsb << 7) |
- B6 | B4 |
- static_cast<uint32_t>(rn);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- CHECK_LE(lsb, 31U);
- CHECK(1U <= width && width <= 32U) << width;
- uint32_t widthminus1 = width - 1;
-
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B26 | B25 | B24 | B23 | B22 | B21 |
- (widthminus1 << 16) |
- (static_cast<uint32_t>(rd) << 12) |
- (lsb << 7) |
- B6 | B4 |
- static_cast<uint32_t>(rn);
- Emit(encoding);
-}
-
-
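Both bit-field extracts above store the field as (width - 1, lsb) rather than (msb, lsb). What sbfx computes, expressed with plain shifts (an illustrative sketch; the register values are arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
      // sbfx r0, r1, #4, #8 takes bits [11:4] of r1 and sign-extends them.
      uint32_t rn = 0x00000F50u;                      // bits [11:4] == 0xF5
      int32_t rd = static_cast<int32_t>(rn << (32 - 4 - 8)) >> (32 - 8);
      assert(rd == -11);                              // int8_t(0xF5) == -11
      return 0;
    }
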
-void Arm32Assembler::ldr(Register rd, const Address& ad, Condition cond) {
- EmitMemOp(cond, true, false, rd, ad);
-}
-
-
-void Arm32Assembler::str(Register rd, const Address& ad, Condition cond) {
- EmitMemOp(cond, false, false, rd, ad);
-}
-
-
-void Arm32Assembler::ldrb(Register rd, const Address& ad, Condition cond) {
- EmitMemOp(cond, true, true, rd, ad);
-}
-
-
-void Arm32Assembler::strb(Register rd, const Address& ad, Condition cond) {
- EmitMemOp(cond, false, true, rd, ad);
-}
-
-
-void Arm32Assembler::ldrh(Register rd, const Address& ad, Condition cond) {
- EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
-}
-
-
-void Arm32Assembler::strh(Register rd, const Address& ad, Condition cond) {
- EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
-}
-
-
-void Arm32Assembler::ldrsb(Register rd, const Address& ad, Condition cond) {
- EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
-}
-
-
-void Arm32Assembler::ldrsh(Register rd, const Address& ad, Condition cond) {
- EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
-}
-
-
-void Arm32Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
- CHECK_EQ(rd % 2, 0);
- EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
-}
-
-
-void Arm32Assembler::strd(Register rd, const Address& ad, Condition cond) {
- CHECK_EQ(rd % 2, 0);
- EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
-}
-
-
-void Arm32Assembler::ldm(BlockAddressMode am,
- Register base,
- RegList regs,
- Condition cond) {
- EmitMultiMemOp(cond, am, true, base, regs);
-}
-
-
-void Arm32Assembler::stm(BlockAddressMode am,
- Register base,
- RegList regs,
- Condition cond) {
- EmitMultiMemOp(cond, am, false, base, regs);
-}
-
-
-void Arm32Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
-}
-
-
-bool Arm32Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
- uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
- if (((imm32 & ((1 << 19) - 1)) == 0) &&
- ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
- (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
- uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
- ((imm32 >> 19) & ((1 << 6) -1));
- EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
- sd, S0, S0);
- return true;
- }
- return false;
-}
-
-
-bool Arm32Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
- uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
- if (((imm64 & ((1LL << 48) - 1)) == 0) &&
- ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
- (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
- uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
- ((imm64 >> 48) & ((1 << 6) -1));
- EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
- dd, D0, D0);
- return true;
- }
- return false;
-}
-
-
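The two immediate overloads above accept exactly the values VMOV (immediate) can hold: a sign, a four-bit mantissa, and a small exponent, packed into eight bits. Checking 1.0f by hand (a sketch using memcpy where the assembler uses bit_cast):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // 1.0f is 0x3F800000: the low 19 mantissa bits are zero and bits
      // [30:25] are 0b011111, so it qualifies and packs to imm8 = 0x70.
      float value = 1.0f;
      uint32_t imm32;
      std::memcpy(&imm32, &value, sizeof(imm32));
      assert((imm32 & ((1u << 19) - 1)) == 0);
      uint32_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
                      ((imm32 >> 19) & ((1u << 6) - 1));
      assert(imm8 == 0x70u);
      return 0;
    }
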
-void Arm32Assembler::vadds(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B21 | B20, sd, sn, sm);
-}
-
-
-void Arm32Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B21 | B20, dd, dn, dm);
-}
-
-
-void Arm32Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
-}
-
-
-void Arm32Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
-}
-
-
-void Arm32Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B21, sd, sn, sm);
-}
-
-
-void Arm32Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B21, dd, dn, dm);
-}
-
-
-void Arm32Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, 0, sd, sn, sm);
-}
-
-
-void Arm32Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, 0, dd, dn, dm);
-}
-
-
-void Arm32Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B6, sd, sn, sm);
-}
-
-
-void Arm32Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B6, dd, dn, dm);
-}
-
-
-void Arm32Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
- Condition cond) {
- EmitVFPsss(cond, B23, sd, sn, sm);
-}
-
-
-void Arm32Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
- Condition cond) {
- EmitVFPddd(cond, B23, dd, dn, dm);
-}
-
-
-void Arm32Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
-}
-
-
-void Arm32Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
-}
-
-
-void Arm32Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
-}
-
-void Arm32Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
-}
-
-
-void Arm32Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
- EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
-}
-
-
-void Arm32Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
- EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
-}
-
-
-void Arm32Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
- EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
-}
-
-
-void Arm32Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
- EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
-}
-
-
-void Arm32Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
- EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
-}
-
-
-void Arm32Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
- EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
-}
-
-
-void Arm32Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
-}
-
-
-void Arm32Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
-}
-
-
-void Arm32Assembler::vcmpsz(SRegister sd, Condition cond) {
- EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
-}
-
-
-void Arm32Assembler::vcmpdz(DRegister dd, Condition cond) {
- EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
-}
-
-void Arm32Assembler::b(Label* label, Condition cond) {
- EmitBranch(cond, label, false);
-}
-
-
-void Arm32Assembler::bl(Label* label, Condition cond) {
- EmitBranch(cond, label, true);
-}
-
-
-void Arm32Assembler::MarkExceptionHandler(Label* label) {
- EmitType01(AL, 1, TST, kCcSet, PC, R0, ShifterOperand(0));
- Label l;
- b(&l);
- EmitBranch(AL, label, false);
- Bind(&l);
-}
-
-
-void Arm32Assembler::Emit(int32_t value) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<int32_t>(value);
-}
-
-
-void Arm32Assembler::EmitType01(Condition cond,
- int type,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
- type << kTypeShift |
- static_cast<int32_t>(opcode) << kOpcodeShift |
- (set_cc == kCcSet ? 1 : 0) << kSShift |
- static_cast<int32_t>(rn) << kRnShift |
- static_cast<int32_t>(rd) << kRdShift |
- so.encodingArm();
- Emit(encoding);
-}
-
-
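Given the shift constants from constants_arm.h (condition at bits 31:28, type at 26:25, opcode at 24:21, S at 20, Rn at 19:16, Rd at 15:12), the layout can be checked against a known encoding. Assembling adds r0, r1, #1 by hand (a sketch that inlines the shift amounts):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t cond = 0xE;    // AL
      uint32_t type = 1;      // immediate shifter operand
      uint32_t opcode = 0x4;  // ADD
      uint32_t s = 1, rn = 1, rd = 0, so = 1;
      uint32_t insn = (cond << 28) | (type << 25) | (opcode << 21) |
                      (s << 20) | (rn << 16) | (rd << 12) | so;
      assert(insn == 0xE2910001u);  // matches what an assembler produces
      return 0;
    }
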
-void Arm32Assembler::EmitType5(Condition cond, int offset, bool link) {
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
- 5 << kTypeShift |
- (link ? 1 : 0) << kLinkShift;
- Emit(Arm32Assembler::EncodeBranchOffset(offset, encoding));
-}
-
-
-void Arm32Assembler::EmitMemOp(Condition cond,
- bool load,
- bool byte,
- Register rd,
- const Address& ad) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- const Address& addr = static_cast<const Address&>(ad);
-
- int32_t encoding = 0;
- if (!ad.IsImmediate() && ad.GetRegisterOffset() == PC) {
- // PC relative LDR(literal)
- int32_t offset = ad.GetOffset();
- int32_t u = B23;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- CHECK_LT(offset, (1 << 12));
- encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B26 | B24 | u | B20 |
- (load ? L : 0) |
- (byte ? B : 0) |
- (static_cast<int32_t>(rd) << kRdShift) |
- 0xf << 16 |
- (offset & 0xfff);
-
- } else {
- encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B26 |
- (load ? L : 0) |
- (byte ? B : 0) |
- (static_cast<int32_t>(rd) << kRdShift) |
- addr.encodingArm();
- }
- Emit(encoding);
-}
-
-
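In the PC-relative case above, the 12-bit offset is unsigned, with the direction carried by the U bit (B23), so negative literal-pool displacements store their magnitude. The split in isolation (sketch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // ldr r0, [pc, #-8] stores u = 0 and offset = 8, never a signed field.
      int32_t offset = -8;
      uint32_t u = 1u << 23;  // B23
      if (offset < 0) {
        offset = -offset;
        u = 0;
      }
      assert(offset < (1 << 12));
      uint32_t field = u | (offset & 0xFFF);
      assert(field == 8u);
      return 0;
    }
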
-void Arm32Assembler::EmitMemOpAddressMode3(Condition cond,
- int32_t mode,
- Register rd,
- const Address& ad) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- const Address& addr = static_cast<const Address&>(ad);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B22 |
- mode |
- (static_cast<int32_t>(rd) << kRdShift) |
- addr.encoding3();
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitMultiMemOp(Condition cond,
- BlockAddressMode am,
- bool load,
- Register base,
- RegList regs) {
- CHECK_NE(base, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 |
- am |
- (load ? L : 0) |
- (static_cast<int32_t>(base) << kRnShift) |
- regs;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitShiftImmediate(Condition cond,
- Shift opcode,
- Register rd,
- Register rm,
- const ShifterOperand& so) {
- CHECK_NE(cond, kNoCondition);
- CHECK(so.IsImmediate());
- int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
- static_cast<int32_t>(MOV) << kOpcodeShift |
- static_cast<int32_t>(rd) << kRdShift |
- so.encodingArm() << kShiftImmShift |
- static_cast<int32_t>(opcode) << kShiftShift |
- static_cast<int32_t>(rm);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitShiftRegister(Condition cond,
- Shift opcode,
- Register rd,
- Register rm,
- const ShifterOperand& so) {
- CHECK_NE(cond, kNoCondition);
- CHECK(so.IsRegister());
- int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
- static_cast<int32_t>(MOV) << kOpcodeShift |
- static_cast<int32_t>(rd) << kRdShift |
- so.encodingArm() << kShiftRegisterShift |
- static_cast<int32_t>(opcode) << kShiftShift |
- B4 |
- static_cast<int32_t>(rm);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitBranch(Condition cond, Label* label, bool link) {
- if (label->IsBound()) {
- EmitType5(cond, label->Position() - buffer_.Size(), link);
- } else {
- int position = buffer_.Size();
- // Use the offset field of the branch instruction for linking the sites.
- EmitType5(cond, label->position_, link);
- label->LinkTo(position);
- }
-}
-
-
-void Arm32Assembler::clz(Register rd, Register rm, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- CHECK_NE(rd, PC);
- CHECK_NE(rm, PC);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B24 | B22 | B21 | (0xf << 16) |
- (static_cast<int32_t>(rd) << kRdShift) |
- (0xf << 8) | B4 | static_cast<int32_t>(rm);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
- B25 | B24 | ((imm16 >> 12) << 16) |
- static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
- B25 | B24 | B22 | ((imm16 >> 12) << 16) |
- static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
- Emit(encoding);
-}
-
-
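movw and movt place the upper four bits of the 16-bit immediate at instruction bits 19:16 and the remaining twelve at bits 11:0, which is what the (imm16 >> 12) and (imm16 & 0xfff) terms above compute. For example (sketch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // movw r0, #0xABCD splits its immediate into imm4 = 0xA (bits 19:16)
      // and imm12 = 0xBCD (bits 11:0).
      uint32_t imm16 = 0xABCD;
      assert((imm16 >> 12) == 0xAu);
      assert((imm16 & 0xFFF) == 0xBCDu);
      return 0;
    }
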
-void Arm32Assembler::EmitMiscellaneous(Condition cond, uint8_t op1,
- uint8_t op2, uint32_t a_part,
- uint32_t rest) {
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B26 | B25 | B23 |
- (op1 << 20) |
- (a_part << 16) |
- (op2 << 5) |
- B4 |
- rest;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitReverseBytes(Register rd, Register rm, Condition cond,
- uint8_t op1, uint8_t op2) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- CHECK_NE(rd, PC);
- CHECK_NE(rm, PC);
-
- int32_t encoding = (static_cast<int32_t>(rd) << kRdShift) |
- (0b1111 << 8) |
- static_cast<int32_t>(rm);
- EmitMiscellaneous(cond, op1, op2, 0b1111, encoding);
-}
-
-
-void Arm32Assembler::rbit(Register rd, Register rm, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- CHECK_NE(rd, PC);
- CHECK_NE(rm, PC);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B26 | B25 | B23 | B22 | B21 | B20 | (0xf << 16) |
- (static_cast<int32_t>(rd) << kRdShift) |
- (0xf << 8) | B5 | B4 | static_cast<int32_t>(rm);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::rev(Register rd, Register rm, Condition cond) {
- EmitReverseBytes(rd, rm, cond, 0b011, 0b001);
-}
-
-
-void Arm32Assembler::rev16(Register rd, Register rm, Condition cond) {
- EmitReverseBytes(rd, rm, cond, 0b011, 0b101);
-}
-
-
-void Arm32Assembler::revsh(Register rd, Register rm, Condition cond) {
- EmitReverseBytes(rd, rm, cond, 0b111, 0b101);
-}
-
-
-void Arm32Assembler::EmitMulOp(Condition cond, int32_t opcode,
- Register rd, Register rn,
- Register rm, Register rs) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(rs, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = opcode |
- (static_cast<int32_t>(cond) << kConditionShift) |
- (static_cast<int32_t>(rn) << kRnShift) |
- (static_cast<int32_t>(rd) << kRdShift) |
- (static_cast<int32_t>(rs) << kRsShift) |
- B7 | B4 |
- (static_cast<int32_t>(rm) << kRmShift);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::ldrex(Register rt, Register rn, Condition cond) {
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B24 |
- B23 |
- L |
- (static_cast<int32_t>(rn) << kLdExRnShift) |
- (static_cast<int32_t>(rt) << kLdExRtShift) |
- B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::ldrexd(Register rt, Register rt2, Register rn, Condition cond) {
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt, R14);
- CHECK_EQ(0u, static_cast<uint32_t>(rt) % 2);
- CHECK_EQ(static_cast<uint32_t>(rt) + 1, static_cast<uint32_t>(rt2));
- CHECK_NE(cond, kNoCondition);
-
- int32_t encoding =
- (static_cast<uint32_t>(cond) << kConditionShift) |
- B24 | B23 | B21 | B20 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rt) << 12 |
- B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::strex(Register rd,
- Register rt,
- Register rn,
- Condition cond) {
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B24 |
- B23 |
- (static_cast<int32_t>(rn) << kStrExRnShift) |
- (static_cast<int32_t>(rd) << kStrExRdShift) |
- B11 | B10 | B9 | B8 | B7 | B4 |
- (static_cast<int32_t>(rt) << kStrExRtShift);
- Emit(encoding);
-}
-
-void Arm32Assembler::strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond) {
- CHECK_NE(rd, kNoRegister);
- CHECK_NE(rn, kNoRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt, R14);
- CHECK_NE(rd, rt);
- CHECK_NE(rd, rt2);
- CHECK_EQ(0u, static_cast<uint32_t>(rt) % 2);
- CHECK_EQ(static_cast<uint32_t>(rt) + 1, static_cast<uint32_t>(rt2));
- CHECK_NE(cond, kNoCondition);
-
- int32_t encoding =
- (static_cast<uint32_t>(cond) << kConditionShift) |
- B24 | B23 | B21 |
- static_cast<uint32_t>(rn) << 16 |
- static_cast<uint32_t>(rd) << 12 |
- B11 | B10 | B9 | B8 | B7 | B4 |
- static_cast<uint32_t>(rt);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::clrex(Condition cond) {
- CHECK_EQ(cond, AL); // This cannot be conditional on ARM.
- int32_t encoding = (kSpecialCondition << kConditionShift) |
- B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::nop(Condition cond) {
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B25 | B24 | B21 | (0xf << 12);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
- CHECK_NE(sn, kNoSRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 |
- ((static_cast<int32_t>(sn) >> 1)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sn) & 1)*B7) | B4;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
- CHECK_NE(sn, kNoSRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B20 |
- ((static_cast<int32_t>(sn) >> 1)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sn) & 1)*B7) | B4;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
- Condition cond) {
- CHECK_NE(sm, kNoSRegister);
- CHECK_NE(sm, S31);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
- (static_cast<int32_t>(sm) >> 1);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vmovrrs(Register rt, Register rt2, SRegister sm,
- Condition cond) {
- CHECK_NE(sm, kNoSRegister);
- CHECK_NE(sm, S31);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CHECK_NE(rt, rt2);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 | B20 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 |
- ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
- (static_cast<int32_t>(sm) >> 1);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vmovdrr(DRegister dm, Register rt, Register rt2,
- Condition cond) {
- CHECK_NE(dm, kNoDRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
- ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
- (static_cast<int32_t>(dm) & 0xf);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vmovrrd(Register rt, Register rt2, DRegister dm,
- Condition cond) {
- CHECK_NE(dm, kNoDRegister);
- CHECK_NE(rt, kNoRegister);
- CHECK_NE(rt, SP);
- CHECK_NE(rt, PC);
- CHECK_NE(rt2, kNoRegister);
- CHECK_NE(rt2, SP);
- CHECK_NE(rt2, PC);
- CHECK_NE(rt, rt2);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B22 | B20 |
- (static_cast<int32_t>(rt2)*B16) |
- (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
- ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
- (static_cast<int32_t>(dm) & 0xf);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vldrs(SRegister sd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(sd, kNoSRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 | B20 |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- B11 | B9 | addr.vencoding();
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vstrs(SRegister sd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(static_cast<Register>(addr.encodingArm() & (0xf << kRnShift)), PC);
- CHECK_NE(sd, kNoSRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- B11 | B9 | addr.vencoding();
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vldrd(DRegister dd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(dd, kNoDRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 | B20 |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- B11 | B9 | B8 | addr.vencoding();
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vstrd(DRegister dd, const Address& ad, Condition cond) {
- const Address& addr = static_cast<const Address&>(ad);
- CHECK_NE(static_cast<Register>(addr.encodingArm() & (0xf << kRnShift)), PC);
- CHECK_NE(dd, kNoDRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B24 |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- B11 | B9 | B8 | addr.vencoding();
- Emit(encoding);
-}
-
-
-void Arm32Assembler::vpushs(SRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, true, false, cond);
-}
-
-
-void Arm32Assembler::vpushd(DRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, true, true, cond);
-}
-
-
-void Arm32Assembler::vpops(SRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, false, false, cond);
-}
-
-
-void Arm32Assembler::vpopd(DRegister reg, int nregs, Condition cond) {
- EmitVPushPop(static_cast<uint32_t>(reg), nregs, false, true, cond);
-}
-
-
-void Arm32Assembler::vldmiad(Register, DRegister, int, Condition) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-
-void Arm32Assembler::vstmiad(Register, DRegister, int, Condition) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-
-void Arm32Assembler::EmitVPushPop(uint32_t reg, int nregs, bool push, bool dbl, Condition cond) {
- CHECK_NE(cond, kNoCondition);
- CHECK_GT(nregs, 0);
- uint32_t D;
- uint32_t Vd;
- if (dbl) {
- // Encoded as D:Vd.
- D = (reg >> 4) & 1;
- Vd = reg & 15U /* 0b1111 */;
- } else {
- // Encoded as Vd:D.
- D = reg & 1;
- Vd = (reg >> 1) & 15U /* 0b1111 */;
- }
- int32_t encoding = B27 | B26 | B21 | B19 | B18 | B16 |
- B11 | B9 |
- (dbl ? B8 : 0) |
- (push ? B24 : (B23 | B20)) |
- static_cast<int32_t>(cond) << kConditionShift |
- nregs << (dbl ? 1 : 0) |
- D << 22 |
- Vd << 12;
- Emit(encoding);
-}
-
-
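The register index is split differently for the two banks: doubles put the high bit of the 5-bit index in D (bit 22), singles put the low bit there. A quick check of both splits (sketch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // vpush {d17}: double registers encode the 5-bit index as D:Vd.
      uint32_t d17 = 17;
      assert(((d17 >> 4) & 1) == 1 && (d17 & 15) == 1);
      // vpush {s5}: single registers encode it the other way, as Vd:D.
      uint32_t s5 = 5;
      assert((s5 & 1) == 1 && ((s5 >> 1) & 15) == 2);
      return 0;
    }
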
-void Arm32Assembler::EmitVFPsss(Condition cond, int32_t opcode,
- SRegister sd, SRegister sn, SRegister sm) {
- CHECK_NE(sd, kNoSRegister);
- CHECK_NE(sn, kNoSRegister);
- CHECK_NE(sm, kNoSRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | opcode |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sn) >> 1)*B16) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- ((static_cast<int32_t>(sn) & 1)*B7) |
- ((static_cast<int32_t>(sm) & 1)*B5) |
- (static_cast<int32_t>(sm) >> 1);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitVFPddd(Condition cond, int32_t opcode,
- DRegister dd, DRegister dn, DRegister dm) {
- CHECK_NE(dd, kNoDRegister);
- CHECK_NE(dn, kNoDRegister);
- CHECK_NE(dm, kNoDRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | B8 | opcode |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dn) & 0xf)*B16) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- ((static_cast<int32_t>(dn) >> 4)*B7) |
- ((static_cast<int32_t>(dm) >> 4)*B5) |
- (static_cast<int32_t>(dm) & 0xf);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitVFPsd(Condition cond, int32_t opcode,
- SRegister sd, DRegister dm) {
- CHECK_NE(sd, kNoSRegister);
- CHECK_NE(dm, kNoDRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | opcode |
- ((static_cast<int32_t>(sd) & 1)*B22) |
- ((static_cast<int32_t>(sd) >> 1)*B12) |
- ((static_cast<int32_t>(dm) >> 4)*B5) |
- (static_cast<int32_t>(dm) & 0xf);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::EmitVFPds(Condition cond, int32_t opcode,
- DRegister dd, SRegister sm) {
- CHECK_NE(dd, kNoDRegister);
- CHECK_NE(sm, kNoSRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B11 | B9 | opcode |
- ((static_cast<int32_t>(dd) >> 4)*B22) |
- ((static_cast<int32_t>(dd) & 0xf)*B12) |
- ((static_cast<int32_t>(sm) & 1)*B5) |
- (static_cast<int32_t>(sm) >> 1);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK_LE(shift_imm, 31u);
- mov(rd, ShifterOperand(rm, LSL, shift_imm), cond, set_cc);
-}
-
-
-void Arm32Assembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK(1u <= shift_imm && shift_imm <= 32u);
- if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
- mov(rd, ShifterOperand(rm, LSR, shift_imm), cond, set_cc);
-}
-
-
-void Arm32Assembler::Asr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK(1u <= shift_imm && shift_imm <= 32u);
- if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
- mov(rd, ShifterOperand(rm, ASR, shift_imm), cond, set_cc);
-}
-
-
-void Arm32Assembler::Ror(Register rd, Register rm, uint32_t shift_imm,
- Condition cond, SetCc set_cc) {
- CHECK(1u <= shift_imm && shift_imm <= 31u);
- mov(rd, ShifterOperand(rm, ROR, shift_imm), cond, set_cc);
-}
-
-void Arm32Assembler::Rrx(Register rd, Register rm, Condition cond, SetCc set_cc) {
- mov(rd, ShifterOperand(rm, ROR, 0), cond, set_cc);
-}
-
-
-void Arm32Assembler::Lsl(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- mov(rd, ShifterOperand(rm, LSL, rn), cond, set_cc);
-}
-
-
-void Arm32Assembler::Lsr(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- mov(rd, ShifterOperand(rm, LSR, rn), cond, set_cc);
-}
-
-
-void Arm32Assembler::Asr(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- mov(rd, ShifterOperand(rm, ASR, rn), cond, set_cc);
-}
-
-
-void Arm32Assembler::Ror(Register rd, Register rm, Register rn,
- Condition cond, SetCc set_cc) {
- mov(rd, ShifterOperand(rm, ROR, rn), cond, set_cc);
-}
-
-void Arm32Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
- (static_cast<int32_t>(PC)*B12) |
- B11 | B9 | B4;
- Emit(encoding);
-}
-
-void Arm32Assembler::vcntd(DRegister dd, DRegister dm) {
- uint32_t encoding = (B31 | B30 | B29 | B28 | B25 | B24 | B23 | B21 | B20) |
- ((static_cast<int32_t>(dd) >> 4) * B22) |
- ((static_cast<uint32_t>(dd) & 0xf) * B12) |
- (B10 | B8) |
- ((static_cast<int32_t>(dm) >> 4) * B5) |
- (static_cast<uint32_t>(dm) & 0xf);
-
- Emit(encoding);
-}
-
-void Arm32Assembler::vpaddld(DRegister dd, DRegister dm, int32_t size, bool is_unsigned) {
- CHECK(size == 8 || size == 16 || size == 32) << size;
- uint32_t encoding = (B31 | B30 | B29 | B28 | B25 | B24 | B23 | B21 | B20) |
- ((static_cast<uint32_t>(size >> 4) & 0x3) * B18) |
- ((static_cast<int32_t>(dd) >> 4) * B22) |
- ((static_cast<uint32_t>(dd) & 0xf) * B12) |
- (B9) |
- (is_unsigned ? B7 : 0) |
- ((static_cast<int32_t>(dm) >> 4) * B5) |
- (static_cast<uint32_t>(dm) & 0xf);
-
- Emit(encoding);
-}
-
-
-void Arm32Assembler::svc(uint32_t imm24) {
- CHECK(IsUint<24>(imm24)) << imm24;
- int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
- Emit(encoding);
-}
-
-
-void Arm32Assembler::bkpt(uint16_t imm16) {
- int32_t encoding = (AL << kConditionShift) | B24 | B21 |
- ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::blx(Register rm, Condition cond) {
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B24 | B21 | (0xfff << 8) | B5 | B4 |
- (static_cast<int32_t>(rm) << kRmShift);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::bx(Register rm, Condition cond) {
- CHECK_NE(rm, kNoRegister);
- CHECK_NE(cond, kNoCondition);
- int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
- B24 | B21 | (0xfff << 8) | B4 |
- (static_cast<int32_t>(rm) << kRmShift);
- Emit(encoding);
-}
-
-
-void Arm32Assembler::Push(Register rd, Condition cond) {
- str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
-}
-
-
-void Arm32Assembler::Pop(Register rd, Condition cond) {
- ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
-}
-
-
-void Arm32Assembler::PushList(RegList regs, Condition cond) {
- stm(DB_W, SP, regs, cond);
-}
-
-
-void Arm32Assembler::PopList(RegList regs, Condition cond) {
- ldm(IA_W, SP, regs, cond);
-}
-
-
-void Arm32Assembler::Mov(Register rd, Register rm, Condition cond) {
- if (rd != rm) {
- mov(rd, ShifterOperand(rm), cond);
- }
-}
-
-
-void Arm32Assembler::Bind(Label* label) {
- CHECK(!label->IsBound());
- int bound_pc = buffer_.Size();
- while (label->IsLinked()) {
- int32_t position = label->Position();
- int32_t next = buffer_.Load<int32_t>(position);
- int32_t encoded = Arm32Assembler::EncodeBranchOffset(bound_pc - position, next);
- buffer_.Store<int32_t>(position, encoded);
- label->position_ = Arm32Assembler::DecodeBranchOffset(next);
- }
- label->BindTo(bound_pc);
-}
-
-
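Bind resolves forward references by walking a chain that EmitBranch threads through the unresolved instructions themselves: each unbound branch's offset field holds the position of the previous branch to the same label. A minimal model of that pattern (MiniLabel and the plain vector are hypothetical stand-ins; the real code works on AssemblerBuffer with encoded instructions):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct MiniLabel {
      int pos = -1;        // chain head while unbound, target once bound
      bool bound = false;
    };

    // Emit a "branch" whose payload is either the final offset (bound
    // label) or the previous link in the chain (unbound label).
    void Branch(std::vector<int32_t>& buf, MiniLabel* label) {
      int here = static_cast<int>(buf.size());
      buf.push_back(label->bound ? label->pos - here : label->pos);
      if (!label->bound) label->pos = here;
    }

    void Bind(std::vector<int32_t>& buf, MiniLabel* label) {
      int target = static_cast<int>(buf.size());
      while (label->pos != -1) {      // walk and patch every linked site
        int site = label->pos;
        int next = buf[site];
        buf[site] = target - site;    // the real offset replaces the link
        label->pos = next;
      }
      label->pos = target;
      label->bound = true;
    }

    int main() {
      std::vector<int32_t> buf;
      MiniLabel label;
      Branch(buf, &label);            // two forward branches...
      Branch(buf, &label);
      Bind(buf, &label);              // ...patched when the label binds
      assert(buf[0] == 2 && buf[1] == 1);
      return 0;
    }
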
-int32_t Arm32Assembler::EncodeBranchOffset(int offset, int32_t inst) {
- // The offset is off by 8 due to the way the ARM CPUs read PC.
- offset -= 8;
- CHECK_ALIGNED(offset, 4);
- CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;
-
- // Properly preserve only the bits supported in the instruction.
- offset >>= 2;
- offset &= kBranchOffsetMask;
- return (inst & ~kBranchOffsetMask) | offset;
-}
-
-
-int Arm32Assembler::DecodeBranchOffset(int32_t inst) {
- // Sign-extend, left-shift by 2, then add 8.
- return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
-}
-
-
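The << 8 then >> 6 in DecodeBranchOffset sign-extends the 24-bit field and rescales it by 4 in a single step, and the + 8 undoes the PC-ahead bias applied by EncodeBranchOffset. Round-tripping a backward branch (a sketch assuming arithmetic right shift on signed int, as the assembler itself does):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kMask = 0x00FFFFFF;  // 24-bit word-offset field
      int32_t offset = -16;              // branch 16 bytes backwards
      int32_t field = ((offset - 8) >> 2) & kMask;          // bias, scale, truncate
      int32_t decoded = (((field & kMask) << 8) >> 6) + 8;  // sign-extend, rescale, unbias
      assert(decoded == offset);
      return 0;
    }
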
-uint32_t Arm32Assembler::GetAdjustedPosition(uint32_t old_position ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-Literal* Arm32Assembler::NewLiteral(size_t size ATTRIBUTE_UNUSED,
- const uint8_t* data ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-void Arm32Assembler::LoadLiteral(Register rt ATTRIBUTE_UNUSED,
- Literal* literal ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-void Arm32Assembler::LoadLiteral(Register rt ATTRIBUTE_UNUSED, Register rt2 ATTRIBUTE_UNUSED,
- Literal* literal ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-void Arm32Assembler::LoadLiteral(SRegister sd ATTRIBUTE_UNUSED,
- Literal* literal ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-void Arm32Assembler::LoadLiteral(DRegister dd ATTRIBUTE_UNUSED,
- Literal* literal ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
-}
-
-
-void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value,
- Condition cond, SetCc set_cc) {
- if (value == 0 && set_cc != kCcSet) {
- if (rd != rn) {
- mov(rd, ShifterOperand(rn), cond, set_cc);
- }
- return;
- }
- // We prefer to select the shorter code sequence rather than selecting add for
- // positive values and sub for negatives ones, which would slightly improve
- // the readability of generated code for some constants.
- ShifterOperand shifter_op;
- if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
- add(rd, rn, shifter_op, cond, set_cc);
- } else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
- sub(rd, rn, shifter_op, cond, set_cc);
- } else {
- CHECK(rn != IP);
- if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
- mvn(IP, shifter_op, cond, kCcKeep);
- add(rd, rn, ShifterOperand(IP), cond, set_cc);
- } else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
- mvn(IP, shifter_op, cond, kCcKeep);
- sub(rd, rn, ShifterOperand(IP), cond, set_cc);
- } else {
- movw(IP, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(IP, value_high, cond);
- }
- add(rd, rn, ShifterOperand(IP), cond, set_cc);
- }
- }
-}
-
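The fallback order above is: immediate add, negated immediate sub, complemented immediate via mvn plus add/sub, and finally a movw/movt pair into IP. Tracing a few constants through the same rotated-immediate test (standalone sketch; the rot == 0 case is split out to avoid a 32-bit shift):

    #include <cassert>
    #include <cstdint>

    static bool FitsRotatedImm8(uint32_t v) {
      if (v < 256u) return true;  // rot == 0
      for (uint32_t rot = 1; rot < 16; rot++) {
        uint32_t r = (v << (2 * rot)) | (v >> (32 - 2 * rot));
        if (r < 256u) return true;
      }
      return false;
    }

    int main() {
      assert(FitsRotatedImm8(0xFFu));     // add rd, rn, #0xFF
      assert(FitsRotatedImm8(0x100u));    // so -256 becomes sub rd, rn, #0x100
      // -1021 (0xFFFFFC03): only its complement fits -> mvn ip, #0x3FC; add.
      assert(!FitsRotatedImm8(0xFFFFFC03u) && !FitsRotatedImm8(0x3FDu) &&
             FitsRotatedImm8(0x3FCu));
      // 0xFFFF: nothing fits -> movw ip, #0xFFFF; add rd, rn, ip.
      assert(!FitsRotatedImm8(0xFFFFu));
      assert(!FitsRotatedImm8(static_cast<uint32_t>(-0xFFFF)));
      assert(!FitsRotatedImm8(~0xFFFFu));
      return 0;
    }
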
-void Arm32Assembler::CmpConstant(Register rn, int32_t value, Condition cond) {
- ShifterOperand shifter_op;
- if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
- cmp(rn, shifter_op, cond);
- } else if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
- cmn(rn, shifter_op, cond);
- } else {
- movw(IP, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(IP, value_high, cond);
- }
- cmp(rn, ShifterOperand(IP), cond);
- }
-}
-
-void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
- ShifterOperand shifter_op;
- if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
- mov(rd, shifter_op, cond);
- } else if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
- mvn(rd, shifter_op, cond);
- } else {
- movw(rd, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(rd, value_high, cond);
- }
- }
-}
-
-void Arm32Assembler::LoadDImmediate(DRegister dd, double value, Condition cond) {
- if (!vmovd(dd, value, cond)) {
- uint64_t int_value = bit_cast<uint64_t, double>(value);
- if (int_value == bit_cast<uint64_t, double>(0.0)) {
- // 0.0 is quite common, so we special case it by loading
- // 2.0 in `dd` and then subtracting it.
- bool success = vmovd(dd, 2.0, cond);
- CHECK(success);
- vsubd(dd, dd, dd, cond);
- } else {
- if (dd < 16) {
- // Note: Depending on the particular CPU, this may cause register
- // forwarding hazard, negatively impacting the performance.
- SRegister low = static_cast<SRegister>(dd << 1);
- SRegister high = static_cast<SRegister>(low + 1);
- LoadSImmediate(low, bit_cast<float, uint32_t>(Low32Bits(int_value)), cond);
- if (High32Bits(int_value) == Low32Bits(int_value)) {
- vmovs(high, low);
- } else {
- LoadSImmediate(high, bit_cast<float, uint32_t>(High32Bits(int_value)), cond);
- }
- } else {
- LOG(FATAL) << "Unimplemented loading of double into a D register "
- << "that cannot be split into two S registers";
- }
- }
- }
-}
-
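The 0.0 special case above works because 0.0 itself is not representable as a VMOV immediate, while 2.0 is, and x - x is exactly +0.0 for any finite x:

    #include <cassert>

    int main() {
      // Any encodable finite value would do; 2.0 is simply a convenient one.
      double x = 2.0;
      assert(x - x == 0.0);
      return 0;
    }
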
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldLoadOffsetArm.
-void Arm32Assembler::LoadFromOffset(LoadOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldLoadOffsetArm(type, offset)) {
- CHECK(base != IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
- offset = 0;
- }
- CHECK(Address::CanHoldLoadOffsetArm(type, offset));
- switch (type) {
- case kLoadSignedByte:
- ldrsb(reg, Address(base, offset), cond);
- break;
- case kLoadUnsignedByte:
- ldrb(reg, Address(base, offset), cond);
- break;
- case kLoadSignedHalfword:
- ldrsh(reg, Address(base, offset), cond);
- break;
- case kLoadUnsignedHalfword:
- ldrh(reg, Address(base, offset), cond);
- break;
- case kLoadWord:
- ldr(reg, Address(base, offset), cond);
- break;
- case kLoadWordPair:
- ldrd(reg, Address(base, offset), cond);
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-
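When the offset is out of range, the helpers above materialize it in IP first, which is why base must not already be IP. The range check and the movw/movt split in isolation (sketch; assumes the roughly ±4095 mode 2 immediate limit for word loads):

    #include <cassert>
    #include <cstdint>

    int main() {
      // LoadFromOffset(kLoadWord, R0, R1, 0x12345) cannot encode directly,
      // so it expands to approximately:
      //   movw ip, #0x2345
      //   movt ip, #0x1
      //   add  ip, ip, r1
      //   ldr  r0, [ip, #0]
      int32_t offset = 0x12345;
      assert(offset > 4095);                 // beyond the ldr immediate reach
      uint16_t lo = offset & 0xFFFF;
      uint16_t hi = offset >> 16;
      assert(lo == 0x2345 && hi == 0x1);     // movw ip, #lo; movt ip, #hi
      return 0;
    }
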
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldLoadOffsetArm, as expected by JIT::GuardedLoadFromOffset.
-void Arm32Assembler::LoadSFromOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldLoadOffsetArm(kLoadSWord, offset)) {
- CHECK_NE(base, IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
- offset = 0;
- }
- CHECK(Address::CanHoldLoadOffsetArm(kLoadSWord, offset));
- vldrs(reg, Address(base, offset), cond);
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldLoadOffsetArm, as expected by JIT::GuardedLoadFromOffset.
-void Arm32Assembler::LoadDFromOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldLoadOffsetArm(kLoadDWord, offset)) {
- CHECK_NE(base, IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
- offset = 0;
- }
- CHECK(Address::CanHoldLoadOffsetArm(kLoadDWord, offset));
- vldrd(reg, Address(base, offset), cond);
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldStoreOffsetArm.
-void Arm32Assembler::StoreToOffset(StoreOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldStoreOffsetArm(type, offset)) {
- CHECK(reg != IP);
- CHECK(base != IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
- offset = 0;
- }
- CHECK(Address::CanHoldStoreOffsetArm(type, offset));
- switch (type) {
- case kStoreByte:
- strb(reg, Address(base, offset), cond);
- break;
- case kStoreHalfword:
- strh(reg, Address(base, offset), cond);
- break;
- case kStoreWord:
- str(reg, Address(base, offset), cond);
- break;
- case kStoreWordPair:
- strd(reg, Address(base, offset), cond);
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldStoreOffsetArm, as expected by JIT::GuardedStoreToOffset.
-void Arm32Assembler::StoreSToOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldStoreOffsetArm(kStoreSWord, offset)) {
- CHECK_NE(base, IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
- offset = 0;
- }
- CHECK(Address::CanHoldStoreOffsetArm(kStoreSWord, offset));
- vstrs(reg, Address(base, offset), cond);
-}
-
-
-// Implementation note: this method must emit at most one instruction when
-// Address::CanHoldStoreOffsetArm, as expected by JIT::GuardedStoreSToOffset.
-void Arm32Assembler::StoreDToOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond) {
- if (!Address::CanHoldStoreOffsetArm(kStoreDWord, offset)) {
- CHECK_NE(base, IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
- offset = 0;
- }
- CHECK(Address::CanHoldStoreOffsetArm(kStoreDWord, offset));
- vstrd(reg, Address(base, offset), cond);
-}
-
-
-void Arm32Assembler::dmb(DmbOptions flavor) {
- int32_t encoding = 0xf57ff05f; // dmb
- Emit(encoding | flavor);
-}
-
-
-void Arm32Assembler::cbz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "cbz is not supported on ARM32";
-}
-
-
-void Arm32Assembler::cbnz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "cbnz is not supported on ARM32";
-}
-
-
-void Arm32Assembler::CompareAndBranchIfZero(Register r, Label* label) {
- cmp(r, ShifterOperand(0));
- b(label, EQ);
-}
-
-
-void Arm32Assembler::CompareAndBranchIfNonZero(Register r, Label* label) {
- cmp(r, ShifterOperand(0));
- b(label, NE);
-}
-
-JumpTable* Arm32Assembler::CreateJumpTable(std::vector<Label*>&& labels ATTRIBUTE_UNUSED,
- Register base_reg ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "CreateJumpTable is not supported on ARM32";
- UNREACHABLE();
-}
-
-void Arm32Assembler::EmitJumpTableDispatch(JumpTable* jump_table ATTRIBUTE_UNUSED,
- Register displacement_reg ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "EmitJumpTableDispatch is not supported on ARM32";
- UNREACHABLE();
-}
-
-void Arm32Assembler::FinalizeCode() {
- ArmAssembler::FinalizeCode();
- // Currently the arm32 assembler does not support fixups, and thus no tracking. We must not call
- // FinalizeTrackedLabels(), which would lead to an abort.
-}
-
-} // namespace arm
-} // namespace art
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
deleted file mode 100644
index 0cb6b171ce..0000000000
--- a/compiler/utils/arm/assembler_arm32.h
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM32_H_
-#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM32_H_
-
-#include <vector>
-
-#include "base/logging.h"
-#include "constants_arm.h"
-#include "utils/arm/managed_register_arm.h"
-#include "utils/arm/assembler_arm.h"
-#include "offsets.h"
-
-namespace art {
-namespace arm {
-
-class Arm32Assembler FINAL : public ArmAssembler {
- public:
- explicit Arm32Assembler(ArenaAllocator* arena) : ArmAssembler(arena) {}
- virtual ~Arm32Assembler() {}
-
- bool IsThumb() const OVERRIDE {
- return false;
- }
-
- // Data-processing instructions.
- virtual void and_(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void eor(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void sub(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void rsb(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void add(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void adc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void sbc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void rsc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- void tst(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void teq(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void cmp(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- virtual void orr(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void orn(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void mov(Register rd, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void bic(Register rd, Register rn, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void mvn(Register rd, const ShifterOperand& so,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- // Miscellaneous data-processing instructions.
- void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
- void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
- void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void rev(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void rev16(Register rd, Register rm, Condition cond = AL) OVERRIDE;
- void revsh(Register rd, Register rm, Condition cond = AL) OVERRIDE;
-
- // Multiply instructions.
- void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
- void mla(Register rd, Register rn, Register rm, Register ra,
- Condition cond = AL) OVERRIDE;
- void mls(Register rd, Register rn, Register rm, Register ra,
- Condition cond = AL) OVERRIDE;
- void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
- Condition cond = AL) OVERRIDE;
- void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
- Condition cond = AL) OVERRIDE;
-
- void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
- void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
-
- // Bit field extract instructions.
- void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
- void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
-
- // Load/store instructions.
- void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldrb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void strb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldrh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void strh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldrsb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void ldrsh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldrd(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
- void strd(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void ldm(BlockAddressMode am, Register base,
- RegList regs, Condition cond = AL) OVERRIDE;
- void stm(BlockAddressMode am, Register base,
- RegList regs, Condition cond = AL) OVERRIDE;
-
- void ldrex(Register rd, Register rn, Condition cond = AL) OVERRIDE;
- void strex(Register rd, Register rt, Register rn, Condition cond = AL) OVERRIDE;
- void ldrexd(Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
- void strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
-
- // Miscellaneous instructions.
- void clrex(Condition cond = AL) OVERRIDE;
- void nop(Condition cond = AL) OVERRIDE;
-
- // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
- void bkpt(uint16_t imm16) OVERRIDE;
- void svc(uint32_t imm24) OVERRIDE;
-
- void cbz(Register rn, Label* target) OVERRIDE;
- void cbnz(Register rn, Label* target) OVERRIDE;
-
- // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
- void vmovsr(SRegister sn, Register rt, Condition cond = AL) OVERRIDE;
- void vmovrs(Register rt, SRegister sn, Condition cond = AL) OVERRIDE;
- void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL) OVERRIDE;
- void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL) OVERRIDE;
- void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmovs(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmovd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
-
- // Returns false if the immediate cannot be encoded.
- bool vmovs(SRegister sd, float s_imm, Condition cond = AL) OVERRIDE;
- bool vmovd(DRegister dd, double d_imm, Condition cond = AL) OVERRIDE;
-
- void vldrs(SRegister sd, const Address& ad, Condition cond = AL) OVERRIDE;
- void vstrs(SRegister sd, const Address& ad, Condition cond = AL) OVERRIDE;
- void vldrd(DRegister dd, const Address& ad, Condition cond = AL) OVERRIDE;
- void vstrd(DRegister dd, const Address& ad, Condition cond = AL) OVERRIDE;
-
- void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
- void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
- void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
-
- void vabss(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vabsd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vnegs(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vnegd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vsqrts(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
-
- void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcvtds(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtis(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtid(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtus(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtud(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
-
- void vcmps(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
- void vcmpd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
- void vcmpsz(SRegister sd, Condition cond = AL) OVERRIDE;
- void vcmpdz(DRegister dd, Condition cond = AL) OVERRIDE;
- void vmstat(Condition cond = AL) OVERRIDE; // VMRS APSR_nzcv, FPSCR
-
- void vcntd(DRegister dd, DRegister dm) OVERRIDE;
- void vpaddld(DRegister dd, DRegister dm, int32_t size, bool is_unsigned) OVERRIDE;
-
- void vpushs(SRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vpushd(DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vpops(SRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vpopd(DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vldmiad(Register base_reg, DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
- void vstmiad(Register base_reg, DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
-
- // Branch instructions.
- void b(Label* label, Condition cond = AL) OVERRIDE;
- void bl(Label* label, Condition cond = AL) OVERRIDE;
- void blx(Register rm, Condition cond = AL) OVERRIDE;
- void bx(Register rm, Condition cond = AL) OVERRIDE;
- virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Asr(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Ror(Register rd, Register rm, uint32_t shift_imm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Rrx(Register rd, Register rm,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- virtual void Lsl(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Lsr(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Asr(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- virtual void Ror(Register rd, Register rm, Register rn,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- void Push(Register rd, Condition cond = AL) OVERRIDE;
- void Pop(Register rd, Condition cond = AL) OVERRIDE;
-
- void PushList(RegList regs, Condition cond = AL) OVERRIDE;
- void PopList(RegList regs, Condition cond = AL) OVERRIDE;
-
- void Mov(Register rd, Register rm, Condition cond = AL) OVERRIDE;
-
- void CompareAndBranchIfZero(Register r, Label* label) OVERRIDE;
- void CompareAndBranchIfNonZero(Register r, Label* label) OVERRIDE;
-
- // Memory barriers.
- void dmb(DmbOptions flavor) OVERRIDE;
-
- // Get the final position of a label after local fixup based on the old position
- // recorded before FinalizeCode().
- uint32_t GetAdjustedPosition(uint32_t old_position) OVERRIDE;
-
- Literal* NewLiteral(size_t size, const uint8_t* data) OVERRIDE;
- void LoadLiteral(Register rt, Literal* literal) OVERRIDE;
- void LoadLiteral(Register rt, Register rt2, Literal* literal) OVERRIDE;
- void LoadLiteral(SRegister sd, Literal* literal) OVERRIDE;
- void LoadLiteral(DRegister dd, Literal* literal) OVERRIDE;
-
- // Add signed constant value to rd. May clobber IP.
- void AddConstant(Register rd, Register rn, int32_t value,
- Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
-
- void CmpConstant(Register rn, int32_t value, Condition cond = AL) OVERRIDE;
-
- // Load and Store. May clobber IP.
- void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadDImmediate(DRegister dd, double value, Condition cond = AL) OVERRIDE;
- void MarkExceptionHandler(Label* label) OVERRIDE;
- void LoadFromOffset(LoadOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void StoreToOffset(StoreOperandType type,
- Register reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void LoadSFromOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void StoreSToOffset(SRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void LoadDFromOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
- void StoreDToOffset(DRegister reg,
- Register base,
- int32_t offset,
- Condition cond = AL) OVERRIDE;
-
- bool ShifterOperandCanHold(Register rd,
- Register rn,
- Opcode opcode,
- uint32_t immediate,
- SetCc set_cc,
- ShifterOperand* shifter_op) OVERRIDE;
- using ArmAssembler::ShifterOperandCanHold; // Don't hide the non-virtual override.
-
- bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
-
- static bool IsInstructionForExceptionHandling(uintptr_t pc);
-
- // Emit data (e.g. encoded instruction or immediate) to the
- // instruction stream.
- void Emit(int32_t value);
- void Bind(Label* label) OVERRIDE;
-
- JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE;
- void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE;
-
- void FinalizeCode() OVERRIDE;
-
- private:
- void EmitType01(Condition cond,
- int type,
- Opcode opcode,
- SetCc set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
-
- void EmitType5(Condition cond, int offset, bool link);
-
- void EmitMemOp(Condition cond,
- bool load,
- bool byte,
- Register rd,
- const Address& ad);
-
- void EmitMemOpAddressMode3(Condition cond,
- int32_t mode,
- Register rd,
- const Address& ad);
-
- void EmitMultiMemOp(Condition cond,
- BlockAddressMode am,
- bool load,
- Register base,
- RegList regs);
-
- void EmitShiftImmediate(Condition cond,
- Shift opcode,
- Register rd,
- Register rm,
- const ShifterOperand& so);
-
- void EmitShiftRegister(Condition cond,
- Shift opcode,
- Register rd,
- Register rm,
- const ShifterOperand& so);
-
- void EmitMulOp(Condition cond,
- int32_t opcode,
- Register rd,
- Register rn,
- Register rm,
- Register rs);
-
- void EmitVFPsss(Condition cond,
- int32_t opcode,
- SRegister sd,
- SRegister sn,
- SRegister sm);
-
- void EmitVFPddd(Condition cond,
- int32_t opcode,
- DRegister dd,
- DRegister dn,
- DRegister dm);
-
- void EmitVFPsd(Condition cond,
- int32_t opcode,
- SRegister sd,
- DRegister dm);
-
- void EmitVFPds(Condition cond,
- int32_t opcode,
- DRegister dd,
- SRegister sm);
-
- void EmitVPushPop(uint32_t reg, int nregs, bool push, bool dbl, Condition cond);
-
- void EmitMiscellaneous(Condition cond, uint8_t op1, uint8_t op2,
- uint32_t a_part, uint32_t rest);
- void EmitReverseBytes(Register rd, Register rm, Condition cond,
- uint8_t op1, uint8_t op2);
-
- void EmitBranch(Condition cond, Label* label, bool link);
- static int32_t EncodeBranchOffset(int offset, int32_t inst);
- static int DecodeBranchOffset(int32_t inst);
- bool ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op);
-};
-
-} // namespace arm
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM32_H_
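Aside on the deleted interface: every data-processing method takes a trailing (cond, set_cc) pair defaulting to (AL, kCcDontCare), so the common unconditional, flag-agnostic call stays short. A hedged sketch of that calling convention, assuming an ArmAssembler* named assembler (the comments show the mnemonic each call is expected to produce; this is illustrative, not part of the change):

    assembler->add(R0, R1, ShifterOperand(R2));              // add   r0, r1, r2
    assembler->add(R0, R1, ShifterOperand(R2), EQ);          // addeq r0, r1, r2
    assembler->add(R0, R1, ShifterOperand(R2), AL, kCcSet);  // adds  r0, r1, r2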
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
deleted file mode 100644
index b214062e18..0000000000
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_arm32.h"
-
-#include <functional>
-#include <type_traits>
-
-#include "base/macros.h"
-#include "base/stl_util.h"
-#include "utils/arm/assembler_arm_test.h"
-
-namespace art {
-
-using std::placeholders::_1;
-using std::placeholders::_2;
-using std::placeholders::_3;
-using std::placeholders::_4;
-using std::placeholders::_5;
-
-// To speed up tests, don't use all register combinations.
-static constexpr bool kUseSparseRegisterList = true;
-
-// To speed up tests, don't use all condition codes.
-static constexpr bool kUseSparseConditionList = true;
-
-// To speed up tests, don't use all shift immediates.
-static constexpr bool kUseSparseShiftImmediates = true;
-
-class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
- arm::Register, arm::SRegister,
- uint32_t, arm::ShifterOperand, arm::Condition,
- arm::SetCc> {
- protected:
- std::string GetArchitectureString() OVERRIDE {
- return "arm";
- }
-
- std::string GetAssemblerParameters() OVERRIDE {
- // ARMv7-A, Cortex-A15 (so we have sdiv).
- return " -march=armv7-a -mcpu=cortex-a15 -mfpu=neon";
- }
-
- const char* GetAssemblyHeader() OVERRIDE {
- return kArm32AssemblyHeader;
- }
-
- std::string GetDisassembleParameters() OVERRIDE {
- return " -D -bbinary -marm --no-show-raw-insn";
- }
-
- void SetUpHelpers() OVERRIDE {
- if (registers_.size() == 0) {
- if (kUseSparseRegisterList) {
- registers_.insert(end(registers_),
- { // NOLINT(whitespace/braces)
- new arm::Register(arm::R0),
- new arm::Register(arm::R1),
- new arm::Register(arm::R4),
- new arm::Register(arm::R8),
- new arm::Register(arm::R11),
- new arm::Register(arm::R12),
- new arm::Register(arm::R13),
- new arm::Register(arm::R14),
- new arm::Register(arm::R15)
- });
- } else {
- registers_.insert(end(registers_),
- { // NOLINT(whitespace/braces)
- new arm::Register(arm::R0),
- new arm::Register(arm::R1),
- new arm::Register(arm::R2),
- new arm::Register(arm::R3),
- new arm::Register(arm::R4),
- new arm::Register(arm::R5),
- new arm::Register(arm::R6),
- new arm::Register(arm::R7),
- new arm::Register(arm::R8),
- new arm::Register(arm::R9),
- new arm::Register(arm::R10),
- new arm::Register(arm::R11),
- new arm::Register(arm::R12),
- new arm::Register(arm::R13),
- new arm::Register(arm::R14),
- new arm::Register(arm::R15)
- });
- }
- }
-
- if (!kUseSparseConditionList) {
- conditions_.push_back(arm::Condition::EQ);
- conditions_.push_back(arm::Condition::NE);
- conditions_.push_back(arm::Condition::CS);
- conditions_.push_back(arm::Condition::CC);
- conditions_.push_back(arm::Condition::MI);
- conditions_.push_back(arm::Condition::PL);
- conditions_.push_back(arm::Condition::VS);
- conditions_.push_back(arm::Condition::VC);
- conditions_.push_back(arm::Condition::HI);
- conditions_.push_back(arm::Condition::LS);
- conditions_.push_back(arm::Condition::GE);
- conditions_.push_back(arm::Condition::LT);
- conditions_.push_back(arm::Condition::GT);
- conditions_.push_back(arm::Condition::LE);
- conditions_.push_back(arm::Condition::AL);
- } else {
- conditions_.push_back(arm::Condition::EQ);
- conditions_.push_back(arm::Condition::NE);
- conditions_.push_back(arm::Condition::CC);
- conditions_.push_back(arm::Condition::VC);
- conditions_.push_back(arm::Condition::HI);
- conditions_.push_back(arm::Condition::LT);
- conditions_.push_back(arm::Condition::AL);
- }
-
- set_ccs_.push_back(arm::kCcDontCare);
- set_ccs_.push_back(arm::kCcSet);
- set_ccs_.push_back(arm::kCcKeep);
-
- shifter_operands_.push_back(arm::ShifterOperand(0));
- shifter_operands_.push_back(arm::ShifterOperand(1));
- shifter_operands_.push_back(arm::ShifterOperand(2));
- shifter_operands_.push_back(arm::ShifterOperand(3));
- shifter_operands_.push_back(arm::ShifterOperand(4));
- shifter_operands_.push_back(arm::ShifterOperand(5));
- shifter_operands_.push_back(arm::ShifterOperand(127));
- shifter_operands_.push_back(arm::ShifterOperand(128));
- shifter_operands_.push_back(arm::ShifterOperand(254));
- shifter_operands_.push_back(arm::ShifterOperand(255));
-
- if (!kUseSparseRegisterList) {
- shifter_operands_.push_back(arm::ShifterOperand(arm::R0));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R1));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R2));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R3));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R4));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R5));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R6));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R7));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R8));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R9));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R10));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R11));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R12));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R13));
- } else {
- shifter_operands_.push_back(arm::ShifterOperand(arm::R0));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R1));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R4));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R8));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R11));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R12));
- shifter_operands_.push_back(arm::ShifterOperand(arm::R13));
- }
-
- std::vector<arm::Shift> shifts {
- arm::Shift::LSL, arm::Shift::LSR, arm::Shift::ASR, arm::Shift::ROR, arm::Shift::RRX
- };
-
- // ShifterOperands of the form "reg shift-type imm".
- for (arm::Shift shift : shifts) {
- for (arm::Register* reg : registers_) { // Note: this will pick up the sparse set.
- if (*reg == arm::R15) { // Skip PC.
- continue;
- }
- if (shift != arm::Shift::RRX) {
- if (!kUseSparseShiftImmediates) {
- for (uint32_t imm = 1; imm < 32; ++imm) {
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, imm));
- }
- } else {
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 1));
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 2));
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 3));
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 7));
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 15));
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 16));
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 30));
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 31));
- }
- } else {
- // RRX doesn't have an immediate.
- shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 0));
- }
- }
- }
- }
-
- std::vector<arm::ShifterOperand> CreateRegisterShifts(std::vector<arm::Register*>& base_regs,
- int32_t shift_min, int32_t shift_max) {
- std::vector<arm::ShifterOperand> res;
- static constexpr arm::Shift kShifts[] = { arm::Shift::LSL, arm::Shift::LSR, arm::Shift::ASR,
- arm::Shift::ROR };
-
- for (arm::Shift shift : kShifts) {
- for (arm::Register* reg : base_regs) {
- // Take the min, the max, and three values in between.
- res.push_back(arm::ShifterOperand(*reg, shift, shift_min));
- if (shift_min != shift_max) {
- res.push_back(arm::ShifterOperand(*reg, shift, shift_max));
- int32_t middle = (shift_min + shift_max) / 2;
- res.push_back(arm::ShifterOperand(*reg, shift, middle));
- res.push_back(arm::ShifterOperand(*reg, shift, middle - 1));
- res.push_back(arm::ShifterOperand(*reg, shift, middle + 1));
- }
- }
- }
-
- return res;
- }
-
- void TearDown() OVERRIDE {
- AssemblerArmTest::TearDown();
- STLDeleteElements(&registers_);
- }
-
- std::vector<arm::Register*> GetRegisters() OVERRIDE {
- return registers_;
- }
-
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
- return imm_value;
- }
-
- std::vector<arm::Condition>& GetConditions() OVERRIDE {
- return conditions_;
- }
-
- std::string GetConditionString(arm::Condition c) OVERRIDE {
- std::ostringstream oss;
- oss << c;
- return oss.str();
- }
-
- std::vector<arm::SetCc>& GetSetCcs() OVERRIDE {
- return set_ccs_;
- }
-
- std::string GetSetCcString(arm::SetCc s) OVERRIDE {
- // For arm32, kCcDontCare defaults to not setting condition codes.
- return s == arm::kCcSet ? "s" : "";
- }
-
- arm::Register GetPCRegister() OVERRIDE {
- return arm::R15;
- }
-
- std::vector<arm::ShifterOperand>& GetShiftOperands() OVERRIDE {
- return shifter_operands_;
- }
-
- std::string GetShiftString(arm::ShifterOperand sop) OVERRIDE {
- std::ostringstream oss;
- if (sop.IsShift()) {
- // Not a rotate...
- if (sop.GetShift() == arm::Shift::RRX) {
- oss << sop.GetRegister() << ", " << sop.GetShift();
- } else {
- oss << sop.GetRegister() << ", " << sop.GetShift() << " #" << sop.GetImmediate();
- }
- } else if (sop.IsRegister()) {
- oss << sop.GetRegister();
- } else {
- CHECK(sop.IsImmediate());
- oss << "#" << sop.GetImmediate();
- }
- return oss.str();
- }
-
- static const char* GetRegTokenFromDepth(int depth) {
- switch (depth) {
- case 0:
- return Base::REG1_TOKEN;
- case 1:
- return Base::REG2_TOKEN;
- case 2:
- return Base::REG3_TOKEN;
- case 3:
- return REG4_TOKEN;
- default:
- LOG(FATAL) << "Depth problem.";
- UNREACHABLE();
- }
- }
-
- void ExecuteAndPrint(std::function<void()> f, std::string fmt, std::ostringstream& oss) {
- if (first_) {
- first_ = false;
- } else {
- oss << "\n";
- }
- oss << fmt;
-
- f();
- }
-
- // NOTE: Only supports simple filters of the form "aaa=bbb".
- bool EvalFilterString(std::string filter) {
- if (filter.compare("") == 0) {
- return false;
- }
-
- size_t equal_sign_index = filter.find('=');
- if (equal_sign_index == std::string::npos) {
- EXPECT_TRUE(false) << "Unsupported filter string.";
- }
-
- std::string lhs = filter.substr(0, equal_sign_index);
- std::string rhs = filter.substr(equal_sign_index + 1, std::string::npos);
- return lhs.compare(rhs) == 0;
- }
-
- void TemplateHelper(std::function<void(arm::Register)> f, int depth ATTRIBUTE_UNUSED,
- bool without_pc, std::string fmt, std::string filter,
- std::ostringstream& oss) {
- std::vector<arm::Register*> registers = without_pc ? GetRegistersWithoutPC() : GetRegisters();
- for (auto reg : registers) {
- std::string after_reg = fmt;
- std::string after_reg_filter = filter;
-
- std::string reg_string = GetRegName<RegisterView::kUsePrimaryName>(*reg);
- size_t reg_index;
- const char* reg_token = GetRegTokenFromDepth(depth);
-
- while ((reg_index = after_reg.find(reg_token)) != std::string::npos) {
- after_reg.replace(reg_index, strlen(reg_token), reg_string);
- }
-
- while ((reg_index = after_reg_filter.find(reg_token)) != std::string::npos) {
- after_reg_filter.replace(reg_index, strlen(reg_token), reg_string);
- }
- if (EvalFilterString(after_reg_filter)) {
- continue;
- }
-
- ExecuteAndPrint([&] () { f(*reg); }, after_reg, oss);
- }
- }
-
- void TemplateHelper(std::function<void(const arm::ShifterOperand&)> f, int depth ATTRIBUTE_UNUSED,
- bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::string filter,
- std::ostringstream& oss) {
- for (const arm::ShifterOperand& shift : GetShiftOperands()) {
- std::string after_shift = fmt;
- std::string after_shift_filter = filter;
-
- std::string shift_string = GetShiftString(shift);
- size_t shift_index;
- while ((shift_index = after_shift.find(SHIFT_TOKEN)) != std::string::npos) {
- after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
- }
-
- while ((shift_index = after_shift_filter.find(SHIFT_TOKEN)) != std::string::npos) {
- after_shift_filter.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
- }
- if (EvalFilterString(after_shift_filter)) {
- continue;
- }
-
- ExecuteAndPrint([&] () { f(shift); }, after_shift, oss);
- }
- }
-
- void TemplateHelper(std::function<void(arm::Condition)> f, int depth ATTRIBUTE_UNUSED,
- bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::string filter,
- std::ostringstream& oss) {
- for (arm::Condition c : GetConditions()) {
- std::string after_cond = fmt;
- std::string after_cond_filter = filter;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- cond_index = after_cond_filter.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond_filter.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
- if (EvalFilterString(after_cond_filter)) {
- continue;
- }
-
- ExecuteAndPrint([&] () { f(c); }, after_cond, oss);
- }
- }
-
- void TemplateHelper(std::function<void(arm::SetCc)> f, int depth ATTRIBUTE_UNUSED,
- bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::string filter,
- std::ostringstream& oss) {
- for (arm::SetCc s : GetSetCcs()) {
- std::string after_cond = fmt;
- std::string after_cond_filter = filter;
-
- size_t cond_index = after_cond.find(SET_CC_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
- }
-
- cond_index = after_cond_filter.find(SET_CC_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond_filter.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
- }
- if (EvalFilterString(after_cond_filter)) {
- continue;
- }
-
- ExecuteAndPrint([&] () { f(s); }, after_cond, oss);
- }
- }
-
- template <typename... Args>
- void TemplateHelper(std::function<void(arm::Register, Args...)> f, int depth, bool without_pc,
- std::string fmt, std::string filter, std::ostringstream& oss) {
- std::vector<arm::Register*> registers = without_pc ? GetRegistersWithoutPC() : GetRegisters();
- for (auto reg : registers) {
- std::string after_reg = fmt;
- std::string after_reg_filter = filter;
-
- std::string reg_string = GetRegName<RegisterView::kUsePrimaryName>(*reg);
- size_t reg_index;
- const char* reg_token = GetRegTokenFromDepth(depth);
-
- while ((reg_index = after_reg.find(reg_token)) != std::string::npos) {
- after_reg.replace(reg_index, strlen(reg_token), reg_string);
- }
-
- while ((reg_index = after_reg_filter.find(reg_token)) != std::string::npos) {
- after_reg_filter.replace(reg_index, strlen(reg_token), reg_string);
- }
- if (EvalFilterString(after_reg_filter)) {
- continue;
- }
-
- auto lambda = [&] (Args... args) { f(*reg, args...); }; // NOLINT [readability/braces] [4]
- TemplateHelper(std::function<void(Args...)>(lambda), depth + 1, without_pc,
- after_reg, after_reg_filter, oss);
- }
- }
-
- template <typename... Args>
- void TemplateHelper(std::function<void(const arm::ShifterOperand&, Args...)> f, int depth,
- bool without_pc, std::string fmt, std::string filter,
- std::ostringstream& oss) {
- for (const arm::ShifterOperand& shift : GetShiftOperands()) {
- std::string after_shift = fmt;
- std::string after_shift_filter = filter;
-
- std::string shift_string = GetShiftString(shift);
- size_t shift_index;
- while ((shift_index = after_shift.find(SHIFT_TOKEN)) != std::string::npos) {
- after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
- }
-
- while ((shift_index = after_shift_filter.find(SHIFT_TOKEN)) != std::string::npos) {
- after_shift_filter.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
- }
- if (EvalFilterString(after_shift_filter)) {
- continue;
- }
-
- auto lambda = [&] (Args... args) { f(shift, args...); }; // NOLINT [readability/braces] [4]
- TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
- after_shift, after_shift_filter, oss);
- }
- }
-
- template <typename... Args>
- void TemplateHelper(std::function<void(arm::Condition, Args...)> f, int depth, bool without_pc,
- std::string fmt, std::string filter, std::ostringstream& oss) {
- for (arm::Condition c : GetConditions()) {
- std::string after_cond = fmt;
- std::string after_cond_filter = filter;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- cond_index = after_cond_filter.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond_filter.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
- if (EvalFilterString(after_cond_filter)) {
- continue;
- }
-
- auto lambda = [&] (Args... args) { f(c, args...); }; // NOLINT [readability/braces] [4]
- TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
- after_cond, after_cond_filter, oss);
- }
- }
-
- template <typename... Args>
- void TemplateHelper(std::function<void(arm::SetCc, Args...)> f, int depth, bool without_pc,
- std::string fmt, std::string filter, std::ostringstream& oss) {
- for (arm::SetCc s : GetSetCcs()) {
- std::string after_cond = fmt;
- std::string after_cond_filter = filter;
-
- size_t cond_index = after_cond.find(SET_CC_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
- }
-
- cond_index = after_cond_filter.find(SET_CC_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond_filter.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
- }
- if (EvalFilterString(after_cond_filter)) {
- continue;
- }
-
- auto lambda = [&] (Args... args) { f(s, args...); }; // NOLINT [readability/braces] [4]
- TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
- after_cond, after_cond_filter, oss);
- }
- }
-
- template <typename Assembler, typename T1, typename T2>
- std::function<void(T1, T2)> GetBoundFunction2(void (Assembler::*f)(T1, T2)) {
- return std::bind(f, GetAssembler(), _1, _2);
- }
-
- template <typename Assembler, typename T1, typename T2, typename T3>
- std::function<void(T1, T2, T3)> GetBoundFunction3(void (Assembler::*f)(T1, T2, T3)) {
- return std::bind(f, GetAssembler(), _1, _2, _3);
- }
-
- template <typename Assembler, typename T1, typename T2, typename T3, typename T4>
- std::function<void(T1, T2, T3, T4)> GetBoundFunction4(
- void (Assembler::*f)(T1, T2, T3, T4)) {
- return std::bind(f, GetAssembler(), _1, _2, _3, _4);
- }
-
- template <typename Assembler, typename T1, typename T2, typename T3, typename T4, typename T5>
- std::function<void(T1, T2, T3, T4, T5)> GetBoundFunction5(
- void (Assembler::*f)(T1, T2, T3, T4, T5)) {
- return std::bind(f, GetAssembler(), _1, _2, _3, _4, _5);
- }
-
- template <typename... Args>
- void GenericTemplateHelper(std::function<void(Args...)> f, bool without_pc,
- std::string fmt, std::string test_name, std::string filter) {
- first_ = false;
- WarnOnCombinations(CountHelper<Args...>(without_pc));
-
- std::ostringstream oss;
-
- TemplateHelper(f, 0, without_pc, fmt, filter, oss);
-
- oss << "\n"; // Trailing newline.
-
- DriverStr(oss.str(), test_name);
- }
-
- template <typename Assembler, typename... Args>
- void T2Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name, std::string filter = "") {
- GenericTemplateHelper(GetBoundFunction2(f), without_pc, fmt, test_name, filter);
- }
-
- template <typename Assembler, typename... Args>
- void T3Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name, std::string filter = "") {
- GenericTemplateHelper(GetBoundFunction3(f), without_pc, fmt, test_name, filter);
- }
-
- template <typename Assembler, typename... Args>
- void T4Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name, std::string filter = "") {
- GenericTemplateHelper(GetBoundFunction4(f), without_pc, fmt, test_name, filter);
- }
-
- template <typename Assembler, typename... Args>
- void T5Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name, std::string filter = "") {
- GenericTemplateHelper(GetBoundFunction5(f), without_pc, fmt, test_name, filter);
- }
-
- private:
- template <typename T>
- size_t CountHelper(bool without_pc) {
- size_t tmp;
- if (std::is_same<T, arm::Register>::value) {
- tmp = GetRegisters().size();
- if (without_pc) {
- tmp--;  // Approximation...
- }
- return tmp;
- } else if (std::is_same<T, const arm::ShifterOperand&>::value) {
- return GetShiftOperands().size();
- } else if (std::is_same<T, arm::Condition>::value) {
- return GetConditions().size();
- } else {
- LOG(WARNING) << "Unknown type while counting.";
- return 1;
- }
- }
-
- template <typename T1, typename T2, typename... Args>
- size_t CountHelper(bool without_pc) {
- size_t tmp;
- if (std::is_same<T1, arm::Register>::value) {
- tmp = GetRegisters().size();
- if (without_pc) {
- tmp--;  // Approximation...
- }
- } else if (std::is_same<T1, const arm::ShifterOperand&>::value) {
- tmp = GetShiftOperands().size();
- } else if (std::is_same<T1, arm::Condition>::value) {
- tmp = GetConditions().size();
- } else {
- LOG(WARNING) << "Unknown type while counting.";
- tmp = 1;
- }
- size_t rec = CountHelper<T2, Args...>(without_pc);
- return rec * tmp;
- }
-
- bool first_;
-
- static constexpr const char* kArm32AssemblyHeader = ".arm\n";
-
- std::vector<arm::Register*> registers_;
- std::vector<arm::Condition> conditions_;
- std::vector<arm::SetCc> set_ccs_;
- std::vector<arm::ShifterOperand> shifter_operands_;
-};
-
-
-TEST_F(AssemblerArm32Test, Toolchain) {
- EXPECT_TRUE(CheckTools());
-}
-
-TEST_F(AssemblerArm32Test, Sbfx) {
- std::vector<std::pair<uint32_t, uint32_t>> immediates;
- immediates.push_back({0, 1});
- immediates.push_back({0, 8});
- immediates.push_back({0, 15});
- immediates.push_back({0, 16});
- immediates.push_back({0, 31});
- immediates.push_back({0, 32});
-
- immediates.push_back({1, 1});
- immediates.push_back({1, 15});
- immediates.push_back({1, 31});
-
- immediates.push_back({8, 1});
- immediates.push_back({8, 15});
- immediates.push_back({8, 16});
- immediates.push_back({8, 24});
-
- immediates.push_back({31, 1});
-
- DriverStr(RepeatRRiiC(&arm::Arm32Assembler::sbfx, immediates,
- "sbfx{cond} {reg1}, {reg2}, #{imm1}, #{imm2}"), "sbfx");
-}
-
-TEST_F(AssemblerArm32Test, Ubfx) {
- std::vector<std::pair<uint32_t, uint32_t>> immediates;
- immediates.push_back({0, 1});
- immediates.push_back({0, 8});
- immediates.push_back({0, 15});
- immediates.push_back({0, 16});
- immediates.push_back({0, 31});
- immediates.push_back({0, 32});
-
- immediates.push_back({1, 1});
- immediates.push_back({1, 15});
- immediates.push_back({1, 31});
-
- immediates.push_back({8, 1});
- immediates.push_back({8, 15});
- immediates.push_back({8, 16});
- immediates.push_back({8, 24});
-
- immediates.push_back({31, 1});
-
- DriverStr(RepeatRRiiC(&arm::Arm32Assembler::ubfx, immediates,
- "ubfx{cond} {reg1}, {reg2}, #{imm1}, #{imm2}"), "ubfx");
-}
-
-TEST_F(AssemblerArm32Test, Mul) {
- T4Helper(&arm::Arm32Assembler::mul, true, "mul{cond} {reg1}, {reg2}, {reg3}", "mul");
-}
-
-TEST_F(AssemblerArm32Test, Mla) {
- T5Helper(&arm::Arm32Assembler::mla, true, "mla{cond} {reg1}, {reg2}, {reg3}, {reg4}", "mla");
-}
-
-TEST_F(AssemblerArm32Test, Umull) {
- T5Helper(&arm::Arm32Assembler::umull, true, "umull{cond} {reg1}, {reg2}, {reg3}, {reg4}",
- "umull", "{reg1}={reg2}"); // Skip the cases where reg1 == reg2.
-}
-
-TEST_F(AssemblerArm32Test, Smull) {
- T5Helper(&arm::Arm32Assembler::smull, true, "smull{cond} {reg1}, {reg2}, {reg3}, {reg4}",
- "smull", "{reg1}={reg2}"); // Skip the cases where reg1 == reg2.
-}
-
-TEST_F(AssemblerArm32Test, Sdiv) {
- T4Helper(&arm::Arm32Assembler::sdiv, true, "sdiv{cond} {reg1}, {reg2}, {reg3}", "sdiv");
-}
-
-TEST_F(AssemblerArm32Test, Udiv) {
- T4Helper(&arm::Arm32Assembler::udiv, true, "udiv{cond} {reg1}, {reg2}, {reg3}", "udiv");
-}
-
-TEST_F(AssemblerArm32Test, And) {
- T5Helper(&arm::Arm32Assembler::and_, true, "and{cond}{s} {reg1}, {reg2}, {shift}", "and");
-}
-
-TEST_F(AssemblerArm32Test, Ands) {
- T4Helper(&arm::Arm32Assembler::ands, true, "and{cond}s {reg1}, {reg2}, {shift}", "ands");
-}
-
-TEST_F(AssemblerArm32Test, Eor) {
- T5Helper(&arm::Arm32Assembler::eor, true, "eor{cond}{s} {reg1}, {reg2}, {shift}", "eor");
-}
-
-TEST_F(AssemblerArm32Test, Eors) {
- T4Helper(&arm::Arm32Assembler::eors, true, "eor{cond}s {reg1}, {reg2}, {shift}", "eors");
-}
-
-TEST_F(AssemblerArm32Test, Orr) {
- T5Helper(&arm::Arm32Assembler::orr, true, "orr{cond}{s} {reg1}, {reg2}, {shift}", "orr");
-}
-
-TEST_F(AssemblerArm32Test, Orrs) {
- T4Helper(&arm::Arm32Assembler::orrs, true, "orr{cond}s {reg1}, {reg2}, {shift}", "orrs");
-}
-
-TEST_F(AssemblerArm32Test, Bic) {
- T5Helper(&arm::Arm32Assembler::bic, true, "bic{cond}{s} {reg1}, {reg2}, {shift}", "bic");
-}
-
-TEST_F(AssemblerArm32Test, Bics) {
- T4Helper(&arm::Arm32Assembler::bics, true, "bic{cond}s {reg1}, {reg2}, {shift}", "bics");
-}
-
-TEST_F(AssemblerArm32Test, Mov) {
- T4Helper(&arm::Arm32Assembler::mov, true, "mov{cond}{s} {reg1}, {shift}", "mov");
-}
-
-TEST_F(AssemblerArm32Test, Movs) {
- T3Helper(&arm::Arm32Assembler::movs, true, "mov{cond}s {reg1}, {shift}", "movs");
-}
-
-TEST_F(AssemblerArm32Test, Mvn) {
- T4Helper(&arm::Arm32Assembler::mvn, true, "mvn{cond}{s} {reg1}, {shift}", "mvn");
-}
-
-TEST_F(AssemblerArm32Test, Mvns) {
- T3Helper(&arm::Arm32Assembler::mvns, true, "mvn{cond}s {reg1}, {shift}", "mvns");
-}
-
-TEST_F(AssemblerArm32Test, Add) {
- T5Helper(&arm::Arm32Assembler::add, false, "add{cond}{s} {reg1}, {reg2}, {shift}", "add");
-}
-
-TEST_F(AssemblerArm32Test, Adds) {
- T4Helper(&arm::Arm32Assembler::adds, false, "add{cond}s {reg1}, {reg2}, {shift}", "adds");
-}
-
-TEST_F(AssemblerArm32Test, Adc) {
- T5Helper(&arm::Arm32Assembler::adc, false, "adc{cond}{s} {reg1}, {reg2}, {shift}", "adc");
-}
-
-TEST_F(AssemblerArm32Test, Adcs) {
- T4Helper(&arm::Arm32Assembler::adcs, false, "adc{cond}s {reg1}, {reg2}, {shift}", "adcs");
-}
-
-TEST_F(AssemblerArm32Test, Sub) {
- T5Helper(&arm::Arm32Assembler::sub, false, "sub{cond}{s} {reg1}, {reg2}, {shift}", "sub");
-}
-
-TEST_F(AssemblerArm32Test, Subs) {
- T4Helper(&arm::Arm32Assembler::subs, false, "sub{cond}s {reg1}, {reg2}, {shift}", "subs");
-}
-
-TEST_F(AssemblerArm32Test, Sbc) {
- T5Helper(&arm::Arm32Assembler::sbc, false, "sbc{cond}{s} {reg1}, {reg2}, {shift}", "sbc");
-}
-
-TEST_F(AssemblerArm32Test, Sbcs) {
- T4Helper(&arm::Arm32Assembler::sbcs, false, "sbc{cond}s {reg1}, {reg2}, {shift}", "sbcs");
-}
-
-TEST_F(AssemblerArm32Test, Rsb) {
- T5Helper(&arm::Arm32Assembler::rsb, true, "rsb{cond}{s} {reg1}, {reg2}, {shift}", "rsb");
-}
-
-TEST_F(AssemblerArm32Test, Rsbs) {
- T4Helper(&arm::Arm32Assembler::rsbs, true, "rsb{cond}s {reg1}, {reg2}, {shift}", "rsbs");
-}
-
-TEST_F(AssemblerArm32Test, Rsc) {
- T5Helper(&arm::Arm32Assembler::rsc, true, "rsc{cond}{s} {reg1}, {reg2}, {shift}", "rsc");
-}
-
-TEST_F(AssemblerArm32Test, Rscs) {
- T4Helper(&arm::Arm32Assembler::rscs, false, "rsc{cond}s {reg1}, {reg2}, {shift}", "rscs");
-}
-
-/* TODO: Need better filter support.
-TEST_F(AssemblerArm32Test, Strex) {
- T4Helper(&arm::Arm32Assembler::strex, "strex{cond} {reg1}, {reg2}, [{reg3}]", "strex",
- "{reg1}={reg2}||{reg1}={reg3}"); // Skip the cases where reg1 == reg2 || reg1 == reg3.
-}
-*/
-
-TEST_F(AssemblerArm32Test, Clz) {
- T3Helper(&arm::Arm32Assembler::clz, true, "clz{cond} {reg1}, {reg2}", "clz");
-}
-
-TEST_F(AssemblerArm32Test, Tst) {
- T3Helper(&arm::Arm32Assembler::tst, true, "tst{cond} {reg1}, {shift}", "tst");
-}
-
-TEST_F(AssemblerArm32Test, Teq) {
- T3Helper(&arm::Arm32Assembler::teq, true, "teq{cond} {reg1}, {shift}", "teq");
-}
-
-TEST_F(AssemblerArm32Test, Cmp) {
- T3Helper(&arm::Arm32Assembler::cmp, true, "cmp{cond} {reg1}, {shift}", "cmp");
-}
-
-TEST_F(AssemblerArm32Test, Cmn) {
- T3Helper(&arm::Arm32Assembler::cmn, true, "cmn{cond} {reg1}, {shift}", "cmn");
-}
-
-TEST_F(AssemblerArm32Test, Blx) {
- T2Helper(&arm::Arm32Assembler::blx, true, "blx{cond} {reg1}", "blx");
-}
-
-TEST_F(AssemblerArm32Test, Bx) {
- T2Helper(&arm::Arm32Assembler::bx, true, "bx{cond} {reg1}", "bx");
-}
-
-TEST_F(AssemblerArm32Test, Vmstat) {
- GetAssembler()->vmstat();
-
- const char* expected = "vmrs APSR_nzcv, FPSCR\n";
-
- DriverStr(expected, "vmrs");
-}
-
-TEST_F(AssemblerArm32Test, ldrexd) {
- GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R0);
- GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R1);
- GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R2);
-
- const char* expected =
- "ldrexd r0, r1, [r0]\n"
- "ldrexd r0, r1, [r1]\n"
- "ldrexd r0, r1, [r2]\n";
- DriverStr(expected, "ldrexd");
-}
-
-TEST_F(AssemblerArm32Test, strexd) {
- GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R0);
- GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R1);
- GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R2);
-
- const char* expected =
- "strexd r9, r0, r1, [r0]\n"
- "strexd r9, r0, r1, [r1]\n"
- "strexd r9, r0, r1, [r2]\n";
- DriverStr(expected, "strexd");
-}
-
-TEST_F(AssemblerArm32Test, rbit) {
- T3Helper(&arm::Arm32Assembler::rbit, true, "rbit{cond} {reg1}, {reg2}", "rbit");
-}
-
-TEST_F(AssemblerArm32Test, rev) {
- T3Helper(&arm::Arm32Assembler::rev, true, "rev{cond} {reg1}, {reg2}", "rev");
-}
-
-TEST_F(AssemblerArm32Test, rev16) {
- T3Helper(&arm::Arm32Assembler::rev16, true, "rev16{cond} {reg1}, {reg2}", "rev16");
-}
-
-TEST_F(AssemblerArm32Test, revsh) {
- T3Helper(&arm::Arm32Assembler::revsh, true, "revsh{cond} {reg1}, {reg2}", "revsh");
-}
-
-TEST_F(AssemblerArm32Test, vcnt) {
- // Different D register numbers are used here to test register encoding.
- // Source register number is encoded as M:Vm; destination register number is encoded as D:Vd.
- // For source and destination registers in D0..D15, the M bit and D bit should be 0.
- // For source and destination registers in D16..D31, the M bit and D bit should be 1.
- GetAssembler()->vcntd(arm::D0, arm::D1);
- GetAssembler()->vcntd(arm::D19, arm::D20);
- GetAssembler()->vcntd(arm::D0, arm::D9);
- GetAssembler()->vcntd(arm::D16, arm::D20);
-
- std::string expected =
- "vcnt.8 d0, d1\n"
- "vcnt.8 d19, d20\n"
- "vcnt.8 d0, d9\n"
- "vcnt.8 d16, d20\n";
-
- DriverStr(expected, "vcnt");
-}
-
-TEST_F(AssemblerArm32Test, vpaddl) {
- // Different D register numbers are used here to test register encoding.
- // Source register number is encoded as M:Vm; destination register number is encoded as D:Vd.
- // For source and destination registers in D0..D15, the M bit and D bit should be 0.
- // For source and destination registers in D16..D31, the M bit and D bit should be 1.
- // Different data types (signed and unsigned) are also tested.
- GetAssembler()->vpaddld(arm::D0, arm::D0, 8, true);
- GetAssembler()->vpaddld(arm::D20, arm::D20, 8, false);
- GetAssembler()->vpaddld(arm::D0, arm::D20, 16, false);
- GetAssembler()->vpaddld(arm::D20, arm::D0, 32, true);
-
- std::string expected =
- "vpaddl.u8 d0, d0\n"
- "vpaddl.s8 d20, d20\n"
- "vpaddl.s16 d0, d20\n"
- "vpaddl.u32 d20, d0\n";
-
- DriverStr(expected, "vpaddl");
-}
-
-} // namespace art
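The filter mechanism in the test harness above is easy to miss in the template plumbing: a filter such as "{reg1}={reg2}" goes through the same token substitution as the format string, and the combination is skipped once both sides compare equal. A self-contained sketch of EvalFilterString's contract, with hypothetical names (EvalFilter is not code from this change):

    #include <cassert>
    #include <string>

    // Sketch of the "lhs=rhs" filter check: after token substitution,
    // equal sides mean the register combination is skipped.
    bool EvalFilter(const std::string& filter) {
      if (filter.empty()) {
        return false;
      }
      size_t eq = filter.find('=');
      assert(eq != std::string::npos && "Unsupported filter string.");
      return filter.substr(0, eq) == filter.substr(eq + 1);
    }

    int main() {
      assert(EvalFilter("r0=r0"));   // reg1 == reg2 after substitution: skip it.
      assert(!EvalFilter("r0=r1"));  // Different registers: keep the combination.
      assert(!EvalFilter(""));       // An empty filter never skips anything.
      return 0;
    }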
diff --git a/compiler/utils/arm/assembler_arm_shared.h b/compiler/utils/arm/assembler_arm_shared.h
new file mode 100644
index 0000000000..21f13eeab7
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm_shared.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
+
+namespace art {
+namespace arm {
+
+enum LoadOperandType {
+ kLoadSignedByte,
+ kLoadUnsignedByte,
+ kLoadSignedHalfword,
+ kLoadUnsignedHalfword,
+ kLoadWord,
+ kLoadWordPair,
+ kLoadSWord,
+ kLoadDWord
+};
+
+enum StoreOperandType {
+ kStoreByte,
+ kStoreHalfword,
+ kStoreWord,
+ kStoreWordPair,
+ kStoreSWord,
+ kStoreDWord
+};
+
+// Set condition codes request.
+enum SetCc {
+ kCcDontCare, // Allows prioritizing 16-bit instructions on Thumb2 whether they set CCs or not.
+ kCcSet,
+ kCcKeep,
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
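The shared SetCc enum is what lets Thumb2 prefer 16-bit encodings: many of them set flags as a side effect, so kCcDontCare callers leave the choice to the assembler, while kCcSet and kCcKeep pin the behavior down. A minimal sketch of the resulting mnemonic suffix, mirroring GetSetCcString() from the deleted arm32 test (SetCcSuffix is a hypothetical name):

    #include <cassert>
    #include <string>

    enum SetCc { kCcDontCare, kCcSet, kCcKeep };

    // Outside an IT block, only kCcSet forces the "s" suffix on the mnemonic.
    std::string SetCcSuffix(SetCc set_cc) {
      return set_cc == kCcSet ? "s" : "";
    }

    int main() {
      assert(SetCcSuffix(kCcSet) == "s");       // "adds": flags must be set.
      assert(SetCcSuffix(kCcKeep) == "");       // "add": flags must be preserved.
      assert(SetCcSuffix(kCcDontCare) == "");   // Assembler may pick either encoding.
      return 0;
    }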
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
new file mode 100644
index 0000000000..3c5973ebe6
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <type_traits>
+
+#include "assembler_arm_vixl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "thread.h"
+
+using namespace vixl::aarch32; // NOLINT(build/namespaces)
+
+namespace art {
+namespace arm {
+
+#ifdef ___
+#error "ARM Assembler macro already defined."
+#else
+#define ___ vixl_masm_.
+#endif
+
+extern const vixl32::Register tr(TR);
+
+void ArmVIXLAssembler::FinalizeCode() {
+ vixl_masm_.FinalizeCode();
+}
+
+size_t ArmVIXLAssembler::CodeSize() const {
+ return vixl_masm_.GetSizeOfCodeGenerated();
+}
+
+const uint8_t* ArmVIXLAssembler::CodeBufferBaseAddress() const {
+ return vixl_masm_.GetStartAddress<uint8_t*>();
+}
+
+void ArmVIXLAssembler::FinalizeInstructions(const MemoryRegion& region) {
+ // Copy the instructions from the buffer.
+ MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+ region.CopyFrom(0, from);
+}
+
+void ArmVIXLAssembler::PoisonHeapReference(vixl::aarch32::Register reg) {
+ // reg = -reg.
+ ___ Rsb(reg, reg, 0);
+}
+
+void ArmVIXLAssembler::UnpoisonHeapReference(vixl::aarch32::Register reg) {
+ // reg = -reg.
+ ___ Rsb(reg, reg, 0);
+}
+
+void ArmVIXLAssembler::MaybeUnpoisonHeapReference(vixl32::Register reg) {
+ if (kPoisonHeapReferences) {
+ UnpoisonHeapReference(reg);
+ }
+}
+
+void ArmVIXLAssembler::LoadImmediate(vixl32::Register rd, int32_t value) {
+ // TODO(VIXL): Implement this optimization in VIXL.
+ if (!ShifterOperandCanAlwaysHold(value) && ShifterOperandCanAlwaysHold(~value)) {
+ ___ Mvn(rd, ~value);
+ } else {
+ ___ Mov(rd, value);
+ }
+}
+
+bool ArmVIXLAssembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+ return vixl_masm_.IsModifiedImmediate(immediate);
+}
+
+bool ArmVIXLAssembler::ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc) {
+ switch (opcode) {
+ case ADD:
+ case SUB:
+ // An immediate of up to 12 bits can be encoded directly if we don't need to set condition codes.
+ if (IsUint<12>(immediate) && set_cc != kCcSet) {
+ return true;
+ }
+ return ShifterOperandCanAlwaysHold(immediate);
+
+ case MOV:
+ // TODO: Support immediates of up to 12 bits.
+ return ShifterOperandCanAlwaysHold(immediate);
+
+ case MVN:
+ default:
+ return ShifterOperandCanAlwaysHold(immediate);
+ }
+}
+
+bool ArmVIXLAssembler::CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
+ int32_t offset,
+ /*out*/ int32_t* add_to_base,
+ /*out*/ int32_t* offset_for_load_store) {
+ int32_t other_bits = offset & ~allowed_offset_bits;
+ if (ShifterOperandCanAlwaysHold(other_bits) || ShifterOperandCanAlwaysHold(-other_bits)) {
+ *add_to_base = offset & ~allowed_offset_bits;
+ *offset_for_load_store = offset & allowed_offset_bits;
+ return true;
+ }
+ return false;
+}
+
+int32_t ArmVIXLAssembler::AdjustLoadStoreOffset(int32_t allowed_offset_bits,
+ vixl32::Register temp,
+ vixl32::Register base,
+ int32_t offset) {
+ DCHECK_NE(offset & ~allowed_offset_bits, 0);
+ int32_t add_to_base, offset_for_load;
+ if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
+ ___ Add(temp, base, add_to_base);
+ return offset_for_load;
+ } else {
+ ___ Mov(temp, offset);
+ ___ Add(temp, temp, base);
+ return 0;
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+int32_t ArmVIXLAssembler::GetAllowedLoadOffsetBits(LoadOperandType type) {
+ switch (type) {
+ case kLoadSignedByte:
+ case kLoadSignedHalfword:
+ case kLoadUnsignedHalfword:
+ case kLoadUnsignedByte:
+ case kLoadWord:
+ // We can encode imm12 offset.
+ return 0xfff;
+ case kLoadSWord:
+ case kLoadDWord:
+ case kLoadWordPair:
+ // We can encode imm8:'00' offset.
+ return 0xff << 2;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+int32_t ArmVIXLAssembler::GetAllowedStoreOffsetBits(StoreOperandType type) {
+ switch (type) {
+ case kStoreHalfword:
+ case kStoreByte:
+ case kStoreWord:
+ // We can encode imm12 offset.
+ return 0xfff;
+ case kStoreSWord:
+ case kStoreDWord:
+ case kStoreWordPair:
+ // We can encode imm8:'00' offset.
+ return 0xff << 2;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+static bool CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ case kLoadSignedHalfword:
+ case kLoadUnsignedHalfword:
+ case kLoadUnsignedByte:
+ case kLoadWord:
+ return IsAbsoluteUint<12>(offset);
+ case kLoadSWord:
+ case kLoadDWord:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset); // VFP addressing mode.
+ case kLoadWordPair:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+static bool CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
+ switch (type) {
+ case kStoreHalfword:
+ case kStoreByte:
+ case kStoreWord:
+ return IsAbsoluteUint<12>(offset);
+ case kStoreSWord:
+ case kStoreDWord:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset); // VFP addressing mode.
+ case kStoreWordPair:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// CanHoldStoreOffsetThumb() returns true.
+// TODO(VIXL): Implement AdjustLoadStoreOffset logic in VIXL.
+void ArmVIXLAssembler::StoreToOffset(StoreOperandType type,
+ vixl32::Register reg,
+ vixl32::Register base,
+ int32_t offset) {
+ vixl32::Register tmp_reg;
+ UseScratchRegisterScope temps(&vixl_masm_);
+
+ if (!CanHoldStoreOffsetThumb(type, offset)) {
+ CHECK_NE(base.GetCode(), kIpCode);
+ if ((reg.GetCode() != kIpCode) &&
+ ((type != kStoreWordPair) || (reg.GetCode() + 1 != kIpCode))) {
+ tmp_reg = temps.Acquire();
+ } else {
+ // Be careful not to use ip twice (for `reg` (or `reg` + 1 in
+ // the case of a word-pair store) and `base`) to build the
+ // Address object used by the store instruction(s) below.
+ // Instead, save R5 on the stack (or R6 if R5 is already used by
+ // `base`), use it as secondary temporary register, and restore
+ // it after the store instruction has been emitted.
+ tmp_reg = (base.GetCode() != 5) ? r5 : r6;
+ ___ Push(tmp_reg);
+ if (base.GetCode() == kSpCode) {
+ offset += kRegisterSize;
+ }
+ }
+ // TODO: Implement indexed store (not available for STRD), inline AdjustLoadStoreOffset()
+ // and in the "unsplittable" path get rid of the "add" by using the store indexed instead.
+ offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(type), tmp_reg, base, offset);
+ base = tmp_reg;
+ }
+ DCHECK(CanHoldStoreOffsetThumb(type, offset));
+ switch (type) {
+ case kStoreByte:
+ ___ Strb(reg, MemOperand(base, offset));
+ break;
+ case kStoreHalfword:
+ ___ Strh(reg, MemOperand(base, offset));
+ break;
+ case kStoreWord:
+ ___ Str(reg, MemOperand(base, offset));
+ break;
+ case kStoreWordPair:
+ ___ Strd(reg, vixl32::Register(reg.GetCode() + 1), MemOperand(base, offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+ if ((tmp_reg.IsValid()) && (tmp_reg.GetCode() != kIpCode)) {
+ CHECK(tmp_reg.Is(r5) || tmp_reg.Is(r6)) << tmp_reg;
+ ___ Pop(tmp_reg);
+ }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// CanHoldLoadOffsetThumb() returns true.
+// TODO(VIXL): Implement AdjustLoadStoreOffset logic in VIXL.
+void ArmVIXLAssembler::LoadFromOffset(LoadOperandType type,
+ vixl32::Register dest,
+ vixl32::Register base,
+ int32_t offset) {
+ if (!CanHoldLoadOffsetThumb(type, offset)) {
+ CHECK(!base.Is(ip));
+ // Inlined AdjustLoadStoreOffset() allows us to pull a few more tricks.
+ int32_t allowed_offset_bits = GetAllowedLoadOffsetBits(type);
+ DCHECK_NE(offset & ~allowed_offset_bits, 0);
+ int32_t add_to_base, offset_for_load;
+ if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
+ // Use dest for the adjusted base. If it's a low reg, we may end up using a 16-bit load.
+ AddConstant(dest, base, add_to_base);
+ base = dest;
+ offset = offset_for_load;
+ } else {
+ UseScratchRegisterScope temps(&vixl_masm_);
+ vixl32::Register temp = (dest.Is(base)) ? temps.Acquire() : dest;
+ LoadImmediate(temp, offset);
+ // TODO: Implement indexed load (not available for LDRD) and use it here to avoid the ADD.
+ // Use dest for the adjusted base. If it's a low reg, we may end up using a 16-bit load.
+ ___ Add(dest, dest, (dest.Is(base)) ? temp : base);
+ base = dest;
+ offset = 0;
+ }
+ }
+
+ DCHECK(CanHoldLoadOffsetThumb(type, offset));
+ switch (type) {
+ case kLoadSignedByte:
+ ___ Ldrsb(dest, MemOperand(base, offset));
+ break;
+ case kLoadUnsignedByte:
+ ___ Ldrb(dest, MemOperand(base, offset));
+ break;
+ case kLoadSignedHalfword:
+ ___ Ldrsh(dest, MemOperand(base, offset));
+ break;
+ case kLoadUnsignedHalfword:
+ ___ Ldrh(dest, MemOperand(base, offset));
+ break;
+ case kLoadWord:
+ CHECK(!dest.IsSP());
+ ___ Ldr(dest, MemOperand(base, offset));
+ break;
+ case kLoadWordPair:
+ ___ Ldrd(dest, vixl32::Register(dest.GetCode() + 1), MemOperand(base, offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+void ArmVIXLAssembler::StoreSToOffset(vixl32::SRegister source,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vstr(source, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::StoreDToOffset(vixl32::DRegister source,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vstr(source, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::LoadSFromOffset(vixl32::SRegister reg,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vldr(reg, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::LoadDFromOffset(vixl32::DRegister reg,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vldr(reg, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::AddConstant(vixl32::Register rd, int32_t value) {
+ AddConstant(rd, rd, value);
+}
+
+// TODO(VIXL): think about using ADDS (which updates flags) where possible.
+void ArmVIXLAssembler::AddConstant(vixl32::Register rd,
+ vixl32::Register rn,
+ int32_t value) {
+ DCHECK(vixl_masm_.OutsideITBlock());
+ // TODO(VIXL): implement this optimization in VIXL.
+ if (value == 0) {
+ if (!rd.Is(rn)) {
+ ___ Mov(rd, rn);
+ }
+ return;
+ }
+ ___ Add(rd, rn, value);
+}
+
+// Inside an IT block we must use the assembler directly;
+// macro-assembler instructions are not permitted.
+void ArmVIXLAssembler::AddConstantInIt(vixl32::Register rd,
+ vixl32::Register rn,
+ int32_t value,
+ vixl32::Condition cond) {
+ DCHECK(vixl_masm_.InITBlock());
+ if (value == 0) {
+ ___ mov(cond, rd, rn);
+ } else {
+ ___ add(cond, rd, rn, value);
+ }
+}
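AddConstantInIt exists because macro-assembler helpers may expand to multiple instructions, which is illegal inside an IT block. Callers therefore reserve exact buffer space and emit the IT sequence with raw instructions, as CreateHandleScopeEntry() does in the JNI macro assembler below. A sketch (`out` and `offset` are placeholder names):

    // Conditionally materialize sp + offset inside an IT block. The scope
    // reserves exact space so VIXL cannot insert pools inside the block.
    AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
                                 2 * kArmInstrMaxSizeInBytes,
                                 CodeBufferCheckScope::kMaximumSize);
    ___ it(ne, 0x8);
    asm_.AddConstantInIt(out, sp, offset, ne);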
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
new file mode 100644
index 0000000000..c8f3a9b863
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
+
+#include "base/arena_containers.h"
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "offsets.h"
+#include "utils/arm/assembler_arm_shared.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+// TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
+namespace vixl32 = vixl::aarch32;
+
+namespace art {
+namespace arm {
+
+class ArmVIXLAssembler FINAL : public Assembler {
+ private:
+ class ArmException;
+ public:
+ explicit ArmVIXLAssembler(ArenaAllocator* arena)
+ : Assembler(arena) {
+ // Use Thumb2 instruction set.
+ vixl_masm_.UseT32();
+ }
+
+ virtual ~ArmVIXLAssembler() {}
+ vixl32::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
+ void FinalizeCode() OVERRIDE;
+
+ // Size of generated code.
+ size_t CodeSize() const OVERRIDE;
+ const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+
+ // Copy instructions out of assembly buffer into the given region of memory.
+ void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+
+ void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
+ }
+ void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
+ }
+
+ //
+ // Heap poisoning.
+ //
+ // Poison a heap reference contained in `reg`.
+ void PoisonHeapReference(vixl32::Register reg);
+ // Unpoison a heap reference contained in `reg`.
+ void UnpoisonHeapReference(vixl32::Register reg);
+ // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybeUnpoisonHeapReference(vixl32::Register reg);
+
+ void StoreToOffset(StoreOperandType type,
+ vixl32::Register reg,
+ vixl32::Register base,
+ int32_t offset);
+ void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
+ void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);
+
+ void LoadImmediate(vixl32::Register dest, int32_t value);
+ void LoadFromOffset(LoadOperandType type,
+ vixl32::Register reg,
+ vixl32::Register base,
+ int32_t offset);
+ void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
+ void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);
+
+ bool ShifterOperandCanAlwaysHold(uint32_t immediate);
+ bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc);
+ bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
+ int32_t offset,
+ /*out*/ int32_t* add_to_base,
+ /*out*/ int32_t* offset_for_load_store);
+ int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
+ vixl32::Register temp,
+ vixl32::Register base,
+ int32_t offset);
+ int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
+ int32_t GetAllowedStoreOffsetBits(StoreOperandType type);
+
+ void AddConstant(vixl32::Register rd, int32_t value);
+ void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
+ void AddConstantInIt(vixl32::Register rd,
+ vixl32::Register rn,
+ int32_t value,
+ vixl32::Condition cond = vixl32::al);
+
+ private:
+ // VIXL assembler.
+ vixl32::MacroAssembler vixl_masm_;
+};
+
+// Thread register declaration.
+extern const vixl32::Register tr;
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
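The intended lifecycle mirrors the EmitAndCheck() helper in the thumb test further down: construct over an arena, emit through the helpers, finalize, then copy the encoded bytes out. A sketch using only the public interface above:

    ArenaPool pool;
    ArenaAllocator arena(&pool);
    ArmVIXLAssembler assembler(&arena);       // Constructor selects T32.
    assembler.LoadImmediate(vixl32::r0, 42);  // Emit some code.
    assembler.FinalizeCode();
    std::vector<uint8_t> code(assembler.CodeSize());
    MemoryRegion region(code.data(), code.size());
    assembler.FinalizeInstructions(region);   // Copy out the encoding.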
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.cc b/compiler/utils/arm/jni_macro_assembler_arm.cc
index af5ebb4ce8..e0bfa12b2a 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm.cc
@@ -18,7 +18,6 @@
#include <algorithm>
-#include "assembler_arm32.h"
#include "assembler_thumb2.h"
#include "base/arena_allocator.h"
#include "base/bit_utils.h"
@@ -47,9 +46,6 @@ class ArmExceptionSlowPath FINAL : public SlowPath {
ArmJNIMacroAssembler::ArmJNIMacroAssembler(ArenaAllocator* arena, InstructionSet isa) {
switch (isa) {
case kArm:
- asm_.reset(new (arena) Arm32Assembler(arena));
- break;
-
case kThumb2:
asm_.reset(new (arena) Thumb2Assembler(arena));
break;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
new file mode 100644
index 0000000000..719fe7f3a1
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -0,0 +1,599 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <type_traits>
+
+#include "jni_macro_assembler_arm_vixl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "thread.h"
+
+using namespace vixl::aarch32; // NOLINT(build/namespaces)
+namespace vixl32 = vixl::aarch32;
+
+namespace art {
+namespace arm {
+
+#ifdef ___
+#error "ARM Assembler macro already defined."
+#else
+#define ___ asm_.GetVIXLAssembler()->
+#endif
+
+void ArmVIXLJNIMacroAssembler::FinalizeCode() {
+ for (const std::unique_ptr<
+ ArmVIXLJNIMacroAssembler::ArmException>& exception : exception_blocks_) {
+ EmitExceptionPoll(exception.get());
+ }
+ asm_.FinalizeCode();
+}
+
+static dwarf::Reg DWARFReg(vixl32::Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
+}
+
+static dwarf::Reg DWARFReg(vixl32::SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
+}
+
+static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
+
+void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ CHECK(r0.Is(method_reg.AsArm().AsVIXLRegister()));
+
+ // Push callee saves and link register.
+ RegList core_spill_mask = 1 << LR;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+ ___ Push(RegisterList(core_spill_mask));
+ cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(r0), 0, core_spill_mask, kFramePointerSize);
+ if (fp_spill_mask != 0) {
+ uint32_t first = CTZ(fp_spill_mask);
+ uint32_t last = first + POPCOUNT(fp_spill_mask) - 1;
+
+ // Check that list is contiguous.
+ DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));
+
+ ___ Vpush(SRegisterList(vixl32::SRegister(first), vixl32::SRegister(last)));
+ cfi().AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(s0), 0, fp_spill_mask, kFramePointerSize);
+ }
+
+ // Increase frame to required size.
+ int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ // Must at least have space for Method*.
+ CHECK_GT(frame_size, pushed_values * kFramePointerSize);
+ IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
+
+ // Write out Method*.
+ asm_.StoreToOffset(kStoreWord, r0, sp, 0);
+
+ // Write out entry spills.
+ int32_t offset = frame_size + kFramePointerSize;
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ArmManagedRegister reg = entry_spills.at(i).AsArm();
+ if (reg.IsNoRegister()) {
+      // Only increment the stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsCoreRegister()) {
+ asm_.StoreToOffset(kStoreWord, reg.AsVIXLRegister(), sp, offset);
+ offset += 4;
+ } else if (reg.IsSRegister()) {
+ asm_.StoreSToOffset(reg.AsVIXLSRegister(), sp, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ asm_.StoreDToOffset(reg.AsVIXLDRegister(), sp, offset);
+ offset += 8;
+ }
+ }
+}
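The contiguity DCHECK in BuildFrame() relies on a mask identity: shifting the spill mask down by its trailing zero count must yield a solid run of POPCOUNT ones. A worked example with illustrative values:

    // fp_spill_mask = 0b0000'1111'0000  -> spills s4..s7.
    // CTZ(mask) = 4, POPCOUNT(mask) = 4.
    // mask >> CTZ      == 0b1111
    // ~0u >> (32 - 4)  == 0b1111  -> equal, so the S-register list is
    //                                contiguous and one VPUSH {s4-s7} covers it.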
+
+void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+
+ // Compute callee saves to pop and PC.
+ RegList core_spill_mask = 1 << PC;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+
+ // Decrease frame to start of callee saves.
+ int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pop_values * kFramePointerSize);
+ DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
+
+ if (fp_spill_mask != 0) {
+ uint32_t first = CTZ(fp_spill_mask);
+ uint32_t last = first + POPCOUNT(fp_spill_mask) - 1;
+ // Check that list is contiguous.
+ DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));
+
+ ___ Vpop(SRegisterList(vixl32::SRegister(first), vixl32::SRegister(last)));
+ cfi().AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
+ cfi().RestoreMany(DWARFReg(s0), fp_spill_mask);
+ }
+
+ // Pop callee saves and PC.
+ ___ Pop(RegisterList(core_spill_mask));
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+
+void ArmVIXLJNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ asm_.AddConstant(sp, -adjust);
+ cfi().AdjustCFAOffset(adjust);
+}
+
+void ArmVIXLJNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ asm_.AddConstant(sp, adjust);
+ cfi().AdjustCFAOffset(-adjust);
+}
+
+void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, size_t size) {
+ ArmManagedRegister src = m_src.AsArm();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegisterPairLow(), sp, dest.Int32Value());
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegisterPairHigh(), sp, dest.Int32Value() + 4);
+ } else if (src.IsSRegister()) {
+ CHECK_EQ(4u, size);
+ asm_.StoreSToOffset(src.AsVIXLSRegister(), sp, dest.Int32Value());
+ } else {
+ CHECK_EQ(8u, size);
+ CHECK(src.IsDRegister()) << src;
+ asm_.StoreDToOffset(src.AsVIXLDRegister(), sp, dest.Int32Value());
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::StoreSpanning(FrameOffset dest,
+ ManagedRegister msrc,
+ FrameOffset in_off,
+ ManagedRegister mscratch) {
+ ArmManagedRegister src = msrc.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, in_off.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value() + 4);
+}
+
+void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ ArmManagedRegister dst = dest.AsArm();
+  CHECK(dst.IsCoreRegister()) << dst;
+ asm_.LoadFromOffset(kLoadWord,
+ dst.AsVIXLRegister(),
+ base.AsArm().AsVIXLRegister(),
+ offs.Int32Value());
+
+ if (unpoison_reference) {
+ asm_.MaybeUnpoisonHeapReference(dst.AsVIXLRegister());
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRawPtr(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ManagedRegister base ATTRIBUTE_UNUSED,
+ Offset offs ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister scratch) {
+ ArmManagedRegister mscratch = scratch.AsArm();
+ CHECK(mscratch.IsCoreRegister()) << mscratch;
+ asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm);
+ asm_.StoreToOffset(kStoreWord, mscratch.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ return Load(m_dst.AsArm(), sp, src.Int32Value(), size);
+}
+
+void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst ATTRIBUTE_UNUSED,
+ ThreadOffset32 src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), tr, offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ asm_.AddConstant(scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+void ArmVIXLJNIMacroAssembler::ZeroExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+void ArmVIXLJNIMacroAssembler::Move(ManagedRegister m_dst,
+ ManagedRegister m_src,
+ size_t size ATTRIBUTE_UNUSED) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ ArmManagedRegister src = m_src.AsArm();
+ if (!dst.Equals(src)) {
+ if (dst.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
+ } else if (dst.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister());
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister());
+ } else {
+ CHECK(dst.IsRegisterPair()) << dst;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second.
+ if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+ ___ Mov(dst.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairLow());
+ ___ Mov(dst.AsVIXLRegisterPairHigh(), src.AsVIXLRegisterPairHigh());
+ } else {
+ ___ Mov(dst.AsVIXLRegisterPairHigh(), src.AsVIXLRegisterPairHigh());
+ ___ Mov(dst.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairLow());
+ }
+ }
+ }
+}
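The pair-move ordering matters only when the pairs overlap. A worked example, assuming overlapping pairs such as (r0,r1) and (r1,r2) can occur:

    // src = (r0,r1), dst = (r1,r2): src high (r1) == dst low (r1).
    // Low-first would clobber the source high half:
    //   mov r1, r0   // dst.low  <- src.low, destroys src.high
    //   mov r2, r1   // dst.high <- garbage
    // Hence the high half is moved first in that case:
    //   mov r2, r1   // dst.high <- src.high
    //   mov r1, r0   // dst.low  <- src.low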
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ ArmManagedRegister temp = scratch.AsArm();
+ CHECK(temp.IsCoreRegister()) << temp;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
+ } else if (size == 8) {
+ asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value() + 4);
+ asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value() + 4);
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ ManagedRegister src_base ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest_base ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
+ FrameOffset src_base ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ ManagedRegister src ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+static constexpr uint32_t kArmInstrMaxSizeInBytes = 4;
+
+void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ asm_.LoadFromOffset(kLoadWord,
+ out_reg.AsVIXLRegister(),
+ sp,
+ handle_scope_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ ___ Cmp(in_reg.AsVIXLRegister(), 0);
+
+ if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
+ if (!out_reg.Equals(in_reg)) {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ 3 * kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ it(eq, 0xc);
+ ___ mov(eq, out_reg.AsVIXLRegister(), 0);
+ asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ } else {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ 2 * kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ it(ne, 0x8);
+ asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ }
+ } else {
+ // TODO: Implement this (old arm assembler would have crashed here).
+ UNIMPLEMENTED(FATAL);
+ }
+ } else {
+ asm_.AddConstant(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ }
+}
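With null_allowed, this compiles to a compare plus a short IT block; the VixlJniHelpers expected output below shows both shapes:

    // out_reg != in_reg:          // out_reg == in_reg (scratch ip):
    //   cmp.w  ip, #0             //   cmp.w   ip, #0
    //   ite    eq                 //   it      ne
    //   moveq  r0, #0             //   addne.w ip, sp, #48
    //   addne  r0, sp, #48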
+
+void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ ___ Cmp(scratch.AsVIXLRegister(), 0);
+
+ if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ 2 * kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ it(ne, 0x8);
+ asm_.AddConstantInIt(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ } else {
+ // TODO: Implement this (old arm assembler would have crashed here).
+ UNIMPLEMENTED(FATAL);
+ }
+ } else {
+ asm_.AddConstant(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ }
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, out_off.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::LoadReferenceFromHandleScope(
+ ManagedRegister mout_reg ATTRIBUTE_UNUSED,
+ ManagedRegister min_reg ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
+}
+
+void ArmVIXLJNIMacroAssembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
+}
+
+void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase,
+ Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister base = mbase.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ asm_.LoadFromOffset(kLoadWord,
+ scratch.AsVIXLRegister(),
+ base.AsVIXLRegister(),
+ offset.Int32Value());
+ ___ Blx(scratch.AsVIXLRegister());
+ // TODO: place reference map on call.
+}
+
+void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, base.Int32Value());
+ asm_.LoadFromOffset(kLoadWord,
+ scratch.AsVIXLRegister(),
+ scratch.AsVIXLRegister(),
+ offset.Int32Value());
+ ___ Blx(scratch.AsVIXLRegister());
+  // TODO: place reference map on call.
+}
+
+void ArmVIXLJNIMacroAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::GetCurrentThread(ManagedRegister mtr) {
+ ___ Mov(mtr.AsArm().AsVIXLRegister(), tr);
+}
+
+void ArmVIXLJNIMacroAssembler::GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ asm_.StoreToOffset(kStoreWord, tr, sp, dest_offset.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
+ CHECK_ALIGNED(stack_adjust, kStackAlignment);
+ ArmManagedRegister scratch = m_scratch.AsArm();
+ exception_blocks_.emplace_back(
+ new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust));
+ asm_.LoadFromOffset(kLoadWord,
+ scratch.AsVIXLRegister(),
+ tr,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
+
+ ___ Cmp(scratch.AsVIXLRegister(), 0);
+ {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ b(ne, Narrow, exception_blocks_.back()->Entry());
+ }
+ // TODO: think about using CBNZ here.
+}
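The TODO refers to the compact compare-and-branch encoding. A hedged sketch of what that could look like (not the current code; CBNZ only accepts low registers r0-r7 and a short forward branch range, so it would need guarding):

    ___ Cbnz(scratch.AsVIXLRegister(), exception_blocks_.back()->Entry());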
+
+void ArmVIXLJNIMacroAssembler::EmitExceptionPoll(
+ ArmVIXLJNIMacroAssembler::ArmException* exception) {
+ ___ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving r0 as this won't return.
+ ___ Mov(r0, exception->scratch_.AsVIXLRegister());
+ // TODO: check that exception->scratch_ is dead by this point.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ ___ Ldr(temp,
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value()));
+ ___ Blx(temp);
+}
+
+void ArmVIXLJNIMacroAssembler::MemoryBarrier(ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister dest,
+                                    vixl32::Register base,
+                                    int32_t offset,
+                                    size_t size) {
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dest;
+ } else if (dest.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dest;
+ CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
+ ___ Ldr(dest.AsVIXLRegister(), MemOperand(base, offset));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dest;
+ ___ Ldr(dest.AsVIXLRegisterPairLow(), MemOperand(base, offset));
+ ___ Ldr(dest.AsVIXLRegisterPairHigh(), MemOperand(base, offset + 4));
+ } else if (dest.IsSRegister()) {
+ ___ Vldr(dest.AsVIXLSRegister(), MemOperand(base, offset));
+ } else {
+ CHECK(dest.IsDRegister()) << dest;
+ ___ Vldr(dest.AsVIXLDRegister(), MemOperand(base, offset));
+ }
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
new file mode 100644
index 0000000000..dfc35b7878
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_VIXL_H_
+#define ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_VIXL_H_
+
+#include "base/arena_containers.h"
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "offsets.h"
+#include "utils/arm/assembler_arm_shared.h"
+#include "utils/arm/assembler_arm_vixl.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace arm {
+
+class ArmVIXLJNIMacroAssembler FINAL
+ : public JNIMacroAssemblerFwd<ArmVIXLAssembler, PointerSize::k32> {
+ private:
+ class ArmException;
+ public:
+ explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd(arena),
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+
+ virtual ~ArmVIXLJNIMacroAssembler() {}
+ void FinalizeCode() OVERRIDE;
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack.
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack.
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines.
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines.
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest,
+ ThreadOffset32 src,
+ size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines.
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ // Sign extension.
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension.
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current().
+ void GetCurrentThread(ManagedRegister mtr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the handle scope entry to see if the value is
+  // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load it into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst,
+ ManagedRegister src) OVERRIDE;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset].
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an exception slow path if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+  void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException* exception);
+ void Load(ArmManagedRegister dest, vixl32::Register base, int32_t offset, size_t size);
+
+ private:
+ class ArmException {
+ private:
+ ArmException(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ vixl32::Label* Entry() { return &exception_entry_; }
+
+    // Register used for passing Thread::Current()->exception_.
+ const ArmManagedRegister scratch_;
+
+    // Stack adjust for ExceptionPoll.
+ const size_t stack_adjust_;
+
+ vixl32::Label exception_entry_;
+
+ friend class ArmVIXLJNIMacroAssembler;
+ DISALLOW_COPY_AND_ASSIGN(ArmException);
+ };
+
+ // List of exception blocks to generate at the end of the code cache.
+ ArenaVector<std::unique_ptr<ArmVIXLJNIMacroAssembler::ArmException>> exception_blocks_;
+ // Used for testing.
+ friend class ArmVIXAssemblerTest_VixlLoadFromOffset_Test;
+ friend class ArmVIXAssemblerTest_VixlStoreToOffset_Test;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_VIXL_H_
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
index 276db4420c..2be2d5638e 100644
--- a/compiler/utils/arm/managed_register_arm.h
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -22,6 +22,12 @@
#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
+// TODO(VIXL): Make VIXL compile with -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
namespace art {
namespace arm {
@@ -90,16 +96,31 @@ class ArmManagedRegister : public ManagedRegister {
return static_cast<Register>(id_);
}
+ vixl::aarch32::Register AsVIXLRegister() const {
+ CHECK(IsCoreRegister());
+ return vixl::aarch32::Register(id_);
+ }
+
constexpr SRegister AsSRegister() const {
CHECK(IsSRegister());
return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
}
+ vixl::aarch32::SRegister AsVIXLSRegister() const {
+ CHECK(IsSRegister());
+ return vixl::aarch32::SRegister(id_ - kNumberOfCoreRegIds);
+ }
+
constexpr DRegister AsDRegister() const {
CHECK(IsDRegister());
return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
}
+ vixl::aarch32::DRegister AsVIXLDRegister() const {
+ CHECK(IsDRegister());
+ return vixl::aarch32::DRegister(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
+ }
+
constexpr SRegister AsOverlappingDRegisterLow() const {
CHECK(IsOverlappingDRegister());
DRegister d_reg = AsDRegister();
@@ -128,12 +149,20 @@ class ArmManagedRegister : public ManagedRegister {
return FromRegId(AllocIdLow()).AsCoreRegister();
}
+ vixl::aarch32::Register AsVIXLRegisterPairLow() const {
+ return vixl::aarch32::Register(AsRegisterPairLow());
+ }
+
constexpr Register AsRegisterPairHigh() const {
CHECK(IsRegisterPair());
// Appropriate mapping of register ids allows to use AllocIdHigh().
return FromRegId(AllocIdHigh()).AsCoreRegister();
}
+ vixl::aarch32::Register AsVIXLRegisterPairHigh() const {
+ return vixl::aarch32::Register(AsRegisterPairHigh());
+ }
+
constexpr bool IsCoreRegister() const {
CHECK(IsValidManagedRegister());
return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 81159e69a0..57f3b1570a 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -20,7 +20,6 @@
#include <vector>
#ifdef ART_ENABLE_CODEGEN_arm
-#include "arm/assembler_arm32.h"
#include "arm/assembler_thumb2.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 9c9271db33..41cb04b251 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -23,6 +23,10 @@
#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
+
+#include "jni/quick/calling_convention.h"
+#include "utils/arm/jni_macro_assembler_arm_vixl.h"
+
#include "base/hex_dump.h"
#include "common_runtime_test.h"
@@ -1608,6 +1612,196 @@ TEST_F(Thumb2AssemblerTest, CmpConstant) {
EmitAndCheck(&assembler, "CmpConstant");
}
+#define ENABLE_VIXL_TEST
+
+#ifdef ENABLE_VIXL_TEST
+
+#define ARM_VIXL
+
+#ifdef ARM_VIXL
+typedef arm::ArmVIXLJNIMacroAssembler JniAssemblerType;
+#else
+typedef arm::Thumb2Assembler AssemblerType;
+#endif
+
+class ArmVIXAssemblerTest : public ::testing::Test {
+ public:
+ ArmVIXAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
+
+ ArenaPool pool;
+ ArenaAllocator arena;
+ JniAssemblerType assembler;
+};
+
+#undef __
+#define __ assembler->
+
+void EmitAndCheck(JniAssemblerType* assembler, const char* testname,
+ const char* const* results) {
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
+ std::vector<uint8_t> managed_code(cs);
+ MemoryRegion code(&managed_code[0], managed_code.size());
+ __ FinalizeInstructions(code);
+
+ DumpAndCheck(managed_code, testname, results);
+}
+
+void EmitAndCheck(JniAssemblerType* assembler, const char* testname) {
+ InitResults();
+ std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
+ ASSERT_NE(results, test_results.end());
+
+ EmitAndCheck(assembler, testname, results->second);
+}
+
+#undef __
+#define __ assembler.
+
+TEST_F(ArmVIXAssemblerTest, VixlJniHelpers) {
+ const bool is_static = true;
+ const bool is_synchronized = false;
+ const char* shorty = "IIFII";
+
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ std::unique_ptr<JniCallingConvention> jni_conv(
+ JniCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
+ std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
+ ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
+ const int frame_size(jni_conv->FrameSize());
+ ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
+
+ const ManagedRegister method_register = ArmManagedRegister::FromCoreRegister(R0);
+ const ManagedRegister scratch_register = ArmManagedRegister::FromCoreRegister(R12);
+
+ __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+ __ IncreaseFrameSize(32);
+
+ // Loads
+ __ IncreaseFrameSize(4096);
+ __ Load(method_register, FrameOffset(32), 4);
+ __ Load(method_register, FrameOffset(124), 4);
+ __ Load(method_register, FrameOffset(132), 4);
+ __ Load(method_register, FrameOffset(1020), 4);
+ __ Load(method_register, FrameOffset(1024), 4);
+ __ Load(scratch_register, FrameOffset(4092), 4);
+ __ Load(scratch_register, FrameOffset(4096), 4);
+ __ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
+ __ LoadRef(method_register, scratch_register, MemberOffset(128), true);
+
+ // Stores
+ __ Store(FrameOffset(32), method_register, 4);
+ __ Store(FrameOffset(124), method_register, 4);
+ __ Store(FrameOffset(132), method_register, 4);
+ __ Store(FrameOffset(1020), method_register, 4);
+ __ Store(FrameOffset(1024), method_register, 4);
+ __ Store(FrameOffset(4092), scratch_register, 4);
+ __ Store(FrameOffset(4096), scratch_register, 4);
+ __ StoreImmediateToFrame(FrameOffset(48), 0xFF, scratch_register);
+ __ StoreImmediateToFrame(FrameOffset(48), 0xFFFFFF, scratch_register);
+ __ StoreRawPtr(FrameOffset(48), scratch_register);
+ __ StoreRef(FrameOffset(48), scratch_register);
+ __ StoreSpanning(FrameOffset(48), method_register, FrameOffset(48), scratch_register);
+ __ StoreStackOffsetToThread(ThreadOffset32(512), FrameOffset(4096), scratch_register);
+ __ StoreStackPointerToThread(ThreadOffset32(512));
+
+ // Other
+ __ Call(method_register, FrameOffset(48), scratch_register);
+ __ Copy(FrameOffset(48), FrameOffset(44), scratch_register, 4);
+ __ CopyRawPtrFromThread(FrameOffset(44), ThreadOffset32(512), scratch_register);
+ __ CopyRef(FrameOffset(48), FrameOffset(44), scratch_register);
+ __ GetCurrentThread(method_register);
+ __ GetCurrentThread(FrameOffset(48), scratch_register);
+ __ Move(scratch_register, method_register, 4);
+ __ VerifyObject(scratch_register, false);
+
+ __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, true);
+ __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, false);
+ __ CreateHandleScopeEntry(method_register, FrameOffset(48), scratch_register, true);
+ __ CreateHandleScopeEntry(FrameOffset(48), FrameOffset(64), scratch_register, true);
+ __ CreateHandleScopeEntry(method_register, FrameOffset(0), scratch_register, true);
+ __ CreateHandleScopeEntry(method_register, FrameOffset(1025), scratch_register, true);
+ __ CreateHandleScopeEntry(scratch_register, FrameOffset(1025), scratch_register, true);
+
+ __ ExceptionPoll(scratch_register, 0);
+
+ __ DecreaseFrameSize(4096);
+ __ DecreaseFrameSize(32);
+ __ RemoveFrame(frame_size, callee_save_regs);
+
+ EmitAndCheck(&assembler, "VixlJniHelpers");
+}
+
+#ifdef ARM_VIXL
+#define R0 vixl::aarch32::r0
+#define R2 vixl::aarch32::r2
+#define R4 vixl::aarch32::r4
+#define R12 vixl::aarch32::r12
+#undef __
+#define __ assembler.asm_.
+#endif
+
+TEST_F(ArmVIXAssemblerTest, VixlLoadFromOffset) {
+ __ LoadFromOffset(kLoadWord, R2, R4, 12);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
+ __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
+ __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
+
+ __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12.
+ __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
+
+ __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
+ __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
+ __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
+
+ EmitAndCheck(&assembler, "VixlLoadFromOffset");
+}
+
+TEST_F(ArmVIXAssemblerTest, VixlStoreToOffset) {
+ __ StoreToOffset(kStoreWord, R2, R4, 12);
+ __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
+ __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
+ __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
+ __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
+ __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 12);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
+ __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 12);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
+ __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
+
+ __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12.
+ __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
+
+ __ StoreToOffset(kStoreByte, R2, R4, 12);
+
+ EmitAndCheck(&assembler, "VixlStoreToOffset");
+}
+
#undef __
+#endif // ENABLE_VIXL_TEST
} // namespace arm
} // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 6736015bf1..81c6ec5fac 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5468,6 +5468,199 @@ const char* const CmpConstantResults[] = {
nullptr
};
+const char* const VixlJniHelpersResults[] = {
+ " 0: e92d 4de0 stmdb sp!, {r5, r6, r7, r8, sl, fp, lr}\n",
+ " 4: ed2d 8a10 vpush {s16-s31}\n",
+ " 8: b089 sub sp, #36 ; 0x24\n",
+ " a: 9000 str r0, [sp, #0]\n",
+ " c: 9121 str r1, [sp, #132] ; 0x84\n",
+ " e: ed8d 0a22 vstr s0, [sp, #136] ; 0x88\n",
+ " 12: 9223 str r2, [sp, #140] ; 0x8c\n",
+ " 14: 9324 str r3, [sp, #144] ; 0x90\n",
+ " 16: b088 sub sp, #32\n",
+ " 18: f5ad 5d80 sub.w sp, sp, #4096 ; 0x1000\n",
+ " 1c: 9808 ldr r0, [sp, #32]\n",
+ " 1e: 981f ldr r0, [sp, #124] ; 0x7c\n",
+ " 20: 9821 ldr r0, [sp, #132] ; 0x84\n",
+ " 22: 98ff ldr r0, [sp, #1020] ; 0x3fc\n",
+ " 24: f8dd 0400 ldr.w r0, [sp, #1024] ; 0x400\n",
+ " 28: f8dd cffc ldr.w ip, [sp, #4092] ; 0xffc\n",
+ " 2c: f50d 5c80 add.w ip, sp, #4096 ; 0x1000\n",
+ " 30: f8dc c000 ldr.w ip, [ip]\n",
+ " 34: f8d9 c200 ldr.w ip, [r9, #512] ; 0x200\n",
+ " 38: f8dc 0080 ldr.w r0, [ip, #128] ; 0x80\n",
+ " 3c: 9008 str r0, [sp, #32]\n",
+ " 3e: 901f str r0, [sp, #124] ; 0x7c\n",
+ " 40: 9021 str r0, [sp, #132] ; 0x84\n",
+ " 42: 90ff str r0, [sp, #1020] ; 0x3fc\n",
+ " 44: f8cd 0400 str.w r0, [sp, #1024] ; 0x400\n",
+ " 48: f8cd cffc str.w ip, [sp, #4092] ; 0xffc\n",
+ " 4c: f84d 5d04 str.w r5, [sp, #-4]!\n",
+ " 50: f50d 5580 add.w r5, sp, #4096 ; 0x1000\n",
+ " 54: f8c5 c004 str.w ip, [r5, #4]\n",
+ " 58: f85d 5b04 ldr.w r5, [sp], #4\n",
+ " 5c: f04f 0cff mov.w ip, #255 ; 0xff\n",
+ " 60: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 64: f06f 4c7f mvn.w ip, #4278190080 ; 0xff000000\n",
+ " 68: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 6c: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 70: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 74: 900c str r0, [sp, #48] ; 0x30\n",
+ " 76: f8dd c030 ldr.w ip, [sp, #48] ; 0x30\n",
+ " 7a: f8cd c034 str.w ip, [sp, #52] ; 0x34\n",
+ " 7e: f50d 5c80 add.w ip, sp, #4096 ; 0x1000\n",
+ " 82: f8c9 c200 str.w ip, [r9, #512] ; 0x200\n",
+ " 86: f8c9 d200 str.w sp, [r9, #512] ; 0x200\n",
+ " 8a: f8d0 c030 ldr.w ip, [r0, #48] ; 0x30\n",
+ " 8e: 47e0 blx ip\n",
+ " 90: f8dd c02c ldr.w ip, [sp, #44] ; 0x2c\n",
+ " 94: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 98: f8d9 c200 ldr.w ip, [r9, #512] ; 0x200\n",
+ " 9c: f8cd c02c str.w ip, [sp, #44] ; 0x2c\n",
+ " a0: f8dd c02c ldr.w ip, [sp, #44] ; 0x2c\n",
+ " a4: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " a8: 4648 mov r0, r9\n",
+ " aa: f8cd 9030 str.w r9, [sp, #48] ; 0x30\n",
+ " ae: 4684 mov ip, r0\n",
+ " b0: f1bc 0f00 cmp.w ip, #0\n",
+ " b4: bf18 it ne\n",
+ " b6: f10d 0c30 addne.w ip, sp, #48 ; 0x30\n",
+ " ba: f10d 0c30 add.w ip, sp, #48 ; 0x30\n",
+ " be: f1bc 0f00 cmp.w ip, #0\n",
+ " c2: bf0c ite eq\n",
+ " c4: 2000 moveq r0, #0\n",
+ " c6: a80c addne r0, sp, #48 ; 0x30\n",
+ " c8: f8dd c040 ldr.w ip, [sp, #64] ; 0x40\n",
+ " cc: f1bc 0f00 cmp.w ip, #0\n",
+ " d0: bf18 it ne\n",
+ " d2: f10d 0c40 addne.w ip, sp, #64 ; 0x40\n",
+ " d6: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " da: f1bc 0f00 cmp.w ip, #0\n",
+ " de: bf0c ite eq\n",
+ " e0: 2000 moveq r0, #0\n",
+ " e2: 4668 movne r0, sp\n",
+ " e4: f1bc 0f00 cmp.w ip, #0\n",
+ " e8: bf0c ite eq\n",
+ " ea: 2000 moveq r0, #0\n",
+ " ec: f20d 4001 addwne r0, sp, #1025 ; 0x401\n",
+ " f0: f1bc 0f00 cmp.w ip, #0\n",
+ " f4: bf18 it ne\n",
+ " f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
+ " fa: f8d9 c084 ldr.w ip, [r9, #132] ; 0x84\n",
+ " fe: f1bc 0f00 cmp.w ip, #0\n",
+ " 102: d107 bne.n 114 <VixlJniHelpers+0x114>\n",
+ " 104: f50d 5d80 add.w sp, sp, #4096 ; 0x1000\n",
+ " 108: b008 add sp, #32\n",
+ " 10a: b009 add sp, #36 ; 0x24\n",
+ " 10c: ecbd 8a10 vpop {s16-s31}\n",
+ " 110: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n",
+ " 114: 4660 mov r0, ip\n",
+ " 116: f8d9 c2ac ldr.w ip, [r9, #684] ; 0x2ac\n",
+ " 11a: 47e0 blx ip\n",
+ nullptr
+};
+
+const char* const VixlLoadFromOffsetResults[] = {
+ " 0: 68e2 ldr r2, [r4, #12]\n",
+ " 2: f8d4 2fff ldr.w r2, [r4, #4095] ; 0xfff\n",
+ " 6: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
+ " a: 6812 ldr r2, [r2, #0]\n",
+ " c: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n",
+ " 10: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n",
+ " 14: f44f 5280 mov.w r2, #4096 ; 0x1000\n",
+ " 18: f2c0 0210 movt r2, #16\n",
+ " 1c: 4422 add r2, r4\n",
+ " 1e: 6812 ldr r2, [r2, #0]\n",
+ " 20: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 24: f2c0 0c10 movt ip, #16\n",
+ " 28: 4464 add r4, ip\n",
+ " 2a: 6824 ldr r4, [r4, #0]\n",
+ " 2c: 89a2 ldrh r2, [r4, #12]\n",
+ " 2e: f8b4 2fff ldrh.w r2, [r4, #4095] ; 0xfff\n",
+ " 32: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
+ " 36: 8812 ldrh r2, [r2, #0]\n",
+ " 38: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n",
+ " 3c: f8b2 20a4 ldrh.w r2, [r2, #164] ; 0xa4\n",
+ " 40: f44f 5280 mov.w r2, #4096 ; 0x1000\n",
+ " 44: f2c0 0210 movt r2, #16\n",
+ " 48: 4422 add r2, r4\n",
+ " 4a: 8812 ldrh r2, [r2, #0]\n",
+ " 4c: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 50: f2c0 0c10 movt ip, #16\n",
+ " 54: 4464 add r4, ip\n",
+ " 56: 8824 ldrh r4, [r4, #0]\n",
+ " 58: e9d4 2303 ldrd r2, r3, [r4, #12]\n",
+ " 5c: e9d4 23ff ldrd r2, r3, [r4, #1020] ; 0x3fc\n",
+ " 60: f504 6280 add.w r2, r4, #1024 ; 0x400\n",
+ " 64: e9d2 2300 ldrd r2, r3, [r2]\n",
+ " 68: f504 2280 add.w r2, r4, #262144 ; 0x40000\n",
+ " 6c: e9d2 2329 ldrd r2, r3, [r2, #164] ; 0xa4\n",
+ " 70: f44f 6280 mov.w r2, #1024 ; 0x400\n",
+ " 74: f2c0 0204 movt r2, #4\n",
+ " 78: 4422 add r2, r4\n",
+ " 7a: e9d2 2300 ldrd r2, r3, [r2]\n",
+ " 7e: f44f 6c80 mov.w ip, #1024 ; 0x400\n",
+ " 82: f2c0 0c04 movt ip, #4\n",
+ " 86: 4464 add r4, ip\n",
+ " 88: e9d4 4500 ldrd r4, r5, [r4]\n",
+ " 8c: f8dc 000c ldr.w r0, [ip, #12]\n",
+ " 90: f5a4 1280 sub.w r2, r4, #1048576 ; 0x100000\n",
+ " 94: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n",
+ " 98: f994 200c ldrsb.w r2, [r4, #12]\n",
+ " 9c: 7b22 ldrb r2, [r4, #12]\n",
+ " 9e: f9b4 200c ldrsh.w r2, [r4, #12]\n",
+ nullptr
+};
+const char* const VixlStoreToOffsetResults[] = {
+ " 0: 60e2 str r2, [r4, #12]\n",
+ " 2: f8c4 2fff str.w r2, [r4, #4095] ; 0xfff\n",
+ " 6: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
+ " a: f8cc 2000 str.w r2, [ip]\n",
+ " e: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n",
+ " 12: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n",
+ " 16: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 1a: f2c0 0c10 movt ip, #16\n",
+ " 1e: 44a4 add ip, r4\n",
+ " 20: f8cc 2000 str.w r2, [ip]\n",
+ " 24: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 28: f2c0 0c10 movt ip, #16\n",
+ " 2c: 44a4 add ip, r4\n",
+ " 2e: f8cc 4000 str.w r4, [ip]\n",
+ " 32: 81a2 strh r2, [r4, #12]\n",
+ " 34: f8a4 2fff strh.w r2, [r4, #4095] ; 0xfff\n",
+ " 38: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
+ " 3c: f8ac 2000 strh.w r2, [ip]\n",
+ " 40: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n",
+ " 44: f8ac 20a4 strh.w r2, [ip, #164] ; 0xa4\n",
+ " 48: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 4c: f2c0 0c10 movt ip, #16\n",
+ " 50: 44a4 add ip, r4\n",
+ " 52: f8ac 2000 strh.w r2, [ip]\n",
+ " 56: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 5a: f2c0 0c10 movt ip, #16\n",
+ " 5e: 44a4 add ip, r4\n",
+ " 60: f8ac 4000 strh.w r4, [ip]\n",
+ " 64: e9c4 2303 strd r2, r3, [r4, #12]\n",
+ " 68: e9c4 23ff strd r2, r3, [r4, #1020] ; 0x3fc\n",
+ " 6c: f504 6c80 add.w ip, r4, #1024 ; 0x400\n",
+ " 70: e9cc 2300 strd r2, r3, [ip]\n",
+ " 74: f504 2c80 add.w ip, r4, #262144 ; 0x40000\n",
+ " 78: e9cc 2329 strd r2, r3, [ip, #164] ; 0xa4\n",
+ " 7c: f44f 6c80 mov.w ip, #1024 ; 0x400\n",
+ " 80: f2c0 0c04 movt ip, #4\n",
+ " 84: 44a4 add ip, r4\n",
+ " 86: e9cc 2300 strd r2, r3, [ip]\n",
+ " 8a: f44f 6c80 mov.w ip, #1024 ; 0x400\n",
+ " 8e: f2c0 0c04 movt ip, #4\n",
+ " 92: 44a4 add ip, r4\n",
+ " 94: e9cc 4500 strd r4, r5, [ip]\n",
+ " 98: f8cc 000c str.w r0, [ip, #12]\n",
+ " 9c: f5a4 1c80 sub.w ip, r4, #1048576 ; 0x100000\n",
+ " a0: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n",
+ " a4: 7322 strb r2, [r4, #12]\n",
+ nullptr
+};
+
std::map<std::string, const char* const*> test_results;
void setup_results() {
test_results["SimpleMov"] = SimpleMovResults;
@@ -5520,4 +5713,7 @@ void setup_results() {
test_results["CompareAndBranch"] = CompareAndBranchResults;
test_results["AddConstant"] = AddConstantResults;
test_results["CmpConstant"] = CmpConstantResults;
+ test_results["VixlJniHelpers"] = VixlJniHelpersResults;
+ test_results["VixlStoreToOffset"] = VixlStoreToOffsetResults;
+ test_results["VixlLoadFromOffset"] = VixlLoadFromOffsetResults;
}
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 1b743134ed..2f154fb862 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -20,7 +20,7 @@
#include <vector>
#ifdef ART_ENABLE_CODEGEN_arm
-#include "arm/jni_macro_assembler_arm.h"
+#include "arm/jni_macro_assembler_arm_vixl.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
#include "arm64/jni_macro_assembler_arm64.h"
@@ -58,7 +58,7 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return MacroAsm32UniquePtr(new (arena) arm::ArmJNIMacroAssembler(arena, instruction_set));
+ return MacroAsm32UniquePtr(new (arena) arm::ArmVIXLJNIMacroAssembler(arena));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
diff --git a/compiler/utils/label.h b/compiler/utils/label.h
index 1038f44ffe..0f82ad5ff1 100644
--- a/compiler/utils/label.h
+++ b/compiler/utils/label.h
@@ -28,7 +28,6 @@ class AssemblerFixup;
namespace arm {
class ArmAssembler;
- class Arm32Assembler;
class Thumb2Assembler;
}
namespace arm64 {
@@ -118,7 +117,6 @@ class Label {
}
friend class arm::ArmAssembler;
- friend class arm::Arm32Assembler;
friend class arm::Thumb2Assembler;
friend class arm64::Arm64Assembler;
friend class mips::MipsAssembler;
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 8b7da3fa77..bfc63d14da 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -1407,44 +1407,6 @@ void MipsAssembler::LoadConst64(Register reg_hi, Register reg_lo, int64_t value)
}
}
-void MipsAssembler::StoreConst32ToOffset(int32_t value,
- Register base,
- int32_t offset,
- Register temp) {
- CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false);
- if (value == 0) {
- temp = ZERO;
- } else {
- LoadConst32(temp, value);
- }
- Sw(temp, base, offset);
-}
-
-void MipsAssembler::StoreConst64ToOffset(int64_t value,
- Register base,
- int32_t offset,
- Register temp) {
- CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true);
- uint32_t low = Low32Bits(value);
- uint32_t high = High32Bits(value);
- if (low == 0) {
- Sw(ZERO, base, offset);
- } else {
- LoadConst32(temp, low);
- Sw(temp, base, offset);
- }
- if (high == 0) {
- Sw(ZERO, base, offset + kMipsWordSize);
- } else {
- if (high != low) {
- LoadConst32(temp, high);
- }
- Sw(temp, base, offset + kMipsWordSize);
- }
-}
-
void MipsAssembler::LoadSConst32(FRegister r, int32_t value, Register temp) {
if (value == 0) {
temp = ZERO;
@@ -2533,61 +2495,19 @@ void MipsAssembler::AdjustBaseAndOffset(Register& base,
CHECK_EQ(misalignment, offset & (kMipsDoublewordSize - 1));
}
-void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base,
+void MipsAssembler::LoadFromOffset(LoadOperandType type,
+ Register reg,
+ Register base,
int32_t offset) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
- switch (type) {
- case kLoadSignedByte:
- Lb(reg, base, offset);
- break;
- case kLoadUnsignedByte:
- Lbu(reg, base, offset);
- break;
- case kLoadSignedHalfword:
- Lh(reg, base, offset);
- break;
- case kLoadUnsignedHalfword:
- Lhu(reg, base, offset);
- break;
- case kLoadWord:
- Lw(reg, base, offset);
- break;
- case kLoadDoubleword:
- if (reg == base) {
- // This will clobber the base when loading the lower register. Since we have to load the
- // higher register as well, this will fail. Solution: reverse the order.
- Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
- Lw(reg, base, offset);
- } else {
- Lw(reg, base, offset);
- Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
- }
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
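+  // Forward to the templated version in the header. The `<>` disambiguates
+  // the call from this non-template overload; the default no-op null checker
+  // is supplied by the template's default argument.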
+ LoadFromOffset<>(type, reg, base, offset);
}
void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
- Lwc1(reg, base, offset);
+ LoadSFromOffset<>(reg, base, offset);
}
void MipsAssembler::LoadDFromOffset(FRegister reg, Register base, int32_t offset) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
- if (offset & 0x7) {
- if (Is32BitFPU()) {
- Lwc1(reg, base, offset);
- Lwc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
- } else {
- // 64-bit FPU.
- Lwc1(reg, base, offset);
- Lw(T8, base, offset + kMipsWordSize);
- Mthc1(T8, reg);
- }
- } else {
- Ldc1(reg, base, offset);
- }
+ LoadDFromOffset<>(reg, base, offset);
}
void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
@@ -2611,53 +2531,19 @@ void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32
}
}
-void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base,
+void MipsAssembler::StoreToOffset(StoreOperandType type,
+ Register reg,
+ Register base,
int32_t offset) {
- // Must not use AT as `reg`, so as not to overwrite the value being stored
- // with the adjusted `base`.
- CHECK_NE(reg, AT);
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
- switch (type) {
- case kStoreByte:
- Sb(reg, base, offset);
- break;
- case kStoreHalfword:
- Sh(reg, base, offset);
- break;
- case kStoreWord:
- Sw(reg, base, offset);
- break;
- case kStoreDoubleword:
- CHECK_NE(reg, base);
- CHECK_NE(static_cast<Register>(reg + 1), base);
- Sw(reg, base, offset);
- Sw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
+ StoreToOffset<>(type, reg, base, offset);
}
void MipsAssembler::StoreSToOffset(FRegister reg, Register base, int32_t offset) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
- Swc1(reg, base, offset);
+ StoreSToOffset<>(reg, base, offset);
}
void MipsAssembler::StoreDToOffset(FRegister reg, Register base, int32_t offset) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
- if (offset & 0x7) {
- if (Is32BitFPU()) {
- Swc1(reg, base, offset);
- Swc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
- } else {
- // 64-bit FPU.
- Mfhc1(T8, reg);
- Swc1(reg, base, offset);
- Sw(T8, base, offset + kMipsWordSize);
- }
- } else {
- Sdc1(reg, base, offset);
- }
+ StoreDToOffset<>(reg, base, offset);
}
static dwarf::Reg DWARFReg(Register reg) {
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 41b6c6bd32..434ca679d5 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -412,8 +412,6 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void LoadConst64(Register reg_hi, Register reg_lo, int64_t value);
void LoadDConst64(FRegister rd, int64_t value, Register temp);
void LoadSConst32(FRegister r, int32_t value, Register temp);
- void StoreConst32ToOffset(int32_t value, Register base, int32_t offset, Register temp);
- void StoreConst64ToOffset(int64_t value, Register base, int32_t offset, Register temp);
void Addiu32(Register rt, Register rs, int32_t value, Register rtmp = AT);
// These will generate R2 branches or R6 branches as appropriate.
@@ -444,6 +442,204 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
int32_t& offset,
bool is_doubleword,
bool is_float = false);
+
+ private:
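+  // Default functor for the `ImplicitNullChecker` template parameter below.
+  // Callers that need nothing recorded pass this no-op; codegen can instead
+  // pass a callback that fires right after the instruction performing the
+  // (first) memory access, e.g. to record the PC for implicit null checks.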
+ struct NoImplicitNullChecker {
+ void operator()() {}
+ };
+
+ public:
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreConst32ToOffset(int32_t value,
+ Register base,
+ int32_t offset,
+ Register temp,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ false);
+ if (value == 0) {
+ temp = ZERO;
+ } else {
+ LoadConst32(temp, value);
+ }
+ Sw(temp, base, offset);
+ null_checker();
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreConst64ToOffset(int64_t value,
+ Register base,
+ int32_t offset,
+ Register temp,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ true);
+ uint32_t low = Low32Bits(value);
+ uint32_t high = High32Bits(value);
+ if (low == 0) {
+ Sw(ZERO, base, offset);
+ } else {
+ LoadConst32(temp, low);
+ Sw(temp, base, offset);
+ }
+ null_checker();
+ if (high == 0) {
+ Sw(ZERO, base, offset + kMipsWordSize);
+ } else {
+ if (high != low) {
+ LoadConst32(temp, high);
+ }
+ Sw(temp, base, offset + kMipsWordSize);
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void LoadFromOffset(LoadOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+ switch (type) {
+ case kLoadSignedByte:
+ Lb(reg, base, offset);
+ break;
+ case kLoadUnsignedByte:
+ Lbu(reg, base, offset);
+ break;
+ case kLoadSignedHalfword:
+ Lh(reg, base, offset);
+ break;
+ case kLoadUnsignedHalfword:
+ Lhu(reg, base, offset);
+ break;
+ case kLoadWord:
+ Lw(reg, base, offset);
+ break;
+ case kLoadDoubleword:
+ if (reg == base) {
+          // Loading the lower register first would clobber the base, so the
+          // subsequent load of the higher register would use the wrong
+          // address. Solution: load the higher register first.
+ Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
+ null_checker();
+ Lw(reg, base, offset);
+ } else {
+ Lw(reg, base, offset);
+ null_checker();
+ Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
+ }
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
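+    // For doublewords the null checker already ran right after the first
+    // word access inside the switch above.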
+ if (type != kLoadDoubleword) {
+ null_checker();
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void LoadSFromOffset(FRegister reg,
+ Register base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+ Lwc1(reg, base, offset);
+ null_checker();
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void LoadDFromOffset(FRegister reg,
+ Register base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+ if (IsAligned<kMipsDoublewordSize>(offset)) {
+ Ldc1(reg, base, offset);
+ null_checker();
+ } else {
+ if (Is32BitFPU()) {
+ Lwc1(reg, base, offset);
+ null_checker();
+ Lwc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
+ } else {
+ // 64-bit FPU.
+ Lwc1(reg, base, offset);
+ null_checker();
+ Lw(T8, base, offset + kMipsWordSize);
+ Mthc1(T8, reg);
+ }
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreToOffset(StoreOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ // Must not use AT as `reg`, so as not to overwrite the value being stored
+ // with the adjusted `base`.
+ CHECK_NE(reg, AT);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ switch (type) {
+ case kStoreByte:
+ Sb(reg, base, offset);
+ break;
+ case kStoreHalfword:
+ Sh(reg, base, offset);
+ break;
+ case kStoreWord:
+ Sw(reg, base, offset);
+ break;
+ case kStoreDoubleword:
+ CHECK_NE(reg, base);
+ CHECK_NE(static_cast<Register>(reg + 1), base);
+ Sw(reg, base, offset);
+ null_checker();
+ Sw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
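+    // For doublewords the null checker already ran right after the first
+    // word store inside the switch above.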
+ if (type != kStoreDoubleword) {
+ null_checker();
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreSToOffset(FRegister reg,
+ Register base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+ Swc1(reg, base, offset);
+ null_checker();
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreDToOffset(FRegister reg,
+ Register base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+ if (IsAligned<kMipsDoublewordSize>(offset)) {
+ Sdc1(reg, base, offset);
+ null_checker();
+ } else {
+ if (Is32BitFPU()) {
+ Swc1(reg, base, offset);
+ null_checker();
+ Swc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
+ } else {
+ // 64-bit FPU.
+ Mfhc1(T8, reg);
+ Swc1(reg, base, offset);
+ null_checker();
+ Sw(T8, base, offset + kMipsWordSize);
+ }
+ }
+ }
+
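+  // Hypothetical usage sketch (the names below are illustrative only, not
+  // part of this change); any callable with operator()() works:
+  //   __ StoreToOffset(kStoreWord, V0, A0, field_offset,
+  //                    [&]() { codegen->RecordPcForImplicitNullCheck(); });
+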
void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
void LoadDFromOffset(FRegister reg, Register base, int32_t offset);
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f1a991574b..f2ef41f400 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1148,6 +1148,23 @@ void X86Assembler::testl(Register reg, const Immediate& immediate) {
}
+void X86Assembler::testb(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF6);
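+  // TEST r/m8, imm8 is encoded as F6 /0; EAX (register number 0) supplies
+  // the /0 opcode extension in the ModRM reg field.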
+ EmitOperand(EAX, dst);
+ CHECK(imm.is_int8());
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
+void X86Assembler::testl(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
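+  // TEST r/m32, imm32 is F7 /0; the literal 0 below is the opcode extension.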
+ EmitOperand(0, dst);
+ EmitImmediate(imm);
+}
+
+
void X86Assembler::andl(Register dst, Register src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x23);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 63aa4a4b8f..2ddcd760dd 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -496,6 +496,9 @@ class X86Assembler FINAL : public Assembler {
void testl(Register reg, const Immediate& imm);
void testl(Register reg1, const Address& address);
+ void testb(const Address& dst, const Immediate& imm);
+ void testl(const Address& dst, const Immediate& imm);
+
void andl(Register dst, const Immediate& imm);
void andl(Register dst, Register src);
void andl(Register dst, const Address& address);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 307e034b76..61d70d714a 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -375,6 +375,42 @@ TEST_F(AssemblerX86Test, CmovlAddress) {
DriverStr(expected, "cmovl_address");
}
+TEST_F(AssemblerX86Test, TestbAddressImmediate) {
+ GetAssembler()->testb(
+ x86::Address(x86::Register(x86::EDI), x86::Register(x86::EBX), x86::TIMES_4, 12),
+ x86::Immediate(1));
+ GetAssembler()->testb(
+ x86::Address(x86::Register(x86::ESP), FrameOffset(7)),
+ x86::Immediate(-128));
+ GetAssembler()->testb(
+ x86::Address(x86::Register(x86::EBX), MemberOffset(130)),
+ x86::Immediate(127));
+ const char* expected =
+ "testb $1, 0xc(%EDI,%EBX,4)\n"
+ "testb $-128, 0x7(%ESP)\n"
+ "testb $127, 0x82(%EBX)\n";
+
+ DriverStr(expected, "TestbAddressImmediate");
+}
+
+TEST_F(AssemblerX86Test, TestlAddressImmediate) {
+ GetAssembler()->testl(
+ x86::Address(x86::Register(x86::EDI), x86::Register(x86::EBX), x86::TIMES_4, 12),
+ x86::Immediate(1));
+ GetAssembler()->testl(
+ x86::Address(x86::Register(x86::ESP), FrameOffset(7)),
+ x86::Immediate(-100000));
+ GetAssembler()->testl(
+ x86::Address(x86::Register(x86::EBX), MemberOffset(130)),
+ x86::Immediate(77777777));
+ const char* expected =
+ "testl $1, 0xc(%EDI,%EBX,4)\n"
+ "testl $-100000, 0x7(%ESP)\n"
+ "testl $77777777, 0x82(%EBX)\n";
+
+ DriverStr(expected, "TestlAddressImmediate");
+}
+
/////////////////
// Near labels //
/////////////////
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index ddc824425e..1f73aa7374 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1389,6 +1389,25 @@ void X86_64Assembler::testq(CpuRegister reg, const Address& address) {
}
+void X86_64Assembler::testb(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
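+  // A REX prefix is emitted only when the address uses extended (r8-r15)
+  // registers.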
+ EmitOptionalRex32(dst);
+ EmitUint8(0xF6);
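+  // As in the x86 version, RAX (register number 0) encodes the /0 opcode
+  // extension for TEST r/m8, imm8.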
+ EmitOperand(Register::RAX, dst);
+ CHECK(imm.is_int8());
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
+void X86_64Assembler::testl(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst);
+ EmitUint8(0xF7);
+ EmitOperand(0, dst);
+ EmitImmediate(imm);
+}
+
+
void X86_64Assembler::andl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index a4166f965d..3a4bfca6b0 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -528,6 +528,9 @@ class X86_64Assembler FINAL : public Assembler {
void testq(CpuRegister reg1, CpuRegister reg2);
void testq(CpuRegister reg, const Address& address);
+ void testb(const Address& address, const Immediate& imm);
+ void testl(const Address& address, const Immediate& imm);
+
void andl(CpuRegister dst, const Immediate& imm);
void andl(CpuRegister dst, CpuRegister src);
void andl(CpuRegister reg, const Address& address);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 36c966b3cf..48a18760f1 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1526,6 +1526,48 @@ TEST_F(AssemblerX86_64Test, Cmpb) {
DriverStr(expected, "cmpb");
}
+TEST_F(AssemblerX86_64Test, TestbAddressImmediate) {
+ GetAssembler()->testb(
+ x86_64::Address(x86_64::CpuRegister(x86_64::RDI),
+ x86_64::CpuRegister(x86_64::RBX),
+ x86_64::TIMES_4,
+ 12),
+ x86_64::Immediate(1));
+ GetAssembler()->testb(
+ x86_64::Address(x86_64::CpuRegister(x86_64::RSP), FrameOffset(7)),
+ x86_64::Immediate(-128));
+ GetAssembler()->testb(
+ x86_64::Address(x86_64::CpuRegister(x86_64::RBX), MemberOffset(130)),
+ x86_64::Immediate(127));
+ const char* expected =
+ "testb $1, 0xc(%RDI,%RBX,4)\n"
+ "testb $-128, 0x7(%RSP)\n"
+ "testb $127, 0x82(%RBX)\n";
+
+ DriverStr(expected, "TestbAddressImmediate");
+}
+
+TEST_F(AssemblerX86_64Test, TestlAddressImmediate) {
+ GetAssembler()->testl(
+ x86_64::Address(x86_64::CpuRegister(x86_64::RDI),
+ x86_64::CpuRegister(x86_64::RBX),
+ x86_64::TIMES_4,
+ 12),
+ x86_64::Immediate(1));
+ GetAssembler()->testl(
+ x86_64::Address(x86_64::CpuRegister(x86_64::RSP), FrameOffset(7)),
+ x86_64::Immediate(-100000));
+ GetAssembler()->testl(
+ x86_64::Address(x86_64::CpuRegister(x86_64::RBX), MemberOffset(130)),
+ x86_64::Immediate(77777777));
+ const char* expected =
+ "testl $1, 0xc(%RDI,%RBX,4)\n"
+ "testl $-100000, 0x7(%RSP)\n"
+ "testl $77777777, 0x82(%RBX)\n";
+
+ DriverStr(expected, "TestlAddressImmediate");
+}
+
class JNIMacroAssemblerX86_64Test : public JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler> {
public:
using Base = JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler>;