path: root/compiler/utils
Diffstat (limited to 'compiler/utils')
-rw-r--r--  compiler/utils/arena_allocator.cc                9
-rw-r--r--  compiler/utils/arena_allocator.h                11
-rw-r--r--  compiler/utils/arm/assembler_arm.cc             30
-rw-r--r--  compiler/utils/arm/assembler_arm.h              41
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc           51
-rw-r--r--  compiler/utils/arm/assembler_arm32.h             7
-rw-r--r--  compiler/utils/arm/assembler_arm32_test.cc      11
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc         110
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h            6
-rw-r--r--  compiler/utils/arm/assembler_thumb2_test.cc     10
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc         12
-rw-r--r--  compiler/utils/x86/assembler_x86.cc             31
-rw-r--r--  compiler/utils/x86/assembler_x86.h               5
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc        88
-rw-r--r--  compiler/utils/x86/constants_x86.h               3
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc       42
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h         4
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc   8
-rw-r--r--  compiler/utils/x86_64/constants_x86_64.h         3
19 files changed, 363 insertions, 119 deletions
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 004af98852..a80ad938a6 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -189,6 +189,15 @@ Arena* ArenaPool::AllocArena(size_t size) {
return ret;
}
+size_t ArenaPool::GetBytesAllocated() const {
+ size_t total = 0;
+ MutexLock lock(Thread::Current(), lock_);
+ for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ total += arena->GetBytesAllocated();
+ }
+ return total;
+}
+
void ArenaPool::FreeArenaChain(Arena* first) {
if (UNLIKELY(RUNNING_ON_VALGRIND > 0)) {
for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 6d213991d3..7f5bc9ac4c 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -135,6 +135,10 @@ class Arena {
return Size() - bytes_allocated_;
}
+ size_t GetBytesAllocated() const {
+ return bytes_allocated_;
+ }
+
private:
size_t bytes_allocated_;
uint8_t* memory_;
@@ -153,11 +157,12 @@ class ArenaPool {
public:
ArenaPool();
~ArenaPool();
- Arena* AllocArena(size_t size);
- void FreeArenaChain(Arena* first);
+ Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
+ void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
+ size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
private:
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
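For context: the new ArenaPool::GetBytesAllocated() walks free_arenas_ under lock_ and sums each arena's bytes_allocated_; lock_ becomes mutable so the const accessor can acquire it. A minimal usage sketch (DumpArenaPoolUsage is hypothetical, not from this change):

    // Hypothetical caller: report how many allocated bytes the pool's free list retains.
    void DumpArenaPoolUsage(std::ostream& os, const ArenaPool& pool) {
      os << "arena pool free list holds " << pool.GetBytesAllocated() << " allocated bytes\n";
    }
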
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 0f28591775..05287732c5 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -165,36 +165,6 @@ uint32_t ShifterOperand::encodingThumb() const {
return 0;
}
-bool ShifterOperand::CanHoldThumb(Register rd, Register rn, Opcode opcode,
- uint32_t immediate, ShifterOperand* shifter_op) {
- shifter_op->type_ = kImmediate;
- shifter_op->immed_ = immediate;
- shifter_op->is_shift_ = false;
- shifter_op->is_rotate_ = false;
- switch (opcode) {
- case ADD:
- case SUB:
- if (rn == SP) {
- if (rd == SP) {
- return immediate < (1 << 9); // 9 bits allowed.
- } else {
- return immediate < (1 << 12); // 12 bits.
- }
- }
- if (immediate < (1 << 12)) { // Less than (or equal to) 12 bits can always be done.
- return true;
- }
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
-
- case MOV:
- // TODO: Support less than or equal to 12bits.
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
- case MVN:
- default:
- return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
- }
-}
-
uint32_t Address::encodingArm() const {
CHECK(IsAbsoluteUint(12, offset_));
uint32_t encoding;
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index d288b700ed..c86ec4b3d6 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -30,6 +30,9 @@
namespace art {
namespace arm {
+class Arm32Assembler;
+class Thumb2Assembler;
+
class ShifterOperand {
public:
ShifterOperand() : type_(kUnknown), rm_(kNoRegister), rs_(kNoRegister),
@@ -103,33 +106,6 @@ class ShifterOperand {
kImmediate
};
- static bool CanHoldArm(uint32_t immediate, ShifterOperand* shifter_op) {
- // Avoid the more expensive test for frequent small immediate values.
- if (immediate < (1 << kImmed8Bits)) {
- shifter_op->type_ = kImmediate;
- shifter_op->is_rotate_ = true;
- shifter_op->rotate_ = 0;
- shifter_op->immed_ = immediate;
- return true;
- }
- // Note that immediate must be unsigned for the test to work correctly.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
- if (imm8 < (1 << kImmed8Bits)) {
- shifter_op->type_ = kImmediate;
- shifter_op->is_rotate_ = true;
- shifter_op->rotate_ = rot;
- shifter_op->immed_ = imm8;
- return true;
- }
- }
- return false;
- }
-
- static bool CanHoldThumb(Register rd, Register rn, Opcode opcode,
- uint32_t immediate, ShifterOperand* shifter_op);
-
-
private:
Type type_;
Register rm_;
@@ -140,6 +116,9 @@ class ShifterOperand {
uint32_t rotate_;
uint32_t immed_;
+ friend class Arm32Assembler;
+ friend class Thumb2Assembler;
+
#ifdef SOURCE_ASSEMBLER_SUPPORT
friend class BinaryAssembler;
#endif
@@ -611,6 +590,14 @@ class ArmAssembler : public Assembler {
virtual void Ror(Register rd, Register rm, Register rn, bool setcc = false,
Condition cond = AL) = 0;
+ // Returns whether the `immediate` can fit in a `ShifterOperand`. If yes,
+ // `shifter_op` contains the operand.
+ virtual bool ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) = 0;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
virtual void Bind(Label* label) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index a541763881..8f6d45ab53 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -25,6 +25,37 @@
namespace art {
namespace arm {
+bool Arm32Assembler::ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op) {
+ // Avoid the more expensive test for frequent small immediate values.
+ if (immediate < (1 << kImmed8Bits)) {
+ shifter_op->type_ = ShifterOperand::kImmediate;
+ shifter_op->is_rotate_ = true;
+ shifter_op->rotate_ = 0;
+ shifter_op->immed_ = immediate;
+ return true;
+ }
+ // Note that immediate must be unsigned for the test to work correctly.
+ for (int rot = 0; rot < 16; rot++) {
+ uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
+ if (imm8 < (1 << kImmed8Bits)) {
+ shifter_op->type_ = ShifterOperand::kImmediate;
+ shifter_op->is_rotate_ = true;
+ shifter_op->rotate_ = rot;
+ shifter_op->immed_ = imm8;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Arm32Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
+ Register rn ATTRIBUTE_UNUSED,
+ Opcode opcode ATTRIBUTE_UNUSED,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) {
+ return ShifterOperandCanHoldArm32(immediate, shifter_op);
+}
+
void Arm32Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
Condition cond) {
EmitType01(cond, so.type(), AND, 0, rn, rd, so);
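For the rotated-immediate test in ShifterOperandCanHoldArm32 above, a worked example (my arithmetic, assuming kImmed8Bits == 8):

    // immediate = 0xFF000000: at rot = 4 the rotate-left by 2*rot = 8 gives imm8 = 0xFF,
    // which is < (1 << 8), so the operand encodes as immed_ = 0xFF with rotate_ = 4
    // (the A32 modified immediate 0xFF ROR 8 == 0xFF000000).
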
@@ -1291,16 +1322,16 @@ void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value,
// positive values and sub for negatives ones, which would slightly improve
// the readability of generated code for some constants.
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
add(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldArm(-value, &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
sub(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldArm(~value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
add(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldArm(~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
sub(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -1318,16 +1349,16 @@ void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value,
void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
adds(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldArm(-value, &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
subs(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldArm(~value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
adds(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldArm(~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
subs(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -1343,9 +1374,9 @@ void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value
void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
+ if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
mov(rd, shifter_op, cond);
- } else if (ShifterOperand::CanHoldArm(~value, &shifter_op)) {
+ } else if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
mvn(rd, shifter_op, cond);
} else {
movw(rd, Low16Bits(value), cond);
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 0b009e16d9..6c8d41587b 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -273,6 +273,12 @@ class Arm32Assembler FINAL : public ArmAssembler {
int32_t offset,
Condition cond = AL) OVERRIDE;
+ bool ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) OVERRIDE;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
@@ -359,6 +365,7 @@ class Arm32Assembler FINAL : public ArmAssembler {
static int DecodeBranchOffset(int32_t inst);
int32_t EncodeTstOffset(int offset, int32_t inst);
int DecodeTstOffset(int32_t inst);
+ bool ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op);
};
} // namespace arm
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index 837fe1ec18..951792d45b 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -49,7 +49,8 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
}
std::string GetAssemblerParameters() OVERRIDE {
- return " -march=armv7-a -mcpu=cortex-a15"; // Arm-v7a, cortex-a15 (means we have sdiv).
+ // Arm-v7a, cortex-a15 (means we have sdiv).
+ return " -march=armv7-a -mcpu=cortex-a15 -mfpu=neon";
}
const char* GetAssemblyHeader() OVERRIDE {
@@ -688,4 +689,12 @@ TEST_F(AssemblerArm32Test, Bx) {
T2Helper(&arm::Arm32Assembler::bx, true, "bx{cond} {reg1}", "bx");
}
+TEST_F(AssemblerArm32Test, Vmstat) {
+ GetAssembler()->vmstat();
+
+ const char* expected = "vmrs APSR_nzcv, FPSCR\n";
+
+ DriverStr(expected, "vmrs");
+}
+
} // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index a377cb2892..479186c5d7 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -25,6 +25,39 @@
namespace art {
namespace arm {
+bool Thumb2Assembler::ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) {
+ shifter_op->type_ = ShifterOperand::kImmediate;
+ shifter_op->immed_ = immediate;
+ shifter_op->is_shift_ = false;
+ shifter_op->is_rotate_ = false;
+ switch (opcode) {
+ case ADD:
+ case SUB:
+ if (rn == SP) {
+ if (rd == SP) {
+ return immediate < (1 << 9); // 9 bits allowed.
+ } else {
+ return immediate < (1 << 12); // 12 bits.
+ }
+ }
+ if (immediate < (1 << 12)) { // Less than (or equal to) 12 bits can always be done.
+ return true;
+ }
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+
+ case MOV:
+ // TODO: Support less than or equal to 12bits.
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+ case MVN:
+ default:
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+ }
+}
+
void Thumb2Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
Condition cond) {
EmitDataProcessing(cond, AND, 0, rn, rd, so);
@@ -374,16 +407,11 @@ void Thumb2Assembler::ldm(BlockAddressMode am,
Register base,
RegList regs,
Condition cond) {
- if (__builtin_popcount(regs) == 1) {
+ CHECK_NE(regs, 0u); // Do not use ldm if there's nothing to load.
+ if (IsPowerOfTwo(regs)) {
// Thumb doesn't support one reg in the list.
// Find the register number.
- int reg = 0;
- while (reg < 16) {
- if ((regs & (1 << reg)) != 0) {
- break;
- }
- ++reg;
- }
+ int reg = CTZ(static_cast<uint32_t>(regs));
CHECK_LT(reg, 16);
CHECK(am == DB_W); // Only writeback is supported.
ldr(static_cast<Register>(reg), Address(base, kRegisterSize, Address::PostIndex), cond);
@@ -397,16 +425,11 @@ void Thumb2Assembler::stm(BlockAddressMode am,
Register base,
RegList regs,
Condition cond) {
- if (__builtin_popcount(regs) == 1) {
+ CHECK_NE(regs, 0u); // Do not use stm if there's nothing to store.
+ if (IsPowerOfTwo(regs)) {
// Thumb doesn't support one reg in the list.
// Find the register number.
- int reg = 0;
- while (reg < 16) {
- if ((regs & (1 << reg)) != 0) {
- break;
- }
- ++reg;
- }
+ int reg = CTZ(static_cast<uint32_t>(regs));
CHECK_LT(reg, 16);
CHECK(am == IA || am == IA_W);
Address::Mode strmode = am == IA ? Address::PreIndex : Address::Offset;
@@ -813,6 +836,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
if (thumb_opcode == 255U /* 0b11111111 */) {
LOG(FATAL) << "Invalid thumb2 opcode " << opcode;
+ UNREACHABLE();
}
int32_t encoding = 0;
@@ -842,6 +866,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
uint32_t imm = ModifiedImmediate(so.encodingThumb());
if (imm == kInvalidModifiedImmediate) {
LOG(FATAL) << "Immediate value cannot fit in thumb2 modified immediate";
+ UNREACHABLE();
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
@@ -979,6 +1004,7 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
if (thumb_opcode == 255U /* 0b11111111 */) {
LOG(FATAL) << "Invalid thumb1 opcode " << opcode;
+ UNREACHABLE();
}
int16_t encoding = dp_opcode << 14 |
@@ -1116,7 +1142,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
break;
default:
LOG(FATAL) << "This opcode is not an ADD or SUB: " << opcode;
- return;
+ UNREACHABLE();
}
int16_t encoding = dp_opcode << 14 |
@@ -1157,6 +1183,7 @@ void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t a
case RRX: opcode = 3U /* 0b11 */; amount = 0; break;
default:
LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
// 32 bit.
int32_t encoding = B31 | B30 | B29 | B27 | B25 | B22 |
@@ -1174,7 +1201,8 @@ void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t a
case LSR: opcode = 1U /* 0b01 */; break;
case ASR: opcode = 2U /* 0b10 */; break;
default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
int16_t encoding = opcode << 11 | amount << 6 | static_cast<int16_t>(rm) << 3 |
static_cast<int16_t>(rd);
@@ -1198,6 +1226,7 @@ void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register
case ROR: opcode = 3U /* 0b11 */; break;
default:
LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
// 32 bit.
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 |
@@ -1212,7 +1241,8 @@ void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register
case LSR: opcode = 3U /* 0b0011 */; break;
case ASR: opcode = 4U /* 0b0100 */; break;
default:
- LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ LOG(FATAL) << "Unsupported thumb2 shift opcode";
+ UNREACHABLE();
}
int16_t encoding = B14 | opcode << 6 | static_cast<int16_t>(rm) << 3 |
static_cast<int16_t>(rd);
@@ -1241,6 +1271,7 @@ void Thumb2Assembler::Branch::Emit(AssemblerBuffer* buffer) const {
} else {
if (x) {
LOG(FATAL) << "Invalid use of BX";
+ UNREACHABLE();
} else {
if (cond_ == AL) {
// Can use the T4 encoding allowing a 24 bit offset.
@@ -1469,6 +1500,15 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
CheckCondition(cond);
bool must_be_32bit = force_32bit_;
+ if (!must_be_32bit && base == SP && bam == (load ? IA_W : DB_W) &&
+ (regs & 0xff00 & ~(1 << (load ? PC : LR))) == 0) {
+ // Use 16-bit PUSH/POP.
+ int16_t encoding = B15 | B13 | B12 | (load ? B11 : 0) | B10 |
+ ((regs & (1 << (load ? PC : LR))) != 0 ? B8 : 0) | (regs & 0x00ff);
+ Emit16(encoding);
+ return;
+ }
+
if ((regs & 0xff00) != 0) {
must_be_32bit = true;
}
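The new fast path above selects the 16-bit T1 PUSH/POP encodings when the base is SP, the addressing mode matches (DB_W for store/push, IA_W for load/pop), and only low registers plus LR (store) or PC (load) are listed. A quick hand-check of the masks (assumed register list {R4-R7}):

    // store: B15|B13|B12|B10               -> 0xB400 | 0x00F0          = 0xB4F0  // push {r4-r7}
    // load:  B15|B13|B12|B11|B10, PC bit   -> 0xBC00 | 0x0100 | 0x00F0 = 0xBDF0  // pop  {r4-r7, pc}
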
@@ -1495,6 +1535,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
case DA_W:
case IB_W:
LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
+ UNREACHABLE();
}
if (load) {
// Cannot have SP in the list.
@@ -1981,8 +2022,13 @@ void Thumb2Assembler::EmitVFPds(Condition cond, int32_t opcode,
void Thumb2Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR.
+ CHECK_NE(cond, kNoCondition);
CheckCondition(cond);
- UNIMPLEMENTED(FATAL) << "Unimplemented thumb instruction";
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
+ (static_cast<int32_t>(PC)*B12) |
+ B11 | B9 | B4;
+ Emit32(encoding);
}
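A worked check of the new vmstat encoding (my arithmetic): with cond == AL (0b1110),

    // (0xE << 28) | B27|B26|B25 | B23|B22|B21|B20 | B16 | (PC << 12) | B11 | B9 | B4
    //   = 0xE0000000 | 0x0E000000 | 0x00F00000 | 0x00010000 | 0x0000F000 | 0x00000A10
    //   = 0xEEF1FA10, i.e. "vmrs APSR_nzcv, FPSCR", which the new tests disassemble and check below.
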
@@ -2068,6 +2114,7 @@ void Thumb2Assembler::cbz(Register rn, Label* label) {
CheckCondition(AL);
if (label->IsBound()) {
LOG(FATAL) << "cbz can only be used to branch forwards";
+ UNREACHABLE();
} else {
uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), false);
label->LinkTo(branchid);
@@ -2079,6 +2126,7 @@ void Thumb2Assembler::cbnz(Register rn, Label* label) {
CheckCondition(AL);
if (label->IsBound()) {
LOG(FATAL) << "cbnz can only be used to branch forwards";
+ UNREACHABLE();
} else {
uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), true);
label->LinkTo(branchid);
@@ -2360,16 +2408,16 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
// positive values and sub for negatives ones, which would slightly improve
// the readability of generated code for some constants.
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldThumb(rd, rn, ADD, value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
add(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, SUB, -value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, SUB, -value, &shifter_op)) {
sub(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
add(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
sub(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -2387,16 +2435,16 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldThumb(rd, rn, ADD, value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
adds(rd, rn, shifter_op, cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, ADD, -value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, ADD, -value, &shifter_op)) {
subs(rd, rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
adds(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperand::CanHoldThumb(rd, rn, MVN, ~(-value), &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
subs(rd, rn, ShifterOperand(IP), cond);
} else {
@@ -2410,11 +2458,12 @@ void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t valu
}
}
+
void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
- if (ShifterOperand::CanHoldThumb(rd, R0, MOV, value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, R0, MOV, value, &shifter_op)) {
mov(rd, shifter_op, cond);
- } else if (ShifterOperand::CanHoldThumb(rd, R0, MVN, ~value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, R0, MVN, ~value, &shifter_op)) {
mvn(rd, shifter_op, cond);
} else {
movw(rd, Low16Bits(value), cond);
@@ -2425,6 +2474,7 @@ void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond)
}
}
+
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffsetThumb.
void Thumb2Assembler::LoadFromOffset(LoadOperandType type,
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index cfa251acf2..48a3a7eeb2 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -304,6 +304,12 @@ class Thumb2Assembler FINAL : public ArmAssembler {
int32_t offset,
Condition cond = AL) OVERRIDE;
+ bool ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) OVERRIDE;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 65d6d45296..6ae95a40e6 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -30,7 +30,7 @@ class AssemblerThumb2Test : public AssemblerTest<arm::Thumb2Assembler,
}
std::string GetAssemblerParameters() OVERRIDE {
- return " -mthumb";
+ return " -mthumb -mfpu=neon";
}
std::string GetDisassembleParameters() OVERRIDE {
@@ -156,4 +156,12 @@ TEST_F(AssemblerThumb2Test, Ubfx) {
DriverStr(expected, "ubfx");
}
+TEST_F(AssemblerThumb2Test, Vmstat) {
+ GetAssembler()->vmstat();
+
+ const char* expected = "vmrs APSR_nzcv, FPSCR\n";
+
+ DriverStr(expected, "vmrs");
+}
+
} // namespace art
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 390f2ea449..21014c8bba 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -329,12 +329,12 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s
if (dst.IsXRegister()) {
if (size == 4) {
CHECK(src.IsWRegister());
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
} else {
if (src.IsXRegister()) {
___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
} else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
}
}
} else if (dst.IsWRegister()) {
@@ -484,9 +484,9 @@ void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
@@ -495,9 +495,9 @@ void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index a297ea3b6e..f0353f6cd2 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -613,6 +613,23 @@ void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
}
+void X86Assembler::ucomiss(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::ucomisd(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
@@ -1318,13 +1335,19 @@ void X86Assembler::AddImmediate(Register reg, const Immediate& imm) {
}
+void X86Assembler::LoadLongConstant(XmmRegister dst, int64_t value) {
+ // TODO: Need to have a code constants table.
+ pushl(Immediate(High32Bits(value)));
+ pushl(Immediate(Low32Bits(value)));
+ movsd(dst, Address(ESP, 0));
+ addl(ESP, Immediate(2 * sizeof(int32_t)));
+}
+
+
void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
// TODO: Need to have a code constants table.
int64_t constant = bit_cast<int64_t, double>(value);
- pushl(Immediate(High32Bits(constant)));
- pushl(Immediate(Low32Bits(constant)));
- movsd(dst, Address(ESP, 0));
- addl(ESP, Immediate(2 * sizeof(intptr_t)));
+ LoadLongConstant(dst, constant);
}
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 6ea66a5fa7..9fecf1edf0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -42,8 +42,6 @@ class Immediate : public ValueObject {
private:
const int32_t value_;
-
- DISALLOW_COPY_AND_ASSIGN(Immediate);
};
@@ -301,6 +299,8 @@ class X86Assembler FINAL : public Assembler {
void comiss(XmmRegister a, XmmRegister b);
void comisd(XmmRegister a, XmmRegister b);
+ void ucomiss(XmmRegister a, XmmRegister b);
+ void ucomisd(XmmRegister a, XmmRegister b);
void sqrtsd(XmmRegister dst, XmmRegister src);
void sqrtss(XmmRegister dst, XmmRegister src);
@@ -441,6 +441,7 @@ class X86Assembler FINAL : public Assembler {
void AddImmediate(Register reg, const Immediate& imm);
+ void LoadLongConstant(XmmRegister dst, int64_t value);
void LoadDoubleConstant(XmmRegister dst, double value);
void DoubleNegate(XmmRegister d);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 5d8a3b1521..d901673691 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -16,7 +16,8 @@
#include "assembler_x86.h"
-#include "gtest/gtest.h"
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
namespace art {
@@ -29,4 +30,89 @@ TEST(AssemblerX86, CreateBuffer) {
ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
}
+class AssemblerX86Test : public AssemblerTest<x86::X86Assembler, x86::Register,
+ x86::XmmRegister, x86::Immediate> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "x86";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " --32";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mi386 --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new x86::Register(x86::EAX),
+ new x86::Register(x86::EBX),
+ new x86::Register(x86::ECX),
+ new x86::Register(x86::EDX),
+ new x86::Register(x86::EBP),
+ new x86::Register(x86::ESP),
+ new x86::Register(x86::ESI),
+ new x86::Register(x86::EDI)
+ });
+ }
+
+ if (fp_registers_.size() == 0) {
+ fp_registers_.insert(end(fp_registers_),
+ { // NOLINT(whitespace/braces)
+ new x86::XmmRegister(x86::XMM0),
+ new x86::XmmRegister(x86::XMM1),
+ new x86::XmmRegister(x86::XMM2),
+ new x86::XmmRegister(x86::XMM3),
+ new x86::XmmRegister(x86::XMM4),
+ new x86::XmmRegister(x86::XMM5),
+ new x86::XmmRegister(x86::XMM6),
+ new x86::XmmRegister(x86::XMM7)
+ });
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ STLDeleteElements(&fp_registers_);
+ }
+
+ std::vector<x86::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ std::vector<x86::XmmRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
+ x86::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ return x86::Immediate(imm_value);
+ }
+
+ private:
+ std::vector<x86::Register*> registers_;
+ std::vector<x86::XmmRegister*> fp_registers_;
+};
+
+
+TEST_F(AssemblerX86Test, Movl) {
+ GetAssembler()->movl(x86::EAX, x86::EBX);
+ const char* expected = "mov %ebx, %eax\n";
+ DriverStr(expected, "movl");
+}
+
+TEST_F(AssemblerX86Test, LoadLongConstant) {
+ GetAssembler()->LoadLongConstant(x86::XMM0, 51);
+ const char* expected =
+ "push $0x0\n"
+ "push $0x33\n"
+ "movsd 0(%esp), %xmm0\n"
+ "add $8, %esp\n";
+ DriverStr(expected, "LoadLongConstant");
+}
+
} // namespace art
diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h
index 45c3834a98..2dfb65c479 100644
--- a/compiler/utils/x86/constants_x86.h
+++ b/compiler/utils/x86/constants_x86.h
@@ -96,7 +96,8 @@ enum Condition {
kZero = kEqual,
kNotZero = kNotEqual,
kNegative = kSign,
- kPositive = kNotSign
+ kPositive = kNotSign,
+ kUnordered = kParityEven
};
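kUnordered aliases kParityEven because ucomiss/ucomisd report an unordered comparison (a NaN operand) by setting PF = 1 (together with ZF = CF = 1). A hypothetical caller branching on it (illustrative only; the same alias is added for x86_64 below):

    // assembler->ucomisd(xmm0, xmm1);
    // assembler->j(x86::kUnordered, &is_nan);  // taken iff at least one operand was NaN
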
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index dff3849076..474d8a909e 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -593,9 +593,19 @@ void X86_64Assembler::divsd(XmmRegister dst, const Address& src) {
void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src) {
+ cvtsi2ss(dst, src, false);
+}
+
+
+void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
- EmitOptionalRex32(dst, src);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
EmitUint8(0x0F);
EmitUint8(0x2A);
EmitOperand(dst.LowBits(), Operand(src));
@@ -603,9 +613,19 @@ void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src) {
void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src) {
+ cvtsi2sd(dst, src, false);
+}
+
+
+void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
- EmitOptionalRex32(dst, src);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
EmitUint8(0x0F);
EmitUint8(0x2A);
EmitOperand(dst.LowBits(), Operand(src));
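The new three-argument overloads let callers request the 64-bit source form: passing is64bit = true emits a REX.W prefix, i.e. the cvtsi2ssq/cvtsi2sdq forms in AT&T syntax. A hypothetical use:

    // assembler->cvtsi2sd(xmm0, rax, /* is64bit= */ true);  // cvtsi2sdq %rax, %xmm0
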
@@ -700,6 +720,24 @@ void X86_64Assembler::comisd(XmmRegister a, XmmRegister b) {
EmitXmmRegisterOperand(a.LowBits(), b);
}
+void X86_64Assembler::ucomiss(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a.LowBits(), b);
+}
+
+
+void X86_64Assembler::ucomisd(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitXmmRegisterOperand(a.LowBits(), b);
+}
+
void X86_64Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index ab1bc9e97d..6e71e4a5bb 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -329,7 +329,9 @@ class X86_64Assembler FINAL : public Assembler {
void divsd(XmmRegister dst, const Address& src);
void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
+ void cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit);
void cvtsi2sd(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
+ void cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit);
void cvtss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtss2sd(XmmRegister dst, XmmRegister src);
@@ -344,6 +346,8 @@ class X86_64Assembler FINAL : public Assembler {
void comiss(XmmRegister a, XmmRegister b);
void comisd(XmmRegister a, XmmRegister b);
+ void ucomiss(XmmRegister a, XmmRegister b);
+ void ucomisd(XmmRegister a, XmmRegister b);
void sqrtsd(XmmRegister dst, XmmRegister src);
void sqrtss(XmmRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 14a98b9359..c8e923c9d6 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -660,6 +660,14 @@ TEST_F(AssemblerX86_64Test, Comisd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::comisd, "comisd %{reg2}, %{reg1}"), "comisd");
}
+TEST_F(AssemblerX86_64Test, Ucomiss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::ucomiss, "ucomiss %{reg2}, %{reg1}"), "ucomiss");
+}
+
+TEST_F(AssemblerX86_64Test, Ucomisd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::ucomisd, "ucomisd %{reg2}, %{reg1}"), "ucomisd");
+}
+
TEST_F(AssemblerX86_64Test, Sqrtss) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::sqrtss, "sqrtss %{reg2}, %{reg1}"), "sqrtss");
}
diff --git a/compiler/utils/x86_64/constants_x86_64.h b/compiler/utils/x86_64/constants_x86_64.h
index 2a5b43da46..0c782d46cd 100644
--- a/compiler/utils/x86_64/constants_x86_64.h
+++ b/compiler/utils/x86_64/constants_x86_64.h
@@ -105,7 +105,8 @@ enum Condition {
kZero = kEqual,
kNotZero = kNotEqual,
kNegative = kSign,
- kPositive = kNotSign
+ kPositive = kNotSign,
+ kUnordered = kParityEven
};