Optimizing/ARM: Implement kDexCachePcRelative dispatch.
Change-Id: I0fe2da50a30a3f62bec8ea01688dd1fec84b1831
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 7ad5b44..cdeb443 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2569,30 +2569,19 @@
void Thumb2Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
CheckCondition(cond);
- bool must_be_32bit = force_32bit_;
- if (IsHighRegister(rd) || imm16 >= 256u) {
- must_be_32bit = true;
- }
-
- if (must_be_32bit) {
- // Use encoding T3.
- uint32_t imm4 = (imm16 >> 12) & 15U /* 0b1111 */;
- uint32_t i = (imm16 >> 11) & 1U /* 0b1 */;
- uint32_t imm3 = (imm16 >> 8) & 7U /* 0b111 */;
- uint32_t imm8 = imm16 & 0xff;
- int32_t encoding = B31 | B30 | B29 | B28 |
- B25 | B22 |
- static_cast<uint32_t>(rd) << 8 |
- i << 26 |
- imm4 << 16 |
- imm3 << 12 |
- imm8;
- Emit32(encoding);
- } else {
- int16_t encoding = B13 | static_cast<uint16_t>(rd) << 8 |
- imm16;
- Emit16(encoding);
- }
+ // Always 32 bits, encoding T3. (Other encodings are called MOV, not MOVW.)
+ uint32_t imm4 = (imm16 >> 12) & 15U /* 0b1111 */;
+ uint32_t i = (imm16 >> 11) & 1U /* 0b1 */;
+ uint32_t imm3 = (imm16 >> 8) & 7U /* 0b111 */;
+ uint32_t imm8 = imm16 & 0xff;
+ int32_t encoding = B31 | B30 | B29 | B28 |
+ B25 | B22 |
+ static_cast<uint32_t>(rd) << 8 |
+ i << 26 |
+ imm4 << 16 |
+ imm3 << 12 |
+ imm8;
+ Emit32(encoding);
}
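
For reference, here is a minimal standalone sketch (not part of the patch) of the encoding T3 bit packing that the new movw() path always emits. The EncodeMovwT3 helper and the main() harness are illustrative only; the bit fields mirror the code above, and the expected words are taken from the updated MovWMovTResults entries further down.

// Standalone sketch of the Thumb2 MOVW (encoding T3) bit packing.
// Not ART code; helper and values are for illustration.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Pack rd and imm16 into the 32-bit word 11110 i 10 0100 imm4 | 0 imm3 rd imm8.
uint32_t EncodeMovwT3(uint32_t rd, uint16_t imm16) {
  uint32_t imm4 = (imm16 >> 12) & 0xf;
  uint32_t i    = (imm16 >> 11) & 0x1;
  uint32_t imm3 = (imm16 >> 8) & 0x7;
  uint32_t imm8 = imm16 & 0xff;
  // 0xf2400000 corresponds to B31 | B30 | B29 | B28 | B25 | B22 above.
  return 0xf2400000u | (i << 26) | (imm4 << 16) | (imm3 << 12) | (rd << 8) | imm8;
}

int main() {
  // Values match the updated MovWMovTResults disassembly below.
  assert(EncodeMovwT3(4, 0x0000) == 0xf2400400u);  // movw r4, #0
  assert(EncodeMovwT3(9, 0x0034) == 0xf2400934u);  // movw r9, #52
  assert(EncodeMovwT3(3, 0x1234) == 0xf2412334u);  // movw r3, #4660
  assert(EncodeMovwT3(9, 0xffff) == 0xf64f79ffu);  // movw r9, #65535
  printf("all MOVW T3 encodings match\n");
  return 0;
}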
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 1de51a2..5ae2cc2 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -832,11 +832,12 @@
TEST(Thumb2AssemblerTest, MovWMovT) {
arm::Thumb2Assembler assembler;
- __ movw(R4, 0); // 16 bit.
- __ movw(R4, 0x34); // 16 bit.
- __ movw(R9, 0x34); // 32 bit due to high register.
- __ movw(R3, 0x1234); // 32 bit due to large value.
- __ movw(R9, 0xffff); // 32 bit due to large value and high register.
+ // Always 32 bit.
+ __ movw(R4, 0);
+ __ movw(R4, 0x34);
+ __ movw(R9, 0x34);
+ __ movw(R3, 0x1234);
+ __ movw(R9, 0xffff);
// Always 32 bit.
__ movt(R0, 0);
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 9246c82..886295e 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -439,14 +439,14 @@
nullptr
};
const char* MovWMovTResults[] = {
- " 0: 2400 movs r4, #0\n",
- " 2: 2434 movs r4, #52 ; 0x34\n",
- " 4: f240 0934 movw r9, #52 ; 0x34\n",
- " 8: f241 2334 movw r3, #4660 ; 0x1234\n",
- " c: f64f 79ff movw r9, #65535 ; 0xffff\n",
- " 10: f2c0 0000 movt r0, #0\n",
- " 14: f2c1 2034 movt r0, #4660 ; 0x1234\n",
- " 18: f6cf 71ff movt r1, #65535 ; 0xffff\n",
+ " 0: f240 0400 movw r4, #0\n",
+ " 4: f240 0434 movw r4, #52 ; 0x34\n",
+ " 8: f240 0934 movw r9, #52 ; 0x34\n",
+ " c: f241 2334 movw r3, #4660 ; 0x1234\n",
+ " 10: f64f 79ff movw r9, #65535 ; 0xffff\n",
+ " 14: f2c0 0000 movt r0, #0\n",
+ " 18: f2c1 2034 movt r0, #4660 ; 0x1234\n",
+ " 1c: f6cf 71ff movt r1, #65535 ; 0xffff\n",
nullptr
};
const char* SpecialAddSubResults[] = {
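
The offset shift in the expected output follows from the size change: the first two movw calls used to be emitted as 2-byte 16-bit MOV immediate (encoding T1, disassembled as "movs"), so every later instruction now starts 4 bytes further on. A small sketch, with an illustrative EncodeMovT1 helper, reproducing the two removed "movs" words and the new movt offset:

// Standalone sketch (not ART code): the old 16-bit encoding T1 words and the
// offset shift caused by always emitting 4-byte MOVW instructions.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Old path: 16-bit MOV (immediate), encoding T1: 00100 rd imm8.
uint16_t EncodeMovT1(uint16_t rd, uint8_t imm8) {  // illustrative helper name
  return static_cast<uint16_t>((1u << 13) | (rd << 8) | imm8);
}

int main() {
  // The two entries previously listed as "movs" in MovWMovTResults.
  assert(EncodeMovT1(4, 0x00) == 0x2400u);  // 0: 2400  movs r4, #0
  assert(EncodeMovT1(4, 0x34) == 0x2434u);  // 2: 2434  movs r4, #52
  // Old layout: 2 + 2 bytes for the first two movs, then 4-byte movw;
  // new layout: every movw is 4 bytes, so later offsets grow by 4.
  int old_movt0_offset = 2 + 2 + 4 + 4 + 4;  // 0x10 in the old listing
  int new_movt0_offset = 4 + 4 + 4 + 4 + 4;  // 0x14 in the new listing
  assert(old_movt0_offset == 0x10 && new_movt0_offset == 0x14);
  printf("movt r0, #0 moves from 0x%x to 0x%x\n", old_movt0_offset, new_movt0_offset);
  return 0;
}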