| field | value |
|---|---|
| author | 2016-03-30 14:01:24 +0000 |
| committer | 2016-03-30 14:01:24 +0000 |
| commit | 0b92b73c62ab4cc13ca040d4233fa69d182971cc (patch) |
| tree | df7d6180b13430de8f255884b12f29f191d1468e /compiler/linker |
| parent | 01098af7a530ca65dcf5a40b3799c5147e49bd64 (diff) |
| parent | 085055f933d76579c32586488951a4497ffcf10e (diff) |
Merge "Optimizing: Improve const-string code generation."
am: 085055f
* commit '085055f933d76579c32586488951a4497ffcf10e':
Optimizing: Improve const-string code generation.
Change-Id: Idd5a215e18abba1e2161f1848cb08aefb4719cf0
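
The merged change generalizes the linkers' PatchDexCacheReference hook into PatchPcRelativeReference so the same path patches both dex-cache-array loads and the new PC-relative String references. On arm64 the improved const-string code is an ADRP followed by an ADD immediate; the sketch below mirrors the encoding the updated tests expect (base opcodes and bit positions are taken from the diff, the x0/x1 register choice copies the test helper, and the displacement is assumed to fit the ADRP range).

```cpp
#include <cstdint>
#include <utility>

// Encode the "ADRP x0, <page>" / "ADD x1, x0, #<pageoff>" pair used for a
// PC-relative string reference, mirroring the arm64 test helper in the diff.
// disp = target_offset - (adrp_offset & ~0xfffu), assumed to fit the encoding.
std::pair<uint32_t, uint32_t> EncodeAdrpAdd(uint32_t disp) {
  uint32_t adrp = 0x90000000u |            // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64)
      ((disp & 0x3000u) << (29 - 12)) |    // immlo = disp bits 12-13, placed at bits 29-30.
      ((disp & 0xffffc000u) >> (14 - 5));  // immhi = disp bits 14+, placed starting at bit 5.
  uint32_t add = 0x91000000u |             // ADD (immediate), 64-bit, no shift.
      ((disp & 0xfffu) << 10) |            // imm12 = low 12 bits of disp, at bit 10.
      (0u << 5) |                          // Rn = x0, the ADRP result.
      1u;                                  // Rd = x1.
  return {adrp, add};
}
```

The dex-cache-array case keeps using an LDR with a scaled imm12 in place of the ADD, which is why PatchPcRelativeReference now distinguishes the two cases by inspecting the instruction that consumes the ADRP result.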
Diffstat (limited to 'compiler/linker')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/linker/arm/relative_patcher_thumb2.cc | 8 |
| -rw-r--r-- | compiler/linker/arm/relative_patcher_thumb2.h | 8 |
| -rw-r--r-- | compiler/linker/arm/relative_patcher_thumb2_test.cc | 144 |
| -rw-r--r-- | compiler/linker/arm64/relative_patcher_arm64.cc | 45 |
| -rw-r--r-- | compiler/linker/arm64/relative_patcher_arm64.h | 8 |
| -rw-r--r-- | compiler/linker/arm64/relative_patcher_arm64_test.cc | 362 |
| -rw-r--r-- | compiler/linker/multi_oat_relative_patcher.h | 10 |
| -rw-r--r-- | compiler/linker/multi_oat_relative_patcher_test.cc | 10 |
| -rw-r--r-- | compiler/linker/relative_patcher.cc | 8 |
| -rw-r--r-- | compiler/linker/relative_patcher.h | 8 |
| -rw-r--r-- | compiler/linker/relative_patcher_test.h | 18 |
| -rw-r--r-- | compiler/linker/x86/relative_patcher_x86.cc | 8 |
| -rw-r--r-- | compiler/linker/x86/relative_patcher_x86.h | 8 |
| -rw-r--r-- | compiler/linker/x86/relative_patcher_x86_test.cc | 56 |
| -rw-r--r-- | compiler/linker/x86_64/relative_patcher_x86_64.cc | 8 |
| -rw-r--r-- | compiler/linker/x86_64/relative_patcher_x86_64.h | 8 |
| -rw-r--r-- | compiler/linker/x86_64/relative_patcher_x86_64_test.cc | 58 |
17 files changed, 570 insertions, 205 deletions
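
On Thumb2 the equivalent sequence is MOVW/MOVT/ADD-pc, and the reworked test helper distributes the 32-bit PC-relative displacement between the MOVW (low half) and MOVT (high half) immediates. The sketch below mirrors that bit layout; the opcodes and field positions come from the diff, while the offsets in main() are hypothetical.

```cpp
#include <cstdint>
#include <cstdio>
#include <utility>

// Split a 32-bit displacement into the Thumb2 MOVW/MOVT imm16 fields
// (imm4:i:imm3:imm8) for "MOVW r0, #lo16" / "MOVT r0, #hi16".
std::pair<uint32_t, uint32_t> EncodeMovwMovt(uint32_t diff) {
  auto encode_imm16 = [](uint32_t base, uint32_t imm16) {
    return base |
        ((imm16 & 0xf000u) << (16 - 12)) |  // imm4: bits 12-15 -> bits 16-19.
        ((imm16 & 0x0800u) << (26 - 11)) |  // i:    bit 11     -> bit 26.
        ((imm16 & 0x0700u) << (12 - 8)) |   // imm3: bits 8-10  -> bits 12-14.
        (imm16 & 0x00ffu);                  // imm8: bits 0-7 stay in place.
  };
  uint32_t movw = encode_imm16(0xf2400000u, diff & 0xffffu);  // MOVW r0, #lo16
  uint32_t movt = encode_imm16(0xf2c00000u, diff >> 16);      // MOVT r0, #hi16
  return {movw, movt};
}

int main() {
  // Hypothetical layout: MOVW at file offset 0x1020, ADD r0, pc at +8, and the
  // Thumb PC adjustment of +4, so the PC base is 0x102c; string at 0x00ff00fc.
  uint32_t target_offset = 0x00ff00fcu;
  uint32_t pc_base = 0x102cu;
  auto [movw, movt] = EncodeMovwMovt(target_offset - pc_base);
  std::printf("MOVW: %08x  MOVT: %08x\n", movw, movt);
  return 0;
}
```

The +4 PC adjustment reflects Thumb2's PC reading as the address of the current instruction plus 4, matching the "/* PC adjustment */" comments in the diff.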
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc index c090dffc55..582ecb3bbf 100644 --- a/compiler/linker/arm/relative_patcher_thumb2.cc +++ b/compiler/linker/arm/relative_patcher_thumb2.cc @@ -56,10 +56,10 @@ void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code, SetInsn32(code, literal_offset, value); } -void Thumb2RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) { +void Thumb2RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) { uint32_t literal_offset = patch.LiteralOffset(); uint32_t pc_literal_offset = patch.PcInsnOffset(); uint32_t pc_base = patch_offset + (pc_literal_offset - literal_offset) + 4u /* PC adjustment */; diff --git a/compiler/linker/arm/relative_patcher_thumb2.h b/compiler/linker/arm/relative_patcher_thumb2.h index 0d903c0b41..d85739c51f 100644 --- a/compiler/linker/arm/relative_patcher_thumb2.h +++ b/compiler/linker/arm/relative_patcher_thumb2.h @@ -30,10 +30,10 @@ class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher { uint32_t literal_offset, uint32_t patch_offset, uint32_t target_offset) OVERRIDE; - void PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) OVERRIDE; + void PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) OVERRIDE; private: static std::vector<uint8_t> CompileThunkCode(); diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc index a259cda986..a8078e3049 100644 --- a/compiler/linker/arm/relative_patcher_thumb2_test.cc +++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc @@ -30,6 +30,9 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest { static const ArrayRef<const uint8_t> kCallCode; static const uint8_t kNopRawCode[]; static const ArrayRef<const uint8_t> kNopCode; + static const uint8_t kUnpatchedPcRelativeRawCode[]; + static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode; + static const uint32_t kPcInsnOffset; // Branches within range [-256, 256) can be created from these by adding the low 8 bits. 
static constexpr uint32_t kBlPlus0 = 0xf000f800; @@ -123,47 +126,9 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest { return result; } - void TestDexCachereference(uint32_t dex_cache_arrays_begin, uint32_t element_offset) { - dex_cache_arrays_begin_ = dex_cache_arrays_begin; - static const uint8_t raw_code[] = { - 0x40, 0xf2, 0x00, 0x00, // MOVW r0, #0 (placeholder) - 0xc0, 0xf2, 0x00, 0x00, // MOVT r0, #0 (placeholder) - 0x78, 0x44, // ADD r0, pc - }; - constexpr uint32_t pc_insn_offset = 8u; - const ArrayRef<const uint8_t> code(raw_code); - LinkerPatch patches[] = { - LinkerPatch::DexCacheArrayPatch(0u, nullptr, pc_insn_offset, element_offset), - LinkerPatch::DexCacheArrayPatch(4u, nullptr, pc_insn_offset, element_offset), - }; - AddCompiledMethod(MethodRef(1u), code, ArrayRef<const LinkerPatch>(patches)); - Link(); - - uint32_t method1_offset = GetMethodOffset(1u); - uint32_t pc_base_offset = method1_offset + pc_insn_offset + 4u /* PC adjustment */; - uint32_t diff = dex_cache_arrays_begin_ + element_offset - pc_base_offset; - // Distribute the bits of the diff between the MOVW and MOVT: - uint32_t diffw = diff & 0xffffu; - uint32_t difft = diff >> 16; - uint32_t movw = 0xf2400000u | // MOVW r0, #0 (placeholder), - ((diffw & 0xf000u) << (16 - 12)) | // move imm4 from bits 12-15 to bits 16-19, - ((diffw & 0x0800u) << (26 - 11)) | // move imm from bit 11 to bit 26, - ((diffw & 0x0700u) << (12 - 8)) | // move imm3 from bits 8-10 to bits 12-14, - ((diffw & 0x00ffu)); // keep imm8 at bits 0-7. - uint32_t movt = 0xf2c00000u | // MOVT r0, #0 (placeholder), - ((difft & 0xf000u) << (16 - 12)) | // move imm4 from bits 12-15 to bits 16-19, - ((difft & 0x0800u) << (26 - 11)) | // move imm from bit 11 to bit 26, - ((difft & 0x0700u) << (12 - 8)) | // move imm3 from bits 8-10 to bits 12-14, - ((difft & 0x00ffu)); // keep imm8 at bits 0-7. 
- const uint8_t expected_code[] = { - static_cast<uint8_t>(movw >> 16), static_cast<uint8_t>(movw >> 24), - static_cast<uint8_t>(movw >> 0), static_cast<uint8_t>(movw >> 8), - static_cast<uint8_t>(movt >> 16), static_cast<uint8_t>(movt >> 24), - static_cast<uint8_t>(movt >> 0), static_cast<uint8_t>(movt >> 8), - 0x78, 0x44, - }; - EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); - } + void TestDexCacheReference(uint32_t dex_cache_arrays_begin, uint32_t element_offset); + void TestStringReference(uint32_t string_offset); + void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset); }; const uint8_t Thumb2RelativePatcherTest::kCallRawCode[] = { @@ -178,6 +143,67 @@ const uint8_t Thumb2RelativePatcherTest::kNopRawCode[] = { const ArrayRef<const uint8_t> Thumb2RelativePatcherTest::kNopCode(kNopRawCode); +const uint8_t Thumb2RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = { + 0x40, 0xf2, 0x00, 0x00, // MOVW r0, #0 (placeholder) + 0xc0, 0xf2, 0x00, 0x00, // MOVT r0, #0 (placeholder) + 0x78, 0x44, // ADD r0, pc +}; +const ArrayRef<const uint8_t> Thumb2RelativePatcherTest::kUnpatchedPcRelativeCode( + kUnpatchedPcRelativeRawCode); +const uint32_t Thumb2RelativePatcherTest::kPcInsnOffset = 8u; + +void Thumb2RelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_arrays_begin, + uint32_t element_offset) { + dex_cache_arrays_begin_ = dex_cache_arrays_begin; + LinkerPatch patches[] = { + LinkerPatch::DexCacheArrayPatch(0u, nullptr, kPcInsnOffset, element_offset), + LinkerPatch::DexCacheArrayPatch(4u, nullptr, kPcInsnOffset, element_offset), + }; + CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), + dex_cache_arrays_begin_ + element_offset); +} + +void Thumb2RelativePatcherTest::TestStringReference(uint32_t string_offset) { + constexpr uint32_t kStringIndex = 1u; + string_index_to_offset_map_.Put(kStringIndex, string_offset); + LinkerPatch patches[] = { + LinkerPatch::RelativeStringPatch(0u, nullptr, kPcInsnOffset, kStringIndex), + LinkerPatch::RelativeStringPatch(4u, nullptr, kPcInsnOffset, kStringIndex), + }; + CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset); +} + +void Thumb2RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, + uint32_t target_offset) { + AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches)); + Link(); + + uint32_t method1_offset = GetMethodOffset(1u); + uint32_t pc_base_offset = method1_offset + kPcInsnOffset + 4u /* PC adjustment */; + uint32_t diff = target_offset - pc_base_offset; + // Distribute the bits of the diff between the MOVW and MOVT: + uint32_t diffw = diff & 0xffffu; + uint32_t difft = diff >> 16; + uint32_t movw = 0xf2400000u | // MOVW r0, #0 (placeholder), + ((diffw & 0xf000u) << (16 - 12)) | // move imm4 from bits 12-15 to bits 16-19, + ((diffw & 0x0800u) << (26 - 11)) | // move imm from bit 11 to bit 26, + ((diffw & 0x0700u) << (12 - 8)) | // move imm3 from bits 8-10 to bits 12-14, + ((diffw & 0x00ffu)); // keep imm8 at bits 0-7. + uint32_t movt = 0xf2c00000u | // MOVT r0, #0 (placeholder), + ((difft & 0xf000u) << (16 - 12)) | // move imm4 from bits 12-15 to bits 16-19, + ((difft & 0x0800u) << (26 - 11)) | // move imm from bit 11 to bit 26, + ((difft & 0x0700u) << (12 - 8)) | // move imm3 from bits 8-10 to bits 12-14, + ((difft & 0x00ffu)); // keep imm8 at bits 0-7. 
+ const uint8_t expected_code[] = { + static_cast<uint8_t>(movw >> 16), static_cast<uint8_t>(movw >> 24), + static_cast<uint8_t>(movw >> 0), static_cast<uint8_t>(movw >> 8), + static_cast<uint8_t>(movt >> 16), static_cast<uint8_t>(movt >> 24), + static_cast<uint8_t>(movt >> 0), static_cast<uint8_t>(movt >> 8), + 0x78, 0x44, + }; + EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); +} + TEST_F(Thumb2RelativePatcherTest, CallSelf) { LinkerPatch patches[] = { LinkerPatch::RelativeCodePatch(0u, nullptr, 1u), @@ -366,23 +392,43 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarBefore) { EXPECT_TRUE(CheckThunk(thunk_offset)); } -TEST_F(Thumb2RelativePatcherTest, DexCacheReferenceImm8) { - TestDexCachereference(0x00ff0000u, 0x00fcu); +TEST_F(Thumb2RelativePatcherTest, DexCacheReference1) { + TestDexCacheReference(0x00ff0000u, 0x00fcu); + ASSERT_LT(GetMethodOffset(1u), 0xfcu); +} + +TEST_F(Thumb2RelativePatcherTest, DexCacheReference2) { + TestDexCacheReference(0x02ff0000u, 0x05fcu); + ASSERT_LT(GetMethodOffset(1u), 0xfcu); +} + +TEST_F(Thumb2RelativePatcherTest, DexCacheReference3) { + TestDexCacheReference(0x08ff0000u, 0x08fcu); + ASSERT_LT(GetMethodOffset(1u), 0xfcu); +} + +TEST_F(Thumb2RelativePatcherTest, DexCacheReference4) { + TestDexCacheReference(0xd0ff0000u, 0x60fcu); + ASSERT_LT(GetMethodOffset(1u), 0xfcu); +} + +TEST_F(Thumb2RelativePatcherTest, StringReference1) { + TestStringReference(0x00ff00fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } -TEST_F(Thumb2RelativePatcherTest, DexCacheReferenceImm3) { - TestDexCachereference(0x02ff0000u, 0x05fcu); +TEST_F(Thumb2RelativePatcherTest, StringReference2) { + TestStringReference(0x02ff05fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } -TEST_F(Thumb2RelativePatcherTest, DexCacheReferenceImm) { - TestDexCachereference(0x08ff0000u, 0x08fcu); +TEST_F(Thumb2RelativePatcherTest, StringReference3) { + TestStringReference(0x08ff08fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } -TEST_F(Thumb2RelativePatcherTest, DexCacheReferenceimm4) { - TestDexCachereference(0xd0ff0000u, 0x60fcu); +TEST_F(Thumb2RelativePatcherTest, StringReference4) { + TestStringReference(0xd0ff60fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc index a81c85c707..0549327e7a 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.cc +++ b/compiler/linker/arm64/relative_patcher_arm64.cc @@ -28,6 +28,16 @@ namespace art { namespace linker { +namespace { + +inline bool IsAdrpPatch(const LinkerPatch& patch) { + LinkerPatchType type = patch.Type(); + return (type == kLinkerPatchStringRelative || type == kLinkerPatchDexCacheArray) && + patch.LiteralOffset() == patch.PcInsnOffset(); +} + +} // anonymous namespace + Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider, const Arm64InstructionSetFeatures* features) : ArmBaseRelativePatcher(provider, kArm64, CompileThunkCode(), @@ -61,8 +71,7 @@ uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset, size_t num_adrp = 0u; DCHECK(compiled_method != nullptr); for (const LinkerPatch& patch : compiled_method->GetPatches()) { - if (patch.Type() == kLinkerPatchDexCacheArray && - patch.LiteralOffset() == patch.PcInsnOffset()) { // ADRP patch + if (IsAdrpPatch(patch)) { ++num_adrp; } } @@ -78,8 +87,7 @@ uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset, uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size()); DCHECK(compiled_method != 
nullptr); for (const LinkerPatch& patch : compiled_method->GetPatches()) { - if (patch.Type() == kLinkerPatchDexCacheArray && - patch.LiteralOffset() == patch.PcInsnOffset()) { // ADRP patch + if (IsAdrpPatch(patch)) { uint32_t patch_offset = quick_code_offset + patch.LiteralOffset(); if (NeedsErratum843419Thunk(code, patch.LiteralOffset(), patch_offset)) { adrp_thunk_locations_.emplace_back(patch_offset, thunk_offset); @@ -151,10 +159,10 @@ void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code, SetInsn(code, literal_offset, insn); } -void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) { +void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) { DCHECK_EQ(patch_offset & 3u, 0u); DCHECK_EQ(target_offset & 3u, 0u); uint32_t literal_offset = patch.LiteralOffset(); @@ -199,8 +207,15 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, // Write the new ADRP (or B to the erratum 843419 thunk). SetInsn(code, literal_offset, insn); } else { - // LDR 32-bit or 64-bit with imm12 == 0 (unset). - DCHECK_EQ(insn & 0xbffffc00, 0xb9400000) << insn; + if ((insn & 0xfffffc00) == 0x91000000) { + // ADD immediate, 64-bit with imm12 == 0 (unset). + DCHECK(patch.Type() == kLinkerPatchStringRelative) << patch.Type(); + shift = 0u; // No shift for ADD. + } else { + // LDR 32-bit or 64-bit with imm12 == 0 (unset). + DCHECK(patch.Type() == kLinkerPatchDexCacheArray) << patch.Type(); + DCHECK_EQ(insn & 0xbffffc00, 0xb9400000) << std::hex << insn; + } if (kIsDebugBuild) { uint32_t adrp = GetInsn(code, pc_insn_offset); if ((adrp & 0x9f000000u) != 0x90000000u) { @@ -263,7 +278,7 @@ bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code, DCHECK_EQ(patch_offset & 0x3u, 0u); if ((patch_offset & 0xff8) == 0xff8) { // ...ff8 or ...ffc uint32_t adrp = GetInsn(code, literal_offset); - DCHECK_EQ(adrp & 0xff000000, 0x90000000); + DCHECK_EQ(adrp & 0x9f000000, 0x90000000); uint32_t next_offset = patch_offset + 4u; uint32_t next_insn = GetInsn(code, literal_offset + 4u); @@ -277,6 +292,14 @@ bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code, return false; } + // And since kLinkerPatchStringRelative is using the result of the ADRP for an ADD immediate, + // check for that as well. We generalize a bit to include ADD/ADDS/SUB/SUBS immediate that + // either uses the ADRP destination or stores the result to a different register. + if ((next_insn & 0x1f000000) == 0x11000000 && + ((((next_insn >> 5) ^ adrp) & 0x1f) == 0 || ((next_insn ^ adrp) & 0x1f) != 0)) { + return false; + } + // LDR <Wt>, <label> is always aligned and thus it doesn't cause boundary crossing. 
if ((next_insn & 0xff000000) == 0x18000000) { return false; diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h index f9b76e6250..48ad1059b0 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.h +++ b/compiler/linker/arm64/relative_patcher_arm64.h @@ -37,10 +37,10 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher { uint32_t literal_offset, uint32_t patch_offset, uint32_t target_offset) OVERRIDE; - void PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) OVERRIDE; + void PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) OVERRIDE; private: static std::vector<uint8_t> CompileThunkCode(); diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc index 0bfef5e6d3..09729fdf96 100644 --- a/compiler/linker/arm64/relative_patcher_arm64_test.cc +++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc @@ -40,6 +40,15 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { static constexpr uint32_t kBlPlusMax = 0x95ffffffu; static constexpr uint32_t kBlMinusMax = 0x96000000u; + // LDR immediate, 32-bit. + static constexpr uint32_t kLdrWInsn = 0xb9400000u; + + // ADD/ADDS/SUB/SUBS immediate, 64-bit. + static constexpr uint32_t kAddXInsn = 0x91000000u; + static constexpr uint32_t kAddsXInsn = 0xb1000000u; + static constexpr uint32_t kSubXInsn = 0xd1000000u; + static constexpr uint32_t kSubsXInsn = 0xf1000000u; + // LDUR x2, [sp, #4], i.e. unaligned load crossing 64-bit boundary (assuming aligned sp). static constexpr uint32_t kLdurInsn = 0xf840405fu; @@ -109,7 +118,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { uint32_t GetMethodOffset(uint32_t method_idx) { auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx)); CHECK(result.first); - CHECK_EQ(result.second & 3u, 0u); + CHECK_ALIGNED(result.second, 4u); return result.second; } @@ -147,20 +156,29 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { return result; } - std::vector<uint8_t> GenNopsAndAdrpLdr(size_t num_nops, - uint32_t method_offset, uint32_t target_offset) { + std::vector<uint8_t> GenNopsAndAdrpAndUse(size_t num_nops, + uint32_t method_offset, + uint32_t target_offset, + uint32_t use_insn) { std::vector<uint8_t> result; result.reserve(num_nops * 4u + 8u); for (size_t i = 0; i != num_nops; ++i) { result.insert(result.end(), kNopCode.begin(), kNopCode.end()); } - DCHECK_EQ(method_offset & 3u, 0u); - DCHECK_EQ(target_offset & 3u, 0u); + CHECK_ALIGNED(method_offset, 4u); + CHECK_ALIGNED(target_offset, 4u); uint32_t adrp_offset = method_offset + num_nops * 4u; uint32_t disp = target_offset - (adrp_offset & ~0xfffu); - DCHECK_EQ(disp & 3u, 0u); - uint32_t ldr = 0xb9400001 | // LDR w1, [x0, #(imm12 * 2)] - ((disp & 0xfffu) << (10 - 2)); // imm12 = ((disp & 0xfffu) >> 2) is at bit 10. + if (use_insn == kLdrWInsn) { + DCHECK_ALIGNED(disp, 1u << 2); + use_insn |= 1 | // LDR x1, [x0, #(imm12 << 2)] + ((disp & 0xfffu) << (10 - 2)); // imm12 = ((disp & 0xfffu) >> 2) is at bit 10. + } else if (use_insn == kAddXInsn) { + use_insn |= 1 | // ADD x1, x0, #imm + (disp & 0xfffu) << 10; // imm12 = (disp & 0xfffu) is at bit 10. 
+ } else { + LOG(FATAL) << "Unexpected instruction: 0x" << std::hex << use_insn; + } uint32_t adrp = 0x90000000 | // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64) ((disp & 0x3000u) << (29 - 12)) | // immlo = ((disp & 0x3000u) >> 12) is at bit 29, ((disp & 0xffffc000) >> (14 - 5)) | // immhi = (disp >> 14) is at bit 5, @@ -170,13 +188,19 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { result.push_back(static_cast<uint8_t>(adrp >> 8)); result.push_back(static_cast<uint8_t>(adrp >> 16)); result.push_back(static_cast<uint8_t>(adrp >> 24)); - result.push_back(static_cast<uint8_t>(ldr)); - result.push_back(static_cast<uint8_t>(ldr >> 8)); - result.push_back(static_cast<uint8_t>(ldr >> 16)); - result.push_back(static_cast<uint8_t>(ldr >> 24)); + result.push_back(static_cast<uint8_t>(use_insn)); + result.push_back(static_cast<uint8_t>(use_insn >> 8)); + result.push_back(static_cast<uint8_t>(use_insn >> 16)); + result.push_back(static_cast<uint8_t>(use_insn >> 24)); return result; } + std::vector<uint8_t> GenNopsAndAdrpLdr(size_t num_nops, + uint32_t method_offset, + uint32_t target_offset) { + return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kLdrWInsn); + } + void TestNopsAdrpLdr(size_t num_nops, uint32_t dex_cache_arrays_begin, uint32_t element_offset) { dex_cache_arrays_begin_ = dex_cache_arrays_begin; auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u); // Unpatched. @@ -184,7 +208,8 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { LinkerPatch::DexCacheArrayPatch(num_nops * 4u , nullptr, num_nops * 4u, element_offset), LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, element_offset), }; - AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code), + AddCompiledMethod(MethodRef(1u), + ArrayRef<const uint8_t>(code), ArrayRef<const LinkerPatch>(patches)); Link(); @@ -194,6 +219,30 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); } + std::vector<uint8_t> GenNopsAndAdrpAdd(size_t num_nops, + uint32_t method_offset, + uint32_t target_offset) { + return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kAddXInsn); + } + + void TestNopsAdrpAdd(size_t num_nops, uint32_t string_offset) { + constexpr uint32_t kStringIndex = 1u; + string_index_to_offset_map_.Put(kStringIndex, string_offset); + auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u); // Unpatched. 
+ LinkerPatch patches[] = { + LinkerPatch::RelativeStringPatch(num_nops * 4u , nullptr, num_nops * 4u, kStringIndex), + LinkerPatch::RelativeStringPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, kStringIndex), + }; + AddCompiledMethod(MethodRef(1u), + ArrayRef<const uint8_t>(code), + ArrayRef<const LinkerPatch>(patches)); + Link(); + + uint32_t method1_offset = GetMethodOffset(1u); + auto expected_code = GenNopsAndAdrpAdd(num_nops, method1_offset, string_offset); + EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); + } + void InsertInsn(std::vector<uint8_t>* code, size_t pos, uint32_t insn) { CHECK_LE(pos, code->size()); const uint8_t insn_code[] = { @@ -204,8 +253,10 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { code->insert(code->begin() + pos, insn_code, insn_code + sizeof(insn_code)); } - void PrepareNopsAdrpInsn2Ldr(size_t num_nops, uint32_t insn2, - uint32_t dex_cache_arrays_begin, uint32_t element_offset) { + void PrepareNopsAdrpInsn2Ldr(size_t num_nops, + uint32_t insn2, + uint32_t dex_cache_arrays_begin, + uint32_t element_offset) { dex_cache_arrays_begin_ = dex_cache_arrays_begin; auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u); // Unpatched. InsertInsn(&code, num_nops * 4u + 4u, insn2); @@ -213,26 +264,41 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { LinkerPatch::DexCacheArrayPatch(num_nops * 4u , nullptr, num_nops * 4u, element_offset), LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, element_offset), }; - AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code), + AddCompiledMethod(MethodRef(1u), + ArrayRef<const uint8_t>(code), ArrayRef<const LinkerPatch>(patches)); Link(); } - void TestNopsAdrpInsn2Ldr(size_t num_nops, uint32_t insn2, - uint32_t dex_cache_arrays_begin, uint32_t element_offset) { - PrepareNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset); + void PrepareNopsAdrpInsn2Add(size_t num_nops, uint32_t insn2, uint32_t string_offset) { + constexpr uint32_t kStringIndex = 1u; + string_index_to_offset_map_.Put(kStringIndex, string_offset); + auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u); // Unpatched. 
+ InsertInsn(&code, num_nops * 4u + 4u, insn2); + LinkerPatch patches[] = { + LinkerPatch::RelativeStringPatch(num_nops * 4u , nullptr, num_nops * 4u, kStringIndex), + LinkerPatch::RelativeStringPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, kStringIndex), + }; + AddCompiledMethod(MethodRef(1u), + ArrayRef<const uint8_t>(code), + ArrayRef<const LinkerPatch>(patches)); + Link(); + } + void TestNopsAdrpInsn2AndUse(size_t num_nops, + uint32_t insn2, + uint32_t target_offset, + uint32_t use_insn) { uint32_t method1_offset = GetMethodOffset(1u); - uint32_t target_offset = dex_cache_arrays_begin_ + element_offset; - auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset); + auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn); InsertInsn(&expected_code, num_nops * 4u + 4u, insn2); EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); } - void TestNopsAdrpInsn2LdrHasThunk(size_t num_nops, uint32_t insn2, - uint32_t dex_cache_arrays_begin, uint32_t element_offset) { - PrepareNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset); - + void TestNopsAdrpInsn2AndUseHasThunk(size_t num_nops, + uint32_t insn2, + uint32_t target_offset, + uint32_t use_insn) { uint32_t method1_offset = GetMethodOffset(1u); CHECK(!compiled_method_refs_.empty()); CHECK_EQ(compiled_method_refs_[0].dex_method_index, 1u); @@ -240,13 +306,12 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size(); uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64); uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u); - ASSERT_EQ(b_diff & 3u, 0u); + CHECK_ALIGNED(b_diff, 4u); ASSERT_LT(b_diff, 128 * MB); uint32_t b_out = kBPlus0 + ((b_diff >> 2) & 0x03ffffffu); uint32_t b_in = kBPlus0 + ((-b_diff >> 2) & 0x03ffffffu); - uint32_t target_offset = dex_cache_arrays_begin_ + element_offset; - auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset); + auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn); InsertInsn(&expected_code, num_nops * 4u + 4u, insn2); // Replace adrp with bl. expected_code.erase(expected_code.begin() + num_nops * 4u, @@ -270,29 +335,39 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { } } - void TestAdrpInsn2Ldr(uint32_t insn2, uint32_t adrp_offset, bool has_thunk, - uint32_t dex_cache_arrays_begin, uint32_t element_offset) { + void TestAdrpInsn2Ldr(uint32_t insn2, + uint32_t adrp_offset, + bool has_thunk, + uint32_t dex_cache_arrays_begin, + uint32_t element_offset) { uint32_t method1_offset = CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader); ASSERT_LT(method1_offset, adrp_offset); - ASSERT_EQ(adrp_offset & 3u, 0u); + CHECK_ALIGNED(adrp_offset, 4u); uint32_t num_nops = (adrp_offset - method1_offset) / 4u; + PrepareNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset); + uint32_t target_offset = dex_cache_arrays_begin_ + element_offset; if (has_thunk) { - TestNopsAdrpInsn2LdrHasThunk(num_nops, insn2, dex_cache_arrays_begin, element_offset); + TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, target_offset, kLdrWInsn); } else { - TestNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset); + TestNopsAdrpInsn2AndUse(num_nops, insn2, target_offset, kLdrWInsn); } ASSERT_EQ(method1_offset, GetMethodOffset(1u)); // If this fails, num_nops is wrong. 
} - void TestAdrpLdurLdr(uint32_t adrp_offset, bool has_thunk, - uint32_t dex_cache_arrays_begin, uint32_t element_offset) { + void TestAdrpLdurLdr(uint32_t adrp_offset, + bool has_thunk, + uint32_t dex_cache_arrays_begin, + uint32_t element_offset) { TestAdrpInsn2Ldr(kLdurInsn, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset); } - void TestAdrpLdrPcRelLdr(uint32_t pcrel_ldr_insn, int32_t pcrel_disp, - uint32_t adrp_offset, bool has_thunk, - uint32_t dex_cache_arrays_begin, uint32_t element_offset) { + void TestAdrpLdrPcRelLdr(uint32_t pcrel_ldr_insn, + int32_t pcrel_disp, + uint32_t adrp_offset, + bool has_thunk, + uint32_t dex_cache_arrays_begin, + uint32_t element_offset) { ASSERT_LT(pcrel_disp, 0x100000); ASSERT_GE(pcrel_disp, -0x100000); ASSERT_EQ(pcrel_disp & 0x3, 0); @@ -300,13 +375,60 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset); } - void TestAdrpLdrSpRelLdr(uint32_t sprel_ldr_insn, uint32_t sprel_disp_in_load_units, - uint32_t adrp_offset, bool has_thunk, - uint32_t dex_cache_arrays_begin, uint32_t element_offset) { + void TestAdrpLdrSpRelLdr(uint32_t sprel_ldr_insn, + uint32_t sprel_disp_in_load_units, + uint32_t adrp_offset, + bool has_thunk, + uint32_t dex_cache_arrays_begin, + uint32_t element_offset) { ASSERT_LT(sprel_disp_in_load_units, 0x1000u); uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10); TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset); } + + void TestAdrpInsn2Add(uint32_t insn2, + uint32_t adrp_offset, + bool has_thunk, + uint32_t string_offset) { + uint32_t method1_offset = + CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader); + ASSERT_LT(method1_offset, adrp_offset); + CHECK_ALIGNED(adrp_offset, 4u); + uint32_t num_nops = (adrp_offset - method1_offset) / 4u; + PrepareNopsAdrpInsn2Add(num_nops, insn2, string_offset); + if (has_thunk) { + TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, string_offset, kAddXInsn); + } else { + TestNopsAdrpInsn2AndUse(num_nops, insn2, string_offset, kAddXInsn); + } + ASSERT_EQ(method1_offset, GetMethodOffset(1u)); // If this fails, num_nops is wrong. 
+ } + + void TestAdrpLdurAdd(uint32_t adrp_offset, bool has_thunk, uint32_t string_offset) { + TestAdrpInsn2Add(kLdurInsn, adrp_offset, has_thunk, string_offset); + } + + void TestAdrpLdrPcRelAdd(uint32_t pcrel_ldr_insn, + int32_t pcrel_disp, + uint32_t adrp_offset, + bool has_thunk, + uint32_t string_offset) { + ASSERT_LT(pcrel_disp, 0x100000); + ASSERT_GE(pcrel_disp, -0x100000); + ASSERT_EQ(pcrel_disp & 0x3, 0); + uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5); + TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset); + } + + void TestAdrpLdrSpRelAdd(uint32_t sprel_ldr_insn, + uint32_t sprel_disp_in_load_units, + uint32_t adrp_offset, + bool has_thunk, + uint32_t string_offset) { + ASSERT_LT(sprel_disp_in_load_units, 0x1000u); + uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10); + TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset); + } }; const uint8_t Arm64RelativePatcherTest::kCallRawCode[] = { @@ -358,14 +480,14 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOther) { uint32_t method1_offset = GetMethodOffset(1u); uint32_t method2_offset = GetMethodOffset(2u); uint32_t diff_after = method2_offset - method1_offset; - ASSERT_EQ(diff_after & 3u, 0u); + CHECK_ALIGNED(diff_after, 4u); ASSERT_LT(diff_after >> 2, 1u << 8); // Simple encoding, (diff_after >> 2) fits into 8 bits. static const uint8_t method1_expected_code[] = { static_cast<uint8_t>(diff_after >> 2), 0x00, 0x00, 0x94 }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code))); uint32_t diff_before = method1_offset - method2_offset; - ASSERT_EQ(diff_before & 3u, 0u); + CHECK_ALIGNED(diff_before, 4u); ASSERT_GE(diff_before, -1u << 27); auto method2_expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff_before >> 2) & 0x03ffffffu)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code))); @@ -411,7 +533,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallTrampolineTooFar) { uint32_t thunk_offset = CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64); uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method); - ASSERT_EQ(diff & 3u, 0u); + CHECK_ALIGNED(diff, 4u); ASSERT_LT(diff, 128 * MB); auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx), @@ -497,7 +619,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) { uint32_t thunk_offset = last_method_header_offset - CompiledCode::AlignCode(ThunkSize(), kArm64); ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset)); uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1); - ASSERT_EQ(diff & 3u, 0u); + CHECK_ALIGNED(diff, 4u); ASSERT_LT(diff, 128 * MB); auto expected_code = GenNopsAndBl(0u, kBlPlus0 | (diff >> 2)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); @@ -527,7 +649,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) { uint32_t thunk_offset = CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64); uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method); - ASSERT_EQ(diff & 3u, 0u); + CHECK_ALIGNED(diff, 4u); ASSERT_LT(diff, 128 * MB); auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx), @@ -551,74 +673,158 @@ TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference4) { TestNopsAdrpLdr(0u, 0x12345000u, 
0x4000u); } -TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0xff4) { - TestAdrpLdurLdr(0xff4u, false, 0x12345678u, 0x1234u); +TEST_F(Arm64RelativePatcherTestDefault, StringReference1) { + TestNopsAdrpAdd(0u, 0x12345678u); } -TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0xff8) { - TestAdrpLdurLdr(0xff8u, true, 0x12345678u, 0x1234u); +TEST_F(Arm64RelativePatcherTestDefault, StringReference2) { + TestNopsAdrpAdd(0u, -0x12345678u); } -TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0xffc) { - TestAdrpLdurLdr(0xffcu, true, 0x12345678u, 0x1234u); +TEST_F(Arm64RelativePatcherTestDefault, StringReference3) { + TestNopsAdrpAdd(0u, 0x12345000u); } -TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0x1000) { - TestAdrpLdurLdr(0x1000u, false, 0x12345678u, 0x1234u); +TEST_F(Arm64RelativePatcherTestDefault, StringReference4) { + TestNopsAdrpAdd(0u, 0x12345ffcu); } -TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0xff4) { - TestAdrpLdurLdr(0xff4u, false, 0x12345678u, 0x1234u); -} +#define TEST_FOR_OFFSETS(test, disp1, disp2) \ + test(0xff4u, disp1) test(0xff8u, disp1) test(0xffcu, disp1) test(0x1000u, disp1) \ + test(0xff4u, disp2) test(0xff8u, disp2) test(0xffcu, disp2) test(0x1000u, disp2) -TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0xff8) { - TestAdrpLdurLdr(0xff8u, false, 0x12345678u, 0x1234u); -} +#define DEFAULT_LDUR_LDR_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## Ldur ## disp) { \ + bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \ + TestAdrpLdurLdr(adrp_offset, has_thunk, 0x12345678u, disp); \ + } -TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0xffc) { - TestAdrpLdurLdr(0xffcu, false, 0x12345678u, 0x1234u); -} +TEST_FOR_OFFSETS(DEFAULT_LDUR_LDR_TEST, 0x1234, 0x1238) -TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0x1000) { - TestAdrpLdurLdr(0x1000u, false, 0x12345678u, 0x1234u); -} +#define DENVER64_LDUR_LDR_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference ## adrp_offset ## Ldur ## disp) { \ + TestAdrpLdurLdr(adrp_offset, false, 0x12345678u, disp); \ + } -#define TEST_FOR_OFFSETS(test, disp1, disp2) \ - test(0xff4u, disp1) test(0xff8u, disp1) test(0xffcu, disp1) test(0x1000u, disp1) \ - test(0xff4u, disp2) test(0xff8u, disp2) test(0xffcu, disp2) test(0x1000u, disp2) +TEST_FOR_OFFSETS(DENVER64_LDUR_LDR_TEST, 0x1234, 0x1238) // LDR <Wt>, <label> is always aligned. We should never have to use a fixup. -#define LDRW_PCREL_TEST(adrp_offset, disp) \ +#define LDRW_PCREL_LDR_TEST(adrp_offset, disp) \ TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WPcRel ## disp) { \ TestAdrpLdrPcRelLdr(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u, 0x1234u); \ } -TEST_FOR_OFFSETS(LDRW_PCREL_TEST, 0x1234, 0x1238) +TEST_FOR_OFFSETS(LDRW_PCREL_LDR_TEST, 0x1234, 0x1238) // LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8. 
-#define LDRX_PCREL_TEST(adrp_offset, disp) \ +#define LDRX_PCREL_LDR_TEST(adrp_offset, disp) \ TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XPcRel ## disp) { \ - bool unaligned = ((adrp_offset + 4u + static_cast<uint32_t>(disp)) & 7u) != 0; \ + bool unaligned = !IsAligned<8u>(adrp_offset + 4u + static_cast<uint32_t>(disp)); \ bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu) && unaligned; \ TestAdrpLdrPcRelLdr(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u, 0x1234u); \ } -TEST_FOR_OFFSETS(LDRX_PCREL_TEST, 0x1234, 0x1238) +TEST_FOR_OFFSETS(LDRX_PCREL_LDR_TEST, 0x1234, 0x1238) // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed. -#define LDRW_SPREL_TEST(adrp_offset, disp) \ +#define LDRW_SPREL_LDR_TEST(adrp_offset, disp) \ TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WSpRel ## disp) { \ TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, disp >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \ } -TEST_FOR_OFFSETS(LDRW_SPREL_TEST, 0, 4) +TEST_FOR_OFFSETS(LDRW_SPREL_LDR_TEST, 0, 4) -#define LDRX_SPREL_TEST(adrp_offset, disp) \ +#define LDRX_SPREL_LDR_TEST(adrp_offset, disp) \ TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XSpRel ## disp) { \ TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, disp >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \ } -TEST_FOR_OFFSETS(LDRX_SPREL_TEST, 0, 8) +TEST_FOR_OFFSETS(LDRX_SPREL_LDR_TEST, 0, 8) + +#define DEFAULT_LDUR_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## Ldur ## disp) { \ + bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \ + TestAdrpLdurAdd(adrp_offset, has_thunk, disp); \ + } + +TEST_FOR_OFFSETS(DEFAULT_LDUR_ADD_TEST, 0x12345678, 0xffffc840) + +#define DENVER64_LDUR_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDenver64, StringReference ## adrp_offset ## Ldur ## disp) { \ + TestAdrpLdurAdd(adrp_offset, false, disp); \ + } + +TEST_FOR_OFFSETS(DENVER64_LDUR_ADD_TEST, 0x12345678, 0xffffc840) + +#define DEFAULT_SUBX3X2_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubX3X2 ## disp) { \ + /* SUB unrelated to "ADRP x0, addr". */ \ + uint32_t sub = kSubXInsn | (100 << 10) | (2u << 5) | 3u; /* SUB x3, x2, #100 */ \ + TestAdrpInsn2Add(sub, adrp_offset, false, disp); \ + } + +TEST_FOR_OFFSETS(DEFAULT_SUBX3X2_ADD_TEST, 0x12345678, 0xffffc840) + +#define DEFAULT_SUBSX3X0_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubsX3X0 ## disp) { \ + /* SUBS that uses the result of "ADRP x0, addr". */ \ + uint32_t subs = kSubsXInsn | (100 << 10) | (0u << 5) | 3u; /* SUBS x3, x0, #100 */ \ + TestAdrpInsn2Add(subs, adrp_offset, false, disp); \ + } + +TEST_FOR_OFFSETS(DEFAULT_SUBSX3X0_ADD_TEST, 0x12345678, 0xffffc840) + +#define DEFAULT_ADDX0X0_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddX0X0 ## disp) { \ + /* ADD that uses the result register of "ADRP x0, addr" as both source and destination. 
*/ \ + uint32_t add = kSubXInsn | (100 << 10) | (0u << 5) | 0u; /* ADD x0, x0, #100 */ \ + TestAdrpInsn2Add(add, adrp_offset, false, disp); \ + } + +TEST_FOR_OFFSETS(DEFAULT_ADDX0X0_ADD_TEST, 0x12345678, 0xffffc840) + +#define DEFAULT_ADDSX0X2_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddsX0X2 ## disp) { \ + /* ADDS that does not use the result of "ADRP x0, addr" but overwrites that register. */ \ + uint32_t adds = kAddsXInsn | (100 << 10) | (2u << 5) | 0u; /* ADDS x0, x2, #100 */ \ + bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \ + TestAdrpInsn2Add(adds, adrp_offset, has_thunk, disp); \ + } + +TEST_FOR_OFFSETS(DEFAULT_ADDSX0X2_ADD_TEST, 0x12345678, 0xffffc840) + +// LDR <Wt>, <label> is always aligned. We should never have to use a fixup. +#define LDRW_PCREL_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WPcRel ## disp) { \ + TestAdrpLdrPcRelAdd(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u); \ + } + +TEST_FOR_OFFSETS(LDRW_PCREL_ADD_TEST, 0x1234, 0x1238) + +// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8. +#define LDRX_PCREL_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XPcRel ## disp) { \ + bool unaligned = !IsAligned<8u>(adrp_offset + 4u + static_cast<uint32_t>(disp)); \ + bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu) && unaligned; \ + TestAdrpLdrPcRelAdd(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u); \ + } + +TEST_FOR_OFFSETS(LDRX_PCREL_ADD_TEST, 0x1234, 0x1238) + +// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed. +#define LDRW_SPREL_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WSpRel ## disp) { \ + TestAdrpLdrSpRelAdd(kLdrWSpRelInsn, disp >> 2, adrp_offset, false, 0x12345678u); \ + } + +TEST_FOR_OFFSETS(LDRW_SPREL_ADD_TEST, 0, 4) + +#define LDRX_SPREL_ADD_TEST(adrp_offset, disp) \ + TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XSpRel ## disp) { \ + TestAdrpLdrSpRelAdd(kLdrXSpRelInsn, disp >> 3, adrp_offset, false, 0x12345678u); \ + } + +TEST_FOR_OFFSETS(LDRX_SPREL_ADD_TEST, 0, 8) } // namespace linker } // namespace art diff --git a/compiler/linker/multi_oat_relative_patcher.h b/compiler/linker/multi_oat_relative_patcher.h index 1727d529fc..dbda03fd3b 100644 --- a/compiler/linker/multi_oat_relative_patcher.h +++ b/compiler/linker/multi_oat_relative_patcher.h @@ -103,13 +103,13 @@ class MultiOatRelativePatcher FINAL { } // Wrapper around RelativePatcher::PatchDexCacheReference(), doing offset adjustment. - void PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) { + void PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) { patch_offset += adjustment_; target_offset += adjustment_; - relative_patcher_->PatchDexCacheReference(code, patch, patch_offset, target_offset); + relative_patcher_->PatchPcRelativeReference(code, patch, patch_offset, target_offset); } // Wrappers around RelativePatcher for statistics retrieval. 
diff --git a/compiler/linker/multi_oat_relative_patcher_test.cc b/compiler/linker/multi_oat_relative_patcher_test.cc index 792cdfe8e9..92a96a0bd3 100644 --- a/compiler/linker/multi_oat_relative_patcher_test.cc +++ b/compiler/linker/multi_oat_relative_patcher_test.cc @@ -86,10 +86,10 @@ class MultiOatRelativePatcherTest : public testing::Test { last_target_offset_ = target_offset; } - void PatchDexCacheReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) OVERRIDE { + void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) OVERRIDE { last_literal_offset_ = patch.LiteralOffset(); last_patch_offset_ = patch_offset; last_target_offset_ = target_offset; @@ -277,7 +277,7 @@ TEST_F(MultiOatRelativePatcherTest, Patch) { uint32_t method2_target_offset = 0xccccu; LinkerPatch method2_patch = LinkerPatch::DexCacheArrayPatch(method2_literal_offset, nullptr, 0u, 1234u); - patcher_.PatchDexCacheReference( + patcher_.PatchPcRelativeReference( &code, method2_patch, method2_patch_offset, method2_target_offset); DCHECK_EQ(method2_literal_offset, mock_->last_literal_offset_); DCHECK_EQ(method2_patch_offset + adjustment1, mock_->last_patch_offset_); diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc index 6727c17583..3a229831d0 100644 --- a/compiler/linker/relative_patcher.cc +++ b/compiler/linker/relative_patcher.cc @@ -62,10 +62,10 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create( LOG(FATAL) << "Unexpected relative call patch."; } - virtual void PatchDexCacheReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, - const LinkerPatch& patch ATTRIBUTE_UNUSED, - uint32_t patch_offset ATTRIBUTE_UNUSED, - uint32_t target_offset ATTRIBUTE_UNUSED) { + void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, + const LinkerPatch& patch ATTRIBUTE_UNUSED, + uint32_t patch_offset ATTRIBUTE_UNUSED, + uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE { LOG(FATAL) << "Unexpected relative dex cache array patch."; } diff --git a/compiler/linker/relative_patcher.h b/compiler/linker/relative_patcher.h index ba374512a1..a22b9f2c2d 100644 --- a/compiler/linker/relative_patcher.h +++ b/compiler/linker/relative_patcher.h @@ -104,10 +104,10 @@ class RelativePatcher { uint32_t target_offset) = 0; // Patch a reference to a dex cache location. 
- virtual void PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) = 0; + virtual void PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) = 0; protected: RelativePatcher() diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index 704135a7b5..c9fb543d2e 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -150,12 +150,19 @@ class RelativePatcherTest : public testing::Test { offset + patch.LiteralOffset(), target_offset); } else if (patch.Type() == kLinkerPatchDexCacheArray) { uint32_t target_offset = dex_cache_arrays_begin_ + patch.TargetDexCacheElementOffset(); - patcher_->PatchDexCacheReference(&patched_code_, - patch, - offset + patch.LiteralOffset(), - target_offset); + patcher_->PatchPcRelativeReference(&patched_code_, + patch, + offset + patch.LiteralOffset(), + target_offset); + } else if (patch.Type() == kLinkerPatchStringRelative) { + uint32_t target_offset = string_index_to_offset_map_.Get(patch.TargetStringIndex()); + patcher_->PatchPcRelativeReference(&patched_code_, + patch, + offset + patch.LiteralOffset(), + target_offset); } else { - LOG(FATAL) << "Bad patch type."; + LOG(FATAL) << "Bad patch type. " << patch.Type(); + UNREACHABLE(); } } } @@ -257,6 +264,7 @@ class RelativePatcherTest : public testing::Test { MethodOffsetMap method_offset_map_; std::unique_ptr<RelativePatcher> patcher_; uint32_t dex_cache_arrays_begin_; + SafeMap<uint32_t, uint32_t> string_index_to_offset_map_; std::vector<MethodReference> compiled_method_refs_; std::vector<std::unique_ptr<CompiledMethod>> compiled_methods_; std::vector<uint8_t> patched_code_; diff --git a/compiler/linker/x86/relative_patcher_x86.cc b/compiler/linker/x86/relative_patcher_x86.cc index 24b1481c31..768d31abf4 100644 --- a/compiler/linker/x86/relative_patcher_x86.cc +++ b/compiler/linker/x86/relative_patcher_x86.cc @@ -21,10 +21,10 @@ namespace art { namespace linker { -void X86RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) { +void X86RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) { uint32_t anchor_literal_offset = patch.PcInsnOffset(); uint32_t literal_offset = patch.LiteralOffset(); diff --git a/compiler/linker/x86/relative_patcher_x86.h b/compiler/linker/x86/relative_patcher_x86.h index ddc244c269..fbf9ad4671 100644 --- a/compiler/linker/x86/relative_patcher_x86.h +++ b/compiler/linker/x86/relative_patcher_x86.h @@ -26,10 +26,10 @@ class X86RelativePatcher FINAL : public X86BaseRelativePatcher { public: X86RelativePatcher() { } - void PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) OVERRIDE; + void PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) OVERRIDE; }; } // namespace linker diff --git a/compiler/linker/x86/relative_patcher_x86_test.cc b/compiler/linker/x86/relative_patcher_x86_test.cc index 7acc33004a..2a44b7990e 100644 --- a/compiler/linker/x86/relative_patcher_x86_test.cc +++ b/compiler/linker/x86/relative_patcher_x86_test.cc @@ -70,15 +70,19 @@ TEST_F(X86RelativePatcherTest, CallOther) { uint32_t diff_after = 
method2_offset - (method1_offset + kCallCode.size() /* PC adjustment */); static const uint8_t method1_expected_code[] = { 0xe8, - static_cast<uint8_t>(diff_after), static_cast<uint8_t>(diff_after >> 8), - static_cast<uint8_t>(diff_after >> 16), static_cast<uint8_t>(diff_after >> 24) + static_cast<uint8_t>(diff_after), + static_cast<uint8_t>(diff_after >> 8), + static_cast<uint8_t>(diff_after >> 16), + static_cast<uint8_t>(diff_after >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code))); uint32_t diff_before = method1_offset - (method2_offset + kCallCode.size() /* PC adjustment */); static const uint8_t method2_expected_code[] = { 0xe8, - static_cast<uint8_t>(diff_before), static_cast<uint8_t>(diff_before >> 8), - static_cast<uint8_t>(diff_before >> 16), static_cast<uint8_t>(diff_before >> 24) + static_cast<uint8_t>(diff_before), + static_cast<uint8_t>(diff_before >> 8), + static_cast<uint8_t>(diff_before >> 16), + static_cast<uint8_t>(diff_before >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code))); } @@ -95,8 +99,10 @@ TEST_F(X86RelativePatcherTest, CallTrampoline) { uint32_t diff = kTrampolineOffset - (result.second + kCallCode.size()); static const uint8_t expected_code[] = { 0xe8, - static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), - static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24) + static_cast<uint8_t>(diff), + static_cast<uint8_t>(diff >> 8), + static_cast<uint8_t>(diff >> 16), + static_cast<uint8_t>(diff >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); } @@ -125,8 +131,42 @@ TEST_F(X86RelativePatcherTest, DexCacheReference) { 0xe8, 0x00, 0x00, 0x00, 0x00, // call +0 0x5b, // pop ebx 0x8b, 0x83, // mov eax, [ebx + diff] - static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), - static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24) + static_cast<uint8_t>(diff), + static_cast<uint8_t>(diff >> 8), + static_cast<uint8_t>(diff >> 16), + static_cast<uint8_t>(diff >> 24) + }; + EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); +} + +TEST_F(X86RelativePatcherTest, StringReference) { + constexpr uint32_t kStringIndex = 1u; + constexpr uint32_t kStringOffset = 0x12345678; + string_index_to_offset_map_.Put(kStringIndex, kStringOffset); + static const uint8_t raw_code[] = { + 0xe8, 0x00, 0x00, 0x00, 0x00, // call +0 + 0x5b, // pop ebx + 0x8d, 0x83, 0x00, 0x01, 0x00, 0x00, // lea eax, [ebx + 256 (kDummy32BitValue)] + }; + constexpr uint32_t anchor_offset = 5u; // After call +0. 
+ ArrayRef<const uint8_t> code(raw_code); + LinkerPatch patches[] = { + LinkerPatch::RelativeStringPatch(code.size() - 4u, nullptr, anchor_offset, kStringIndex), + }; + AddCompiledMethod(MethodRef(1u), code, ArrayRef<const LinkerPatch>(patches)); + Link(); + + auto result = method_offset_map_.FindMethodOffset(MethodRef(1u)); + ASSERT_TRUE(result.first); + uint32_t diff = kStringOffset - (result.second + anchor_offset); + static const uint8_t expected_code[] = { + 0xe8, 0x00, 0x00, 0x00, 0x00, // call +0 + 0x5b, // pop ebx + 0x8d, 0x83, // lea eax, [ebx + diff] + static_cast<uint8_t>(diff), + static_cast<uint8_t>(diff >> 8), + static_cast<uint8_t>(diff >> 16), + static_cast<uint8_t>(diff >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); } diff --git a/compiler/linker/x86_64/relative_patcher_x86_64.cc b/compiler/linker/x86_64/relative_patcher_x86_64.cc index e571f50d2f..2ff69308c4 100644 --- a/compiler/linker/x86_64/relative_patcher_x86_64.cc +++ b/compiler/linker/x86_64/relative_patcher_x86_64.cc @@ -21,10 +21,10 @@ namespace art { namespace linker { -void X86_64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) { +void X86_64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) { DCHECK_LE(patch.LiteralOffset() + 4u, code->size()); // Unsigned arithmetic with its well-defined overflow behavior is just fine here. uint32_t displacement = target_offset - patch_offset; diff --git a/compiler/linker/x86_64/relative_patcher_x86_64.h b/compiler/linker/x86_64/relative_patcher_x86_64.h index feecb3a2ad..11bb6d59e3 100644 --- a/compiler/linker/x86_64/relative_patcher_x86_64.h +++ b/compiler/linker/x86_64/relative_patcher_x86_64.h @@ -26,10 +26,10 @@ class X86_64RelativePatcher FINAL : public X86BaseRelativePatcher { public: X86_64RelativePatcher() { } - void PatchDexCacheReference(std::vector<uint8_t>* code, - const LinkerPatch& patch, - uint32_t patch_offset, - uint32_t target_offset) OVERRIDE; + void PatchPcRelativeReference(std::vector<uint8_t>* code, + const LinkerPatch& patch, + uint32_t patch_offset, + uint32_t target_offset) OVERRIDE; }; } // namespace linker diff --git a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc index 36e0f01a50..2b46453255 100644 --- a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc +++ b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc @@ -29,6 +29,8 @@ class X86_64RelativePatcherTest : public RelativePatcherTest { static const ArrayRef<const uint8_t> kCallCode; static const uint8_t kDexCacheLoadRawCode[]; static const ArrayRef<const uint8_t> kDexCacheLoadCode; + static const uint8_t kStringReferenceRawCode[]; + static const ArrayRef<const uint8_t> kStringReferenceCode; uint32_t GetMethodOffset(uint32_t method_idx) { auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx)); @@ -51,6 +53,14 @@ const uint8_t X86_64RelativePatcherTest::kDexCacheLoadRawCode[] = { const ArrayRef<const uint8_t> X86_64RelativePatcherTest::kDexCacheLoadCode( kDexCacheLoadRawCode); +const uint8_t X86_64RelativePatcherTest::kStringReferenceRawCode[] = { + 0x8d, 0x05, // lea eax, [rip + <offset>] + 0x00, 0x01, 0x00, 0x00 +}; + +const ArrayRef<const uint8_t> X86_64RelativePatcherTest::kStringReferenceCode( + kStringReferenceRawCode); + TEST_F(X86_64RelativePatcherTest, 
CallSelf) { LinkerPatch patches[] = { LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 1u), @@ -80,15 +90,19 @@ TEST_F(X86_64RelativePatcherTest, CallOther) { uint32_t diff_after = method2_offset - (method1_offset + kCallCode.size() /* PC adjustment */); static const uint8_t method1_expected_code[] = { 0xe8, - static_cast<uint8_t>(diff_after), static_cast<uint8_t>(diff_after >> 8), - static_cast<uint8_t>(diff_after >> 16), static_cast<uint8_t>(diff_after >> 24) + static_cast<uint8_t>(diff_after), + static_cast<uint8_t>(diff_after >> 8), + static_cast<uint8_t>(diff_after >> 16), + static_cast<uint8_t>(diff_after >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code))); uint32_t diff_before = method1_offset - (method2_offset + kCallCode.size() /* PC adjustment */); static const uint8_t method2_expected_code[] = { 0xe8, - static_cast<uint8_t>(diff_before), static_cast<uint8_t>(diff_before >> 8), - static_cast<uint8_t>(diff_before >> 16), static_cast<uint8_t>(diff_before >> 24) + static_cast<uint8_t>(diff_before), + static_cast<uint8_t>(diff_before >> 8), + static_cast<uint8_t>(diff_before >> 16), + static_cast<uint8_t>(diff_before >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code))); } @@ -105,8 +119,10 @@ TEST_F(X86_64RelativePatcherTest, CallTrampoline) { uint32_t diff = kTrampolineOffset - (result.second + kCallCode.size()); static const uint8_t expected_code[] = { 0xe8, - static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), - static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24) + static_cast<uint8_t>(diff), + static_cast<uint8_t>(diff >> 8), + static_cast<uint8_t>(diff >> 16), + static_cast<uint8_t>(diff >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); } @@ -126,8 +142,34 @@ TEST_F(X86_64RelativePatcherTest, DexCacheReference) { dex_cache_arrays_begin_ + kElementOffset - (result.second + kDexCacheLoadCode.size()); static const uint8_t expected_code[] = { 0x8b, 0x05, - static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), - static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24) + static_cast<uint8_t>(diff), + static_cast<uint8_t>(diff >> 8), + static_cast<uint8_t>(diff >> 16), + static_cast<uint8_t>(diff >> 24) + }; + EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); +} + +TEST_F(X86_64RelativePatcherTest, StringReference) { + constexpr uint32_t kStringIndex = 1u; + constexpr uint32_t kStringOffset = 0x12345678; + string_index_to_offset_map_.Put(kStringIndex, kStringOffset); + LinkerPatch patches[] = { + LinkerPatch::RelativeStringPatch( + kStringReferenceCode.size() - 4u, nullptr, 0u, kStringIndex), + }; + AddCompiledMethod(MethodRef(1u), kStringReferenceCode, ArrayRef<const LinkerPatch>(patches)); + Link(); + + auto result = method_offset_map_.FindMethodOffset(MethodRef(1u)); + ASSERT_TRUE(result.first); + uint32_t diff = kStringOffset - (result.second + kStringReferenceCode.size()); + static const uint8_t expected_code[] = { + 0x8d, 0x05, + static_cast<uint8_t>(diff), + static_cast<uint8_t>(diff >> 8), + static_cast<uint8_t>(diff >> 16), + static_cast<uint8_t>(diff >> 24) }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code))); } |
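
On x86 and x86-64 the string address is materialized with an LEA whose 32-bit displacement is patched in place (relative to EBX after a call/pop on x86, RIP-relative on x86-64). Below is a minimal sketch of the x86-64 case, assuming the imm32 occupies the last four bytes of the 6-byte "8d 05" LEA encoding used by the test; the offsets in main() are made up.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Patch the imm32 of "lea eax, [rip + imm32]" (encoded 8d 05 xx xx xx xx) so it
// addresses target_offset. RIP-relative displacements count from the end of the
// instruction, i.e. from patch_offset + 4 when patch_offset points at the imm32.
void PatchRipRelativeLea(std::vector<uint8_t>* code,
                         uint32_t literal_offset,  // position of the imm32 within code
                         uint32_t patch_offset,    // linked file offset of the imm32
                         uint32_t target_offset) {
  uint32_t displacement = target_offset - (patch_offset + 4u);
  (*code)[literal_offset + 0] = static_cast<uint8_t>(displacement);
  (*code)[literal_offset + 1] = static_cast<uint8_t>(displacement >> 8);
  (*code)[literal_offset + 2] = static_cast<uint8_t>(displacement >> 16);
  (*code)[literal_offset + 3] = static_cast<uint8_t>(displacement >> 24);
}

int main() {
  std::vector<uint8_t> code = {0x8d, 0x05, 0x00, 0x01, 0x00, 0x00};  // placeholder imm32 = 256
  // Hypothetical layout: the LEA starts at file offset 0x1000, so its imm32 is
  // at 0x1002, and the target string lives at 0x12345678.
  PatchRipRelativeLea(&code, /*literal_offset=*/2u, /*patch_offset=*/0x1002u,
                      /*target_offset=*/0x12345678u);
  for (uint8_t b : code) std::printf("%02x ", b);
  std::printf("\n");
  return 0;
}
```

The unsigned wrap-around in the subtraction is intentional; only the low 32 bits of the displacement matter, which is the same reasoning the patcher's own comment gives.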