author      2022-09-23 14:47:02 +0100
committer   2022-09-26 07:26:49 +0000
commit      2687277fd70535d24a886a83e8375f9f90d84800 (patch)
tree        e42c9b8eb7d809a043fd7cf5fa609237066623d1
parent      58b1b82dc2ee262da6f10399b82a1687af5ad819 (diff)
Rename a few constants and clarify their meaning in comments.
Bug: None
Test: m libart-install
Change-Id: I19ae9068317d5f409cf60648ea3ee063a5ff39f1
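For context on what the rename encodes: k<Isa>CodeAlignment constrains where the first instruction of a method or thunk may start, while the separate k<Isa>InstructionAlignment constants constrain every instruction. A minimal standalone sketch of the difference for ARM/Thumb2; the RoundUp() below is a simplified stand-in for ART's helper in base/bit_utils.h:

#include <cstdint>
#include <iostream>

// Values as defined for ARM/Thumb2 in libartbase/arch/instruction_set.h.
constexpr uint32_t kArmCodeAlignment = 8;            // first instruction of a subroutine
constexpr uint32_t kThumb2InstructionAlignment = 2;  // every instruction

// Simplified stand-in for ART's RoundUp(); alignment must be a power of two.
constexpr uint32_t RoundUp(uint32_t x, uint32_t alignment) {
  return (x + alignment - 1u) & ~(alignment - 1u);
}

int main() {
  // 0x1236 is a valid address for an instruction inside a Thumb2 method,
  // but the next method's first instruction must be padded up to 0x1238.
  const uint32_t code_end = 0x1236u;
  std::cout << std::hex
            << RoundUp(code_end, kThumb2InstructionAlignment) << ' '  // 1236
            << RoundUp(code_end, kArmCodeAlignment) << '\n';          // 1238
  return 0;
}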
 dex2oat/linker/arm/relative_patcher_thumb2_test.cc  | 87
 dex2oat/linker/arm64/relative_patcher_arm64_test.cc | 72
 libartbase/arch/instruction_set.cc                  |  6
 libartbase/arch/instruction_set.h                   | 18
 4 files changed, 96 insertions, 87 deletions
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
index 296bf61cbc..f7abd6b9eb 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
+++ b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
@@ -145,7 +145,7 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
                         const ArrayRef<const uint8_t>& last_method_code,
                         const ArrayRef<const LinkerPatch>& last_method_patches,
                         uint32_t distance_without_thunks) {
-    CHECK_EQ(distance_without_thunks % kArmAlignment, 0u);
+    CHECK_EQ(distance_without_thunks % kArmCodeAlignment, 0u);
     uint32_t method1_offset =
         kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
     AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
@@ -153,7 +153,7 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
 
     // We want to put the last method at a very precise offset.
     const uint32_t last_method_offset = method1_offset + distance_without_thunks;
-    CHECK_ALIGNED(last_method_offset, kArmAlignment);
+    CHECK_ALIGNED(last_method_offset, kArmCodeAlignment);
     const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
 
     // Fill the gap with intermediate methods in chunks of 2MiB and the first in [2MiB, 4MiB).
@@ -562,24 +562,25 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarAfter) {
                     bl_offset_in_method1 + just_over_max_positive_disp);
   ASSERT_EQ(kExpectedLastMethodIdx, last_method_idx);
   uint32_t method_after_thunk_idx = last_method_idx;
-  if (sizeof(OatQuickMethodHeader) < kArmAlignment) {
-    // The thunk needs to start on a kArmAlignment-aligned address before the address where the
+  if (sizeof(OatQuickMethodHeader) < kArmCodeAlignment) {
+    // The thunk needs to start on a kArmCodeAlignment-aligned address before the address where the
     // last method would have been if there was no thunk. If the size of the OatQuickMethodHeader
-    // is at least kArmAlignment, the thunk start shall fit between the previous filler method
+    // is at least kArmCodeAlignment, the thunk start shall fit between the previous filler method
     // and that address. Otherwise, it shall be inserted before that filler method.
     method_after_thunk_idx -= 1u;
   }
   uint32_t method1_offset = GetMethodOffset(1u);
   uint32_t method_after_thunk_offset = GetMethodOffset(method_after_thunk_idx);
-  ASSERT_TRUE(IsAligned<kArmAlignment>(method_after_thunk_offset));
+  ASSERT_TRUE(IsAligned<kArmCodeAlignment>(method_after_thunk_offset));
   uint32_t method_after_thunk_header_offset =
       method_after_thunk_offset - sizeof(OatQuickMethodHeader);
   uint32_t thunk_size = MethodCallThunkSize();
-  uint32_t thunk_offset = RoundDown(method_after_thunk_header_offset - thunk_size, kArmAlignment);
+  uint32_t thunk_offset =
+      RoundDown(method_after_thunk_header_offset - thunk_size, kArmCodeAlignment);
   DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
             method_after_thunk_header_offset);
-  ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
+  ASSERT_TRUE(IsAligned<kArmCodeAlignment>(thunk_offset));
   uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1 + 4u /* PC adjustment */);
   ASSERT_TRUE(IsAligned<2u>(diff));
   ASSERT_GE(diff, 16 * MB - (1u << 22));  // Simple encoding, unknown bits fit into imm10:imm11:0.
@@ -725,7 +726,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldWide(uint32_t offset, uint32_t ref
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment);
+  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmCodeAlignment);
   method_idx = 0u;
   for (uint32_t base_reg : kBakerValidRegs) {
     for (uint32_t holder_reg : kBakerValidRegs) {
@@ -791,7 +792,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldWide(uint32_t offset, uint32_t ref
      // Do not check the rest of the implementation.
 
       // The next thunk follows on the next aligned offset.
-      thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment);
+      thunk_offset += RoundUp(expected_thunk.size(), kArmCodeAlignment);
     }
   }
 }
@@ -823,7 +824,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldNarrow(uint32_t offset, uint32_t r
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment);
+  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmCodeAlignment);
   method_idx = 0u;
   for (uint32_t base_reg : kBakerValidRegs) {
     if (base_reg >= 8u) {
@@ -892,7 +893,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldNarrow(uint32_t offset, uint32_t r
      // Do not check the rest of the implementation.
 
       // The next thunk follows on the next aligned offset.
-      thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment);
+      thunk_offset += RoundUp(expected_thunk.size(), kArmCodeAlignment);
     }
   }
 }
@@ -945,9 +946,10 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddle) {
   constexpr uint32_t expected_thunk_offset =
       kLiteralOffset1 + kPcAdjustment + /* kMaxBcondPositiveDisplacement */ ((1 << 20) - 2u);
-  static_assert(IsAligned<kArmAlignment>(expected_thunk_offset), "Target offset must be aligned.");
+  static_assert(IsAligned<kArmCodeAlignment>(expected_thunk_offset),
+                "Target offset must be aligned.");
   size_t filler1_size = expected_thunk_offset -
-      RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment);
+      RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmCodeAlignment);
   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 2u);
   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
   AddCompiledMethod(MethodRef(2u), filler1_code);
@@ -956,7 +958,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddle) {
   AddCompiledMethod(MethodRef(3u), kNopCode);
 
   constexpr uint32_t kLiteralOffset2 = 4;
-  static_assert(IsAligned<kArmAlignment>(kLiteralOffset2 + kPcAdjustment),
+  static_assert(IsAligned<kArmCodeAlignment>(kLiteralOffset2 + kPcAdjustment),
                 "PC for BNE must be aligned.");
 
   // Allow reaching the thunk from the very beginning of a method almost 1MiB away. Backward branch
@@ -968,8 +970,8 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddle) {
       CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false).size();
   size_t filler2_size = 1 * MB - (kLiteralOffset2 + kPcAdjustment)
-      - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArmAlignment)
-      - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArmAlignment)
+      - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArmCodeAlignment)
+      - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArmCodeAlignment)
       - sizeof(OatQuickMethodHeader);
   std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 2u);
   ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
@@ -1013,16 +1015,18 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkBeforeFiller) {
   constexpr uint32_t expected_thunk_offset =
       kLiteralOffset1 + kPcAdjustment + /* kMaxBcondPositiveDisplacement + 2 */ (1u << 20);
-  static_assert(IsAligned<kArmAlignment>(expected_thunk_offset), "Target offset must be aligned.");
+  static_assert(IsAligned<kArmCodeAlignment>(expected_thunk_offset),
+                "Target offset must be aligned.");
   size_t filler1_size = expected_thunk_offset -
-      RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment);
+      RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmCodeAlignment);
   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 2u);
   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
   AddCompiledMethod(MethodRef(2u), filler1_code);
 
   Link();
 
-  const uint32_t bne = BneWWithOffset(kLiteralOffset1, RoundUp(raw_code1.size(), kArmAlignment));
+  const uint32_t bne =
+      BneWWithOffset(kLiteralOffset1, RoundUp(raw_code1.size(), kArmCodeAlignment));
   const std::vector<uint8_t> expected_code1 = RawCode({kNopWInsn, bne, kLdrWInsn, kNopInsn});
   ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
 }
@@ -1043,9 +1047,10 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddleUnreachableFromLast
   constexpr uint32_t expected_thunk_offset =
       kLiteralOffset1 + kPcAdjustment + /* kMaxBcondPositiveDisplacement */ ((1 << 20) - 2u);
-  static_assert(IsAligned<kArmAlignment>(expected_thunk_offset), "Target offset must be aligned.");
+  static_assert(IsAligned<kArmCodeAlignment>(expected_thunk_offset),
+                "Target offset must be aligned.");
   size_t filler1_size = expected_thunk_offset -
-      RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment);
+      RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmCodeAlignment);
   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 2u);
   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
   AddCompiledMethod(MethodRef(2u), filler1_code);
@@ -1055,7 +1060,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddleUnreachableFromLast
 
   constexpr uint32_t kReachableFromOffset2 = 4;
   constexpr uint32_t kLiteralOffset2 = kReachableFromOffset2 + 2;
-  static_assert(IsAligned<kArmAlignment>(kReachableFromOffset2 + kPcAdjustment),
+  static_assert(IsAligned<kArmCodeAlignment>(kReachableFromOffset2 + kPcAdjustment),
                 "PC for BNE must be aligned.");
 
   // If not for the extra NOP, this would allow reaching the thunk from the BNE
@@ -1068,8 +1073,8 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddleUnreachableFromLast
       CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false).size();
   size_t filler2_size = 1 * MB - (kReachableFromOffset2 + kPcAdjustment)
-      - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArmAlignment)
-      - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArmAlignment)
+      - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArmCodeAlignment)
+      - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArmCodeAlignment)
       - sizeof(OatQuickMethodHeader);
   std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 2u);
   ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
@@ -1091,7 +1096,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddleUnreachableFromLast
 
   const uint32_t bne_max_forward = kBneWPlus0 | 0x003f2fff;
   const uint32_t bne_last =
-      BneWWithOffset(kLiteralOffset2, RoundUp(raw_code2.size(), kArmAlignment));
+      BneWWithOffset(kLiteralOffset2, RoundUp(raw_code2.size(), kArmCodeAlignment));
   const std::vector<uint8_t> expected_code1 =
       RawCode({kNopWInsn, kNopInsn, bne_max_forward, kLdrWInsn});
   const std::vector<uint8_t> expected_code2 =
@@ -1123,7 +1128,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerArray) {
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment);
+  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmCodeAlignment);
   method_idx = 0u;
   for (uint32_t base_reg : kBakerValidRegs) {
     ++method_idx;
@@ -1177,7 +1182,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerArray) {
     // Do not check the rest of the implementation.
 
     // The next thunk follows on the next aligned offset.
-    thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment);
+    thunk_offset += RoundUp(expected_thunk.size(), kArmCodeAlignment);
   }
 }
@@ -1200,7 +1205,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootWide) {
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment);
+  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmCodeAlignment);
   method_idx = 0u;
   for (uint32_t root_reg : kBakerValidRegs) {
     ++method_idx;
@@ -1232,7 +1237,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootWide) {
     // Do not check the rest of the implementation.
 
     // The next thunk follows on the next aligned offset.
-    thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment);
+    thunk_offset += RoundUp(expected_thunk.size(), kArmCodeAlignment);
   }
 }
@@ -1255,7 +1260,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootNarrow) {
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment);
+  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmCodeAlignment);
   method_idx = 0u;
   for (uint32_t root_reg : kBakerValidRegsNarrow) {
     ++method_idx;
@@ -1281,7 +1286,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootNarrow) {
     // Do not check the rest of the implementation.
 
     // The next thunk follows on the next aligned offset.
-    thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment);
+    thunk_offset += RoundUp(expected_thunk.size(), kArmCodeAlignment);
   }
 }
@@ -1309,7 +1314,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootOffsetBits) {
   Link();
 
   // The thunk is right after the method code.
-  DCHECK_ALIGNED(1 * MB, kArmAlignment);
+  DCHECK_ALIGNED(1 * MB, kArmCodeAlignment);
   std::vector<uint8_t> expected_code;
   for (size_t i = 0; i != num_patches; ++i) {
     PushBackInsn(&expected_code, ldr);
@@ -1343,7 +1348,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerAndMethodCallInteraction) {
     // Add a method with the right size that the method code for the next one starts 1MiB
     // after code for method 1.
     size_t filler_size =
-        1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment)
+        1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmCodeAlignment)
         - sizeof(OatQuickMethodHeader);
     std::vector<uint8_t> filler_code = GenNops(filler_size / 2u);
     ++method_idx;
@@ -1358,16 +1363,16 @@ TEST_F(Thumb2RelativePatcherTest, BakerAndMethodCallInteraction) {
   }
 
   // Add 2 Baker GC root patches to the last method, one that would allow the thunk at
-  // 1MiB + kArmAlignment, i.e. kArmAlignment after the method call thunk, and the
-  // second that needs it kArmAlignment after that. Given the size of the GC root thunk
-  // is more than the space required by the method call thunk plus kArmAlignment,
+  // 1MiB + kArmCodeAlignment, i.e. kArmCodeAlignment after the method call thunk, and the
+  // second that needs it kArmCodeAlignment after that. Given the size of the GC root thunk
+  // is more than the space required by the method call thunk plus kArmCodeAlignment,
   // this pushes the first GC root thunk's pending MaxNextOffset() before the method call
   // thunk's pending MaxNextOffset() which needs to be adjusted.
-  ASSERT_LT(RoundUp(CompileMethodCallThunk().size(), kArmAlignment) + kArmAlignment,
+  ASSERT_LT(RoundUp(CompileMethodCallThunk().size(), kArmCodeAlignment) + kArmCodeAlignment,
             CompileBakerGcRootThunk(/* root_reg */ 0, /* narrow */ false).size());
-  static_assert(kArmAlignment == 8, "Code below assumes kArmAlignment == 8");
-  constexpr size_t kBakerLiteralOffset1 = kArmAlignment + 2u - kPcAdjustment;
-  constexpr size_t kBakerLiteralOffset2 = kBakerLiteralOffset1 + kArmAlignment;
+  static_assert(kArmCodeAlignment == 8, "Code below assumes kArmCodeAlignment == 8");
+  constexpr size_t kBakerLiteralOffset1 = kArmCodeAlignment + 2u - kPcAdjustment;
+  constexpr size_t kBakerLiteralOffset2 = kBakerLiteralOffset1 + kArmCodeAlignment;
   // Use offset = 0, base_reg = 0, the LDR is simply `kLdrWInsn | (root_reg << 12)`.
   const uint32_t ldr1 = kLdrWInsn | (/* root_reg */ 1 << 12);
   const uint32_t ldr2 = kLdrWInsn | (/* root_reg */ 2 << 12);
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
index 8bae5d47f1..ce61f43b6e 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
@@ -112,7 +112,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
                         const ArrayRef<const uint8_t>& last_method_code,
                         const ArrayRef<const LinkerPatch>& last_method_patches,
                         uint32_t distance_without_thunks) {
-    CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
+    CHECK_EQ(distance_without_thunks % kArm64CodeAlignment, 0u);
     uint32_t method1_offset =
         kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
     AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
@@ -120,7 +120,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
 
     // We want to put the last method at a very precise offset.
     const uint32_t last_method_offset = method1_offset + distance_without_thunks;
-    CHECK_ALIGNED(last_method_offset, kArm64Alignment);
+    CHECK_ALIGNED(last_method_offset, kArm64CodeAlignment);
     const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
 
     // Fill the gap with intermediate methods in chunks of 2MiB and the first in [2MiB, 4MiB).
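An aside on the placement arithmetic asserted in both CallOtherJustTooFarAfter tests: RoundDown(header_offset - thunk_size, alignment) plus the trailing padding lands exactly back on the header offset whenever that offset is itself aligned, which is what the DCHECK_EQ above verifies. A self-contained sketch with invented numbers; the CodeAlignmentSize() here is a simplified stand-in for the test helper of the same name:

#include <cassert>
#include <cstdint>

constexpr uint32_t kArm64CodeAlignment = 16;

constexpr uint32_t RoundUp(uint32_t x, uint32_t n) { return (x + n - 1u) & ~(n - 1u); }
constexpr uint32_t RoundDown(uint32_t x, uint32_t n) { return x & ~(n - 1u); }

// Padding inserted after code ending at `offset` so the next code starts aligned.
constexpr uint32_t CodeAlignmentSize(uint32_t offset) {
  return RoundUp(offset, kArm64CodeAlignment) - offset;
}

int main() {
  const uint32_t header_offset = 0x100000u;  // aligned; where the next method header must sit
  const uint32_t thunk_size = 28u;           // invented; deliberately not a multiple of 16
  const uint32_t thunk_offset = RoundDown(header_offset - thunk_size, kArm64CodeAlignment);
  // The thunk plus its trailing padding ends exactly at the header offset.
  assert(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size) ==
         header_offset);
  return 0;
}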
@@ -733,24 +733,26 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
                     bl_offset_in_method1 + just_over_max_positive_disp);
   ASSERT_EQ(kExpectedLastMethodIdx, last_method_idx);
   uint32_t method_after_thunk_idx = last_method_idx;
-  if (sizeof(OatQuickMethodHeader) < kArm64Alignment) {
-    // The thunk needs to start on a kArm64Alignment-aligned address before the address where the
-    // last method would have been if there was no thunk. If the size of the OatQuickMethodHeader
-    // is at least kArm64Alignment, the thunk start shall fit between the previous filler method
-    // and that address. Otherwise, it shall be inserted before that filler method.
+  if (sizeof(OatQuickMethodHeader) < kArm64CodeAlignment) {
+    // The thunk needs to start on a kArm64CodeAlignment-aligned address before the address where
+    // the last method would have been if there was no thunk. If the size of the
+    // OatQuickMethodHeader is at least kArm64CodeAlignment, the thunk start shall fit between the
+    // previous filler method and that address. Otherwise, it shall be inserted before that filler
+    // method.
     method_after_thunk_idx -= 1u;
   }
 
   uint32_t method1_offset = GetMethodOffset(1u);
   uint32_t method_after_thunk_offset = GetMethodOffset(method_after_thunk_idx);
-  ASSERT_TRUE(IsAligned<kArm64Alignment>(method_after_thunk_offset));
+  ASSERT_TRUE(IsAligned<kArm64CodeAlignment>(method_after_thunk_offset));
   uint32_t method_after_thunk_header_offset =
       method_after_thunk_offset - sizeof(OatQuickMethodHeader);
   uint32_t thunk_size = MethodCallThunkSize();
-  uint32_t thunk_offset = RoundDown(method_after_thunk_header_offset - thunk_size, kArm64Alignment);
+  uint32_t thunk_offset = RoundDown(
+      method_after_thunk_header_offset - thunk_size, kArm64CodeAlignment);
   DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
             method_after_thunk_header_offset);
-  ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset));
+  ASSERT_TRUE(IsAligned<kArm64CodeAlignment>(thunk_offset));
   uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
   ASSERT_TRUE(IsAligned<4u>(diff));
   ASSERT_LT(diff, 128 * MB);
@@ -1065,7 +1067,8 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg)
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
+  uint32_t thunk_offset =
+      GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64CodeAlignment);
   method_idx = 0u;
   for (uint32_t base_reg : valid_regs) {
     for (uint32_t holder_reg : valid_regs) {
@@ -1118,7 +1121,7 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg)
      // Do not check the rest of the implementation.
 
       // The next thunk follows on the next aligned offset.
-      thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
+      thunk_offset += RoundUp(expected_thunk.size(), kArm64CodeAlignment);
     }
   }
 }
@@ -1155,7 +1158,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
   // Allow thunk at 1MiB offset from the start of the method above. Literal offset being 4
   // allows the branch to reach that thunk.
   size_t filler1_size =
-      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
+      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64CodeAlignment);
   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
   AddCompiledMethod(MethodRef(2u), filler1_code);
@@ -1170,8 +1173,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
   //   - method 4 header (let there be no padding between method 4 code and method 5 pre-header).
   size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0).size();
   size_t filler2_size =
-      1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64Alignment)
-             - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
+      1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64CodeAlignment)
+             - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64CodeAlignment)
              - sizeof(OatQuickMethodHeader);
   std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 4u);
   ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
@@ -1215,14 +1218,14 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkBeforeFiller) {
   // Allow thunk at 1MiB offset from the start of the method above. Literal offset being 4
   // allows the branch to reach that thunk.
   size_t filler1_size =
-      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
+      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64CodeAlignment);
   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
   AddCompiledMethod(MethodRef(2u), filler1_code);
 
   Link();
 
-  const uint32_t cbnz_offset = RoundUp(raw_code1.size(), kArm64Alignment) - kLiteralOffset1;
+  const uint32_t cbnz_offset = RoundUp(raw_code1.size(), kArm64CodeAlignment) - kLiteralOffset1;
   const uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
   const std::vector<uint8_t> expected_code1 = RawCode({cbnz, kLdrWInsn, kNopInsn});
   ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
@@ -1244,7 +1247,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFr
   // Allow thunk at 1MiB offset from the start of the method above. Literal offset being 4
   // allows the branch to reach that thunk.
   size_t filler1_size =
-      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
+      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64CodeAlignment);
   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
   AddCompiledMethod(MethodRef(2u), filler1_code);
@@ -1259,8 +1262,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFr
   //   - method 4 header (let there be no padding between method 4 code and method 5 pre-header).
   size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0).size();
   size_t filler2_size =
-      1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64Alignment)
-             - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
+      1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64CodeAlignment)
+             - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64CodeAlignment)
              - sizeof(OatQuickMethodHeader);
   std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 4u);
   ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
@@ -1278,7 +1281,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFr
   Link();
 
   const uint32_t cbnz_max_forward = kCbnzIP1Plus0Insn | 0x007fffe0;
-  const uint32_t cbnz_last_offset = RoundUp(raw_code2.size(), kArm64Alignment) - kLiteralOffset2;
+  const uint32_t cbnz_last_offset =
+      RoundUp(raw_code2.size(), kArm64CodeAlignment) - kLiteralOffset2;
   const uint32_t cbnz_last = kCbnzIP1Plus0Insn | (cbnz_last_offset << (5 - 2));
   const std::vector<uint8_t> expected_code1 = RawCode({kNopInsn, cbnz_max_forward, kLdrWInsn});
   const std::vector<uint8_t> expected_code2 = RawCode({kNopInsn, cbnz_last, kLdrWInsn});
@@ -1315,7 +1319,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerArray) {
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
+  uint32_t thunk_offset =
+      GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64CodeAlignment);
   method_idx = 0u;
   for (uint32_t base_reg : valid_regs) {
     ++method_idx;
@@ -1363,7 +1368,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerArray) {
     // Do not check the rest of the implementation.
 
     // The next thunk follows on the next aligned offset.
-    thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
+    thunk_offset += RoundUp(expected_thunk.size(), kArm64CodeAlignment);
   }
 }
@@ -1392,7 +1397,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerGcRoot) {
   Link();
 
   // All thunks are at the end.
-  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
+  uint32_t thunk_offset =
+      GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64CodeAlignment);
   method_idx = 0u;
   for (uint32_t root_reg : valid_regs) {
     ++method_idx;
@@ -1419,7 +1425,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerGcRoot) {
     // Do not check the rest of the implementation.
 
     // The next thunk follows on the next aligned offset.
-    thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
+    thunk_offset += RoundUp(expected_thunk.size(), kArm64CodeAlignment);
   }
 }
@@ -1447,7 +1453,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerAndMethodCallInteraction) {
     // Add a method with the right size that the method code for the next one starts 1MiB
     // after code for method 1.
     size_t filler_size =
-        1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
+        1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64CodeAlignment)
         - sizeof(OatQuickMethodHeader);
     std::vector<uint8_t> filler_code = GenNops(filler_size / 4u);
     ++method_idx;
@@ -1462,16 +1468,16 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerAndMethodCallInteraction) {
   }
 
   // Add 2 Baker GC root patches to the last method, one that would allow the thunk at
-  // 1MiB + kArm64Alignment, i.e. kArm64Alignment after the method call thunk, and the
-  // second that needs it kArm64Alignment after that. Given the size of the GC root thunk
-  // is more than the space required by the method call thunk plus kArm64Alignment,
+  // 1MiB + kArm64CodeAlignment, i.e. kArm64CodeAlignment after the method call thunk, and the
+  // second that needs it kArm64CodeAlignment after that. Given the size of the GC root thunk
+  // is more than the space required by the method call thunk plus kArm64CodeAlignment,
   // this pushes the first GC root thunk's pending MaxNextOffset() before the method call
   // thunk's pending MaxNextOffset() which needs to be adjusted.
-  ASSERT_LT(RoundUp(CompileMethodCallThunk().size(), kArm64Alignment) + kArm64Alignment,
+  ASSERT_LT(RoundUp(CompileMethodCallThunk().size(), kArm64CodeAlignment) + kArm64CodeAlignment,
            CompileBakerGcRootThunk(/* root_reg */ 0).size());
-  static_assert(kArm64Alignment == 16, "Code below assumes kArm64Alignment == 16");
-  constexpr size_t kBakerLiteralOffset1 = 4u + kArm64Alignment;
-  constexpr size_t kBakerLiteralOffset2 = 4u + 2 * kArm64Alignment;
+  static_assert(kArm64CodeAlignment == 16, "Code below assumes kArm64CodeAlignment == 16");
+  constexpr size_t kBakerLiteralOffset1 = 4u + kArm64CodeAlignment;
+  constexpr size_t kBakerLiteralOffset2 = 4u + 2 * kArm64CodeAlignment;
   // Use offset = 0, base_reg = 0, the LDR is simply `kLdrWInsn | root_reg`.
   const uint32_t ldr1 = kLdrWInsn | /* root_reg */ 1;
   const uint32_t ldr2 = kLdrWInsn | /* root_reg */ 2;
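The BakerAndMethodCallInteraction comment in both files compresses an ordering argument worth spelling out: a thunk's latest allowed start is roughly its branch offset plus the branch's maximum displacement minus the thunk size, so a sufficiently larger thunk gets an earlier deadline even when its branch sits later in the code. A numeric sketch with invented sizes and a single simplified displacement limit for both branch kinds (the real BL and CBNZ ranges differ):

#include <cassert>
#include <cstdint>

constexpr uint32_t kArm64CodeAlignment = 16;
constexpr uint32_t RoundUp(uint32_t x, uint32_t n) { return (x + n - 1u) & ~(n - 1u); }

int main() {
  const uint32_t method_call_thunk_size = 16u;  // invented
  const uint32_t gc_root_thunk_size = 64u;      // invented
  // Precondition mirroring the ASSERT_LT in the tests above.
  assert(RoundUp(method_call_thunk_size, kArm64CodeAlignment) + kArm64CodeAlignment <
         gc_root_thunk_size);

  const uint32_t max_displacement = 1u << 20;           // simplification: same for both
  const uint32_t method_call_branch = 0u;
  const uint32_t gc_root_branch = kArm64CodeAlignment;  // 16 bytes later in the code
  const uint32_t method_call_deadline =
      method_call_branch + max_displacement - method_call_thunk_size;
  const uint32_t gc_root_deadline = gc_root_branch + max_displacement - gc_root_thunk_size;
  // Despite the later branch, the larger GC root thunk must be emitted earlier, so the
  // pending method call thunk's deadline (MaxNextOffset) needs adjusting.
  assert(gc_root_deadline < method_call_deadline);
  return 0;
}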
diff --git a/libartbase/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
index 10a99196e1..852f681a05 100644
--- a/libartbase/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -78,13 +78,13 @@ size_t GetInstructionSetAlignment(InstructionSet isa) {
     case InstructionSet::kArm:
       // Fall-through.
     case InstructionSet::kThumb2:
-      return kArmAlignment;
+      return kArmCodeAlignment;
     case InstructionSet::kArm64:
-      return kArm64Alignment;
+      return kArm64CodeAlignment;
     case InstructionSet::kX86:
       // Fall-through.
     case InstructionSet::kX86_64:
-      return kX86Alignment;
+      return kX86CodeAlignment;
     case InstructionSet::kNone:
       LOG(FATAL) << "ISA kNone does not have alignment.";
       UNREACHABLE();
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index 155b186a8e..1516df9417 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -55,20 +55,18 @@ static constexpr PointerSize kArm64PointerSize = PointerSize::k64;
 static constexpr PointerSize kX86PointerSize = PointerSize::k32;
 static constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
 
-// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
-// but ARM ELF requires 8..
-static constexpr size_t kArmAlignment = 8;
-
-// ARM64 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kArm64Alignment = 16;
-
 // ARM64 default SVE vector length.
 static constexpr size_t kArm64DefaultSVEVectorLength = 256;
 
-// X86 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kX86Alignment = 16;
+// Code alignment (used for the first instruction of a subroutine, such as an entrypoint).
+// This is the recommended alignment for maximum performance.
+// ARM processors require code to be 4-byte aligned, but ARM ELF requires 8.
+static constexpr size_t kArmCodeAlignment = 8;
+static constexpr size_t kArm64CodeAlignment = 16;
+static constexpr size_t kX86CodeAlignment = 16;
 
-// Different than code alignment since code alignment is only first instruction of method.
+// Instruction alignment (every instruction must be aligned at this boundary). This differs from
+// code alignment, which applies only to the first instruction of a subroutine.
 static constexpr size_t kThumb2InstructionAlignment = 2;
 static constexpr size_t kArm64InstructionAlignment = 4;
 static constexpr size_t kX86InstructionAlignment = 1;
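One relation implied by the new comments, checked here as a sketch rather than anything this change asserts: a subroutine entry point is itself an instruction, so each ISA's code alignment must be a multiple of its instruction alignment. Constants copied from instruction_set.h above:

#include <cstddef>

constexpr size_t kArmCodeAlignment = 8;
constexpr size_t kArm64CodeAlignment = 16;
constexpr size_t kX86CodeAlignment = 16;
constexpr size_t kThumb2InstructionAlignment = 2;
constexpr size_t kArm64InstructionAlignment = 4;
constexpr size_t kX86InstructionAlignment = 1;

// An entry point is an instruction, so code alignment is the coarser constraint.
static_assert(kArmCodeAlignment % kThumb2InstructionAlignment == 0, "ARM/Thumb2");
static_assert(kArm64CodeAlignment % kArm64InstructionAlignment == 0, "ARM64");
static_assert(kX86CodeAlignment % kX86InstructionAlignment == 0, "x86");

int main() { return 0; }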