31 files changed, 888 insertions, 1336 deletions
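For orientation before the diff body: the Thumb2RelativePatcher changes below add static EncodeBakerReadBarrier*Data() helpers that pack a patch's custom value with ART's BitField template (a 2-bit kind, two 4-bit register fields, and a 1-bit width). A minimal, self-contained sketch of that layout, using plain shifts and hypothetical names (Kind, Width, Encode) rather than the real BitField types, and assuming the field widths declared in relative_patcher_thumb2.h:

#include <cstdint>

// Hypothetical names; field widths mirror the BitField typedefs in
// relative_patcher_thumb2.h: kind (2 bits), first reg (4), second reg (4), width (1).
enum class Kind : uint32_t { kField = 0, kArray = 1, kGcRoot = 2 };
enum class Width : uint32_t { kWide = 0, kNarrow = 1 };

constexpr uint32_t kInvalidReg = 15u;  // PC; used when there is no second register.

constexpr uint32_t Encode(Kind kind, uint32_t first_reg, uint32_t second_reg, Width width) {
  return static_cast<uint32_t>(kind) |           // bits [0, 2)
         (first_reg << 2) |                      // bits [2, 6)
         (second_reg << 6) |                     // bits [6, 10)
         (static_cast<uint32_t>(width) << 10);   // bit 10
}

// Wide field load with base register r3 and holder register r7
// (corresponds to EncodeBakerReadBarrierFieldData(3, 7, /* narrow */ false)).
constexpr uint32_t kFieldData = Encode(Kind::kField, 3u, 7u, Width::kWide);
static_assert(kFieldData == ((3u << 2) | (7u << 6)), "kind and width are both 0 here");

// Array load with base register r3 (corresponds to EncodeBakerReadBarrierArrayData(3)).
constexpr uint32_t kArrayData = Encode(Kind::kArray, 3u, kInvalidReg, Width::kWide);

The Arm64RelativePatcher encoders in the same change follow the same kind/first-register/second-register scheme but omit the width bit, since the A64 loads involved have no narrow encoding.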
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h index 581edaa773..29ff235cea 100644 --- a/compiler/cfi_test.h +++ b/compiler/cfi_test.h @@ -37,8 +37,8 @@ constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT; class CFITest : public dwarf::DwarfTest { public: void GenerateExpected(FILE* f, InstructionSet isa, const char* isa_str, - ArrayRef<const uint8_t> actual_asm, - ArrayRef<const uint8_t> actual_cfi) { + const std::vector<uint8_t>& actual_asm, + const std::vector<uint8_t>& actual_cfi) { std::vector<std::string> lines; // Print the raw bytes. fprintf(f, "static constexpr uint8_t expected_asm_%s[] = {", isa_str); @@ -50,18 +50,11 @@ class CFITest : public dwarf::DwarfTest { // Pretty-print CFI opcodes. constexpr bool is64bit = false; dwarf::DebugFrameOpCodeWriter<> initial_opcodes; - dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, kCFIFormat, &debug_frame_data_); + dwarf::WriteCIE(is64bit, dwarf::Reg(8), + initial_opcodes, kCFIFormat, &debug_frame_data_); std::vector<uintptr_t> debug_frame_patches; - dwarf::WriteFDE(is64bit, - /* section_address */ 0, - /* cie_address */ 0, - /* code_address */ 0, - actual_asm.size(), - actual_cfi, - kCFIFormat, - /* buffer_address */ 0, - &debug_frame_data_, - &debug_frame_patches); + dwarf::WriteFDE(is64bit, 0, 0, 0, actual_asm.size(), ArrayRef<const uint8_t>(actual_cfi), + kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); ReformatCfi(Objdump(false, "-W"), &lines); // Pretty-print assembly. const uint8_t* asm_base = actual_asm.data(); @@ -149,7 +142,7 @@ class CFITest : public dwarf::DwarfTest { } // Pretty-print byte array. 12 bytes per line. - static void HexDump(FILE* f, ArrayRef<const uint8_t> data) { + static void HexDump(FILE* f, const std::vector<uint8_t>& data) { for (size_t i = 0; i < data.size(); i++) { fprintf(f, i % 12 == 0 ? "\n " : " "); // Whitespace. fprintf(f, "0x%02X,", data[i]); diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc index aa8277edb4..a26a985ff9 100644 --- a/compiler/driver/compiled_method_storage.cc +++ b/compiler/driver/compiled_method_storage.cc @@ -161,46 +161,6 @@ class CompiledMethodStorage::LengthPrefixedArrayAlloc { SwapSpace* const swap_space_; }; -class CompiledMethodStorage::ThunkMapKey { - public: - ThunkMapKey(linker::LinkerPatch::Type type, uint32_t custom_value1, uint32_t custom_value2) - : type_(type), custom_value1_(custom_value1), custom_value2_(custom_value2) {} - - bool operator<(const ThunkMapKey& other) const { - if (custom_value1_ != other.custom_value1_) { - return custom_value1_ < other.custom_value1_; - } - if (custom_value2_ != other.custom_value2_) { - return custom_value2_ < other.custom_value2_; - } - return type_ < other.type_; - } - - private: - linker::LinkerPatch::Type type_; - uint32_t custom_value1_; - uint32_t custom_value2_; -}; - -class CompiledMethodStorage::ThunkMapValue { - public: - ThunkMapValue(std::vector<uint8_t, SwapAllocator<uint8_t>>&& code, - const std::string& debug_name) - : code_(std::move(code)), debug_name_(debug_name) {} - - ArrayRef<const uint8_t> GetCode() const { - return ArrayRef<const uint8_t>(code_); - } - - const std::string& GetDebugName() const { - return debug_name_; - } - - private: - std::vector<uint8_t, SwapAllocator<uint8_t>> code_; - std::string debug_name_; -}; - CompiledMethodStorage::CompiledMethodStorage(int swap_fd) : swap_space_(swap_fd == -1 ? 
nullptr : new SwapSpace(swap_fd, 10 * MB)), dedupe_enabled_(true), @@ -211,9 +171,7 @@ CompiledMethodStorage::CompiledMethodStorage(int swap_fd) LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_linker_patches_("dedupe cfi info", - LengthPrefixedArrayAlloc<linker::LinkerPatch>(swap_space_.get())), - thunk_map_lock_("thunk_map_lock"), - thunk_map_(std::less<ThunkMapKey>(), SwapAllocator<ThunkMapValueType>(swap_space_.get())) { + LengthPrefixedArrayAlloc<linker::LinkerPatch>(swap_space_.get())) { } CompiledMethodStorage::~CompiledMethodStorage() { @@ -279,55 +237,4 @@ void CompiledMethodStorage::ReleaseLinkerPatches( ReleaseArrayIfNotDeduplicated(linker_patches); } -CompiledMethodStorage::ThunkMapKey CompiledMethodStorage::GetThunkMapKey( - const linker::LinkerPatch& linker_patch) { - uint32_t custom_value1 = 0u; - uint32_t custom_value2 = 0u; - switch (linker_patch.GetType()) { - case linker::LinkerPatch::Type::kBakerReadBarrierBranch: - custom_value1 = linker_patch.GetBakerCustomValue1(); - custom_value2 = linker_patch.GetBakerCustomValue2(); - break; - case linker::LinkerPatch::Type::kCallRelative: - // No custom values. - break; - default: - LOG(FATAL) << "Unexpected patch type: " << linker_patch.GetType(); - UNREACHABLE(); - } - return ThunkMapKey(linker_patch.GetType(), custom_value1, custom_value2); -} - -ArrayRef<const uint8_t> CompiledMethodStorage::GetThunkCode(const linker::LinkerPatch& linker_patch, - /*out*/ std::string* debug_name) { - ThunkMapKey key = GetThunkMapKey(linker_patch); - MutexLock lock(Thread::Current(), thunk_map_lock_); - auto it = thunk_map_.find(key); - if (it != thunk_map_.end()) { - const ThunkMapValue& value = it->second; - if (debug_name != nullptr) { - *debug_name = value.GetDebugName(); - } - return value.GetCode(); - } else { - if (debug_name != nullptr) { - *debug_name = std::string(); - } - return ArrayRef<const uint8_t>(); - } -} - -void CompiledMethodStorage::SetThunkCode(const linker::LinkerPatch& linker_patch, - ArrayRef<const uint8_t> code, - const std::string& debug_name) { - DCHECK(!code.empty()); - ThunkMapKey key = GetThunkMapKey(linker_patch); - std::vector<uint8_t, SwapAllocator<uint8_t>> code_copy( - code.begin(), code.end(), SwapAllocator<uint8_t>(swap_space_.get())); - ThunkMapValue value(std::move(code_copy), debug_name); - MutexLock lock(Thread::Current(), thunk_map_lock_); - // Note: Multiple threads can try and compile the same thunk, so this may not create a new entry. - thunk_map_.emplace(key, std::move(value)); -} - } // namespace art diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h index 1634facb7c..249f06c20f 100644 --- a/compiler/driver/compiled_method_storage.h +++ b/compiler/driver/compiled_method_storage.h @@ -18,7 +18,6 @@ #define ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_ #include <iosfwd> -#include <map> #include <memory> #include "base/array_ref.h" @@ -68,29 +67,7 @@ class CompiledMethodStorage { const ArrayRef<const linker::LinkerPatch>& linker_patches); void ReleaseLinkerPatches(const LengthPrefixedArray<linker::LinkerPatch>* linker_patches); - // Returns the code associated with the given patch. - // If the code has not been set, returns empty data. - // If `debug_name` is not null, stores the associated debug name in `*debug_name`. 
- ArrayRef<const uint8_t> GetThunkCode(const linker::LinkerPatch& linker_patch, - /*out*/ std::string* debug_name = nullptr); - - // Sets the code and debug name associated with the given patch. - void SetThunkCode(const linker::LinkerPatch& linker_patch, - ArrayRef<const uint8_t> code, - const std::string& debug_name); - private: - class ThunkMapKey; - class ThunkMapValue; - using ThunkMapValueType = std::pair<const ThunkMapKey, ThunkMapValue>; - using ThunkMap = std::map<ThunkMapKey, - ThunkMapValue, - std::less<ThunkMapKey>, - SwapAllocator<ThunkMapValueType>>; - static_assert(std::is_same<ThunkMapValueType, ThunkMap::value_type>::value, "Value type check."); - - static ThunkMapKey GetThunkMapKey(const linker::LinkerPatch& linker_patch); - template <typename T, typename DedupeSetType> const LengthPrefixedArray<T>* AllocateOrDeduplicateArray(const ArrayRef<const T>& data, DedupeSetType* dedupe_set); @@ -125,9 +102,6 @@ class CompiledMethodStorage { ArrayDedupeSet<uint8_t> dedupe_cfi_info_; ArrayDedupeSet<linker::LinkerPatch> dedupe_linker_patches_; - Mutex thunk_map_lock_; - ThunkMap thunk_map_ GUARDED_BY(thunk_map_lock_); - DISALLOW_COPY_AND_ASSIGN(CompiledMethodStorage); }; diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc index 38f95488a9..236b5c0c2e 100644 --- a/compiler/jni/jni_cfi_test.cc +++ b/compiler/jni/jni_cfi_test.cc @@ -94,11 +94,7 @@ class JNICFITest : public CFITest { const std::vector<uint8_t>& actual_cfi = *(jni_asm->cfi().data()); if (kGenerateExpected) { - GenerateExpected(stdout, - isa, - isa_str, - ArrayRef<const uint8_t>(actual_asm), - ArrayRef<const uint8_t>(actual_cfi)); + GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi); } else { EXPECT_EQ(expected_asm, actual_asm); EXPECT_EQ(expected_cfi, actual_cfi); diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc index 7cb8ae55c5..6e0286afac 100644 --- a/compiler/linker/arm/relative_patcher_arm_base.cc +++ b/compiler/linker/arm/relative_patcher_arm_base.cc @@ -30,9 +30,8 @@ namespace linker { class ArmBaseRelativePatcher::ThunkData { public: - ThunkData(ArrayRef<const uint8_t> code, const std::string& debug_name, uint32_t max_next_offset) - : code_(code), - debug_name_(debug_name), + ThunkData(std::vector<uint8_t> code, uint32_t max_next_offset) + : code_(std::move(code)), offsets_(), max_next_offset_(max_next_offset), pending_offset_(0u) { @@ -46,11 +45,7 @@ class ArmBaseRelativePatcher::ThunkData { } ArrayRef<const uint8_t> GetCode() const { - return code_; - } - - const std::string& GetDebugName() const { - return debug_name_; + return ArrayRef<const uint8_t>(code_); } bool NeedsNextThunk() const { @@ -147,11 +142,10 @@ class ArmBaseRelativePatcher::ThunkData { } private: - const ArrayRef<const uint8_t> code_; // The code of the thunk. - const std::string debug_name_; // The debug name of the thunk. - std::vector<uint32_t> offsets_; // Offsets at which the thunk needs to be written. - uint32_t max_next_offset_; // The maximum offset at which the next thunk can be placed. - uint32_t pending_offset_; // The index of the next offset to write. + std::vector<uint8_t> code_; // The code of the thunk. + std::vector<uint32_t> offsets_; // Offsets at which the thunk needs to be written. + uint32_t max_next_offset_; // The maximum offset at which the next thunk can be placed. + uint32_t pending_offset_; // The index of the next offset to write. 
}; class ArmBaseRelativePatcher::PendingThunkComparator { @@ -245,13 +239,14 @@ std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugIn std::vector<debug::MethodDebugInfo> result; result.reserve(number_of_thunks); for (auto&& entry : thunks_) { + const ThunkKey& key = entry.first; const ThunkData& data = entry.second; size_t start = data.IndexOfFirstThunkAtOrAfter(executable_offset); if (start == data.NumberOfThunks()) { continue; } // Get the base name to use for the first occurrence of the thunk. - std::string base_name = data.GetDebugName(); + std::string base_name = GetThunkDebugName(key); for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) { debug::MethodDebugInfo info = {}; if (i == 0u) { @@ -272,11 +267,9 @@ std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugIn return result; } -ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider, - RelativePatcherTargetProvider* target_provider, +ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider, InstructionSet instruction_set) - : thunk_provider_(thunk_provider), - target_provider_(target_provider), + : provider_(provider), instruction_set_(instruction_set), thunks_(), unprocessed_method_call_patches_(), @@ -405,7 +398,7 @@ void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_metho unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod()); if (method_call_thunk_ == nullptr) { uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key); - auto it = thunks_.Put(key, ThunkDataForPatch(patch, max_next_offset)); + auto it = thunks_.Put(key, ThunkData(CompileThunk(key), max_next_offset)); method_call_thunk_ = &it->second; AddUnreservedThunk(method_call_thunk_); } else { @@ -416,7 +409,7 @@ void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_metho auto lb = thunks_.lower_bound(key); if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) { uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key); - auto it = thunks_.PutBefore(lb, key, ThunkDataForPatch(patch, max_next_offset)); + auto it = thunks_.PutBefore(lb, key, ThunkData(CompileThunk(key), max_next_offset)); AddUnreservedThunk(&it->second); } else { old_data = &lb->second; @@ -484,7 +477,7 @@ void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset, break; } } else { - auto result = target_provider_->FindMethodOffset(target_method); + auto result = provider_->FindMethodOffset(target_method); if (!result.first) { break; } @@ -525,14 +518,5 @@ inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_of GetInstructionSetAlignment(instruction_set_)); } -inline ArmBaseRelativePatcher::ThunkData ArmBaseRelativePatcher::ThunkDataForPatch( - const LinkerPatch& patch, uint32_t max_next_offset) { - ArrayRef<const uint8_t> code; - std::string debug_name; - thunk_provider_->GetThunkCode(patch, &code, &debug_name); - DCHECK(!code.empty()); - return ThunkData(code, debug_name, max_next_offset); -} - } // namespace linker } // namespace art diff --git a/compiler/linker/arm/relative_patcher_arm_base.h b/compiler/linker/arm/relative_patcher_arm_base.h index 963d6690b0..ee09bf96b3 100644 --- a/compiler/linker/arm/relative_patcher_arm_base.h +++ b/compiler/linker/arm/relative_patcher_arm_base.h @@ -37,8 +37,7 @@ class ArmBaseRelativePatcher : public RelativePatcher { std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t 
executable_offset) OVERRIDE; protected: - ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider, - RelativePatcherTargetProvider* target_provider, + ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider, InstructionSet instruction_set); ~ArmBaseRelativePatcher(); @@ -95,6 +94,8 @@ class ArmBaseRelativePatcher : public RelativePatcher { uint32_t CalculateMethodCallDisplacement(uint32_t patch_offset, uint32_t target_offset); + virtual std::vector<uint8_t> CompileThunk(const ThunkKey& key) = 0; + virtual std::string GetThunkDebugName(const ThunkKey& key) = 0; virtual uint32_t MaxPositiveDisplacement(const ThunkKey& key) = 0; virtual uint32_t MaxNegativeDisplacement(const ThunkKey& key) = 0; @@ -107,10 +108,8 @@ class ArmBaseRelativePatcher : public RelativePatcher { void ResolveMethodCalls(uint32_t quick_code_offset, MethodReference method_ref); uint32_t CalculateMaxNextOffset(uint32_t patch_offset, const ThunkKey& key); - ThunkData ThunkDataForPatch(const LinkerPatch& patch, uint32_t max_next_offset); - RelativePatcherThunkProvider* const thunk_provider_; - RelativePatcherTargetProvider* const target_provider_; + RelativePatcherTargetProvider* const provider_; const InstructionSet instruction_set_; // The data for all thunks. diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc index 7400d11c31..78755176e4 100644 --- a/compiler/linker/arm/relative_patcher_thumb2.cc +++ b/compiler/linker/arm/relative_patcher_thumb2.cc @@ -48,9 +48,8 @@ constexpr uint32_t kMaxMethodCallNegativeDisplacement = (1u << 24) - kPcDisplace constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 2u + kPcDisplacement; constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement; -Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider, - RelativePatcherTargetProvider* target_provider) - : ArmBaseRelativePatcher(thunk_provider, target_provider, InstructionSet::kThumb2) { +Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider) + : ArmBaseRelativePatcher(provider, InstructionSet::kThumb2) { } void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code, @@ -111,6 +110,62 @@ void Thumb2RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* co uint32_t insn = GetInsn32(code, literal_offset); DCHECK_EQ(insn, 0xf0408000); // BNE +0 (unpatched) ThunkKey key = GetBakerThunkKey(patch); + if (kIsDebugBuild) { + const uint32_t encoded_data = key.GetCustomValue1(); + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + // Check that the next instruction matches the expected LDR. + switch (kind) { + case BakerReadBarrierKind::kField: { + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + if (width == BakerReadBarrierWidth::kWide) { + DCHECK_GE(code->size() - literal_offset, 8u); + uint32_t next_insn = GetInsn32(code, literal_offset + 4u); + // LDR (immediate), encoding T3, with correct base_reg. + CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xffff0000u, 0xf8d00000u | (base_reg << 16)); + } else { + DCHECK_GE(code->size() - literal_offset, 6u); + uint32_t next_insn = GetInsn16(code, literal_offset + 4u); + // LDR (immediate), encoding T1, with correct base_reg. + CheckValidReg(next_insn & 0x7u); // Check destination register. 
+ const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xf838u, 0x6800u | (base_reg << 3)); + } + break; + } + case BakerReadBarrierKind::kArray: { + DCHECK_GE(code->size() - literal_offset, 8u); + uint32_t next_insn = GetInsn32(code, literal_offset + 4u); + // LDR (register) with correct base_reg, S=1 and option=011 (LDR Wt, [Xn, Xm, LSL #2]). + CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xffff0ff0u, 0xf8500020u | (base_reg << 16)); + CheckValidReg(next_insn & 0xf); // Check index register + break; + } + case BakerReadBarrierKind::kGcRoot: { + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + if (width == BakerReadBarrierWidth::kWide) { + DCHECK_GE(literal_offset, 4u); + uint32_t prev_insn = GetInsn32(code, literal_offset - 4u); + // LDR (immediate), encoding T3, with correct root_reg. + const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(prev_insn & 0xfff0f000u, 0xf8d00000u | (root_reg << 12)); + } else { + DCHECK_GE(literal_offset, 2u); + uint32_t prev_insn = GetInsn16(code, literal_offset - 2u); + // LDR (immediate), encoding T1, with correct root_reg. + const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(prev_insn & 0xf807u, 0x6800u | root_reg); + } + break; + } + default: + LOG(FATAL) << "Unexpected type: " << static_cast<uint32_t>(key.GetType()); + UNREACHABLE(); + } + } uint32_t target_offset = GetThunkTargetOffset(key, patch_offset); DCHECK_ALIGNED(target_offset, 4u); uint32_t disp = target_offset - (patch_offset + kPcDisplacement); @@ -123,6 +178,250 @@ void Thumb2RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* co SetInsn32(code, literal_offset, insn); } +#define __ assembler.GetVIXLAssembler()-> + +static void EmitGrayCheckAndFastPath(arm::ArmVIXLAssembler& assembler, + vixl::aarch32::Register base_reg, + vixl::aarch32::MemOperand& lock_word, + vixl::aarch32::Label* slow_path, + int32_t raw_ldr_offset) { + using namespace vixl::aarch32; // NOLINT(build/namespaces) + // Load the lock word containing the rb_state. + __ Ldr(ip, lock_word); + // Given the numeric representation, it's enough to check the low bit of the rb_state. + static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); + static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); + __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted)); + __ B(ne, slow_path, /* is_far_target */ false); + __ Add(lr, lr, raw_ldr_offset); + // Introduce a dependency on the lock_word including rb_state, + // to prevent load-load reordering, and without using + // a memory barrier (which would be more expensive). + __ Add(base_reg, base_reg, Operand(ip, LSR, 32)); + __ Bx(lr); // And return back to the function. + // Note: The fake dependency is unnecessary for the slow path. +} + +// Load the read barrier introspection entrypoint in register `entrypoint` +static void LoadReadBarrierMarkIntrospectionEntrypoint(arm::ArmVIXLAssembler& assembler, + vixl::aarch32::Register entrypoint) { + using vixl::aarch32::MemOperand; + using vixl::aarch32::ip; + // Thread Register. + const vixl::aarch32::Register tr = vixl::aarch32::r9; + + // The register where the read barrier introspection entrypoint is loaded + // is fixed: `Thumb2RelativePatcher::kBakerCcEntrypointRegister` (R4). 
+ DCHECK_EQ(entrypoint.GetCode(), Thumb2RelativePatcher::kBakerCcEntrypointRegister); + // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection. + DCHECK_EQ(ip.GetCode(), 12u); + const int32_t entry_point_offset = + Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ip.GetCode()); + __ Ldr(entrypoint, MemOperand(tr, entry_point_offset)); +} + +void Thumb2RelativePatcher::CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler, + uint32_t encoded_data) { + using namespace vixl::aarch32; // NOLINT(build/namespaces) + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + switch (kind) { + case BakerReadBarrierKind::kField: { + // Check if the holder is gray and, if not, add fake dependency to the base register + // and return to the LDR instruction to load the reference. Otherwise, use introspection + // to load the reference and call the entrypoint (in kBakerCcEntrypointRegister) + // that performs further checks on the reference and marks it if needed. + Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + Register holder_reg(BakerReadBarrierSecondRegField::Decode(encoded_data)); + CheckValidReg(holder_reg.GetCode()); + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip); + // If base_reg differs from holder_reg, the offset was too large and we must have + // emitted an explicit null check before the load. Otherwise, we need to null-check + // the holder as we do not necessarily do that check before going to the thunk. + vixl::aarch32::Label throw_npe; + if (holder_reg.Is(base_reg)) { + __ CompareAndBranchIfZero(holder_reg, &throw_npe, /* is_far_target */ false); + } + vixl::aarch32::Label slow_path; + MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value()); + const int32_t raw_ldr_offset = (width == BakerReadBarrierWidth::kWide) + ? BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET + : BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET; + EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset); + __ Bind(&slow_path); + const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 + + raw_ldr_offset; + Register ep_reg(kBakerCcEntrypointRegister); + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg); + if (width == BakerReadBarrierWidth::kWide) { + MemOperand ldr_half_address(lr, ldr_offset + 2); + __ Ldrh(ip, ldr_half_address); // Load the LDR immediate half-word with "Rt | imm12". + __ Ubfx(ip, ip, 0, 12); // Extract the offset imm12. + __ Ldr(ip, MemOperand(base_reg, ip)); // Load the reference. + } else { + MemOperand ldr_address(lr, ldr_offset); + __ Ldrh(ip, ldr_address); // Load the LDR immediate, encoding T1. + __ Add(ep_reg, // Adjust the entrypoint address to the entrypoint + ep_reg, // for narrow LDR. + Operand(BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET)); + __ Ubfx(ip, ip, 6, 5); // Extract the imm5, i.e. offset / 4. + __ Ldr(ip, MemOperand(base_reg, ip, LSL, 2)); // Load the reference. + } + // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference. + __ Bx(ep_reg); // Jump to the entrypoint. + if (holder_reg.Is(base_reg)) { + // Add null check slow path. The stack map is at the address pointed to by LR. 
+ __ Bind(&throw_npe); + int32_t offset = GetThreadOffset<kArmPointerSize>(kQuickThrowNullPointer).Int32Value(); + __ Ldr(ip, MemOperand(/* Thread* */ vixl::aarch32::r9, offset)); + __ Bx(ip); + } + break; + } + case BakerReadBarrierKind::kArray: { + Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip); + vixl::aarch32::Label slow_path; + int32_t data_offset = + mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value(); + MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset); + DCHECK_LT(lock_word.GetOffsetImmediate(), 0); + const int32_t raw_ldr_offset = BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET; + EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset); + __ Bind(&slow_path); + const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 + + raw_ldr_offset; + MemOperand ldr_address(lr, ldr_offset + 2); + __ Ldrb(ip, ldr_address); // Load the LDR (register) byte with "00 | imm2 | Rm", + // i.e. Rm+32 because the scale in imm2 is 2. + Register ep_reg(kBakerCcEntrypointRegister); + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg); + __ Bfi(ep_reg, ip, 3, 6); // Insert ip to the entrypoint address to create + // a switch case target based on the index register. + __ Mov(ip, base_reg); // Move the base register to ip0. + __ Bx(ep_reg); // Jump to the entrypoint's array switch case. + break; + } + case BakerReadBarrierKind::kGcRoot: { + // Check if the reference needs to be marked and if so (i.e. not null, not marked yet + // and it does not have a forwarding address), call the correct introspection entrypoint; + // otherwise return the reference (or the extracted forwarding address). + // There is no gray bit check for GC roots. + Register root_reg(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(root_reg.GetCode()); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip); + vixl::aarch32::Label return_label, not_marked, forwarding_address; + __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false); + MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value()); + __ Ldr(ip, lock_word); + __ Tst(ip, LockWord::kMarkBitStateMaskShifted); + __ B(eq, ¬_marked); + __ Bind(&return_label); + __ Bx(lr); + __ Bind(¬_marked); + static_assert(LockWord::kStateShift == 30 && LockWord::kStateForwardingAddress == 3, + "To use 'CMP ip, #modified-immediate; BHS', we need the lock word state in " + " the highest bits and the 'forwarding address' state to have all bits set"); + __ Cmp(ip, Operand(0xc0000000)); + __ B(hs, &forwarding_address); + Register ep_reg(kBakerCcEntrypointRegister); + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg); + // Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister + // to art_quick_read_barrier_mark_introspection_gc_roots. + int32_t entrypoint_offset = (width == BakerReadBarrierWidth::kWide) + ? 
BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET + : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET; + __ Add(ep_reg, ep_reg, Operand(entrypoint_offset)); + __ Mov(ip, root_reg); + __ Bx(ep_reg); + __ Bind(&forwarding_address); + __ Lsl(root_reg, ip, LockWord::kForwardingAddressShift); + __ Bx(lr); + break; + } + default: + LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind); + UNREACHABLE(); + } +} + +std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) { + ArenaPool pool; + ArenaAllocator allocator(&pool); + arm::ArmVIXLAssembler assembler(&allocator); + + switch (key.GetType()) { + case ThunkType::kMethodCall: + // The thunk just uses the entry point in the ArtMethod. This works even for calls + // to the generic JNI and interpreter trampolines. + assembler.LoadFromOffset( + arm::kLoadWord, + vixl::aarch32::pc, + vixl::aarch32::r0, + ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value()); + __ Bkpt(0); + break; + case ThunkType::kBakerReadBarrier: + CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1()); + break; + } + + assembler.FinalizeCode(); + std::vector<uint8_t> thunk_code(assembler.CodeSize()); + MemoryRegion code(thunk_code.data(), thunk_code.size()); + assembler.FinalizeInstructions(code); + return thunk_code; +} + +std::string Thumb2RelativePatcher::GetThunkDebugName(const ThunkKey& key) { + switch (key.GetType()) { + case ThunkType::kMethodCall: + return "MethodCallThunk"; + + case ThunkType::kBakerReadBarrier: { + uint32_t encoded_data = key.GetCustomValue1(); + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + std::ostringstream oss; + oss << "BakerReadBarrierThunk"; + switch (kind) { + case BakerReadBarrierKind::kField: + oss << "Field"; + if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) { + oss << "Wide"; + } + oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data) + << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data); + break; + case BakerReadBarrierKind::kArray: + oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide); + break; + case BakerReadBarrierKind::kGcRoot: + oss << "GcRoot"; + if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) { + oss << "Wide"; + } + oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + break; + } + return oss.str(); + } + } +} + +#undef __ + uint32_t Thumb2RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) { switch (key.GetType()) { case ThunkType::kMethodCall: diff --git a/compiler/linker/arm/relative_patcher_thumb2.h b/compiler/linker/arm/relative_patcher_thumb2.h index 68610d69e1..68386c00f4 100644 --- a/compiler/linker/arm/relative_patcher_thumb2.h +++ b/compiler/linker/arm/relative_patcher_thumb2.h @@ -19,6 +19,8 @@ #include "arch/arm/registers_arm.h" #include "base/array_ref.h" +#include "base/bit_field.h" +#include "base/bit_utils.h" #include "linker/arm/relative_patcher_arm_base.h" namespace art { @@ -31,8 +33,42 @@ namespace linker { class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher { public: - explicit Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider, - 
RelativePatcherTargetProvider* target_provider); + static constexpr uint32_t kBakerCcEntrypointRegister = 4u; + + static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, + uint32_t holder_reg, + bool narrow) { + CheckValidReg(base_reg); + CheckValidReg(holder_reg); + DCHECK(!narrow || base_reg < 8u) << base_reg; + BakerReadBarrierWidth width = + narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide; + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(holder_reg) | + BakerReadBarrierWidthField::Encode(width); + } + + static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) { + CheckValidReg(base_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg) | + BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide); + } + + static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) { + CheckValidReg(root_reg); + DCHECK(!narrow || root_reg < 8u) << root_reg; + BakerReadBarrierWidth width = + narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide; + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) | + BakerReadBarrierFirstRegField::Encode(root_reg) | + BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg) | + BakerReadBarrierWidthField::Encode(width); + } + + explicit Thumb2RelativePatcher(RelativePatcherTargetProvider* provider); void PatchCall(std::vector<uint8_t>* code, uint32_t literal_offset, @@ -47,10 +83,48 @@ class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher { uint32_t patch_offset) OVERRIDE; protected: + std::vector<uint8_t> CompileThunk(const ThunkKey& key) OVERRIDE; + std::string GetThunkDebugName(const ThunkKey& key) OVERRIDE; uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE; uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE; private: + static constexpr uint32_t kInvalidEncodedReg = /* pc is invalid */ 15u; + + enum class BakerReadBarrierKind : uint8_t { + kField, // Field get or array get with constant offset (i.e. constant index). + kArray, // Array get with index in register. + kGcRoot, // GC root load. + kLast = kGcRoot + }; + + enum class BakerReadBarrierWidth : uint8_t { + kWide, // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled). + kNarrow, // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled). 
+ kLast = kNarrow + }; + + static constexpr size_t kBitsForBakerReadBarrierKind = + MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast)); + static constexpr size_t kBitsForRegister = 4u; + using BakerReadBarrierKindField = + BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>; + using BakerReadBarrierFirstRegField = + BitField<uint32_t, kBitsForBakerReadBarrierKind, kBitsForRegister>; + using BakerReadBarrierSecondRegField = + BitField<uint32_t, kBitsForBakerReadBarrierKind + kBitsForRegister, kBitsForRegister>; + static constexpr size_t kBitsForBakerReadBarrierWidth = + MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierWidth::kLast)); + using BakerReadBarrierWidthField = BitField<BakerReadBarrierWidth, + kBitsForBakerReadBarrierKind + 2 * kBitsForRegister, + kBitsForBakerReadBarrierWidth>; + + static void CheckValidReg(uint32_t reg) { + DCHECK(reg < 12u && reg != kBakerCcEntrypointRegister) << reg; + } + + void CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler, uint32_t encoded_data); + void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value); static uint32_t GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset); diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc index e7b11bd16b..2c22a352c2 100644 --- a/compiler/linker/arm/relative_patcher_thumb2_test.cc +++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc @@ -16,15 +16,12 @@ #include "linker/arm/relative_patcher_thumb2.h" -#include "arch/arm/instruction_set_features_arm.h" #include "base/casts.h" #include "linker/relative_patcher_test.h" #include "lock_word.h" #include "mirror/array-inl.h" #include "mirror/object.h" #include "oat_quick_method_header.h" -#include "optimizing/code_generator_arm_vixl.h" -#include "optimizing/optimizing_unit_test.h" namespace art { namespace linker { @@ -192,42 +189,9 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest { return result.second - 1 /* thumb mode */; } - std::vector<uint8_t> CompileThunk(const LinkerPatch& patch, - /*out*/ std::string* debug_name = nullptr) { - OptimizingUnitTestHelper helper; - HGraph* graph = helper.CreateGraph(); - std::string error_msg; - ArmFeaturesUniquePtr features = - ArmInstructionSetFeatures::FromVariant("default", &error_msg); - CompilerOptions options; - arm::CodeGeneratorARMVIXL codegen(graph, *features, options); - ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter()); - codegen.EmitThunkCode(patch, &code, debug_name); - return std::vector<uint8_t>(code.begin(), code.end()); - } - - void AddCompiledMethod( - MethodReference method_ref, - const ArrayRef<const uint8_t>& code, - const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>()) { - RelativePatcherTest::AddCompiledMethod(method_ref, code, patches); - - // Make sure the ThunkProvider has all the necessary thunks. 
- for (const LinkerPatch& patch : patches) { - if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch || - patch.GetType() == LinkerPatch::Type::kCallRelative) { - std::string debug_name; - std::vector<uint8_t> thunk_code = CompileThunk(patch, &debug_name); - thunk_provider_.SetThunkCode(patch, ArrayRef<const uint8_t>(thunk_code), debug_name); - } - } - } - std::vector<uint8_t> CompileMethodCallThunk() { - LinkerPatch patch = LinkerPatch::RelativeCodePatch(/* literal_offset */ 0u, - /* target_dex_file*/ nullptr, - /* target_method_idx */ 0u); - return CompileThunk(patch); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetMethodCallKey(); + return static_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key); } uint32_t MethodCallThunkSize() { @@ -264,38 +228,27 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest { void TestStringReference(uint32_t string_offset); void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset); - static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, - uint32_t holder_reg, - bool narrow) { - return arm::CodeGeneratorARMVIXL::EncodeBakerReadBarrierFieldData(base_reg, holder_reg, narrow); - } - - static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) { - return arm::CodeGeneratorARMVIXL::EncodeBakerReadBarrierArrayData(base_reg); - } - - static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) { - return arm::CodeGeneratorARMVIXL::EncodeBakerReadBarrierGcRootData(root_reg, narrow); - } - std::vector<uint8_t> CompileBakerOffsetThunk(uint32_t base_reg, uint32_t holder_reg, bool narrow) { const LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( - /* literal_offset */ 0u, EncodeBakerReadBarrierFieldData(base_reg, holder_reg, narrow)); - return CompileThunk(patch); + 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg, narrow)); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); + return down_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key); } std::vector<uint8_t> CompileBakerArrayThunk(uint32_t base_reg) { LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( - /* literal_offset */ 0u, EncodeBakerReadBarrierArrayData(base_reg)); - return CompileThunk(patch); + 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); + return down_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key); } std::vector<uint8_t> CompileBakerGcRootThunk(uint32_t root_reg, bool narrow) { LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( - /* literal_offset */ 0u, EncodeBakerReadBarrierGcRootData(root_reg, narrow)); - return CompileThunk(patch); + 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, narrow)); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); + return down_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key); } uint32_t GetOutputInsn32(uint32_t offset) { @@ -641,7 +594,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldWide(uint32_t offset, uint32_t ref const std::vector<uint8_t> raw_code = RawCode({kBneWPlus0, ldr}); ASSERT_EQ(kMethodCodeSize, raw_code.size()); ArrayRef<const uint8_t> code(raw_code); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData( + uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( base_reg, 
holder_reg, /* narrow */ false); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data), @@ -743,7 +696,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldNarrow(uint32_t offset, uint32_t r const std::vector<uint8_t> raw_code = RawCode({kBneWPlus0, ldr}); ASSERT_EQ(kMethodCodeSize, raw_code.size()); ArrayRef<const uint8_t> code(raw_code); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData( + uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( base_reg, holder_reg, /* narrow */ true); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data), @@ -856,7 +809,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddle) { constexpr uint32_t kLiteralOffset1 = 6u; const std::vector<uint8_t> raw_code1 = RawCode({kNopWInsn, kNopInsn, kBneWPlus0, kLdrWInsn}); ArrayRef<const uint8_t> code1(raw_code1); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData( + uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( /* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), @@ -924,7 +877,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkBeforeFiller) { constexpr uint32_t kLiteralOffset1 = 4u; const std::vector<uint8_t> raw_code1 = RawCode({kNopWInsn, kBneWPlus0, kLdrWInsn, kNopInsn}); ArrayRef<const uint8_t> code1(raw_code1); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData( + uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( /* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), @@ -954,7 +907,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddleUnreachableFromLast constexpr uint32_t kLiteralOffset1 = 6u; const std::vector<uint8_t> raw_code1 = RawCode({kNopWInsn, kNopInsn, kBneWPlus0, kLdrWInsn}); ArrayRef<const uint8_t> code1(raw_code1); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData( + uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( /* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), @@ -1040,7 +993,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerArray) { ArrayRef<const uint8_t> code(raw_code); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch( - kLiteralOffset, EncodeBakerReadBarrierArrayData(base_reg)), + kLiteralOffset, Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)), }; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches)); } @@ -1121,7 +1074,8 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootWide) { ArrayRef<const uint8_t> code(raw_code); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch( - kLiteralOffset, EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ false)), + kLiteralOffset, + Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ false)), }; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches)); } @@ -1180,7 +1134,8 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootNarrow) { ArrayRef<const uint8_t> code(raw_code); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch( - kLiteralOffset, EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ 
true)), + kLiteralOffset, + Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ true)), }; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches)); } @@ -1227,7 +1182,8 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootOffsetBits) { patches.reserve(num_patches); const uint32_t ldr = kLdrWInsn | (/* offset */ 8) | (/* base_reg */ 0 << 16) | (/* root_reg */ 0 << 12); - uint32_t encoded_data = EncodeBakerReadBarrierGcRootData(/* root_reg */ 0, /* narrow */ false); + uint32_t encoded_data = + Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 0, /* narrow */ false); for (size_t i = 0; i != num_patches; ++i) { PushBackInsn(&code, ldr); PushBackInsn(&code, kBneWPlus0); @@ -1308,8 +1264,10 @@ TEST_F(Thumb2RelativePatcherTest, BakerAndMethodCallInteraction) { ldr1, kBneWPlus0, // First GC root LDR with read barrier. ldr2, kBneWPlus0, // Second GC root LDR with read barrier. }); - uint32_t encoded_data1 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 1, /* narrow */ false); - uint32_t encoded_data2 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 2, /* narrow */ false); + uint32_t encoded_data1 = + Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 1, /* narrow */ false); + uint32_t encoded_data2 = + Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 2, /* narrow */ false); const LinkerPatch last_method_patches[] = { LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset1, encoded_data1), LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset2, encoded_data2), diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc index 135e39d100..b268204b4a 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.cc +++ b/compiler/linker/arm64/relative_patcher_arm64.cc @@ -82,10 +82,9 @@ inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) { } // anonymous namespace -Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider, - RelativePatcherTargetProvider* target_provider, +Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider, const Arm64InstructionSetFeatures* features) - : ArmBaseRelativePatcher(thunk_provider, target_provider, InstructionSet::kArm64), + : ArmBaseRelativePatcher(provider, InstructionSet::kArm64), fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()), reserved_adrp_thunks_(0u), processed_adrp_thunks_(0u) { @@ -314,6 +313,44 @@ void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* cod uint32_t insn = GetInsn(code, literal_offset); DCHECK_EQ(insn & 0xffffffe0u, 0xb5000000); // CBNZ Xt, +0 (unpatched) ThunkKey key = GetBakerThunkKey(patch); + if (kIsDebugBuild) { + const uint32_t encoded_data = key.GetCustomValue1(); + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + // Check that the next instruction matches the expected LDR. + switch (kind) { + case BakerReadBarrierKind::kField: { + DCHECK_GE(code->size() - literal_offset, 8u); + uint32_t next_insn = GetInsn(code, literal_offset + 4u); + // LDR (immediate) with correct base_reg. + CheckValidReg(next_insn & 0x1fu); // Check destination register. 
+ const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5)); + break; + } + case BakerReadBarrierKind::kArray: { + DCHECK_GE(code->size() - literal_offset, 8u); + uint32_t next_insn = GetInsn(code, literal_offset + 4u); + // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL), + // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2]. + CheckValidReg(next_insn & 0x1fu); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5)); + CheckValidReg((next_insn >> 16) & 0x1f); // Check index register + break; + } + case BakerReadBarrierKind::kGcRoot: { + DCHECK_GE(literal_offset, 4u); + uint32_t prev_insn = GetInsn(code, literal_offset - 4u); + // LDR (immediate) with correct root_reg. + const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg); + break; + } + default: + LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind); + UNREACHABLE(); + } + } uint32_t target_offset = GetThunkTargetOffset(key, patch_offset); DCHECK_ALIGNED(target_offset, 4u); uint32_t disp = target_offset - patch_offset; @@ -322,6 +359,216 @@ void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* cod SetInsn(code, literal_offset, insn); } +#define __ assembler.GetVIXLAssembler()-> + +static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler, + vixl::aarch64::Register base_reg, + vixl::aarch64::MemOperand& lock_word, + vixl::aarch64::Label* slow_path) { + using namespace vixl::aarch64; // NOLINT(build/namespaces) + // Load the lock word containing the rb_state. + __ Ldr(ip0.W(), lock_word); + // Given the numeric representation, it's enough to check the low bit of the rb_state. + static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); + static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); + __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path); + static_assert( + BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET, + "Field and array LDR offsets must be the same to reuse the same code."); + // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning). + static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4), + "Field LDR must be 1 instruction (4B) before the return address label; " + " 2 instructions (8B) for heap poisoning."); + __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET); + // Introduce a dependency on the lock_word including rb_state, + // to prevent load-load reordering, and without using + // a memory barrier (which would be more expensive). + __ Add(base_reg, base_reg, Operand(ip0, LSR, 32)); + __ Br(lr); // And return back to the function. + // Note: The fake dependency is unnecessary for the slow path. +} + +// Load the read barrier introspection entrypoint in register `entrypoint`. +static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler, + vixl::aarch64::Register entrypoint) { + using vixl::aarch64::MemOperand; + using vixl::aarch64::ip0; + // Thread Register. + const vixl::aarch64::Register tr = vixl::aarch64::x19; + + // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection. 
+ DCHECK_EQ(ip0.GetCode(), 16u); + const int32_t entry_point_offset = + Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode()); + __ Ldr(entrypoint, MemOperand(tr, entry_point_offset)); +} + +void Arm64RelativePatcher::CompileBakerReadBarrierThunk(arm64::Arm64Assembler& assembler, + uint32_t encoded_data) { + using namespace vixl::aarch64; // NOLINT(build/namespaces) + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + switch (kind) { + case BakerReadBarrierKind::kField: { + // Check if the holder is gray and, if not, add fake dependency to the base register + // and return to the LDR instruction to load the reference. Otherwise, use introspection + // to load the reference and call the entrypoint (in IP1) that performs further checks + // on the reference and marks it if needed. + auto base_reg = + Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + auto holder_reg = + Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data)); + CheckValidReg(holder_reg.GetCode()); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip0, ip1); + // If base_reg differs from holder_reg, the offset was too large and we must have + // emitted an explicit null check before the load. Otherwise, we need to null-check + // the holder as we do not necessarily do that check before going to the thunk. + vixl::aarch64::Label throw_npe; + if (holder_reg.Is(base_reg)) { + __ Cbz(holder_reg.W(), &throw_npe); + } + vixl::aarch64::Label slow_path; + MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value()); + EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path); + __ Bind(&slow_path); + MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET); + __ Ldr(ip0.W(), ldr_address); // Load the LDR (immediate) unsigned offset. + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); + __ Ubfx(ip0.W(), ip0.W(), 10, 12); // Extract the offset. + __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2)); // Load the reference. + // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference. + __ Br(ip1); // Jump to the entrypoint. + if (holder_reg.Is(base_reg)) { + // Add null check slow path. The stack map is at the address pointed to by LR. + __ Bind(&throw_npe); + int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value(); + __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset)); + __ Br(ip0); + } + break; + } + case BakerReadBarrierKind::kArray: { + auto base_reg = + Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip0, ip1); + vixl::aarch64::Label slow_path; + int32_t data_offset = + mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value(); + MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset); + DCHECK_LT(lock_word.GetOffset(), 0); + EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path); + __ Bind(&slow_path); + MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET); + __ Ldr(ip0.W(), ldr_address); // Load the LDR (register) unsigned offset. 
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); + __ Ubfx(ip0, ip0, 16, 6); // Extract the index register, plus 32 (bit 21 is set). + __ Bfi(ip1, ip0, 3, 6); // Insert ip0 to the entrypoint address to create + // a switch case target based on the index register. + __ Mov(ip0, base_reg); // Move the base register to ip0. + __ Br(ip1); // Jump to the entrypoint's array switch case. + break; + } + case BakerReadBarrierKind::kGcRoot: { + // Check if the reference needs to be marked and if so (i.e. not null, not marked yet + // and it does not have a forwarding address), call the correct introspection entrypoint; + // otherwise return the reference (or the extracted forwarding address). + // There is no gray bit check for GC roots. + auto root_reg = + Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(root_reg.GetCode()); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip0, ip1); + vixl::aarch64::Label return_label, not_marked, forwarding_address; + __ Cbz(root_reg, &return_label); + MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value()); + __ Ldr(ip0.W(), lock_word); + __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, ¬_marked); + __ Bind(&return_label); + __ Br(lr); + __ Bind(¬_marked); + __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1)); + __ B(&forwarding_address, mi); + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); + // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to + // art_quick_read_barrier_mark_introspection_gc_roots. + __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET)); + __ Mov(ip0.W(), root_reg); + __ Br(ip1); + __ Bind(&forwarding_address); + __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift); + __ Br(lr); + break; + } + default: + LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind); + UNREACHABLE(); + } +} + +std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) { + ArenaPool pool; + ArenaAllocator allocator(&pool); + arm64::Arm64Assembler assembler(&allocator); + + switch (key.GetType()) { + case ThunkType::kMethodCall: { + // The thunk just uses the entry point in the ArtMethod. This works even for calls + // to the generic JNI and interpreter trampolines. + Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kArm64PointerSize).Int32Value()); + assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0)); + break; + } + case ThunkType::kBakerReadBarrier: { + CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1()); + break; + } + } + + // Ensure we emit the literal pool. 
+ assembler.FinalizeCode(); + std::vector<uint8_t> thunk_code(assembler.CodeSize()); + MemoryRegion code(thunk_code.data(), thunk_code.size()); + assembler.FinalizeInstructions(code); + return thunk_code; +} + +std::string Arm64RelativePatcher::GetThunkDebugName(const ThunkKey& key) { + switch (key.GetType()) { + case ThunkType::kMethodCall: + return "MethodCallThunk"; + + case ThunkType::kBakerReadBarrier: { + uint32_t encoded_data = key.GetCustomValue1(); + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + std::ostringstream oss; + oss << "BakerReadBarrierThunk"; + switch (kind) { + case BakerReadBarrierKind::kField: + oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data) + << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data); + break; + case BakerReadBarrierKind::kArray: + oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + break; + case BakerReadBarrierKind::kGcRoot: + oss << "GcRoot_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data)); + break; + } + return oss.str(); + } + } +} + +#undef __ + uint32_t Arm64RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) { switch (key.GetType()) { case ThunkType::kMethodCall: diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h index 9dc289da44..8ba59976e7 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.h +++ b/compiler/linker/arm64/relative_patcher_arm64.h @@ -18,6 +18,8 @@ #define ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_ #include "base/array_ref.h" +#include "base/bit_field.h" +#include "base/bit_utils.h" #include "linker/arm/relative_patcher_arm_base.h" namespace art { @@ -30,8 +32,29 @@ namespace linker { class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher { public: - Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider, - RelativePatcherTargetProvider* target_provider, + static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) { + CheckValidReg(base_reg); + CheckValidReg(holder_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(holder_reg); + } + + static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) { + CheckValidReg(base_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg); + } + + static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) { + CheckValidReg(root_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) | + BakerReadBarrierFirstRegField::Encode(root_reg) | + BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg); + } + + Arm64RelativePatcher(RelativePatcherTargetProvider* provider, const Arm64InstructionSetFeatures* features); uint32_t ReserveSpace(uint32_t offset, @@ -52,10 +75,37 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher { uint32_t patch_offset) OVERRIDE; protected: + std::vector<uint8_t> CompileThunk(const ThunkKey& key) OVERRIDE; + std::string GetThunkDebugName(const ThunkKey& key) OVERRIDE; uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE; uint32_t MaxNegativeDisplacement(const 
ThunkKey& key) OVERRIDE; private: + static constexpr uint32_t kInvalidEncodedReg = /* sp/zr is invalid */ 31u; + + enum class BakerReadBarrierKind : uint8_t { + kField, // Field get or array get with constant offset (i.e. constant index). + kArray, // Array get with index in register. + kGcRoot, // GC root load. + kLast = kGcRoot + }; + + static constexpr size_t kBitsForBakerReadBarrierKind = + MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast)); + static constexpr size_t kBitsForRegister = 5u; + using BakerReadBarrierKindField = + BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>; + using BakerReadBarrierFirstRegField = + BitField<uint32_t, kBitsForBakerReadBarrierKind, kBitsForRegister>; + using BakerReadBarrierSecondRegField = + BitField<uint32_t, kBitsForBakerReadBarrierKind + kBitsForRegister, kBitsForRegister>; + + static void CheckValidReg(uint32_t reg) { + DCHECK(reg < 30u && reg != 16u && reg != 17u) << reg; + } + + void CompileBakerReadBarrierThunk(arm64::Arm64Assembler& assembler, uint32_t encoded_data); + static uint32_t PatchAdrp(uint32_t adrp, uint32_t disp); static bool NeedsErratum843419Thunk(ArrayRef<const uint8_t> code, uint32_t literal_offset, diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc index 393733dd0c..05459a2a82 100644 --- a/compiler/linker/arm64/relative_patcher_arm64_test.cc +++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc @@ -16,15 +16,12 @@ #include "linker/arm64/relative_patcher_arm64.h" -#include "arch/arm64/instruction_set_features_arm64.h" #include "base/casts.h" #include "linker/relative_patcher_test.h" #include "lock_word.h" #include "mirror/array-inl.h" #include "mirror/object.h" #include "oat_quick_method_header.h" -#include "optimizing/code_generator_arm64.h" -#include "optimizing/optimizing_unit_test.h" namespace art { namespace linker { @@ -171,42 +168,9 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { return result.second; } - std::vector<uint8_t> CompileThunk(const LinkerPatch& patch, - /*out*/ std::string* debug_name = nullptr) { - OptimizingUnitTestHelper helper; - HGraph* graph = helper.CreateGraph(); - std::string error_msg; - Arm64FeaturesUniquePtr features = - Arm64InstructionSetFeatures::FromVariant("default", &error_msg); - CompilerOptions options; - arm64::CodeGeneratorARM64 codegen(graph, *features, options); - ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter()); - codegen.EmitThunkCode(patch, &code, debug_name); - return std::vector<uint8_t>(code.begin(), code.end()); - } - - void AddCompiledMethod( - MethodReference method_ref, - const ArrayRef<const uint8_t>& code, - const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>()) { - RelativePatcherTest::AddCompiledMethod(method_ref, code, patches); - - // Make sure the ThunkProvider has all the necessary thunks. 
- for (const LinkerPatch& patch : patches) { - if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch || - patch.GetType() == LinkerPatch::Type::kCallRelative) { - std::string debug_name; - std::vector<uint8_t> thunk_code = CompileThunk(patch, &debug_name); - thunk_provider_.SetThunkCode(patch, ArrayRef<const uint8_t>(thunk_code), debug_name); - } - } - } - std::vector<uint8_t> CompileMethodCallThunk() { - LinkerPatch patch = LinkerPatch::RelativeCodePatch(/* literal_offset */ 0u, - /* target_dex_file*/ nullptr, - /* target_method_idx */ 0u); - return CompileThunk(patch); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetMethodCallKey(); + return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key); } uint32_t MethodCallThunkSize() { @@ -511,34 +475,25 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset); } - static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) { - return arm64::CodeGeneratorARM64::EncodeBakerReadBarrierFieldData(base_reg, holder_reg); - } - - static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) { - return arm64::CodeGeneratorARM64::EncodeBakerReadBarrierArrayData(base_reg); - } - - static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) { - return arm64::CodeGeneratorARM64::EncodeBakerReadBarrierGcRootData(root_reg); - } - std::vector<uint8_t> CompileBakerOffsetThunk(uint32_t base_reg, uint32_t holder_reg) { const LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( - /* literal_offset */ 0u, EncodeBakerReadBarrierFieldData(base_reg, holder_reg)); - return CompileThunk(patch); + 0u, Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg)); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); + return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key); } std::vector<uint8_t> CompileBakerArrayThunk(uint32_t base_reg) { LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( - /* literal_offset */ 0u, EncodeBakerReadBarrierArrayData(base_reg)); - return CompileThunk(patch); + 0u, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); + return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key); } std::vector<uint8_t> CompileBakerGcRootThunk(uint32_t root_reg) { LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( - /* literal_offset */ 0u, EncodeBakerReadBarrierGcRootData(root_reg)); - return CompileThunk(patch); + 0u, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg)); + ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); + return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key); } uint32_t GetOutputInsn(uint32_t offset) { @@ -964,7 +919,8 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg) const std::vector<uint8_t> raw_code = RawCode({kCbnzIP1Plus0Insn, ldr}); ASSERT_EQ(kMethodCodeSize, raw_code.size()); ArrayRef<const uint8_t> code(raw_code); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData(base_reg, holder_reg); + uint32_t encoded_data = + Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data), }; @@ -1049,7 +1005,8 @@ 
TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) { constexpr uint32_t kLiteralOffset1 = 4; const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn}); ArrayRef<const uint8_t> code1(raw_code1); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0); + uint32_t encoded_data = + Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), }; @@ -1109,7 +1066,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkBeforeFiller) { constexpr uint32_t kLiteralOffset1 = 0; const std::vector<uint8_t> raw_code1 = RawCode({kCbnzIP1Plus0Insn, kLdrWInsn, kNopInsn}); ArrayRef<const uint8_t> code1(raw_code1); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0); + uint32_t encoded_data = + Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), }; @@ -1138,7 +1096,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFr constexpr uint32_t kLiteralOffset1 = 4; const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn}); ArrayRef<const uint8_t> code1(raw_code1); - uint32_t encoded_data = EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0); + uint32_t encoded_data = + Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), }; @@ -1211,7 +1170,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerArray) { ArrayRef<const uint8_t> code(raw_code); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch( - kLiteralOffset, EncodeBakerReadBarrierArrayData(base_reg)), + kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)), }; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches)); } @@ -1288,7 +1247,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerGcRoot) { ArrayRef<const uint8_t> code(raw_code); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch( - kLiteralOffset, EncodeBakerReadBarrierGcRootData(root_reg)), + kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg)), }; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches)); } @@ -1384,8 +1343,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerAndMethodCallInteraction) { kNopInsn, kNopInsn, // Padding before second GC root read barrier. ldr2, kCbnzIP1Plus0Insn, // Second GC root LDR with read barrier. 
}); - uint32_t encoded_data1 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 1); - uint32_t encoded_data2 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 2); + uint32_t encoded_data1 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 1); + uint32_t encoded_data2 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 2); const LinkerPatch last_method_patches[] = { LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset1, encoded_data1), LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset2, encoded_data2), diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h index 7b35fd9b0c..710d8a690a 100644 --- a/compiler/linker/linker_patch.h +++ b/compiler/linker/linker_patch.h @@ -141,7 +141,7 @@ class LinkerPatch { static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset, uint32_t custom_value1 = 0u, uint32_t custom_value2 = 0u) { - LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, /* target_dex_file */ nullptr); + LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, nullptr); patch.baker_custom_value1_ = custom_value1; patch.baker_custom_value2_ = custom_value2; return patch; diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc index b82d15230d..13877f8f12 100644 --- a/compiler/linker/relative_patcher.cc +++ b/compiler/linker/relative_patcher.cc @@ -43,8 +43,7 @@ namespace linker { std::unique_ptr<RelativePatcher> RelativePatcher::Create( InstructionSet instruction_set, const InstructionSetFeatures* features, - RelativePatcherThunkProvider* thunk_provider, - RelativePatcherTargetProvider* target_provider) { + RelativePatcherTargetProvider* provider) { class RelativePatcherNone FINAL : public RelativePatcher { public: RelativePatcherNone() { } @@ -93,8 +92,7 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create( }; UNUSED(features); - UNUSED(thunk_provider); - UNUSED(target_provider); + UNUSED(provider); switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_x86 case InstructionSet::kX86: @@ -108,15 +106,12 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create( case InstructionSet::kArm: // Fall through: we generate Thumb2 code for "arm". case InstructionSet::kThumb2: - return std::unique_ptr<RelativePatcher>( - new Thumb2RelativePatcher(thunk_provider, target_provider)); + return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 case InstructionSet::kArm64: return std::unique_ptr<RelativePatcher>( - new Arm64RelativePatcher(thunk_provider, - target_provider, - features->AsArm64InstructionSetFeatures())); + new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures())); #endif #ifdef ART_ENABLE_CODEGEN_mips case InstructionSet::kMips: diff --git a/compiler/linker/relative_patcher.h b/compiler/linker/relative_patcher.h index 06c7e70d23..b58e3dffbd 100644 --- a/compiler/linker/relative_patcher.h +++ b/compiler/linker/relative_patcher.h @@ -39,27 +39,6 @@ class LinkerPatch; class OutputStream; /** - * @class RelativePatcherThunkProvider - * @brief Interface for providing method offsets for relative call targets. - */ -class RelativePatcherThunkProvider { - public: - /** - * Get the code and debug name of a thunk needed by the given linker patch. - * - * @param patch The patch for which we need to retrieve the thunk code. - * @param code A variable to receive the code of the thunk. This code must not be empty. 
- * @param debug_name A variable to receive the debug name of the thunk. - */ - virtual void GetThunkCode(const LinkerPatch& patch, - /*out*/ ArrayRef<const uint8_t>* code, - /*out*/ std::string* debug_name) = 0; - - protected: - virtual ~RelativePatcherThunkProvider() { } -}; - -/** * @class RelativePatcherTargetProvider * @brief Interface for providing method offsets for relative call targets. */ @@ -91,10 +70,8 @@ class RelativePatcherTargetProvider { class RelativePatcher { public: static std::unique_ptr<RelativePatcher> Create( - InstructionSet instruction_set, - const InstructionSetFeatures* features, - RelativePatcherThunkProvider* thunk_provider, - RelativePatcherTargetProvider* target_provider); + InstructionSet instruction_set, const InstructionSetFeatures* features, + RelativePatcherTargetProvider* provider); virtual ~RelativePatcher() { } diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index af8dc4dbc9..d21f2795b9 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -58,10 +58,7 @@ class RelativePatcherTest : public testing::Test { instruction_set_(instruction_set), features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)), method_offset_map_(), - patcher_(RelativePatcher::Create(instruction_set, - features_.get(), - &thunk_provider_, - &method_offset_map_)), + patcher_(RelativePatcher::Create(instruction_set, features_.get(), &method_offset_map_)), bss_begin_(0u), compiled_method_refs_(), compiled_methods_(), @@ -251,72 +248,6 @@ class RelativePatcherTest : public testing::Test { LOG(ERROR) << " " << diff_indicator_str; } - class ThunkProvider : public RelativePatcherThunkProvider { - public: - ThunkProvider() {} - - void SetThunkCode(const LinkerPatch& patch, - ArrayRef<const uint8_t> code, - const std::string& debug_name) { - thunk_map_.emplace(ThunkKey(patch), ThunkValue(code, debug_name)); - } - - void GetThunkCode(const LinkerPatch& patch, - /*out*/ ArrayRef<const uint8_t>* code, - /*out*/ std::string* debug_name) OVERRIDE { - auto it = thunk_map_.find(ThunkKey(patch)); - CHECK(it != thunk_map_.end()); - const ThunkValue& value = it->second; - CHECK(code != nullptr); - *code = value.GetCode(); - CHECK(debug_name != nullptr); - *debug_name = value.GetDebugName(); - } - - private: - class ThunkKey { - public: - explicit ThunkKey(const LinkerPatch& patch) - : type_(patch.GetType()), - custom_value1_(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch - ? patch.GetBakerCustomValue1() : 0u), - custom_value2_(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch - ? 
patch.GetBakerCustomValue2() : 0u) { - CHECK(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch || - patch.GetType() == LinkerPatch::Type::kCallRelative); - } - - bool operator<(const ThunkKey& other) const { - if (custom_value1_ != other.custom_value1_) { - return custom_value1_ < other.custom_value1_; - } - if (custom_value2_ != other.custom_value2_) { - return custom_value2_ < other.custom_value2_; - } - return type_ < other.type_; - } - - private: - const LinkerPatch::Type type_; - const uint32_t custom_value1_; - const uint32_t custom_value2_; - }; - - class ThunkValue { - public: - ThunkValue(ArrayRef<const uint8_t> code, const std::string& debug_name) - : code_(code.begin(), code.end()), debug_name_(debug_name) {} - ArrayRef<const uint8_t> GetCode() const { return ArrayRef<const uint8_t>(code_); } - const std::string& GetDebugName() const { return debug_name_; } - - private: - const std::vector<uint8_t> code_; - const std::string debug_name_; - }; - - std::map<ThunkKey, ThunkValue> thunk_map_; - }; - // Map method reference to assinged offset. // Wrap the map in a class implementing RelativePatcherTargetProvider. class MethodOffsetMap FINAL : public RelativePatcherTargetProvider { @@ -341,7 +272,6 @@ class RelativePatcherTest : public testing::Test { std::string error_msg_; InstructionSet instruction_set_; std::unique_ptr<const InstructionSetFeatures> features_; - ThunkProvider thunk_provider_; MethodOffsetMap method_offset_map_; std::unique_ptr<RelativePatcher> patcher_; uint32_t bss_begin_; diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 231017f55e..c2ae7646b5 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -449,18 +449,6 @@ void CodeGenerator::EmitLinkerPatches( // No linker patches by default. } -bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const { - // Code generators that create patches requiring thunk compilation should override this function. - return false; -} - -void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED, - /*out*/ ArenaVector<uint8_t>* code ATTRIBUTE_UNUSED, - /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) { - // Code generators that create patches requiring thunk compilation should override this function. 
- LOG(FATAL) << "Unexpected call to EmitThunkCode()."; -} - void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots, size_t maximum_safepoint_spill_size, size_t number_of_out_slots, diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index a86b27151d..3bd5e14539 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -21,7 +21,6 @@ #include "arch/instruction_set_features.h" #include "base/arena_containers.h" #include "base/arena_object.h" -#include "base/array_ref.h" #include "base/bit_field.h" #include "base/bit_utils.h" #include "base/enums.h" @@ -75,7 +74,6 @@ class CodeAllocator { virtual ~CodeAllocator() {} virtual uint8_t* Allocate(size_t size) = 0; - virtual ArrayRef<const uint8_t> GetMemory() const = 0; private: DISALLOW_COPY_AND_ASSIGN(CodeAllocator); @@ -212,10 +210,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { virtual void Initialize() = 0; virtual void Finalize(CodeAllocator* allocator); virtual void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches); - virtual bool NeedsThunkCode(const linker::LinkerPatch& patch) const; - virtual void EmitThunkCode(const linker::LinkerPatch& patch, - /*out*/ ArenaVector<uint8_t>* code, - /*out*/ std::string* debug_name); virtual void GenerateFrameEntry() = 0; virtual void GenerateFrameExit() = 0; virtual void Bind(HBasicBlock* block) = 0; diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 31887d92e8..273346ab4a 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -30,6 +30,7 @@ #include "heap_poisoning.h" #include "intrinsics.h" #include "intrinsics_arm64.h" +#include "linker/arm64/relative_patcher_arm64.h" #include "linker/linker_patch.h" #include "lock_word.h" #include "mirror/array-inl.h" @@ -1424,62 +1425,6 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) { __ FinalizeCode(); CodeGenerator::Finalize(allocator); - - // Verify Baker read barrier linker patches. - if (kIsDebugBuild) { - ArrayRef<const uint8_t> code = allocator->GetMemory(); - for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) { - DCHECK(info.label.IsBound()); - uint32_t literal_offset = info.label.GetLocation(); - DCHECK_ALIGNED(literal_offset, 4u); - - auto GetInsn = [&code](uint32_t offset) { - DCHECK_ALIGNED(offset, 4u); - return - (static_cast<uint32_t>(code[offset + 0]) << 0) + - (static_cast<uint32_t>(code[offset + 1]) << 8) + - (static_cast<uint32_t>(code[offset + 2]) << 16)+ - (static_cast<uint32_t>(code[offset + 3]) << 24); - }; - - const uint32_t encoded_data = info.custom_data; - BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); - // Check that the next instruction matches the expected LDR. - switch (kind) { - case BakerReadBarrierKind::kField: { - DCHECK_GE(code.size() - literal_offset, 8u); - uint32_t next_insn = GetInsn(literal_offset + 4u); - // LDR (immediate) with correct base_reg. - CheckValidReg(next_insn & 0x1fu); // Check destination register. 
- const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5)); - break; - } - case BakerReadBarrierKind::kArray: { - DCHECK_GE(code.size() - literal_offset, 8u); - uint32_t next_insn = GetInsn(literal_offset + 4u); - // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL), - // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2]. - CheckValidReg(next_insn & 0x1fu); // Check destination register. - const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5)); - CheckValidReg((next_insn >> 16) & 0x1f); // Check index register - break; - } - case BakerReadBarrierKind::kGcRoot: { - DCHECK_GE(literal_offset, 4u); - uint32_t prev_insn = GetInsn(literal_offset - 4u); - // LDR (immediate) with correct root_reg. - const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg); - break; - } - default: - LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind); - UNREACHABLE(); - } - } - } } void ParallelMoveResolverARM64::PrepareForEmitNativeCode() { @@ -4869,44 +4814,6 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* lin DCHECK_EQ(size, linker_patches->size()); } -bool CodeGeneratorARM64::NeedsThunkCode(const linker::LinkerPatch& patch) const { - return patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch || - patch.GetType() == linker::LinkerPatch::Type::kCallRelative; -} - -void CodeGeneratorARM64::EmitThunkCode(const linker::LinkerPatch& patch, - /*out*/ ArenaVector<uint8_t>* code, - /*out*/ std::string* debug_name) { - Arm64Assembler assembler(GetGraph()->GetAllocator()); - switch (patch.GetType()) { - case linker::LinkerPatch::Type::kCallRelative: { - // The thunk just uses the entry point in the ArtMethod. This works even for calls - // to the generic JNI and interpreter trampolines. - Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset( - kArm64PointerSize).Int32Value()); - assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0)); - if (GetCompilerOptions().GenerateAnyDebugInfo()) { - *debug_name = "MethodCallThunk"; - } - break; - } - case linker::LinkerPatch::Type::kBakerReadBarrierBranch: { - DCHECK_EQ(patch.GetBakerCustomValue2(), 0u); - CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name); - break; - } - default: - LOG(FATAL) << "Unexpected patch type " << patch.GetType(); - UNREACHABLE(); - } - - // Ensure we emit the literal pool if any. 
- assembler.FinalizeCode(); - code->resize(assembler.CodeSize()); - MemoryRegion code_region(code->data(), code->size()); - assembler.FinalizeInstructions(code_region); -} - vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value) { return uint32_literals_.GetOrCreate( value, @@ -5047,12 +4954,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA DCHECK(!cls->MustGenerateClinitCheck()); // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ Register current_method = InputRegisterAt(cls, 0); - codegen_->GenerateGcRootFieldLoad(cls, - out_loc, - current_method, - ArtMethod::DeclaringClassOffset().Int32Value(), - /* fixup_label */ nullptr, - read_barrier_option); + GenerateGcRootFieldLoad(cls, + out_loc, + current_method, + ArtMethod::DeclaringClassOffset().Int32Value(), + /* fixup_label */ nullptr, + read_barrier_option); break; } case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: { @@ -5099,12 +5006,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA vixl::aarch64::Label* ldr_label = codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label); // /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */ - codegen_->GenerateGcRootFieldLoad(cls, - out_loc, - temp, - /* offset placeholder */ 0u, - ldr_label, - read_barrier_option); + GenerateGcRootFieldLoad(cls, + out_loc, + temp, + /* offset placeholder */ 0u, + ldr_label, + read_barrier_option); generate_null_check = true; break; } @@ -5112,12 +5019,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass())); - codegen_->GenerateGcRootFieldLoad(cls, - out_loc, - out.X(), - /* offset */ 0, - /* fixup_label */ nullptr, - read_barrier_option); + GenerateGcRootFieldLoad(cls, + out_loc, + out.X(), + /* offset */ 0, + /* fixup_label */ nullptr, + read_barrier_option); break; } case HLoadClass::LoadKind::kRuntimeCall: @@ -5260,12 +5167,12 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD vixl::aarch64::Label* ldr_label = codegen_->NewStringBssEntryPatch(dex_file, string_index, adrp_label); // /* GcRoot<mirror::String> */ out = *(base_address + offset) /* PC-relative */ - codegen_->GenerateGcRootFieldLoad(load, - out_loc, - temp, - /* offset placeholder */ 0u, - ldr_label, - kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, + out_loc, + temp, + /* offset placeholder */ 0u, + ldr_label, + kCompilerReadBarrierOption); SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load); codegen_->AddSlowPath(slow_path); @@ -5278,12 +5185,12 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD __ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(), load->GetStringIndex(), load->GetString())); - codegen_->GenerateGcRootFieldLoad(load, - out_loc, - out.X(), - /* offset */ 0, - /* fixup_label */ nullptr, - kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, + out_loc, + out.X(), + /* offset */ 0, + /* fixup_label */ nullptr, + kCompilerReadBarrierOption); return; } default: @@ -6232,7 +6139,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters( } } -void CodeGeneratorARM64::GenerateGcRootFieldLoad( +void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad( HInstruction* instruction, Location root, Register obj, @@ -6266,8 
+6173,9 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( DCHECK(temps.IsAvailable(ip0)); DCHECK(temps.IsAvailable(ip1)); temps.Exclude(ip0, ip1); - uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode()); - vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data); + uint32_t custom_data = + linker::Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg.GetCode()); + vixl::aarch64::Label* cbnz_label = codegen_->NewBakerReadBarrierPatch(custom_data); EmissionCheckScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize); vixl::aarch64::Label return_address; @@ -6296,14 +6204,14 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( // Slow path marking the GC root `root`. The entrypoint will // be loaded by the slow path code. SlowPathCodeARM64* slow_path = - new (GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root); - AddSlowPath(slow_path); + new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root); + codegen_->AddSlowPath(slow_path); // /* GcRoot<mirror::Object> */ root = *(obj + offset) if (fixup_label == nullptr) { __ Ldr(root_reg, MemOperand(obj, offset)); } else { - EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj); + codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj); } static_assert( sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>), @@ -6323,10 +6231,10 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( if (fixup_label == nullptr) { __ Add(root_reg.X(), obj.X(), offset); } else { - EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X()); + codegen_->EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X()); } // /* mirror::Object* */ root = root->Read() - GenerateReadBarrierForRootSlow(instruction, root, root); + codegen_->GenerateReadBarrierForRootSlow(instruction, root, root); } } else { // Plain GC root load with no read barrier. @@ -6334,12 +6242,12 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( if (fixup_label == nullptr) { __ Ldr(root_reg, MemOperand(obj, offset)); } else { - EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X()); + codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X()); } // Note that GC roots are not affected by heap poisoning, thus we // do not have to unpoison `root_reg` here. 
} - MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); } void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, @@ -6388,7 +6296,9 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins DCHECK(temps.IsAvailable(ip0)); DCHECK(temps.IsAvailable(ip1)); temps.Exclude(ip0, ip1); - uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode()); + uint32_t custom_data = linker::Arm64RelativePatcher::EncodeBakerReadBarrierFieldData( + base.GetCode(), + obj.GetCode()); vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data); { @@ -6473,7 +6383,8 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* ins DCHECK(temps.IsAvailable(ip0)); DCHECK(temps.IsAvailable(ip1)); temps.Exclude(ip0, ip1); - uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode()); + uint32_t custom_data = + linker::Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(temp.GetCode()); vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data); __ Add(temp.X(), obj.X(), Operand(data_offset)); @@ -6833,176 +6744,5 @@ void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_ #undef __ #undef QUICK_ENTRY_POINT -#define __ assembler.GetVIXLAssembler()-> - -static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler, - vixl::aarch64::Register base_reg, - vixl::aarch64::MemOperand& lock_word, - vixl::aarch64::Label* slow_path) { - // Load the lock word containing the rb_state. - __ Ldr(ip0.W(), lock_word); - // Given the numeric representation, it's enough to check the low bit of the rb_state. - static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); - static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); - __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path); - static_assert( - BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET, - "Field and array LDR offsets must be the same to reuse the same code."); - // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning). - static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4), - "Field LDR must be 1 instruction (4B) before the return address label; " - " 2 instructions (8B) for heap poisoning."); - __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET); - // Introduce a dependency on the lock_word including rb_state, - // to prevent load-load reordering, and without using - // a memory barrier (which would be more expensive). - __ Add(base_reg, base_reg, Operand(ip0, LSR, 32)); - __ Br(lr); // And return back to the function. - // Note: The fake dependency is unnecessary for the slow path. -} - -// Load the read barrier introspection entrypoint in register `entrypoint`. -static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler, - vixl::aarch64::Register entrypoint) { - // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection. 
- DCHECK_EQ(ip0.GetCode(), 16u); - const int32_t entry_point_offset = - Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode()); - __ Ldr(entrypoint, MemOperand(tr, entry_point_offset)); -} - -void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler, - uint32_t encoded_data, - /*out*/ std::string* debug_name) { - BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); - switch (kind) { - case BakerReadBarrierKind::kField: { - // Check if the holder is gray and, if not, add fake dependency to the base register - // and return to the LDR instruction to load the reference. Otherwise, use introspection - // to load the reference and call the entrypoint (in IP1) that performs further checks - // on the reference and marks it if needed. - auto base_reg = - Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); - CheckValidReg(base_reg.GetCode()); - auto holder_reg = - Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data)); - CheckValidReg(holder_reg.GetCode()); - UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); - temps.Exclude(ip0, ip1); - // If base_reg differs from holder_reg, the offset was too large and we must have - // emitted an explicit null check before the load. Otherwise, we need to null-check - // the holder as we do not necessarily do that check before going to the thunk. - vixl::aarch64::Label throw_npe; - if (holder_reg.Is(base_reg)) { - __ Cbz(holder_reg.W(), &throw_npe); - } - vixl::aarch64::Label slow_path; - MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value()); - EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path); - __ Bind(&slow_path); - MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET); - __ Ldr(ip0.W(), ldr_address); // Load the LDR (immediate) unsigned offset. - LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); - __ Ubfx(ip0.W(), ip0.W(), 10, 12); // Extract the offset. - __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2)); // Load the reference. - // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference. - __ Br(ip1); // Jump to the entrypoint. - if (holder_reg.Is(base_reg)) { - // Add null check slow path. The stack map is at the address pointed to by LR. - __ Bind(&throw_npe); - int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value(); - __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset)); - __ Br(ip0); - } - break; - } - case BakerReadBarrierKind::kArray: { - auto base_reg = - Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); - CheckValidReg(base_reg.GetCode()); - DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, - BakerReadBarrierSecondRegField::Decode(encoded_data)); - UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); - temps.Exclude(ip0, ip1); - vixl::aarch64::Label slow_path; - int32_t data_offset = - mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value(); - MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset); - DCHECK_LT(lock_word.GetOffset(), 0); - EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path); - __ Bind(&slow_path); - MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET); - __ Ldr(ip0.W(), ldr_address); // Load the LDR (register) unsigned offset. 
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); - __ Ubfx(ip0, ip0, 16, 6); // Extract the index register, plus 32 (bit 21 is set). - __ Bfi(ip1, ip0, 3, 6); // Insert ip0 to the entrypoint address to create - // a switch case target based on the index register. - __ Mov(ip0, base_reg); // Move the base register to ip0. - __ Br(ip1); // Jump to the entrypoint's array switch case. - break; - } - case BakerReadBarrierKind::kGcRoot: { - // Check if the reference needs to be marked and if so (i.e. not null, not marked yet - // and it does not have a forwarding address), call the correct introspection entrypoint; - // otherwise return the reference (or the extracted forwarding address). - // There is no gray bit check for GC roots. - auto root_reg = - Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); - CheckValidReg(root_reg.GetCode()); - DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, - BakerReadBarrierSecondRegField::Decode(encoded_data)); - UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); - temps.Exclude(ip0, ip1); - vixl::aarch64::Label return_label, not_marked, forwarding_address; - __ Cbz(root_reg, &return_label); - MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value()); - __ Ldr(ip0.W(), lock_word); - __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, &not_marked); - __ Bind(&return_label); - __ Br(lr); - __ Bind(&not_marked); - __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1)); - __ B(&forwarding_address, mi); - LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); - // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to - // art_quick_read_barrier_mark_introspection_gc_roots. - __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET)); - __ Mov(ip0.W(), root_reg); - __ Br(ip1); - __ Bind(&forwarding_address); - __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift); - __ Br(lr); - break; - } - default: - LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind); - UNREACHABLE(); - } - - if (GetCompilerOptions().GenerateAnyDebugInfo()) { - std::ostringstream oss; - oss << "BakerReadBarrierThunk"; - switch (kind) { - case BakerReadBarrierKind::kField: - oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data) - << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data); - break; - case BakerReadBarrierKind::kArray: - oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); - DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, - BakerReadBarrierSecondRegField::Decode(encoded_data)); - break; - case BakerReadBarrierKind::kGcRoot: - oss << "GcRoot_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); - DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, - BakerReadBarrierSecondRegField::Decode(encoded_data)); - break; - } - *debug_name = oss.str(); - } -} - -#undef __ - } // namespace arm64 } // namespace art diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index aa343b1185..6a52eecbd3 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -18,7 +18,6 @@ #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_ #include "arch/arm64/quick_method_frame_info_arm64.h" -#include "base/bit_field.h" #include "code_generator.h" #include "common_arm64.h" #include "dex/dex_file_types.h" @@ -37,11 +36,6 @@ #pragma GCC diagnostic pop namespace art { - -namespace linker { -class Arm64RelativePatcherTest; -} // namespace linker - namespace arm64 { class 
CodeGeneratorARM64; @@ -315,6 +309,17 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator { uint32_t offset, Location maybe_temp, ReadBarrierOption read_barrier_option); + // Generate a GC root reference load: + // + // root <- *(obj + offset) + // + // while honoring read barriers based on read_barrier_option. + void GenerateGcRootFieldLoad(HInstruction* instruction, + Location root, + vixl::aarch64::Register obj, + uint32_t offset, + vixl::aarch64::Label* fixup_label, + ReadBarrierOption read_barrier_option); // Generate a floating-point comparison. void GenerateFcmp(HInstruction* instruction); @@ -636,24 +641,9 @@ class CodeGeneratorARM64 : public CodeGenerator { vixl::aarch64::Register base); void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; - bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE; - void EmitThunkCode(const linker::LinkerPatch& patch, - /*out*/ ArenaVector<uint8_t>* code, - /*out*/ std::string* debug_name) OVERRIDE; void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE; - // Generate a GC root reference load: - // - // root <- *(obj + offset) - // - // while honoring read barriers based on read_barrier_option. - void GenerateGcRootFieldLoad(HInstruction* instruction, - Location root, - vixl::aarch64::Register obj, - uint32_t offset, - vixl::aarch64::Label* fixup_label, - ReadBarrierOption read_barrier_option); // Fast path implementation of ReadBarrier::Barrier for a heap // reference field load when Baker's read barriers are used. void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, @@ -788,62 +778,6 @@ class CodeGeneratorARM64 : public CodeGenerator { void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; private: - // Encoding of thunk type and data for link-time generated thunks for Baker read barriers. - - enum class BakerReadBarrierKind : uint8_t { - kField, // Field get or array get with constant offset (i.e. constant index). - kArray, // Array get with index in register. - kGcRoot, // GC root load. 
- kLast = kGcRoot - }; - - static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* sp/zr is invalid */ 31u; - - static constexpr size_t kBitsForBakerReadBarrierKind = - MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast)); - static constexpr size_t kBakerReadBarrierBitsForRegister = - MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg); - using BakerReadBarrierKindField = - BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>; - using BakerReadBarrierFirstRegField = - BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>; - using BakerReadBarrierSecondRegField = - BitField<uint32_t, - kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister, - kBakerReadBarrierBitsForRegister>; - - static void CheckValidReg(uint32_t reg) { - DCHECK(reg < vixl::aarch64::lr.GetCode() && - reg != vixl::aarch64::ip0.GetCode() && - reg != vixl::aarch64::ip1.GetCode()) << reg; - } - - static inline uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) { - CheckValidReg(base_reg); - CheckValidReg(holder_reg); - return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) | - BakerReadBarrierFirstRegField::Encode(base_reg) | - BakerReadBarrierSecondRegField::Encode(holder_reg); - } - - static inline uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) { - CheckValidReg(base_reg); - return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) | - BakerReadBarrierFirstRegField::Encode(base_reg) | - BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg); - } - - static inline uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) { - CheckValidReg(root_reg); - return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) | - BakerReadBarrierFirstRegField::Encode(root_reg) | - BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg); - } - - void CompileBakerReadBarrierThunk(Arm64Assembler& assembler, - uint32_t encoded_data, - /*out*/ std::string* debug_name); - using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>; using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch64::Literal<uint32_t>*>; using StringToLiteralMap = ArenaSafeMap<StringReference, @@ -920,7 +854,6 @@ class CodeGeneratorARM64 : public CodeGenerator { // Patches for class literals in JIT compiled code. TypeToLiteralMap jit_class_patches_; - friend class linker::Arm64RelativePatcherTest; DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64); }; diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 15d952608d..b38a006305 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -94,6 +94,9 @@ constexpr bool kBakerReadBarrierLinkTimeThunksEnableForFields = true; constexpr bool kBakerReadBarrierLinkTimeThunksEnableForArrays = true; constexpr bool kBakerReadBarrierLinkTimeThunksEnableForGcRoots = true; +// The reserved entrypoint register for link-time generated thunks. +const vixl32::Register kBakerCcEntrypointRegister = r4; + // Using a base helps identify when we hit Marking Register check breakpoints. 
constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10; @@ -113,6 +116,8 @@ static inline void ExcludeIPAndBakerCcEntrypointRegister(UseScratchRegisterScope DCHECK(temps->IsAvailable(ip)); temps->Exclude(ip); DCHECK(!temps->IsAvailable(kBakerCcEntrypointRegister)); + DCHECK_EQ(kBakerCcEntrypointRegister.GetCode(), + linker::Thumb2RelativePatcher::kBakerCcEntrypointRegister); DCHECK_NE(instruction->GetLocations()->GetTempCount(), 0u); DCHECK(RegisterFrom(instruction->GetLocations()->GetTemp( instruction->GetLocations()->GetTempCount() - 1u)).Is(kBakerCcEntrypointRegister)); @@ -2417,80 +2422,6 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) { FixJumpTables(); GetAssembler()->FinalizeCode(); CodeGenerator::Finalize(allocator); - - // Verify Baker read barrier linker patches. - if (kIsDebugBuild) { - ArrayRef<const uint8_t> code = allocator->GetMemory(); - for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) { - DCHECK(info.label.IsBound()); - uint32_t literal_offset = info.label.GetLocation(); - DCHECK_ALIGNED(literal_offset, 2u); - - auto GetInsn16 = [&code](uint32_t offset) { - DCHECK_ALIGNED(offset, 2u); - return (static_cast<uint32_t>(code[offset + 0]) << 0) + - (static_cast<uint32_t>(code[offset + 1]) << 8); - }; - auto GetInsn32 = [=](uint32_t offset) { - return (GetInsn16(offset) << 16) + (GetInsn16(offset + 2u) << 0); - }; - - uint32_t encoded_data = info.custom_data; - BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); - // Check that the next instruction matches the expected LDR. - switch (kind) { - case BakerReadBarrierKind::kField: { - BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); - if (width == BakerReadBarrierWidth::kWide) { - DCHECK_GE(code.size() - literal_offset, 8u); - uint32_t next_insn = GetInsn32(literal_offset + 4u); - // LDR (immediate), encoding T3, with correct base_reg. - CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. - const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(next_insn & 0xffff0000u, 0xf8d00000u | (base_reg << 16)); - } else { - DCHECK_GE(code.size() - literal_offset, 6u); - uint32_t next_insn = GetInsn16(literal_offset + 4u); - // LDR (immediate), encoding T1, with correct base_reg. - CheckValidReg(next_insn & 0x7u); // Check destination register. - const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(next_insn & 0xf838u, 0x6800u | (base_reg << 3)); - } - break; - } - case BakerReadBarrierKind::kArray: { - DCHECK_GE(code.size() - literal_offset, 8u); - uint32_t next_insn = GetInsn32(literal_offset + 4u); - // LDR (register) with correct base_reg, S=1 and option=011 (LDR Wt, [Xn, Xm, LSL #2]). - CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. - const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(next_insn & 0xffff0ff0u, 0xf8500020u | (base_reg << 16)); - CheckValidReg(next_insn & 0xf); // Check index register - break; - } - case BakerReadBarrierKind::kGcRoot: { - BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); - if (width == BakerReadBarrierWidth::kWide) { - DCHECK_GE(literal_offset, 4u); - uint32_t prev_insn = GetInsn32(literal_offset - 4u); - // LDR (immediate), encoding T3, with correct root_reg. 
- const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(prev_insn & 0xfff0f000u, 0xf8d00000u | (root_reg << 12)); - } else { - DCHECK_GE(literal_offset, 2u); - uint32_t prev_insn = GetInsn16(literal_offset - 2u); - // LDR (immediate), encoding T1, with correct root_reg. - const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); - CHECK_EQ(prev_insn & 0xf807u, 0x6800u | root_reg); - } - break; - } - default: - LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind); - UNREACHABLE(); - } - } - } } void CodeGeneratorARMVIXL::SetupBlockedRegisters() const { @@ -7482,11 +7413,11 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ DCHECK(!cls->MustGenerateClinitCheck()); // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ vixl32::Register current_method = InputRegisterAt(cls, 0); - codegen_->GenerateGcRootFieldLoad(cls, - out_loc, - current_method, - ArtMethod::DeclaringClassOffset().Int32Value(), - read_barrier_option); + GenerateGcRootFieldLoad(cls, + out_loc, + current_method, + ArtMethod::DeclaringClassOffset().Int32Value(), + read_barrier_option); break; } case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: { @@ -7517,7 +7448,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex()); codegen_->EmitMovwMovtPlaceholder(labels, out); - codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option); + GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option); generate_null_check = true; break; } @@ -7526,7 +7457,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ cls->GetTypeIndex(), cls->GetClass())); // /* GcRoot<mirror::Class> */ out = *out - codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option); + GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option); break; } case HLoadClass::LoadKind::kRuntimeCall: @@ -7734,8 +7665,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex()); codegen_->EmitMovwMovtPlaceholder(labels, out); - codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); LoadStringSlowPathARMVIXL* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load); codegen_->AddSlowPath(slow_path); @@ -7749,8 +7679,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE load->GetStringIndex(), load->GetString())); // /* GcRoot<mirror::String> */ out = *out - codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); return; } default: @@ -8801,7 +8730,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters( } } -void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( +void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad( HInstruction* instruction, Location root, vixl32::Register obj, @@ -8832,8 +8761,9 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( UseScratchRegisterScope 
temps(GetVIXLAssembler()); ExcludeIPAndBakerCcEntrypointRegister(&temps, instruction); bool narrow = CanEmitNarrowLdr(root_reg, obj, offset); - uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow); - vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data); + uint32_t custom_data = linker::Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData( + root_reg.GetCode(), narrow); + vixl32::Label* bne_label = codegen_->NewBakerReadBarrierPatch(custom_data); vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes); vixl32::Label return_address; @@ -8844,7 +8774,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( DCHECK_LT(offset, kReferenceLoadMinFarOffset); ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset(); __ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset)); - EmitPlaceholderBne(this, bne_label); + EmitPlaceholderBne(codegen_, bne_label); __ Bind(&return_address); DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(), narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET @@ -8864,8 +8794,8 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( // Slow path marking the GC root `root`. The entrypoint will // be loaded by the slow path code. SlowPathCodeARMVIXL* slow_path = - new (GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root); - AddSlowPath(slow_path); + new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root); + codegen_->AddSlowPath(slow_path); // /* GcRoot<mirror::Object> */ root = *(obj + offset) GetAssembler()->LoadFromOffset(kLoadWord, root_reg, obj, offset); @@ -8886,7 +8816,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( // /* GcRoot<mirror::Object>* */ root = obj + offset __ Add(root_reg, obj, offset); // /* mirror::Object* */ root = root->Read() - GenerateReadBarrierForRootSlow(instruction, root, root); + codegen_->GenerateReadBarrierForRootSlow(instruction, root, root); } } else { // Plain GC root load with no read barrier. @@ -8895,7 +8825,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( // Note that GC roots are not affected by heap poisoning, thus we // do not have to unpoison `root_reg` here. 
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
 }
 
 void CodeGeneratorARMVIXL::MaybeAddBakerCcEntrypointTempForFields(LocationSummary* locations) {
@@ -8956,7 +8886,8 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
     }
     UseScratchRegisterScope temps(GetVIXLAssembler());
     ExcludeIPAndBakerCcEntrypointRegister(&temps, instruction);
-    uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode(), narrow);
+    uint32_t custom_data = linker::Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(
+        base.GetCode(), obj.GetCode(), narrow);
     vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
 
     {
@@ -9042,7 +8973,8 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(HInstruction* i
 
     UseScratchRegisterScope temps(GetVIXLAssembler());
     ExcludeIPAndBakerCcEntrypointRegister(&temps, instruction);
-    uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode());
+    uint32_t custom_data =
+        linker::Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(data_reg.GetCode());
     vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
 
     __ Add(data_reg, obj, Operand(data_offset));
@@ -9179,7 +9111,7 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction
 
 void CodeGeneratorARMVIXL::GenerateRawReferenceLoad(HInstruction* instruction,
                                                     Location ref,
-                                                    vixl32::Register obj,
+                                                    vixl::aarch32::Register obj,
                                                     uint32_t offset,
                                                     Location index,
                                                     ScaleFactor scale_factor,
@@ -9519,7 +9451,7 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePa
   return &patches->back();
 }
 
-vixl32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_data) {
+vixl::aarch32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_data) {
   baker_read_barrier_patches_.emplace_back(custom_data);
   return &baker_read_barrier_patches_.back().label;
 }
@@ -9616,45 +9548,6 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* l
   DCHECK_EQ(size, linker_patches->size());
 }
 
-bool CodeGeneratorARMVIXL::NeedsThunkCode(const linker::LinkerPatch& patch) const {
-  return patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
-         patch.GetType() == linker::LinkerPatch::Type::kCallRelative;
-}
-
-void CodeGeneratorARMVIXL::EmitThunkCode(const linker::LinkerPatch& patch,
-                                         /*out*/ ArenaVector<uint8_t>* code,
-                                         /*out*/ std::string* debug_name) {
-  arm::ArmVIXLAssembler assembler(GetGraph()->GetAllocator());
-  switch (patch.GetType()) {
-    case linker::LinkerPatch::Type::kCallRelative:
-      // The thunk just uses the entry point in the ArtMethod. This works even for calls
-      // to the generic JNI and interpreter trampolines.
-      assembler.LoadFromOffset(
-          arm::kLoadWord,
-          vixl32::pc,
-          vixl32::r0,
-          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
-      assembler.GetVIXLAssembler()->Bkpt(0);
-      if (GetCompilerOptions().GenerateAnyDebugInfo()) {
-        *debug_name = "MethodCallThunk";
-      }
-      break;
-    case linker::LinkerPatch::Type::kBakerReadBarrierBranch:
-      DCHECK_EQ(patch.GetBakerCustomValue2(), 0u);
-      CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected patch type " << patch.GetType();
-      UNREACHABLE();
-  }
-
-  // Ensure we emit the literal pool if any.
-  assembler.FinalizeCode();
-  code->resize(assembler.CodeSize());
-  MemoryRegion code_region(code->data(), code->size());
-  assembler.FinalizeInstructions(code_region);
-}
-
 VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
     uint32_t value,
     Uint32ToLiteralMap* map) {
@@ -9899,210 +9792,5 @@ void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder(
 #undef QUICK_ENTRY_POINT
 #undef TODO_VIXL32
 
-#define __ assembler.GetVIXLAssembler()->
-
-static void EmitGrayCheckAndFastPath(ArmVIXLAssembler& assembler,
-                                     vixl32::Register base_reg,
-                                     vixl32::MemOperand& lock_word,
-                                     vixl32::Label* slow_path,
-                                     int32_t raw_ldr_offset) {
-  // Load the lock word containing the rb_state.
-  __ Ldr(ip, lock_word);
-  // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
-  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
-  __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
-  __ B(ne, slow_path, /* is_far_target */ false);
-  __ Add(lr, lr, raw_ldr_offset);
-  // Introduce a dependency on the lock_word including rb_state,
-  // to prevent load-load reordering, and without using
-  // a memory barrier (which would be more expensive).
-  __ Add(base_reg, base_reg, Operand(ip, LSR, 32));
-  __ Bx(lr);  // And return back to the function.
-  // Note: The fake dependency is unnecessary for the slow path.
-}
-
-// Load the read barrier introspection entrypoint in register `entrypoint`
-static void LoadReadBarrierMarkIntrospectionEntrypoint(ArmVIXLAssembler& assembler,
-                                                       vixl32::Register entrypoint) {
-  // The register where the read barrier introspection entrypoint is loaded
-  // is fixed: `Thumb2RelativePatcher::kBakerCcEntrypointRegister` (R4).
-  DCHECK(entrypoint.Is(kBakerCcEntrypointRegister));
-  // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection.
-  DCHECK_EQ(ip.GetCode(), 12u);
-  const int32_t entry_point_offset =
-      Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ip.GetCode());
-  __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
-}
-
-void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
-                                                        uint32_t encoded_data,
-                                                        /*out*/ std::string* debug_name) {
-  BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
-  switch (kind) {
-    case BakerReadBarrierKind::kField: {
-      // Check if the holder is gray and, if not, add fake dependency to the base register
-      // and return to the LDR instruction to load the reference. Otherwise, use introspection
-      // to load the reference and call the entrypoint (in kBakerCcEntrypointRegister)
-      // that performs further checks on the reference and marks it if needed.
-      vixl32::Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
-      CheckValidReg(base_reg.GetCode());
-      vixl32::Register holder_reg(BakerReadBarrierSecondRegField::Decode(encoded_data));
-      CheckValidReg(holder_reg.GetCode());
-      BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
-      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
-      temps.Exclude(ip);
-      // If base_reg differs from holder_reg, the offset was too large and we must have
-      // emitted an explicit null check before the load. Otherwise, we need to null-check
-      // the holder as we do not necessarily do that check before going to the thunk.
-      vixl32::Label throw_npe;
-      if (holder_reg.Is(base_reg)) {
-        __ CompareAndBranchIfZero(holder_reg, &throw_npe, /* is_far_target */ false);
-      }
-      vixl32::Label slow_path;
-      MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
-      const int32_t raw_ldr_offset = (width == BakerReadBarrierWidth::kWide)
-          ? BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET
-          : BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET;
-      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
-      __ Bind(&slow_path);
-      const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
-                                 raw_ldr_offset;
-      vixl32::Register ep_reg(kBakerCcEntrypointRegister);
-      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
-      if (width == BakerReadBarrierWidth::kWide) {
-        MemOperand ldr_half_address(lr, ldr_offset + 2);
-        __ Ldrh(ip, ldr_half_address);        // Load the LDR immediate half-word with "Rt | imm12".
-        __ Ubfx(ip, ip, 0, 12);               // Extract the offset imm12.
-        __ Ldr(ip, MemOperand(base_reg, ip));  // Load the reference.
-      } else {
-        MemOperand ldr_address(lr, ldr_offset);
-        __ Ldrh(ip, ldr_address);             // Load the LDR immediate, encoding T1.
-        __ Add(ep_reg,                        // Adjust the entrypoint address to the entrypoint
-               ep_reg,                        // for narrow LDR.
-               Operand(BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET));
-        __ Ubfx(ip, ip, 6, 5);                // Extract the imm5, i.e. offset / 4.
-        __ Ldr(ip, MemOperand(base_reg, ip, LSL, 2));  // Load the reference.
-      }
-      // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
-      __ Bx(ep_reg);                          // Jump to the entrypoint.
-      if (holder_reg.Is(base_reg)) {
-        // Add null check slow path. The stack map is at the address pointed to by LR.
-        __ Bind(&throw_npe);
-        int32_t offset = GetThreadOffset<kArmPointerSize>(kQuickThrowNullPointer).Int32Value();
-        __ Ldr(ip, MemOperand(/* Thread* */ vixl32::r9, offset));
-        __ Bx(ip);
-      }
-      break;
-    }
-    case BakerReadBarrierKind::kArray: {
-      vixl32::Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
-      CheckValidReg(base_reg.GetCode());
-      DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
-                BakerReadBarrierSecondRegField::Decode(encoded_data));
-      DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide);
-      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
-      temps.Exclude(ip);
-      vixl32::Label slow_path;
-      int32_t data_offset =
-          mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
-      MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
-      DCHECK_LT(lock_word.GetOffsetImmediate(), 0);
-      const int32_t raw_ldr_offset = BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET;
-      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
-      __ Bind(&slow_path);
-      const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
-                                 raw_ldr_offset;
-      MemOperand ldr_address(lr, ldr_offset + 2);
-      __ Ldrb(ip, ldr_address);               // Load the LDR (register) byte with "00 | imm2 | Rm",
-                                              // i.e. Rm+32 because the scale in imm2 is 2.
-      vixl32::Register ep_reg(kBakerCcEntrypointRegister);
-      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
-      __ Bfi(ep_reg, ip, 3, 6);               // Insert ip to the entrypoint address to create
-                                              // a switch case target based on the index register.
-      __ Mov(ip, base_reg);                   // Move the base register to ip0.
-      __ Bx(ep_reg);                          // Jump to the entrypoint's array switch case.
-      break;
-    }
-    case BakerReadBarrierKind::kGcRoot: {
-      // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
-      // and it does not have a forwarding address), call the correct introspection entrypoint;
-      // otherwise return the reference (or the extracted forwarding address).
-      // There is no gray bit check for GC roots.
-      vixl32::Register root_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
-      CheckValidReg(root_reg.GetCode());
-      DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
-                BakerReadBarrierSecondRegField::Decode(encoded_data));
-      BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
-      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
-      temps.Exclude(ip);
-      vixl32::Label return_label, not_marked, forwarding_address;
-      __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
-      MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
-      __ Ldr(ip, lock_word);
-      __ Tst(ip, LockWord::kMarkBitStateMaskShifted);
-      __ B(eq, &not_marked);
-      __ Bind(&return_label);
-      __ Bx(lr);
-      __ Bind(&not_marked);
-      static_assert(LockWord::kStateShift == 30 && LockWord::kStateForwardingAddress == 3,
-                    "To use 'CMP ip, #modified-immediate; BHS', we need the lock word state in "
-                    " the highest bits and the 'forwarding address' state to have all bits set");
-      __ Cmp(ip, Operand(0xc0000000));
-      __ B(hs, &forwarding_address);
-      vixl32::Register ep_reg(kBakerCcEntrypointRegister);
-      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
-      // Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister
-      // to art_quick_read_barrier_mark_introspection_gc_roots.
-      int32_t entrypoint_offset = (width == BakerReadBarrierWidth::kWide)
-          ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET
-          : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET;
-      __ Add(ep_reg, ep_reg, Operand(entrypoint_offset));
-      __ Mov(ip, root_reg);
-      __ Bx(ep_reg);
-      __ Bind(&forwarding_address);
-      __ Lsl(root_reg, ip, LockWord::kForwardingAddressShift);
-      __ Bx(lr);
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
-      UNREACHABLE();
-  }
-
-  if (GetCompilerOptions().GenerateAnyDebugInfo()) {
-    std::ostringstream oss;
-    oss << "BakerReadBarrierThunk";
-    switch (kind) {
-      case BakerReadBarrierKind::kField:
-        oss << "Field";
-        if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) {
-          oss << "Wide";
-        }
-        oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
-            << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
-        break;
-      case BakerReadBarrierKind::kArray:
-        oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
-        DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
-                  BakerReadBarrierSecondRegField::Decode(encoded_data));
-        DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide);
-        break;
-      case BakerReadBarrierKind::kGcRoot:
-        oss << "GcRoot";
-        if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) {
-          oss << "Wide";
-        }
-        oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
-        DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
-                  BakerReadBarrierSecondRegField::Decode(encoded_data));
-        break;
-    }
-    *debug_name = oss.str();
-  }
-}
-
-#undef __
-
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 6b9919ab15..2114ea1ba1 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -36,11 +36,6 @@
 #pragma GCC diagnostic pop
 
 namespace art {
-
-namespace linker {
-class Thumb2RelativePatcherTest;
-}  // namespace linker
-
 namespace arm {
 
 // This constant is used as an approximate margin when emission of veneer and literal pools
@@ -113,9 +108,6 @@ static const vixl::aarch32::SRegister kRuntimeParameterFpuRegistersVIXL[] = {
 static const size_t kRuntimeParameterFpuRegistersLengthVIXL =
     arraysize(kRuntimeParameterFpuRegistersVIXL);
 
-// The reserved entrypoint register for link-time generated thunks.
-const vixl::aarch32::Register kBakerCcEntrypointRegister = vixl32::r4;
-
 class LoadClassSlowPathARMVIXL;
 class CodeGeneratorARMVIXL;
 
@@ -396,6 +388,16 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
                                  uint32_t offset,
                                  Location maybe_temp,
                                  ReadBarrierOption read_barrier_option);
+  // Generate a GC root reference load:
+  //
+  //   root <- *(obj + offset)
+  //
+  // while honoring read barriers based on read_barrier_option.
+  void GenerateGcRootFieldLoad(HInstruction* instruction,
+                               Location root,
+                               vixl::aarch32::Register obj,
+                               uint32_t offset,
+                               ReadBarrierOption read_barrier_option);
   void GenerateTestAndBranch(HInstruction* instruction,
                              size_t condition_input_index,
                              vixl::aarch32::Label* true_target,
@@ -604,10 +606,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
                                              Handle<mirror::Class> handle);
 
   void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
-  void EmitThunkCode(const linker::LinkerPatch& patch,
-                     /*out*/ ArenaVector<uint8_t>* code,
-                     /*out*/ std::string* debug_name) OVERRIDE;
 
   void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
 
@@ -615,16 +613,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
   // is added only for AOT compilation if link-time generated thunks for fields are enabled.
   void MaybeAddBakerCcEntrypointTempForFields(LocationSummary* locations);
 
-  // Generate a GC root reference load:
-  //
-  //   root <- *(obj + offset)
-  //
-  // while honoring read barriers based on read_barrier_option.
-  void GenerateGcRootFieldLoad(HInstruction* instruction,
-                               Location root,
-                               vixl::aarch32::Register obj,
-                               uint32_t offset,
-                               ReadBarrierOption read_barrier_option);
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
   void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -779,83 +767,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
                                        vixl::aarch32::Register temp = vixl32::Register());
 
  private:
-  // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
-
-  enum class BakerReadBarrierKind : uint8_t {
-    kField,   // Field get or array get with constant offset (i.e. constant index).
-    kArray,   // Array get with index in register.
-    kGcRoot,  // GC root load.
-    kLast = kGcRoot
-  };
-
-  enum class BakerReadBarrierWidth : uint8_t {
-    kWide,    // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled).
-    kNarrow,  // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled).
-    kLast = kNarrow
-  };
-
-  static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* pc is invalid */ 15u;
-
-  static constexpr size_t kBitsForBakerReadBarrierKind =
-      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
-  static constexpr size_t kBakerReadBarrierBitsForRegister =
-      MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg);
-  using BakerReadBarrierKindField =
-      BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
-  using BakerReadBarrierFirstRegField =
-      BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>;
-  using BakerReadBarrierSecondRegField =
-      BitField<uint32_t,
-               kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister,
-               kBakerReadBarrierBitsForRegister>;
-  static constexpr size_t kBitsForBakerReadBarrierWidth =
-      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierWidth::kLast));
-  using BakerReadBarrierWidthField =
-      BitField<BakerReadBarrierWidth,
-               kBitsForBakerReadBarrierKind + 2 * kBakerReadBarrierBitsForRegister,
-               kBitsForBakerReadBarrierWidth>;
-
-  static void CheckValidReg(uint32_t reg) {
-    DCHECK(reg < vixl::aarch32::ip.GetCode() && reg != kBakerCcEntrypointRegister.GetCode()) << reg;
-  }
-
-  static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg,
-                                                  uint32_t holder_reg,
-                                                  bool narrow) {
-    CheckValidReg(base_reg);
-    CheckValidReg(holder_reg);
-    DCHECK(!narrow || base_reg < 8u) << base_reg;
-    BakerReadBarrierWidth width =
-        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
-    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
-           BakerReadBarrierFirstRegField::Encode(base_reg) |
-           BakerReadBarrierSecondRegField::Encode(holder_reg) |
-           BakerReadBarrierWidthField::Encode(width);
-  }
-
-  static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
-    CheckValidReg(base_reg);
-    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
-           BakerReadBarrierFirstRegField::Encode(base_reg) |
-           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
-           BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
-  }
-
-  static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
-    CheckValidReg(root_reg);
-    DCHECK(!narrow || root_reg < 8u) << root_reg;
-    BakerReadBarrierWidth width =
-        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
-    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
-           BakerReadBarrierFirstRegField::Encode(root_reg) |
-           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
-           BakerReadBarrierWidthField::Encode(width);
-  }
-
-  void CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
-                                    uint32_t encoded_data,
-                                    /*out*/ std::string* debug_name);
-
   vixl::aarch32::Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
                                                                 vixl::aarch32::Register temp);
 
@@ -918,7 +829,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
   // Patches for class literals in JIT compiled code.
   TypeToLiteralMap jit_class_patches_;
 
-  friend class linker::Thumb2RelativePatcherTest;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
 };
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 792cfb539a..c41c290c8b 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -195,9 +195,7 @@ class InternalCodeAllocator : public CodeAllocator {
   }
   size_t GetSize() const { return size_; }
-  ArrayRef<const uint8_t> GetMemory() const OVERRIDE {
-    return ArrayRef<const uint8_t>(memory_.get(), size_);
-  }
+  uint8_t* GetMemory() const { return memory_.get(); }
 
  private:
   size_t size_;
@@ -271,8 +269,8 @@ static void Run(const InternalCodeAllocator& allocator,
   InstructionSet target_isa = codegen.GetInstructionSet();
 
   typedef Expected (*fptr)();
-  CommonCompilerTest::MakeExecutable(allocator.GetMemory().data(), allocator.GetMemory().size());
-  fptr f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(allocator.GetMemory().data()));
+  CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
+  fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
   if (target_isa == InstructionSet::kThumb2) {
     // For thumb we need the bottom bit set.
     f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 2e189fdd14..d20b681b49 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -105,15 +105,15 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
                     const std::vector<uint8_t>& expected_asm,
                     const std::vector<uint8_t>& expected_cfi) {
     // Get the outputs.
-    ArrayRef<const uint8_t> actual_asm = code_allocator_.GetMemory();
+    const std::vector<uint8_t>& actual_asm = code_allocator_.GetMemory();
     Assembler* opt_asm = code_gen_->GetAssembler();
-    ArrayRef<const uint8_t> actual_cfi(*(opt_asm->cfi().data()));
+    const std::vector<uint8_t>& actual_cfi = *(opt_asm->cfi().data());
 
     if (kGenerateExpected) {
       GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
     } else {
-      EXPECT_EQ(ArrayRef<const uint8_t>(expected_asm), actual_asm);
-      EXPECT_EQ(ArrayRef<const uint8_t>(expected_cfi), actual_cfi);
+      EXPECT_EQ(expected_asm, actual_asm);
+      EXPECT_EQ(expected_cfi, actual_cfi);
     }
   }
 
@@ -140,7 +140,7 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
       return memory_.data();
    }
 
-    ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+    const std::vector<uint8_t>& GetMemory() { return memory_; }
 
   private:
    std::vector<uint8_t> memory_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 79165826d1..e42dfc10ba 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -75,18 +75,22 @@ static constexpr const char* kPassNameSeparator = "$";
 class CodeVectorAllocator FINAL : public CodeAllocator {
  public:
   explicit CodeVectorAllocator(ArenaAllocator* allocator)
-      : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
+      : memory_(allocator->Adapter(kArenaAllocCodeBuffer)),
+        size_(0) {}
 
   virtual uint8_t* Allocate(size_t size) {
+    size_ = size;
     memory_.resize(size);
     return &memory_[0];
   }
 
-  ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+  size_t GetSize() const { return size_; }
+  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
   uint8_t* GetData() { return memory_.data(); }
 
  private:
   ArenaVector<uint8_t> memory_;
+  size_t size_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
 };
@@ -715,7 +719,7 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver(),
       codegen->GetInstructionSet(),
-      code_allocator->GetMemory(),
+      ArrayRef<const uint8_t>(code_allocator->GetMemory()),
       // Follow Quick's behavior and set the frame size to zero if it is
       // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
@@ -727,16 +731,6 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
       ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
       ArrayRef<const linker::LinkerPatch>(linker_patches));
 
-  CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
-  for (const linker::LinkerPatch& patch : linker_patches) {
-    if (codegen->NeedsThunkCode(patch) && storage->GetThunkCode(patch).empty()) {
-      ArenaVector<uint8_t> code(allocator->Adapter());
-      std::string debug_name;
-      codegen->EmitThunkCode(patch, &code, &debug_name);
-      storage->SetThunkCode(patch, ArrayRef<const uint8_t>(code), debug_name);
-    }
-  }
-
   return compiled_method;
 }
 
@@ -1345,7 +1339,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
       codegen->GetCoreSpillMask(),
       codegen->GetFpuSpillMask(),
       code_allocator.GetMemory().data(),
-      code_allocator.GetMemory().size(),
+      code_allocator.GetSize(),
       data_size,
       osr,
       roots,
@@ -1375,7 +1369,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     info.is_optimized = true;
    info.is_code_address_text_relative = false;
    info.code_address = code_address;
-    info.code_size = code_allocator.GetMemory().size();
+    info.code_size = code_allocator.GetSize();
    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
    info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
@@ -1384,7 +1378,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
 
   Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
   if (jit_logger != nullptr) {
-    jit_logger->WriteLog(code, code_allocator.GetMemory().size(), method);
+    jit_logger->WriteLog(code, code_allocator.GetSize(), method);
   }
 
   if (kArenaAllocatorCountAllocations) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 88dc6d44b6..e2c53bbaa8 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2068,9 +2068,7 @@ class Dex2Oat FINAL {
 
     {
       TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
-      linker::MultiOatRelativePatcher patcher(instruction_set_,
-                                              instruction_set_features_.get(),
-                                              driver_->GetCompiledMethodStorage());
+      linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
       for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
         std::unique_ptr<linker::ElfWriter>& elf_writer = elf_writers_[i];
         std::unique_ptr<linker::OatWriter>& oat_writer = oat_writers_[i];
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 476a843821..7449191984 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -299,8 +299,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
   DCHECK_EQ(vdex_files.size(), oat_files.size());
   for (size_t i = 0, size = oat_files.size(); i != size; ++i) {
     MultiOatRelativePatcher patcher(driver->GetInstructionSet(),
-                                    driver->GetInstructionSetFeatures(),
-                                    driver->GetCompiledMethodStorage());
+                                    driver->GetInstructionSetFeatures());
     OatWriter* const oat_writer = oat_writers[i].get();
     ElfWriter* const elf_writer = elf_writers[i].get();
     std::vector<const DexFile*> cur_dex_files(1u, class_path[i]);
diff --git a/dex2oat/linker/multi_oat_relative_patcher.cc b/dex2oat/linker/multi_oat_relative_patcher.cc
index 1449d478f9..1abaf7dfd1 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher.cc
@@ -20,28 +20,14 @@
 
 #include "base/bit_utils.h"
 #include "globals.h"
-#include "driver/compiled_method_storage.h"
 
 namespace art {
 namespace linker {
 
-void MultiOatRelativePatcher::ThunkProvider::GetThunkCode(const LinkerPatch& patch,
-                                                          /*out*/ ArrayRef<const uint8_t>* code,
-                                                          /*out*/ std::string* debug_name) {
-  *code = storage_->GetThunkCode(patch, debug_name);
-  DCHECK(!code->empty());
-}
-
-
 MultiOatRelativePatcher::MultiOatRelativePatcher(InstructionSet instruction_set,
-                                                 const InstructionSetFeatures* features,
-                                                 CompiledMethodStorage* storage)
-    : thunk_provider_(storage),
-      method_offset_map_(),
-      relative_patcher_(RelativePatcher::Create(instruction_set,
-                                                features,
-                                                &thunk_provider_,
-                                                &method_offset_map_)),
+                                                 const InstructionSetFeatures* features)
+    : method_offset_map_(),
+      relative_patcher_(RelativePatcher::Create(instruction_set, features, &method_offset_map_)),
       adjustment_(0u),
       instruction_set_(instruction_set),
       start_size_code_alignment_(0u),
diff --git a/dex2oat/linker/multi_oat_relative_patcher.h b/dex2oat/linker/multi_oat_relative_patcher.h
index 60fcfe8b58..bd33b95318 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.h
+++ b/dex2oat/linker/multi_oat_relative_patcher.h
@@ -26,7 +26,6 @@
 namespace art {
 
 class CompiledMethod;
-class CompiledMethodStorage;
 class InstructionSetFeatures;
 
 namespace linker {
@@ -39,9 +38,7 @@ class MultiOatRelativePatcher FINAL {
  public:
  using const_iterator = SafeMap<MethodReference, uint32_t>::const_iterator;
 
-  MultiOatRelativePatcher(InstructionSet instruction_set,
-                          const InstructionSetFeatures* features,
-                          CompiledMethodStorage* storage);
+  MultiOatRelativePatcher(InstructionSet instruction_set, const InstructionSetFeatures* features);
 
   // Mark the start of a new oat file (for statistics retrieval) and set the
   // adjustment for a new oat file to apply to all relative offsets that are
@@ -132,19 +129,6 @@ class MultiOatRelativePatcher FINAL {
   uint32_t MiscThunksSize() const;
 
  private:
-  class ThunkProvider : public RelativePatcherThunkProvider {
-   public:
-    explicit ThunkProvider(CompiledMethodStorage* storage)
-        : storage_(storage) {}
-
-    void GetThunkCode(const LinkerPatch& patch,
-                      /*out*/ ArrayRef<const uint8_t>* code,
-                      /*out*/ std::string* debug_name) OVERRIDE;
-
-   private:
-    CompiledMethodStorage* storage_;
-  };
-
   // Map method reference to assigned offset.
   // Wrap the map in a class implementing RelativePatcherTargetProvider.
   class MethodOffsetMap : public RelativePatcherTargetProvider {
@@ -153,7 +137,6 @@ class MultiOatRelativePatcher FINAL {
     SafeMap<MethodReference, uint32_t> map;
   };
 
-  ThunkProvider thunk_provider_;
   MethodOffsetMap method_offset_map_;
   std::unique_ptr<RelativePatcher> relative_patcher_;
   uint32_t adjustment_;
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index 05fe36a590..ca9c5f1e84 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -122,7 +122,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
 
   MultiOatRelativePatcherTest()
       : instruction_set_features_(InstructionSetFeatures::FromCppDefines()),
-        patcher_(kRuntimeISA, instruction_set_features_.get(), /* storage */ nullptr) {
+        patcher_(kRuntimeISA, instruction_set_features_.get()) {
     std::unique_ptr<MockPatcher> mock(new MockPatcher());
    mock_ = mock.get();
    patcher_.relative_patcher_ = std::move(mock);
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index ea4e210b74..6e95393e80 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -213,8 +213,7 @@ class OatTest : public CommonCompilerTest {
       class_linker->RegisterDexFile(*dex_file, nullptr);
     }
     MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
-                                    instruction_set_features_.get(),
-                                    compiler_driver_->GetCompiledMethodStorage());
+                                    instruction_set_features_.get());
     oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
     oat_writer.PrepareLayout(&patcher);
     elf_writer->PrepareDynamicSection(oat_writer.GetOatHeader().GetExecutableOffset(),