summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/cfi_test.h21
-rw-r--r--compiler/driver/compiled_method_storage.cc95
-rw-r--r--compiler/driver/compiled_method_storage.h26
-rw-r--r--compiler/jni/jni_cfi_test.cc6
-rw-r--r--compiler/linker/arm/relative_patcher_arm_base.cc44
-rw-r--r--compiler/linker/arm/relative_patcher_arm_base.h9
-rw-r--r--compiler/linker/arm/relative_patcher_thumb2.cc305
-rw-r--r--compiler/linker/arm/relative_patcher_thumb2.h78
-rw-r--r--compiler/linker/arm/relative_patcher_thumb2_test.cc96
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64.cc253
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64.h54
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64_test.cc87
-rw-r--r--compiler/linker/linker_patch.h2
-rw-r--r--compiler/linker/relative_patcher.cc13
-rw-r--r--compiler/linker/relative_patcher.h27
-rw-r--r--compiler/linker/relative_patcher_test.h72
-rw-r--r--compiler/optimizing/code_generator.cc12
-rw-r--r--compiler/optimizing/code_generator.h6
-rw-r--r--compiler/optimizing/code_generator_arm64.cc354
-rw-r--r--compiler/optimizing/code_generator_arm64.h89
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc370
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h110
-rw-r--r--compiler/optimizing/codegen_test_utils.h8
-rw-r--r--compiler/optimizing/optimizing_cfi_test.cc10
-rw-r--r--compiler/optimizing/optimizing_compiler.cc26
-rw-r--r--dex2oat/dex2oat.cc17
-rw-r--r--dex2oat/dex2oat_test.cc32
-rw-r--r--dex2oat/linker/image_test.h3
-rw-r--r--dex2oat/linker/multi_oat_relative_patcher.cc20
-rw-r--r--dex2oat/linker/multi_oat_relative_patcher.h19
-rw-r--r--dex2oat/linker/multi_oat_relative_patcher_test.cc2
-rw-r--r--dex2oat/linker/oat_writer_test.cc3
-rw-r--r--openjdkjvmti/deopt_manager.cc79
-rw-r--r--openjdkjvmti/deopt_manager.h23
-rw-r--r--openjdkjvmti/ti_method.cc3
-rw-r--r--runtime/debugger.cc5
-rw-r--r--runtime/debugger.h1
-rw-r--r--runtime/fault_handler.cc4
-rw-r--r--runtime/instrumentation.cc9
-rw-r--r--runtime/runtime_callbacks.cc9
-rw-r--r--runtime/runtime_callbacks.h9
-rw-r--r--test/1935-get-set-current-frame-jit/expected.txt2
-rwxr-xr-xtest/1935-get-set-current-frame-jit/run4
-rw-r--r--test/1935-get-set-current-frame-jit/src/Main.java33
-rw-r--r--tools/libcore_network_failures.txt92
-rwxr-xr-xtools/run-jdwp-tests.sh10
-rwxr-xr-xtools/run-libcore-tests.sh10
-rwxr-xr-xtools/setup-buildbot-device.sh17
-rw-r--r--tools/veridex/hidden_api.cc5
-rw-r--r--tools/veridex/hidden_api_finder.cc19
50 files changed, 1646 insertions, 957 deletions
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 29ff235cea..581edaa773 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -37,8 +37,8 @@ constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT;
class CFITest : public dwarf::DwarfTest {
public:
void GenerateExpected(FILE* f, InstructionSet isa, const char* isa_str,
- const std::vector<uint8_t>& actual_asm,
- const std::vector<uint8_t>& actual_cfi) {
+ ArrayRef<const uint8_t> actual_asm,
+ ArrayRef<const uint8_t> actual_cfi) {
std::vector<std::string> lines;
// Print the raw bytes.
fprintf(f, "static constexpr uint8_t expected_asm_%s[] = {", isa_str);
@@ -50,11 +50,18 @@ class CFITest : public dwarf::DwarfTest {
// Pretty-print CFI opcodes.
constexpr bool is64bit = false;
dwarf::DebugFrameOpCodeWriter<> initial_opcodes;
- dwarf::WriteCIE(is64bit, dwarf::Reg(8),
- initial_opcodes, kCFIFormat, &debug_frame_data_);
+ dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, kCFIFormat, &debug_frame_data_);
std::vector<uintptr_t> debug_frame_patches;
- dwarf::WriteFDE(is64bit, 0, 0, 0, actual_asm.size(), ArrayRef<const uint8_t>(actual_cfi),
- kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches);
+ dwarf::WriteFDE(is64bit,
+ /* section_address */ 0,
+ /* cie_address */ 0,
+ /* code_address */ 0,
+ actual_asm.size(),
+ actual_cfi,
+ kCFIFormat,
+ /* buffer_address */ 0,
+ &debug_frame_data_,
+ &debug_frame_patches);
ReformatCfi(Objdump(false, "-W"), &lines);
// Pretty-print assembly.
const uint8_t* asm_base = actual_asm.data();
@@ -142,7 +149,7 @@ class CFITest : public dwarf::DwarfTest {
}
// Pretty-print byte array. 12 bytes per line.
- static void HexDump(FILE* f, const std::vector<uint8_t>& data) {
+ static void HexDump(FILE* f, ArrayRef<const uint8_t> data) {
for (size_t i = 0; i < data.size(); i++) {
fprintf(f, i % 12 == 0 ? "\n " : " "); // Whitespace.
fprintf(f, "0x%02X,", data[i]);
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index a26a985ff9..aa8277edb4 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -161,6 +161,46 @@ class CompiledMethodStorage::LengthPrefixedArrayAlloc {
SwapSpace* const swap_space_;
};
+class CompiledMethodStorage::ThunkMapKey {
+ public:
+ ThunkMapKey(linker::LinkerPatch::Type type, uint32_t custom_value1, uint32_t custom_value2)
+ : type_(type), custom_value1_(custom_value1), custom_value2_(custom_value2) {}
+
+ bool operator<(const ThunkMapKey& other) const {
+ if (custom_value1_ != other.custom_value1_) {
+ return custom_value1_ < other.custom_value1_;
+ }
+ if (custom_value2_ != other.custom_value2_) {
+ return custom_value2_ < other.custom_value2_;
+ }
+ return type_ < other.type_;
+ }
+
+ private:
+ linker::LinkerPatch::Type type_;
+ uint32_t custom_value1_;
+ uint32_t custom_value2_;
+};
+
+class CompiledMethodStorage::ThunkMapValue {
+ public:
+ ThunkMapValue(std::vector<uint8_t, SwapAllocator<uint8_t>>&& code,
+ const std::string& debug_name)
+ : code_(std::move(code)), debug_name_(debug_name) {}
+
+ ArrayRef<const uint8_t> GetCode() const {
+ return ArrayRef<const uint8_t>(code_);
+ }
+
+ const std::string& GetDebugName() const {
+ return debug_name_;
+ }
+
+ private:
+ std::vector<uint8_t, SwapAllocator<uint8_t>> code_;
+ std::string debug_name_;
+};
+
CompiledMethodStorage::CompiledMethodStorage(int swap_fd)
: swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
dedupe_enabled_(true),
@@ -171,7 +211,9 @@ CompiledMethodStorage::CompiledMethodStorage(int swap_fd)
LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_linker_patches_("dedupe cfi info",
- LengthPrefixedArrayAlloc<linker::LinkerPatch>(swap_space_.get())) {
+ LengthPrefixedArrayAlloc<linker::LinkerPatch>(swap_space_.get())),
+ thunk_map_lock_("thunk_map_lock"),
+ thunk_map_(std::less<ThunkMapKey>(), SwapAllocator<ThunkMapValueType>(swap_space_.get())) {
}
CompiledMethodStorage::~CompiledMethodStorage() {
@@ -237,4 +279,55 @@ void CompiledMethodStorage::ReleaseLinkerPatches(
ReleaseArrayIfNotDeduplicated(linker_patches);
}
+CompiledMethodStorage::ThunkMapKey CompiledMethodStorage::GetThunkMapKey(
+ const linker::LinkerPatch& linker_patch) {
+ uint32_t custom_value1 = 0u;
+ uint32_t custom_value2 = 0u;
+ switch (linker_patch.GetType()) {
+ case linker::LinkerPatch::Type::kBakerReadBarrierBranch:
+ custom_value1 = linker_patch.GetBakerCustomValue1();
+ custom_value2 = linker_patch.GetBakerCustomValue2();
+ break;
+ case linker::LinkerPatch::Type::kCallRelative:
+ // No custom values.
+ break;
+ default:
+ LOG(FATAL) << "Unexpected patch type: " << linker_patch.GetType();
+ UNREACHABLE();
+ }
+ return ThunkMapKey(linker_patch.GetType(), custom_value1, custom_value2);
+}
+
+ArrayRef<const uint8_t> CompiledMethodStorage::GetThunkCode(const linker::LinkerPatch& linker_patch,
+ /*out*/ std::string* debug_name) {
+ ThunkMapKey key = GetThunkMapKey(linker_patch);
+ MutexLock lock(Thread::Current(), thunk_map_lock_);
+ auto it = thunk_map_.find(key);
+ if (it != thunk_map_.end()) {
+ const ThunkMapValue& value = it->second;
+ if (debug_name != nullptr) {
+ *debug_name = value.GetDebugName();
+ }
+ return value.GetCode();
+ } else {
+ if (debug_name != nullptr) {
+ *debug_name = std::string();
+ }
+ return ArrayRef<const uint8_t>();
+ }
+}
+
+void CompiledMethodStorage::SetThunkCode(const linker::LinkerPatch& linker_patch,
+ ArrayRef<const uint8_t> code,
+ const std::string& debug_name) {
+ DCHECK(!code.empty());
+ ThunkMapKey key = GetThunkMapKey(linker_patch);
+ std::vector<uint8_t, SwapAllocator<uint8_t>> code_copy(
+ code.begin(), code.end(), SwapAllocator<uint8_t>(swap_space_.get()));
+ ThunkMapValue value(std::move(code_copy), debug_name);
+ MutexLock lock(Thread::Current(), thunk_map_lock_);
+ // Note: Multiple threads can try and compile the same thunk, so this may not create a new entry.
+ thunk_map_.emplace(key, std::move(value));
+}
+
} // namespace art
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index 249f06c20f..1634facb7c 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
#include <iosfwd>
+#include <map>
#include <memory>
#include "base/array_ref.h"
@@ -67,7 +68,29 @@ class CompiledMethodStorage {
const ArrayRef<const linker::LinkerPatch>& linker_patches);
void ReleaseLinkerPatches(const LengthPrefixedArray<linker::LinkerPatch>* linker_patches);
+ // Returns the code associated with the given patch.
+ // If the code has not been set, returns empty data.
+ // If `debug_name` is not null, stores the associated debug name in `*debug_name`.
+ ArrayRef<const uint8_t> GetThunkCode(const linker::LinkerPatch& linker_patch,
+ /*out*/ std::string* debug_name = nullptr);
+
+ // Sets the code and debug name associated with the given patch.
+ void SetThunkCode(const linker::LinkerPatch& linker_patch,
+ ArrayRef<const uint8_t> code,
+ const std::string& debug_name);
+
private:
+ class ThunkMapKey;
+ class ThunkMapValue;
+ using ThunkMapValueType = std::pair<const ThunkMapKey, ThunkMapValue>;
+ using ThunkMap = std::map<ThunkMapKey,
+ ThunkMapValue,
+ std::less<ThunkMapKey>,
+ SwapAllocator<ThunkMapValueType>>;
+ static_assert(std::is_same<ThunkMapValueType, ThunkMap::value_type>::value, "Value type check.");
+
+ static ThunkMapKey GetThunkMapKey(const linker::LinkerPatch& linker_patch);
+
template <typename T, typename DedupeSetType>
const LengthPrefixedArray<T>* AllocateOrDeduplicateArray(const ArrayRef<const T>& data,
DedupeSetType* dedupe_set);
@@ -102,6 +125,9 @@ class CompiledMethodStorage {
ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
ArrayDedupeSet<linker::LinkerPatch> dedupe_linker_patches_;
+ Mutex thunk_map_lock_;
+ ThunkMap thunk_map_ GUARDED_BY(thunk_map_lock_);
+
DISALLOW_COPY_AND_ASSIGN(CompiledMethodStorage);
};
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 236b5c0c2e..38f95488a9 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -94,7 +94,11 @@ class JNICFITest : public CFITest {
const std::vector<uint8_t>& actual_cfi = *(jni_asm->cfi().data());
if (kGenerateExpected) {
- GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
+ GenerateExpected(stdout,
+ isa,
+ isa_str,
+ ArrayRef<const uint8_t>(actual_asm),
+ ArrayRef<const uint8_t>(actual_cfi));
} else {
EXPECT_EQ(expected_asm, actual_asm);
EXPECT_EQ(expected_cfi, actual_cfi);
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index 6e0286afac..7cb8ae55c5 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -30,8 +30,9 @@ namespace linker {
class ArmBaseRelativePatcher::ThunkData {
public:
- ThunkData(std::vector<uint8_t> code, uint32_t max_next_offset)
- : code_(std::move(code)),
+ ThunkData(ArrayRef<const uint8_t> code, const std::string& debug_name, uint32_t max_next_offset)
+ : code_(code),
+ debug_name_(debug_name),
offsets_(),
max_next_offset_(max_next_offset),
pending_offset_(0u) {
@@ -45,7 +46,11 @@ class ArmBaseRelativePatcher::ThunkData {
}
ArrayRef<const uint8_t> GetCode() const {
- return ArrayRef<const uint8_t>(code_);
+ return code_;
+ }
+
+ const std::string& GetDebugName() const {
+ return debug_name_;
}
bool NeedsNextThunk() const {
@@ -142,10 +147,11 @@ class ArmBaseRelativePatcher::ThunkData {
}
private:
- std::vector<uint8_t> code_; // The code of the thunk.
- std::vector<uint32_t> offsets_; // Offsets at which the thunk needs to be written.
- uint32_t max_next_offset_; // The maximum offset at which the next thunk can be placed.
- uint32_t pending_offset_; // The index of the next offset to write.
+ const ArrayRef<const uint8_t> code_; // The code of the thunk.
+ const std::string debug_name_; // The debug name of the thunk.
+ std::vector<uint32_t> offsets_; // Offsets at which the thunk needs to be written.
+ uint32_t max_next_offset_; // The maximum offset at which the next thunk can be placed.
+ uint32_t pending_offset_; // The index of the next offset to write.
};
class ArmBaseRelativePatcher::PendingThunkComparator {
@@ -239,14 +245,13 @@ std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugIn
std::vector<debug::MethodDebugInfo> result;
result.reserve(number_of_thunks);
for (auto&& entry : thunks_) {
- const ThunkKey& key = entry.first;
const ThunkData& data = entry.second;
size_t start = data.IndexOfFirstThunkAtOrAfter(executable_offset);
if (start == data.NumberOfThunks()) {
continue;
}
// Get the base name to use for the first occurrence of the thunk.
- std::string base_name = GetThunkDebugName(key);
+ std::string base_name = data.GetDebugName();
for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
debug::MethodDebugInfo info = {};
if (i == 0u) {
@@ -267,9 +272,11 @@ std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugIn
return result;
}
-ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
+ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider,
InstructionSet instruction_set)
- : provider_(provider),
+ : thunk_provider_(thunk_provider),
+ target_provider_(target_provider),
instruction_set_(instruction_set),
thunks_(),
unprocessed_method_call_patches_(),
@@ -398,7 +405,7 @@ void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_metho
unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod());
if (method_call_thunk_ == nullptr) {
uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
- auto it = thunks_.Put(key, ThunkData(CompileThunk(key), max_next_offset));
+ auto it = thunks_.Put(key, ThunkDataForPatch(patch, max_next_offset));
method_call_thunk_ = &it->second;
AddUnreservedThunk(method_call_thunk_);
} else {
@@ -409,7 +416,7 @@ void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_metho
auto lb = thunks_.lower_bound(key);
if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) {
uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
- auto it = thunks_.PutBefore(lb, key, ThunkData(CompileThunk(key), max_next_offset));
+ auto it = thunks_.PutBefore(lb, key, ThunkDataForPatch(patch, max_next_offset));
AddUnreservedThunk(&it->second);
} else {
old_data = &lb->second;
@@ -477,7 +484,7 @@ void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset,
break;
}
} else {
- auto result = provider_->FindMethodOffset(target_method);
+ auto result = target_provider_->FindMethodOffset(target_method);
if (!result.first) {
break;
}
@@ -518,5 +525,14 @@ inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_of
GetInstructionSetAlignment(instruction_set_));
}
+inline ArmBaseRelativePatcher::ThunkData ArmBaseRelativePatcher::ThunkDataForPatch(
+ const LinkerPatch& patch, uint32_t max_next_offset) {
+ ArrayRef<const uint8_t> code;
+ std::string debug_name;
+ thunk_provider_->GetThunkCode(patch, &code, &debug_name);
+ DCHECK(!code.empty());
+ return ThunkData(code, debug_name, max_next_offset);
+}
+
} // namespace linker
} // namespace art
diff --git a/compiler/linker/arm/relative_patcher_arm_base.h b/compiler/linker/arm/relative_patcher_arm_base.h
index ee09bf96b3..963d6690b0 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.h
+++ b/compiler/linker/arm/relative_patcher_arm_base.h
@@ -37,7 +37,8 @@ class ArmBaseRelativePatcher : public RelativePatcher {
std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
protected:
- ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
+ ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider,
InstructionSet instruction_set);
~ArmBaseRelativePatcher();
@@ -94,8 +95,6 @@ class ArmBaseRelativePatcher : public RelativePatcher {
uint32_t CalculateMethodCallDisplacement(uint32_t patch_offset,
uint32_t target_offset);
- virtual std::vector<uint8_t> CompileThunk(const ThunkKey& key) = 0;
- virtual std::string GetThunkDebugName(const ThunkKey& key) = 0;
virtual uint32_t MaxPositiveDisplacement(const ThunkKey& key) = 0;
virtual uint32_t MaxNegativeDisplacement(const ThunkKey& key) = 0;
@@ -108,8 +107,10 @@ class ArmBaseRelativePatcher : public RelativePatcher {
void ResolveMethodCalls(uint32_t quick_code_offset, MethodReference method_ref);
uint32_t CalculateMaxNextOffset(uint32_t patch_offset, const ThunkKey& key);
+ ThunkData ThunkDataForPatch(const LinkerPatch& patch, uint32_t max_next_offset);
- RelativePatcherTargetProvider* const provider_;
+ RelativePatcherThunkProvider* const thunk_provider_;
+ RelativePatcherTargetProvider* const target_provider_;
const InstructionSet instruction_set_;
// The data for all thunks.
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index 78755176e4..7400d11c31 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -48,8 +48,9 @@ constexpr uint32_t kMaxMethodCallNegativeDisplacement = (1u << 24) - kPcDisplace
constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 2u + kPcDisplacement;
constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement;
-Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider)
- : ArmBaseRelativePatcher(provider, InstructionSet::kThumb2) {
+Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider)
+ : ArmBaseRelativePatcher(thunk_provider, target_provider, InstructionSet::kThumb2) {
}
void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code,
@@ -110,62 +111,6 @@ void Thumb2RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* co
uint32_t insn = GetInsn32(code, literal_offset);
DCHECK_EQ(insn, 0xf0408000); // BNE +0 (unpatched)
ThunkKey key = GetBakerThunkKey(patch);
- if (kIsDebugBuild) {
- const uint32_t encoded_data = key.GetCustomValue1();
- BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
- // Check that the next instruction matches the expected LDR.
- switch (kind) {
- case BakerReadBarrierKind::kField: {
- BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
- if (width == BakerReadBarrierWidth::kWide) {
- DCHECK_GE(code->size() - literal_offset, 8u);
- uint32_t next_insn = GetInsn32(code, literal_offset + 4u);
- // LDR (immediate), encoding T3, with correct base_reg.
- CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register.
- const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(next_insn & 0xffff0000u, 0xf8d00000u | (base_reg << 16));
- } else {
- DCHECK_GE(code->size() - literal_offset, 6u);
- uint32_t next_insn = GetInsn16(code, literal_offset + 4u);
- // LDR (immediate), encoding T1, with correct base_reg.
- CheckValidReg(next_insn & 0x7u); // Check destination register.
- const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(next_insn & 0xf838u, 0x6800u | (base_reg << 3));
- }
- break;
- }
- case BakerReadBarrierKind::kArray: {
- DCHECK_GE(code->size() - literal_offset, 8u);
- uint32_t next_insn = GetInsn32(code, literal_offset + 4u);
- // LDR (register) with correct base_reg, S=1 and option=011 (LDR Wt, [Xn, Xm, LSL #2]).
- CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register.
- const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(next_insn & 0xffff0ff0u, 0xf8500020u | (base_reg << 16));
- CheckValidReg(next_insn & 0xf); // Check index register
- break;
- }
- case BakerReadBarrierKind::kGcRoot: {
- BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
- if (width == BakerReadBarrierWidth::kWide) {
- DCHECK_GE(literal_offset, 4u);
- uint32_t prev_insn = GetInsn32(code, literal_offset - 4u);
- // LDR (immediate), encoding T3, with correct root_reg.
- const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(prev_insn & 0xfff0f000u, 0xf8d00000u | (root_reg << 12));
- } else {
- DCHECK_GE(literal_offset, 2u);
- uint32_t prev_insn = GetInsn16(code, literal_offset - 2u);
- // LDR (immediate), encoding T1, with correct root_reg.
- const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(prev_insn & 0xf807u, 0x6800u | root_reg);
- }
- break;
- }
- default:
- LOG(FATAL) << "Unexpected type: " << static_cast<uint32_t>(key.GetType());
- UNREACHABLE();
- }
- }
uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
DCHECK_ALIGNED(target_offset, 4u);
uint32_t disp = target_offset - (patch_offset + kPcDisplacement);
@@ -178,250 +123,6 @@ void Thumb2RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* co
SetInsn32(code, literal_offset, insn);
}
-#define __ assembler.GetVIXLAssembler()->
-
-static void EmitGrayCheckAndFastPath(arm::ArmVIXLAssembler& assembler,
- vixl::aarch32::Register base_reg,
- vixl::aarch32::MemOperand& lock_word,
- vixl::aarch32::Label* slow_path,
- int32_t raw_ldr_offset) {
- using namespace vixl::aarch32; // NOLINT(build/namespaces)
- // Load the lock word containing the rb_state.
- __ Ldr(ip, lock_word);
- // Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
- __ B(ne, slow_path, /* is_far_target */ false);
- __ Add(lr, lr, raw_ldr_offset);
- // Introduce a dependency on the lock_word including rb_state,
- // to prevent load-load reordering, and without using
- // a memory barrier (which would be more expensive).
- __ Add(base_reg, base_reg, Operand(ip, LSR, 32));
- __ Bx(lr); // And return back to the function.
- // Note: The fake dependency is unnecessary for the slow path.
-}
-
-// Load the read barrier introspection entrypoint in register `entrypoint`
-static void LoadReadBarrierMarkIntrospectionEntrypoint(arm::ArmVIXLAssembler& assembler,
- vixl::aarch32::Register entrypoint) {
- using vixl::aarch32::MemOperand;
- using vixl::aarch32::ip;
- // Thread Register.
- const vixl::aarch32::Register tr = vixl::aarch32::r9;
-
- // The register where the read barrier introspection entrypoint is loaded
- // is fixed: `Thumb2RelativePatcher::kBakerCcEntrypointRegister` (R4).
- DCHECK_EQ(entrypoint.GetCode(), Thumb2RelativePatcher::kBakerCcEntrypointRegister);
- // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection.
- DCHECK_EQ(ip.GetCode(), 12u);
- const int32_t entry_point_offset =
- Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ip.GetCode());
- __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
-}
-
-void Thumb2RelativePatcher::CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler,
- uint32_t encoded_data) {
- using namespace vixl::aarch32; // NOLINT(build/namespaces)
- BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
- switch (kind) {
- case BakerReadBarrierKind::kField: {
- // Check if the holder is gray and, if not, add fake dependency to the base register
- // and return to the LDR instruction to load the reference. Otherwise, use introspection
- // to load the reference and call the entrypoint (in kBakerCcEntrypointRegister)
- // that performs further checks on the reference and marks it if needed.
- Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
- CheckValidReg(base_reg.GetCode());
- Register holder_reg(BakerReadBarrierSecondRegField::Decode(encoded_data));
- CheckValidReg(holder_reg.GetCode());
- BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
- UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
- temps.Exclude(ip);
- // If base_reg differs from holder_reg, the offset was too large and we must have
- // emitted an explicit null check before the load. Otherwise, we need to null-check
- // the holder as we do not necessarily do that check before going to the thunk.
- vixl::aarch32::Label throw_npe;
- if (holder_reg.Is(base_reg)) {
- __ CompareAndBranchIfZero(holder_reg, &throw_npe, /* is_far_target */ false);
- }
- vixl::aarch32::Label slow_path;
- MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
- const int32_t raw_ldr_offset = (width == BakerReadBarrierWidth::kWide)
- ? BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET
- : BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET;
- EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
- __ Bind(&slow_path);
- const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
- raw_ldr_offset;
- Register ep_reg(kBakerCcEntrypointRegister);
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
- if (width == BakerReadBarrierWidth::kWide) {
- MemOperand ldr_half_address(lr, ldr_offset + 2);
- __ Ldrh(ip, ldr_half_address); // Load the LDR immediate half-word with "Rt | imm12".
- __ Ubfx(ip, ip, 0, 12); // Extract the offset imm12.
- __ Ldr(ip, MemOperand(base_reg, ip)); // Load the reference.
- } else {
- MemOperand ldr_address(lr, ldr_offset);
- __ Ldrh(ip, ldr_address); // Load the LDR immediate, encoding T1.
- __ Add(ep_reg, // Adjust the entrypoint address to the entrypoint
- ep_reg, // for narrow LDR.
- Operand(BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET));
- __ Ubfx(ip, ip, 6, 5); // Extract the imm5, i.e. offset / 4.
- __ Ldr(ip, MemOperand(base_reg, ip, LSL, 2)); // Load the reference.
- }
- // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
- __ Bx(ep_reg); // Jump to the entrypoint.
- if (holder_reg.Is(base_reg)) {
- // Add null check slow path. The stack map is at the address pointed to by LR.
- __ Bind(&throw_npe);
- int32_t offset = GetThreadOffset<kArmPointerSize>(kQuickThrowNullPointer).Int32Value();
- __ Ldr(ip, MemOperand(/* Thread* */ vixl::aarch32::r9, offset));
- __ Bx(ip);
- }
- break;
- }
- case BakerReadBarrierKind::kArray: {
- Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
- CheckValidReg(base_reg.GetCode());
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide);
- UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
- temps.Exclude(ip);
- vixl::aarch32::Label slow_path;
- int32_t data_offset =
- mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
- MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
- DCHECK_LT(lock_word.GetOffsetImmediate(), 0);
- const int32_t raw_ldr_offset = BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET;
- EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
- __ Bind(&slow_path);
- const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
- raw_ldr_offset;
- MemOperand ldr_address(lr, ldr_offset + 2);
- __ Ldrb(ip, ldr_address); // Load the LDR (register) byte with "00 | imm2 | Rm",
- // i.e. Rm+32 because the scale in imm2 is 2.
- Register ep_reg(kBakerCcEntrypointRegister);
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
- __ Bfi(ep_reg, ip, 3, 6); // Insert ip to the entrypoint address to create
- // a switch case target based on the index register.
- __ Mov(ip, base_reg); // Move the base register to ip0.
- __ Bx(ep_reg); // Jump to the entrypoint's array switch case.
- break;
- }
- case BakerReadBarrierKind::kGcRoot: {
- // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
- // and it does not have a forwarding address), call the correct introspection entrypoint;
- // otherwise return the reference (or the extracted forwarding address).
- // There is no gray bit check for GC roots.
- Register root_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
- CheckValidReg(root_reg.GetCode());
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
- UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
- temps.Exclude(ip);
- vixl::aarch32::Label return_label, not_marked, forwarding_address;
- __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
- MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
- __ Ldr(ip, lock_word);
- __ Tst(ip, LockWord::kMarkBitStateMaskShifted);
- __ B(eq, &not_marked);
- __ Bind(&return_label);
- __ Bx(lr);
- __ Bind(&not_marked);
- static_assert(LockWord::kStateShift == 30 && LockWord::kStateForwardingAddress == 3,
- "To use 'CMP ip, #modified-immediate; BHS', we need the lock word state in "
- " the highest bits and the 'forwarding address' state to have all bits set");
- __ Cmp(ip, Operand(0xc0000000));
- __ B(hs, &forwarding_address);
- Register ep_reg(kBakerCcEntrypointRegister);
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
- // Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister
- // to art_quick_read_barrier_mark_introspection_gc_roots.
- int32_t entrypoint_offset = (width == BakerReadBarrierWidth::kWide)
- ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET
- : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET;
- __ Add(ep_reg, ep_reg, Operand(entrypoint_offset));
- __ Mov(ip, root_reg);
- __ Bx(ep_reg);
- __ Bind(&forwarding_address);
- __ Lsl(root_reg, ip, LockWord::kForwardingAddressShift);
- __ Bx(lr);
- break;
- }
- default:
- LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
- UNREACHABLE();
- }
-}
-
-std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- arm::ArmVIXLAssembler assembler(&allocator);
-
- switch (key.GetType()) {
- case ThunkType::kMethodCall:
- // The thunk just uses the entry point in the ArtMethod. This works even for calls
- // to the generic JNI and interpreter trampolines.
- assembler.LoadFromOffset(
- arm::kLoadWord,
- vixl::aarch32::pc,
- vixl::aarch32::r0,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
- __ Bkpt(0);
- break;
- case ThunkType::kBakerReadBarrier:
- CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1());
- break;
- }
-
- assembler.FinalizeCode();
- std::vector<uint8_t> thunk_code(assembler.CodeSize());
- MemoryRegion code(thunk_code.data(), thunk_code.size());
- assembler.FinalizeInstructions(code);
- return thunk_code;
-}
-
-std::string Thumb2RelativePatcher::GetThunkDebugName(const ThunkKey& key) {
- switch (key.GetType()) {
- case ThunkType::kMethodCall:
- return "MethodCallThunk";
-
- case ThunkType::kBakerReadBarrier: {
- uint32_t encoded_data = key.GetCustomValue1();
- BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
- std::ostringstream oss;
- oss << "BakerReadBarrierThunk";
- switch (kind) {
- case BakerReadBarrierKind::kField:
- oss << "Field";
- if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) {
- oss << "Wide";
- }
- oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
- << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
- break;
- case BakerReadBarrierKind::kArray:
- oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide);
- break;
- case BakerReadBarrierKind::kGcRoot:
- oss << "GcRoot";
- if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) {
- oss << "Wide";
- }
- oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- break;
- }
- return oss.str();
- }
- }
-}
-
-#undef __
-
uint32_t Thumb2RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
switch (key.GetType()) {
case ThunkType::kMethodCall:
diff --git a/compiler/linker/arm/relative_patcher_thumb2.h b/compiler/linker/arm/relative_patcher_thumb2.h
index 68386c00f4..68610d69e1 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.h
+++ b/compiler/linker/arm/relative_patcher_thumb2.h
@@ -19,8 +19,6 @@
#include "arch/arm/registers_arm.h"
#include "base/array_ref.h"
-#include "base/bit_field.h"
-#include "base/bit_utils.h"
#include "linker/arm/relative_patcher_arm_base.h"
namespace art {
@@ -33,42 +31,8 @@ namespace linker {
class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
public:
- static constexpr uint32_t kBakerCcEntrypointRegister = 4u;
-
- static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg,
- uint32_t holder_reg,
- bool narrow) {
- CheckValidReg(base_reg);
- CheckValidReg(holder_reg);
- DCHECK(!narrow || base_reg < 8u) << base_reg;
- BakerReadBarrierWidth width =
- narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
- return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
- BakerReadBarrierFirstRegField::Encode(base_reg) |
- BakerReadBarrierSecondRegField::Encode(holder_reg) |
- BakerReadBarrierWidthField::Encode(width);
- }
-
- static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
- CheckValidReg(base_reg);
- return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
- BakerReadBarrierFirstRegField::Encode(base_reg) |
- BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg) |
- BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
- }
-
- static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
- CheckValidReg(root_reg);
- DCHECK(!narrow || root_reg < 8u) << root_reg;
- BakerReadBarrierWidth width =
- narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
- return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
- BakerReadBarrierFirstRegField::Encode(root_reg) |
- BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg) |
- BakerReadBarrierWidthField::Encode(width);
- }
-
- explicit Thumb2RelativePatcher(RelativePatcherTargetProvider* provider);
+ explicit Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider);
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
@@ -83,48 +47,10 @@ class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
uint32_t patch_offset) OVERRIDE;
protected:
- std::vector<uint8_t> CompileThunk(const ThunkKey& key) OVERRIDE;
- std::string GetThunkDebugName(const ThunkKey& key) OVERRIDE;
uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
private:
- static constexpr uint32_t kInvalidEncodedReg = /* pc is invalid */ 15u;
-
- enum class BakerReadBarrierKind : uint8_t {
- kField, // Field get or array get with constant offset (i.e. constant index).
- kArray, // Array get with index in register.
- kGcRoot, // GC root load.
- kLast = kGcRoot
- };
-
- enum class BakerReadBarrierWidth : uint8_t {
- kWide, // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled).
- kNarrow, // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled).
- kLast = kNarrow
- };
-
- static constexpr size_t kBitsForBakerReadBarrierKind =
- MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
- static constexpr size_t kBitsForRegister = 4u;
- using BakerReadBarrierKindField =
- BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
- using BakerReadBarrierFirstRegField =
- BitField<uint32_t, kBitsForBakerReadBarrierKind, kBitsForRegister>;
- using BakerReadBarrierSecondRegField =
- BitField<uint32_t, kBitsForBakerReadBarrierKind + kBitsForRegister, kBitsForRegister>;
- static constexpr size_t kBitsForBakerReadBarrierWidth =
- MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierWidth::kLast));
- using BakerReadBarrierWidthField = BitField<BakerReadBarrierWidth,
- kBitsForBakerReadBarrierKind + 2 * kBitsForRegister,
- kBitsForBakerReadBarrierWidth>;
-
- static void CheckValidReg(uint32_t reg) {
- DCHECK(reg < 12u && reg != kBakerCcEntrypointRegister) << reg;
- }
-
- void CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler, uint32_t encoded_data);
-
void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
static uint32_t GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset);
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index 2c22a352c2..e7b11bd16b 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -16,12 +16,15 @@
#include "linker/arm/relative_patcher_thumb2.h"
+#include "arch/arm/instruction_set_features_arm.h"
#include "base/casts.h"
#include "linker/relative_patcher_test.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/object.h"
#include "oat_quick_method_header.h"
+#include "optimizing/code_generator_arm_vixl.h"
+#include "optimizing/optimizing_unit_test.h"
namespace art {
namespace linker {
@@ -189,9 +192,42 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
return result.second - 1 /* thumb mode */;
}
+ std::vector<uint8_t> CompileThunk(const LinkerPatch& patch,
+ /*out*/ std::string* debug_name = nullptr) {
+ OptimizingUnitTestHelper helper;
+ HGraph* graph = helper.CreateGraph();
+ std::string error_msg;
+ ArmFeaturesUniquePtr features =
+ ArmInstructionSetFeatures::FromVariant("default", &error_msg);
+ CompilerOptions options;
+ arm::CodeGeneratorARMVIXL codegen(graph, *features, options);
+ ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter());
+ codegen.EmitThunkCode(patch, &code, debug_name);
+ return std::vector<uint8_t>(code.begin(), code.end());
+ }
+
+ void AddCompiledMethod(
+ MethodReference method_ref,
+ const ArrayRef<const uint8_t>& code,
+ const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>()) {
+ RelativePatcherTest::AddCompiledMethod(method_ref, code, patches);
+
+ // Make sure the ThunkProvider has all the necessary thunks.
+ for (const LinkerPatch& patch : patches) {
+ if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
+ patch.GetType() == LinkerPatch::Type::kCallRelative) {
+ std::string debug_name;
+ std::vector<uint8_t> thunk_code = CompileThunk(patch, &debug_name);
+ thunk_provider_.SetThunkCode(patch, ArrayRef<const uint8_t>(thunk_code), debug_name);
+ }
+ }
+ }
+
std::vector<uint8_t> CompileMethodCallThunk() {
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetMethodCallKey();
- return static_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ LinkerPatch patch = LinkerPatch::RelativeCodePatch(/* literal_offset */ 0u,
+ /* target_dex_file*/ nullptr,
+ /* target_method_idx */ 0u);
+ return CompileThunk(patch);
}
uint32_t MethodCallThunkSize() {
@@ -228,27 +264,38 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
void TestStringReference(uint32_t string_offset);
void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
+ static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg,
+ uint32_t holder_reg,
+ bool narrow) {
+ return arm::CodeGeneratorARMVIXL::EncodeBakerReadBarrierFieldData(base_reg, holder_reg, narrow);
+ }
+
+ static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
+ return arm::CodeGeneratorARMVIXL::EncodeBakerReadBarrierArrayData(base_reg);
+ }
+
+ static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
+ return arm::CodeGeneratorARMVIXL::EncodeBakerReadBarrierGcRootData(root_reg, narrow);
+ }
+
std::vector<uint8_t> CompileBakerOffsetThunk(uint32_t base_reg,
uint32_t holder_reg,
bool narrow) {
const LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
- 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg, narrow));
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
- return down_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ /* literal_offset */ 0u, EncodeBakerReadBarrierFieldData(base_reg, holder_reg, narrow));
+ return CompileThunk(patch);
}
std::vector<uint8_t> CompileBakerArrayThunk(uint32_t base_reg) {
LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
- 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg));
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
- return down_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ /* literal_offset */ 0u, EncodeBakerReadBarrierArrayData(base_reg));
+ return CompileThunk(patch);
}
std::vector<uint8_t> CompileBakerGcRootThunk(uint32_t root_reg, bool narrow) {
LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
- 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, narrow));
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
- return down_cast<Thumb2RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ /* literal_offset */ 0u, EncodeBakerReadBarrierGcRootData(root_reg, narrow));
+ return CompileThunk(patch);
}
uint32_t GetOutputInsn32(uint32_t offset) {
@@ -594,7 +641,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldWide(uint32_t offset, uint32_t ref
const std::vector<uint8_t> raw_code = RawCode({kBneWPlus0, ldr});
ASSERT_EQ(kMethodCodeSize, raw_code.size());
ArrayRef<const uint8_t> code(raw_code);
- uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(
base_reg, holder_reg, /* narrow */ false);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data),
@@ -696,7 +743,7 @@ void Thumb2RelativePatcherTest::TestBakerFieldNarrow(uint32_t offset, uint32_t r
const std::vector<uint8_t> raw_code = RawCode({kBneWPlus0, ldr});
ASSERT_EQ(kMethodCodeSize, raw_code.size());
ArrayRef<const uint8_t> code(raw_code);
- uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(
base_reg, holder_reg, /* narrow */ true);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data),
@@ -809,7 +856,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddle) {
constexpr uint32_t kLiteralOffset1 = 6u;
const std::vector<uint8_t> raw_code1 = RawCode({kNopWInsn, kNopInsn, kBneWPlus0, kLdrWInsn});
ArrayRef<const uint8_t> code1(raw_code1);
- uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(
/* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false);
const LinkerPatch patches1[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
@@ -877,7 +924,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkBeforeFiller) {
constexpr uint32_t kLiteralOffset1 = 4u;
const std::vector<uint8_t> raw_code1 = RawCode({kNopWInsn, kBneWPlus0, kLdrWInsn, kNopInsn});
ArrayRef<const uint8_t> code1(raw_code1);
- uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(
/* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false);
const LinkerPatch patches1[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
@@ -907,7 +954,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddleUnreachableFromLast
constexpr uint32_t kLiteralOffset1 = 6u;
const std::vector<uint8_t> raw_code1 = RawCode({kNopWInsn, kNopInsn, kBneWPlus0, kLdrWInsn});
ArrayRef<const uint8_t> code1(raw_code1);
- uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(
/* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false);
const LinkerPatch patches1[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
@@ -993,7 +1040,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerArray) {
ArrayRef<const uint8_t> code(raw_code);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(
- kLiteralOffset, Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)),
+ kLiteralOffset, EncodeBakerReadBarrierArrayData(base_reg)),
};
AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
}
@@ -1074,8 +1121,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootWide) {
ArrayRef<const uint8_t> code(raw_code);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(
- kLiteralOffset,
- Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ false)),
+ kLiteralOffset, EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ false)),
};
AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
}
@@ -1134,8 +1180,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootNarrow) {
ArrayRef<const uint8_t> code(raw_code);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(
- kLiteralOffset,
- Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ true)),
+ kLiteralOffset, EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ true)),
};
AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
}
@@ -1182,8 +1227,7 @@ TEST_F(Thumb2RelativePatcherTest, BakerGcRootOffsetBits) {
patches.reserve(num_patches);
const uint32_t ldr =
kLdrWInsn | (/* offset */ 8) | (/* base_reg */ 0 << 16) | (/* root_reg */ 0 << 12);
- uint32_t encoded_data =
- Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 0, /* narrow */ false);
+ uint32_t encoded_data = EncodeBakerReadBarrierGcRootData(/* root_reg */ 0, /* narrow */ false);
for (size_t i = 0; i != num_patches; ++i) {
PushBackInsn(&code, ldr);
PushBackInsn(&code, kBneWPlus0);
@@ -1264,10 +1308,8 @@ TEST_F(Thumb2RelativePatcherTest, BakerAndMethodCallInteraction) {
ldr1, kBneWPlus0, // First GC root LDR with read barrier.
ldr2, kBneWPlus0, // Second GC root LDR with read barrier.
});
- uint32_t encoded_data1 =
- Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 1, /* narrow */ false);
- uint32_t encoded_data2 =
- Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 2, /* narrow */ false);
+ uint32_t encoded_data1 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 1, /* narrow */ false);
+ uint32_t encoded_data2 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 2, /* narrow */ false);
const LinkerPatch last_method_patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset1, encoded_data1),
LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset2, encoded_data2),
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index b268204b4a..135e39d100 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -82,9 +82,10 @@ inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) {
} // anonymous namespace
-Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
+Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider,
const Arm64InstructionSetFeatures* features)
- : ArmBaseRelativePatcher(provider, InstructionSet::kArm64),
+ : ArmBaseRelativePatcher(thunk_provider, target_provider, InstructionSet::kArm64),
fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
reserved_adrp_thunks_(0u),
processed_adrp_thunks_(0u) {
@@ -313,44 +314,6 @@ void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* cod
uint32_t insn = GetInsn(code, literal_offset);
DCHECK_EQ(insn & 0xffffffe0u, 0xb5000000); // CBNZ Xt, +0 (unpatched)
ThunkKey key = GetBakerThunkKey(patch);
- if (kIsDebugBuild) {
- const uint32_t encoded_data = key.GetCustomValue1();
- BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
- // Check that the next instruction matches the expected LDR.
- switch (kind) {
- case BakerReadBarrierKind::kField: {
- DCHECK_GE(code->size() - literal_offset, 8u);
- uint32_t next_insn = GetInsn(code, literal_offset + 4u);
- // LDR (immediate) with correct base_reg.
- CheckValidReg(next_insn & 0x1fu); // Check destination register.
- const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
- break;
- }
- case BakerReadBarrierKind::kArray: {
- DCHECK_GE(code->size() - literal_offset, 8u);
- uint32_t next_insn = GetInsn(code, literal_offset + 4u);
- // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL),
- // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2].
- CheckValidReg(next_insn & 0x1fu); // Check destination register.
- const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5));
- CheckValidReg((next_insn >> 16) & 0x1f); // Check index register
- break;
- }
- case BakerReadBarrierKind::kGcRoot: {
- DCHECK_GE(literal_offset, 4u);
- uint32_t prev_insn = GetInsn(code, literal_offset - 4u);
- // LDR (immediate) with correct root_reg.
- const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
- break;
- }
- default:
- LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
- UNREACHABLE();
- }
- }
uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
DCHECK_ALIGNED(target_offset, 4u);
uint32_t disp = target_offset - patch_offset;
@@ -359,216 +322,6 @@ void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* cod
SetInsn(code, literal_offset, insn);
}
-#define __ assembler.GetVIXLAssembler()->
-
-static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,
- vixl::aarch64::Register base_reg,
- vixl::aarch64::MemOperand& lock_word,
- vixl::aarch64::Label* slow_path) {
- using namespace vixl::aarch64; // NOLINT(build/namespaces)
- // Load the lock word containing the rb_state.
- __ Ldr(ip0.W(), lock_word);
- // Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path);
- static_assert(
- BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET,
- "Field and array LDR offsets must be the same to reuse the same code.");
- // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning).
- static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
- "Field LDR must be 1 instruction (4B) before the return address label; "
- " 2 instructions (8B) for heap poisoning.");
- __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
- // Introduce a dependency on the lock_word including rb_state,
- // to prevent load-load reordering, and without using
- // a memory barrier (which would be more expensive).
- __ Add(base_reg, base_reg, Operand(ip0, LSR, 32));
- __ Br(lr); // And return back to the function.
- // Note: The fake dependency is unnecessary for the slow path.
-}
-
-// Load the read barrier introspection entrypoint in register `entrypoint`.
-static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler,
- vixl::aarch64::Register entrypoint) {
- using vixl::aarch64::MemOperand;
- using vixl::aarch64::ip0;
- // Thread Register.
- const vixl::aarch64::Register tr = vixl::aarch64::x19;
-
- // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection.
- DCHECK_EQ(ip0.GetCode(), 16u);
- const int32_t entry_point_offset =
- Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode());
- __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
-}
-
-void Arm64RelativePatcher::CompileBakerReadBarrierThunk(arm64::Arm64Assembler& assembler,
- uint32_t encoded_data) {
- using namespace vixl::aarch64; // NOLINT(build/namespaces)
- BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
- switch (kind) {
- case BakerReadBarrierKind::kField: {
- // Check if the holder is gray and, if not, add fake dependency to the base register
- // and return to the LDR instruction to load the reference. Otherwise, use introspection
- // to load the reference and call the entrypoint (in IP1) that performs further checks
- // on the reference and marks it if needed.
- auto base_reg =
- Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
- CheckValidReg(base_reg.GetCode());
- auto holder_reg =
- Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data));
- CheckValidReg(holder_reg.GetCode());
- UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
- temps.Exclude(ip0, ip1);
- // If base_reg differs from holder_reg, the offset was too large and we must have
- // emitted an explicit null check before the load. Otherwise, we need to null-check
- // the holder as we do not necessarily do that check before going to the thunk.
- vixl::aarch64::Label throw_npe;
- if (holder_reg.Is(base_reg)) {
- __ Cbz(holder_reg.W(), &throw_npe);
- }
- vixl::aarch64::Label slow_path;
- MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
- EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
- __ Bind(&slow_path);
- MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
- __ Ldr(ip0.W(), ldr_address); // Load the LDR (immediate) unsigned offset.
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
- __ Ubfx(ip0.W(), ip0.W(), 10, 12); // Extract the offset.
- __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2)); // Load the reference.
- // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
- __ Br(ip1); // Jump to the entrypoint.
- if (holder_reg.Is(base_reg)) {
- // Add null check slow path. The stack map is at the address pointed to by LR.
- __ Bind(&throw_npe);
- int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value();
- __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset));
- __ Br(ip0);
- }
- break;
- }
- case BakerReadBarrierKind::kArray: {
- auto base_reg =
- Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
- CheckValidReg(base_reg.GetCode());
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
- temps.Exclude(ip0, ip1);
- vixl::aarch64::Label slow_path;
- int32_t data_offset =
- mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
- MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
- DCHECK_LT(lock_word.GetOffset(), 0);
- EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
- __ Bind(&slow_path);
- MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
- __ Ldr(ip0.W(), ldr_address); // Load the LDR (register) unsigned offset.
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
- __ Ubfx(ip0, ip0, 16, 6); // Extract the index register, plus 32 (bit 21 is set).
- __ Bfi(ip1, ip0, 3, 6); // Insert ip0 to the entrypoint address to create
- // a switch case target based on the index register.
- __ Mov(ip0, base_reg); // Move the base register to ip0.
- __ Br(ip1); // Jump to the entrypoint's array switch case.
- break;
- }
- case BakerReadBarrierKind::kGcRoot: {
- // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
- // and it does not have a forwarding address), call the correct introspection entrypoint;
- // otherwise return the reference (or the extracted forwarding address).
- // There is no gray bit check for GC roots.
- auto root_reg =
- Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
- CheckValidReg(root_reg.GetCode());
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
- temps.Exclude(ip0, ip1);
- vixl::aarch64::Label return_label, not_marked, forwarding_address;
- __ Cbz(root_reg, &return_label);
- MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value());
- __ Ldr(ip0.W(), lock_word);
- __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, &not_marked);
- __ Bind(&return_label);
- __ Br(lr);
- __ Bind(&not_marked);
- __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1));
- __ B(&forwarding_address, mi);
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
- // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to
- // art_quick_read_barrier_mark_introspection_gc_roots.
- __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET));
- __ Mov(ip0.W(), root_reg);
- __ Br(ip1);
- __ Bind(&forwarding_address);
- __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift);
- __ Br(lr);
- break;
- }
- default:
- LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
- UNREACHABLE();
- }
-}
-
-std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- arm64::Arm64Assembler assembler(&allocator);
-
- switch (key.GetType()) {
- case ThunkType::kMethodCall: {
- // The thunk just uses the entry point in the ArtMethod. This works even for calls
- // to the generic JNI and interpreter trampolines.
- Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64PointerSize).Int32Value());
- assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
- break;
- }
- case ThunkType::kBakerReadBarrier: {
- CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1());
- break;
- }
- }
-
- // Ensure we emit the literal pool.
- assembler.FinalizeCode();
- std::vector<uint8_t> thunk_code(assembler.CodeSize());
- MemoryRegion code(thunk_code.data(), thunk_code.size());
- assembler.FinalizeInstructions(code);
- return thunk_code;
-}
-
-std::string Arm64RelativePatcher::GetThunkDebugName(const ThunkKey& key) {
- switch (key.GetType()) {
- case ThunkType::kMethodCall:
- return "MethodCallThunk";
-
- case ThunkType::kBakerReadBarrier: {
- uint32_t encoded_data = key.GetCustomValue1();
- BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
- std::ostringstream oss;
- oss << "BakerReadBarrierThunk";
- switch (kind) {
- case BakerReadBarrierKind::kField:
- oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
- << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
- break;
- case BakerReadBarrierKind::kArray:
- oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- break;
- case BakerReadBarrierKind::kGcRoot:
- oss << "GcRoot_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
- DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
- break;
- }
- return oss.str();
- }
- }
-}
-
-#undef __
-
uint32_t Arm64RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
switch (key.GetType()) {
case ThunkType::kMethodCall:
diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h
index 8ba59976e7..9dc289da44 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.h
+++ b/compiler/linker/arm64/relative_patcher_arm64.h
@@ -18,8 +18,6 @@
#define ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_
#include "base/array_ref.h"
-#include "base/bit_field.h"
-#include "base/bit_utils.h"
#include "linker/arm/relative_patcher_arm_base.h"
namespace art {
@@ -32,29 +30,8 @@ namespace linker {
class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
public:
- static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) {
- CheckValidReg(base_reg);
- CheckValidReg(holder_reg);
- return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
- BakerReadBarrierFirstRegField::Encode(base_reg) |
- BakerReadBarrierSecondRegField::Encode(holder_reg);
- }
-
- static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
- CheckValidReg(base_reg);
- return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
- BakerReadBarrierFirstRegField::Encode(base_reg) |
- BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg);
- }
-
- static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) {
- CheckValidReg(root_reg);
- return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
- BakerReadBarrierFirstRegField::Encode(root_reg) |
- BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg);
- }
-
- Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
+ Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider,
const Arm64InstructionSetFeatures* features);
uint32_t ReserveSpace(uint32_t offset,
@@ -75,37 +52,10 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
uint32_t patch_offset) OVERRIDE;
protected:
- std::vector<uint8_t> CompileThunk(const ThunkKey& key) OVERRIDE;
- std::string GetThunkDebugName(const ThunkKey& key) OVERRIDE;
uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
private:
- static constexpr uint32_t kInvalidEncodedReg = /* sp/zr is invalid */ 31u;
-
- enum class BakerReadBarrierKind : uint8_t {
- kField, // Field get or array get with constant offset (i.e. constant index).
- kArray, // Array get with index in register.
- kGcRoot, // GC root load.
- kLast = kGcRoot
- };
-
- static constexpr size_t kBitsForBakerReadBarrierKind =
- MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
- static constexpr size_t kBitsForRegister = 5u;
- using BakerReadBarrierKindField =
- BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
- using BakerReadBarrierFirstRegField =
- BitField<uint32_t, kBitsForBakerReadBarrierKind, kBitsForRegister>;
- using BakerReadBarrierSecondRegField =
- BitField<uint32_t, kBitsForBakerReadBarrierKind + kBitsForRegister, kBitsForRegister>;
-
- static void CheckValidReg(uint32_t reg) {
- DCHECK(reg < 30u && reg != 16u && reg != 17u) << reg;
- }
-
- void CompileBakerReadBarrierThunk(arm64::Arm64Assembler& assembler, uint32_t encoded_data);
-
static uint32_t PatchAdrp(uint32_t adrp, uint32_t disp);
static bool NeedsErratum843419Thunk(ArrayRef<const uint8_t> code, uint32_t literal_offset,
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 05459a2a82..393733dd0c 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -16,12 +16,15 @@
#include "linker/arm64/relative_patcher_arm64.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
#include "base/casts.h"
#include "linker/relative_patcher_test.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/object.h"
#include "oat_quick_method_header.h"
+#include "optimizing/code_generator_arm64.h"
+#include "optimizing/optimizing_unit_test.h"
namespace art {
namespace linker {
@@ -168,9 +171,42 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
return result.second;
}
+ std::vector<uint8_t> CompileThunk(const LinkerPatch& patch,
+ /*out*/ std::string* debug_name = nullptr) {
+ OptimizingUnitTestHelper helper;
+ HGraph* graph = helper.CreateGraph();
+ std::string error_msg;
+ Arm64FeaturesUniquePtr features =
+ Arm64InstructionSetFeatures::FromVariant("default", &error_msg);
+ CompilerOptions options;
+ arm64::CodeGeneratorARM64 codegen(graph, *features, options);
+ ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter());
+ codegen.EmitThunkCode(patch, &code, debug_name);
+ return std::vector<uint8_t>(code.begin(), code.end());
+ }
+
+ void AddCompiledMethod(
+ MethodReference method_ref,
+ const ArrayRef<const uint8_t>& code,
+ const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>()) {
+ RelativePatcherTest::AddCompiledMethod(method_ref, code, patches);
+
+ // Make sure the ThunkProvider has all the necessary thunks.
+ for (const LinkerPatch& patch : patches) {
+ if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
+ patch.GetType() == LinkerPatch::Type::kCallRelative) {
+ std::string debug_name;
+ std::vector<uint8_t> thunk_code = CompileThunk(patch, &debug_name);
+ thunk_provider_.SetThunkCode(patch, ArrayRef<const uint8_t>(thunk_code), debug_name);
+ }
+ }
+ }
+
std::vector<uint8_t> CompileMethodCallThunk() {
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetMethodCallKey();
- return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ LinkerPatch patch = LinkerPatch::RelativeCodePatch(/* literal_offset */ 0u,
+ /* target_dex_file*/ nullptr,
+ /* target_method_idx */ 0u);
+ return CompileThunk(patch);
}
uint32_t MethodCallThunkSize() {
@@ -475,25 +511,34 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset);
}
+ static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) {
+ return arm64::CodeGeneratorARM64::EncodeBakerReadBarrierFieldData(base_reg, holder_reg);
+ }
+
+ static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
+ return arm64::CodeGeneratorARM64::EncodeBakerReadBarrierArrayData(base_reg);
+ }
+
+ static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) {
+ return arm64::CodeGeneratorARM64::EncodeBakerReadBarrierGcRootData(root_reg);
+ }
+
std::vector<uint8_t> CompileBakerOffsetThunk(uint32_t base_reg, uint32_t holder_reg) {
const LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
- 0u, Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg));
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
- return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ /* literal_offset */ 0u, EncodeBakerReadBarrierFieldData(base_reg, holder_reg));
+ return CompileThunk(patch);
}
std::vector<uint8_t> CompileBakerArrayThunk(uint32_t base_reg) {
LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
- 0u, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg));
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
- return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ /* literal_offset */ 0u, EncodeBakerReadBarrierArrayData(base_reg));
+ return CompileThunk(patch);
}
std::vector<uint8_t> CompileBakerGcRootThunk(uint32_t root_reg) {
LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
- 0u, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg));
- ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
- return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
+ /* literal_offset */ 0u, EncodeBakerReadBarrierGcRootData(root_reg));
+ return CompileThunk(patch);
}
uint32_t GetOutputInsn(uint32_t offset) {
@@ -919,8 +964,7 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg)
const std::vector<uint8_t> raw_code = RawCode({kCbnzIP1Plus0Insn, ldr});
ASSERT_EQ(kMethodCodeSize, raw_code.size());
ArrayRef<const uint8_t> code(raw_code);
- uint32_t encoded_data =
- Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg);
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(base_reg, holder_reg);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data),
};
@@ -1005,8 +1049,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
constexpr uint32_t kLiteralOffset1 = 4;
const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
ArrayRef<const uint8_t> code1(raw_code1);
- uint32_t encoded_data =
- Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
const LinkerPatch patches1[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
};
@@ -1066,8 +1109,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkBeforeFiller) {
constexpr uint32_t kLiteralOffset1 = 0;
const std::vector<uint8_t> raw_code1 = RawCode({kCbnzIP1Plus0Insn, kLdrWInsn, kNopInsn});
ArrayRef<const uint8_t> code1(raw_code1);
- uint32_t encoded_data =
- Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
const LinkerPatch patches1[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
};
@@ -1096,8 +1138,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFr
constexpr uint32_t kLiteralOffset1 = 4;
const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
ArrayRef<const uint8_t> code1(raw_code1);
- uint32_t encoded_data =
- Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
+ uint32_t encoded_data = EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
const LinkerPatch patches1[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
};
@@ -1170,7 +1211,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerArray) {
ArrayRef<const uint8_t> code(raw_code);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(
- kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)),
+ kLiteralOffset, EncodeBakerReadBarrierArrayData(base_reg)),
};
AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
}
@@ -1247,7 +1288,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerGcRoot) {
ArrayRef<const uint8_t> code(raw_code);
const LinkerPatch patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(
- kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg)),
+ kLiteralOffset, EncodeBakerReadBarrierGcRootData(root_reg)),
};
AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
}
@@ -1343,8 +1384,8 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerAndMethodCallInteraction) {
kNopInsn, kNopInsn, // Padding before second GC root read barrier.
ldr2, kCbnzIP1Plus0Insn, // Second GC root LDR with read barrier.
});
- uint32_t encoded_data1 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 1);
- uint32_t encoded_data2 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 2);
+ uint32_t encoded_data1 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 1);
+ uint32_t encoded_data2 = EncodeBakerReadBarrierGcRootData(/* root_reg */ 2);
const LinkerPatch last_method_patches[] = {
LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset1, encoded_data1),
LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset2, encoded_data2),
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index 710d8a690a..7b35fd9b0c 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -141,7 +141,7 @@ class LinkerPatch {
static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
uint32_t custom_value1 = 0u,
uint32_t custom_value2 = 0u) {
- LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, nullptr);
+ LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, /* target_dex_file */ nullptr);
patch.baker_custom_value1_ = custom_value1;
patch.baker_custom_value2_ = custom_value2;
return patch;
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index 13877f8f12..b82d15230d 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -43,7 +43,8 @@ namespace linker {
std::unique_ptr<RelativePatcher> RelativePatcher::Create(
InstructionSet instruction_set,
const InstructionSetFeatures* features,
- RelativePatcherTargetProvider* provider) {
+ RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider) {
class RelativePatcherNone FINAL : public RelativePatcher {
public:
RelativePatcherNone() { }
@@ -92,7 +93,8 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
};
UNUSED(features);
- UNUSED(provider);
+ UNUSED(thunk_provider);
+ UNUSED(target_provider);
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86:
@@ -106,12 +108,15 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
case InstructionSet::kArm:
// Fall through: we generate Thumb2 code for "arm".
case InstructionSet::kThumb2:
- return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider));
+ return std::unique_ptr<RelativePatcher>(
+ new Thumb2RelativePatcher(thunk_provider, target_provider));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case InstructionSet::kArm64:
return std::unique_ptr<RelativePatcher>(
- new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures()));
+ new Arm64RelativePatcher(thunk_provider,
+ target_provider,
+ features->AsArm64InstructionSetFeatures()));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case InstructionSet::kMips:
diff --git a/compiler/linker/relative_patcher.h b/compiler/linker/relative_patcher.h
index b58e3dffbd..06c7e70d23 100644
--- a/compiler/linker/relative_patcher.h
+++ b/compiler/linker/relative_patcher.h
@@ -39,6 +39,27 @@ class LinkerPatch;
class OutputStream;
/**
+ * @class RelativePatcherThunkProvider
+ * @brief Interface for providing the code and debug name of thunks needed by linker patches.
+ */
+class RelativePatcherThunkProvider {
+ public:
+ /**
+ * Get the code and debug name of a thunk needed by the given linker patch.
+ *
+ * @param patch The patch for which we need to retrieve the thunk code.
+ * @param code A variable to receive the code of the thunk. This code must not be empty.
+ * @param debug_name A variable to receive the debug name of the thunk.
+ */
+ virtual void GetThunkCode(const LinkerPatch& patch,
+ /*out*/ ArrayRef<const uint8_t>* code,
+ /*out*/ std::string* debug_name) = 0;
+
+ protected:
+ virtual ~RelativePatcherThunkProvider() { }
+};
+
+/**
* @class RelativePatcherTargetProvider
* @brief Interface for providing method offsets for relative call targets.
*/
@@ -70,8 +91,10 @@ class RelativePatcherTargetProvider {
class RelativePatcher {
public:
static std::unique_ptr<RelativePatcher> Create(
- InstructionSet instruction_set, const InstructionSetFeatures* features,
- RelativePatcherTargetProvider* provider);
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* features,
+ RelativePatcherThunkProvider* thunk_provider,
+ RelativePatcherTargetProvider* target_provider);
virtual ~RelativePatcher() { }
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index d21f2795b9..af8dc4dbc9 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -58,7 +58,10 @@ class RelativePatcherTest : public testing::Test {
instruction_set_(instruction_set),
features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
method_offset_map_(),
- patcher_(RelativePatcher::Create(instruction_set, features_.get(), &method_offset_map_)),
+ patcher_(RelativePatcher::Create(instruction_set,
+ features_.get(),
+ &thunk_provider_,
+ &method_offset_map_)),
bss_begin_(0u),
compiled_method_refs_(),
compiled_methods_(),
@@ -248,6 +251,72 @@ class RelativePatcherTest : public testing::Test {
LOG(ERROR) << " " << diff_indicator_str;
}
+ class ThunkProvider : public RelativePatcherThunkProvider {
+ public:
+ ThunkProvider() {}
+
+ void SetThunkCode(const LinkerPatch& patch,
+ ArrayRef<const uint8_t> code,
+ const std::string& debug_name) {
+ thunk_map_.emplace(ThunkKey(patch), ThunkValue(code, debug_name));
+ }
+
+ void GetThunkCode(const LinkerPatch& patch,
+ /*out*/ ArrayRef<const uint8_t>* code,
+ /*out*/ std::string* debug_name) OVERRIDE {
+ auto it = thunk_map_.find(ThunkKey(patch));
+ CHECK(it != thunk_map_.end());
+ const ThunkValue& value = it->second;
+ CHECK(code != nullptr);
+ *code = value.GetCode();
+ CHECK(debug_name != nullptr);
+ *debug_name = value.GetDebugName();
+ }
+
+ private:
+ class ThunkKey {
+ public:
+ explicit ThunkKey(const LinkerPatch& patch)
+ : type_(patch.GetType()),
+ custom_value1_(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch
+ ? patch.GetBakerCustomValue1() : 0u),
+ custom_value2_(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch
+ ? patch.GetBakerCustomValue2() : 0u) {
+ CHECK(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
+ patch.GetType() == LinkerPatch::Type::kCallRelative);
+ }
+
+ bool operator<(const ThunkKey& other) const {
+ if (custom_value1_ != other.custom_value1_) {
+ return custom_value1_ < other.custom_value1_;
+ }
+ if (custom_value2_ != other.custom_value2_) {
+ return custom_value2_ < other.custom_value2_;
+ }
+ return type_ < other.type_;
+ }
+
+ private:
+ const LinkerPatch::Type type_;
+ const uint32_t custom_value1_;
+ const uint32_t custom_value2_;
+ };
+
+ class ThunkValue {
+ public:
+ ThunkValue(ArrayRef<const uint8_t> code, const std::string& debug_name)
+ : code_(code.begin(), code.end()), debug_name_(debug_name) {}
+ ArrayRef<const uint8_t> GetCode() const { return ArrayRef<const uint8_t>(code_); }
+ const std::string& GetDebugName() const { return debug_name_; }
+
+ private:
+ const std::vector<uint8_t> code_;
+ const std::string debug_name_;
+ };
+
+ std::map<ThunkKey, ThunkValue> thunk_map_;
+ };
+
// Map method reference to assinged offset.
// Wrap the map in a class implementing RelativePatcherTargetProvider.
class MethodOffsetMap FINAL : public RelativePatcherTargetProvider {
@@ -272,6 +341,7 @@ class RelativePatcherTest : public testing::Test {
std::string error_msg_;
InstructionSet instruction_set_;
std::unique_ptr<const InstructionSetFeatures> features_;
+ ThunkProvider thunk_provider_;
MethodOffsetMap method_offset_map_;
std::unique_ptr<RelativePatcher> patcher_;
uint32_t bss_begin_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c2ae7646b5..231017f55e 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -449,6 +449,18 @@ void CodeGenerator::EmitLinkerPatches(
// No linker patches by default.
}
+bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const {
+ // Code generators that create patches requiring thunk compilation should override this function.
+ return false;
+}
+
+void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED,
+ /*out*/ ArenaVector<uint8_t>* code ATTRIBUTE_UNUSED,
+ /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) {
+ // Code generators that create patches requiring thunk compilation should override this function.
+ LOG(FATAL) << "Unexpected call to EmitThunkCode().";
+}
+
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
size_t maximum_safepoint_spill_size,
size_t number_of_out_slots,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 3bd5e14539..a86b27151d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -21,6 +21,7 @@
#include "arch/instruction_set_features.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/bit_field.h"
#include "base/bit_utils.h"
#include "base/enums.h"
@@ -74,6 +75,7 @@ class CodeAllocator {
virtual ~CodeAllocator() {}
virtual uint8_t* Allocate(size_t size) = 0;
+ virtual ArrayRef<const uint8_t> GetMemory() const = 0;
private:
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
@@ -210,6 +212,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void Initialize() = 0;
virtual void Finalize(CodeAllocator* allocator);
virtual void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches);
+ virtual bool NeedsThunkCode(const linker::LinkerPatch& patch) const;
+ virtual void EmitThunkCode(const linker::LinkerPatch& patch,
+ /*out*/ ArenaVector<uint8_t>* code,
+ /*out*/ std::string* debug_name);
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(HBasicBlock* block) = 0;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 273346ab4a..31887d92e8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -30,7 +30,6 @@
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
-#include "linker/arm64/relative_patcher_arm64.h"
#include "linker/linker_patch.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
@@ -1425,6 +1424,62 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
__ FinalizeCode();
CodeGenerator::Finalize(allocator);
+
+ // Verify Baker read barrier linker patches.
+ if (kIsDebugBuild) {
+ ArrayRef<const uint8_t> code = allocator->GetMemory();
+ for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
+ DCHECK(info.label.IsBound());
+ uint32_t literal_offset = info.label.GetLocation();
+ DCHECK_ALIGNED(literal_offset, 4u);
+
+ auto GetInsn = [&code](uint32_t offset) {
+ DCHECK_ALIGNED(offset, 4u);
+ return
+ (static_cast<uint32_t>(code[offset + 0]) << 0) +
+ (static_cast<uint32_t>(code[offset + 1]) << 8) +
+ (static_cast<uint32_t>(code[offset + 2]) << 16) +
+ (static_cast<uint32_t>(code[offset + 3]) << 24);
+ };
+
+ const uint32_t encoded_data = info.custom_data;
+ BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
+ // Check that the next instruction matches the expected LDR.
+ switch (kind) {
+ case BakerReadBarrierKind::kField: {
+ DCHECK_GE(code.size() - literal_offset, 8u);
+ uint32_t next_insn = GetInsn(literal_offset + 4u);
+ // LDR (immediate) with correct base_reg.
+ CheckValidReg(next_insn & 0x1fu); // Check destination register.
+ const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
+ break;
+ }
+ case BakerReadBarrierKind::kArray: {
+ DCHECK_GE(code.size() - literal_offset, 8u);
+ uint32_t next_insn = GetInsn(literal_offset + 4u);
+ // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL),
+ // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2].
+ CheckValidReg(next_insn & 0x1fu); // Check destination register.
+ const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5));
+ CheckValidReg((next_insn >> 16) & 0x1f); // Check index register
+ break;
+ }
+ case BakerReadBarrierKind::kGcRoot: {
+ DCHECK_GE(literal_offset, 4u);
+ uint32_t prev_insn = GetInsn(literal_offset - 4u);
+ // LDR (immediate) with correct root_reg.
+ const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
+ UNREACHABLE();
+ }
+ }
+ }
}
void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
@@ -4814,6 +4869,44 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* lin
DCHECK_EQ(size, linker_patches->size());
}
+bool CodeGeneratorARM64::NeedsThunkCode(const linker::LinkerPatch& patch) const {
+ return patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
+ patch.GetType() == linker::LinkerPatch::Type::kCallRelative;
+}
+
+void CodeGeneratorARM64::EmitThunkCode(const linker::LinkerPatch& patch,
+ /*out*/ ArenaVector<uint8_t>* code,
+ /*out*/ std::string* debug_name) {
+ Arm64Assembler assembler(GetGraph()->GetAllocator());
+ switch (patch.GetType()) {
+ case linker::LinkerPatch::Type::kCallRelative: {
+ // The thunk just uses the entry point in the ArtMethod. This works even for calls
+ // to the generic JNI and interpreter trampolines.
+ Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value());
+ assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
+ if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+ *debug_name = "MethodCallThunk";
+ }
+ break;
+ }
+ case linker::LinkerPatch::Type::kBakerReadBarrierBranch: {
+ DCHECK_EQ(patch.GetBakerCustomValue2(), 0u);
+ CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected patch type " << patch.GetType();
+ UNREACHABLE();
+ }
+
+ // Ensure we emit the literal pool if any.
+ assembler.FinalizeCode();
+ code->resize(assembler.CodeSize());
+ MemoryRegion code_region(code->data(), code->size());
+ assembler.FinalizeInstructions(code_region);
+}
+
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value) {
return uint32_literals_.GetOrCreate(
value,
@@ -4954,12 +5047,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
DCHECK(!cls->MustGenerateClinitCheck());
// /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
Register current_method = InputRegisterAt(cls, 0);
- GenerateGcRootFieldLoad(cls,
- out_loc,
- current_method,
- ArtMethod::DeclaringClassOffset().Int32Value(),
- /* fixup_label */ nullptr,
- read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls,
+ out_loc,
+ current_method,
+ ArtMethod::DeclaringClassOffset().Int32Value(),
+ /* fixup_label */ nullptr,
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
@@ -5006,12 +5099,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
vixl::aarch64::Label* ldr_label =
codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
- GenerateGcRootFieldLoad(cls,
- out_loc,
- temp,
- /* offset placeholder */ 0u,
- ldr_label,
- read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls,
+ out_loc,
+ temp,
+ /* offset placeholder */ 0u,
+ ldr_label,
+ read_barrier_option);
generate_null_check = true;
break;
}
@@ -5019,12 +5112,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
cls->GetClass()));
- GenerateGcRootFieldLoad(cls,
- out_loc,
- out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
- read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls,
+ out_loc,
+ out.X(),
+ /* offset */ 0,
+ /* fixup_label */ nullptr,
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kRuntimeCall:
@@ -5167,12 +5260,12 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
vixl::aarch64::Label* ldr_label =
codegen_->NewStringBssEntryPatch(dex_file, string_index, adrp_label);
// /* GcRoot<mirror::String> */ out = *(base_address + offset) /* PC-relative */
- GenerateGcRootFieldLoad(load,
- out_loc,
- temp,
- /* offset placeholder */ 0u,
- ldr_label,
- kCompilerReadBarrierOption);
+ codegen_->GenerateGcRootFieldLoad(load,
+ out_loc,
+ temp,
+ /* offset placeholder */ 0u,
+ ldr_label,
+ kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
codegen_->AddSlowPath(slow_path);
@@ -5185,12 +5278,12 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
load->GetStringIndex(),
load->GetString()));
- GenerateGcRootFieldLoad(load,
- out_loc,
- out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
- kCompilerReadBarrierOption);
+ codegen_->GenerateGcRootFieldLoad(load,
+ out_loc,
+ out.X(),
+ /* offset */ 0,
+ /* fixup_label */ nullptr,
+ kCompilerReadBarrierOption);
return;
}
default:
@@ -6139,7 +6232,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(
}
}
-void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
+void CodeGeneratorARM64::GenerateGcRootFieldLoad(
HInstruction* instruction,
Location root,
Register obj,
@@ -6173,9 +6266,8 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
DCHECK(temps.IsAvailable(ip0));
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
- uint32_t custom_data =
- linker::Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
- vixl::aarch64::Label* cbnz_label = codegen_->NewBakerReadBarrierPatch(custom_data);
+ uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
+ vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
EmissionCheckScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
vixl::aarch64::Label return_address;
@@ -6204,14 +6296,14 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
- codegen_->AddSlowPath(slow_path);
+ new (GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
+ AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj);
+ EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj);
}
static_assert(
sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
@@ -6231,10 +6323,10 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
if (fixup_label == nullptr) {
__ Add(root_reg.X(), obj.X(), offset);
} else {
- codegen_->EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X());
+ EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X());
}
// /* mirror::Object* */ root = root->Read()
- codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+ GenerateReadBarrierForRootSlow(instruction, root, root);
}
} else {
// Plain GC root load with no read barrier.
@@ -6242,12 +6334,12 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X());
+ EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X());
}
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -6296,9 +6388,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
DCHECK(temps.IsAvailable(ip0));
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
- uint32_t custom_data = linker::Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(
- base.GetCode(),
- obj.GetCode());
+ uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode());
vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
{
@@ -6383,8 +6473,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* ins
DCHECK(temps.IsAvailable(ip0));
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
- uint32_t custom_data =
- linker::Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(temp.GetCode());
+ uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
__ Add(temp.X(), obj.X(), Operand(data_offset));
@@ -6744,5 +6833,176 @@ void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_
#undef __
#undef QUICK_ENTRY_POINT
+#define __ assembler.GetVIXLAssembler()->
+
+static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,
+ vixl::aarch64::Register base_reg,
+ vixl::aarch64::MemOperand& lock_word,
+ vixl::aarch64::Label* slow_path) {
+ // Load the lock word containing the rb_state.
+ __ Ldr(ip0.W(), lock_word);
+ // Given the numeric representation, it's enough to check the low bit of the rb_state.
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+ __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path);
+ static_assert(
+ BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET,
+ "Field and array LDR offsets must be the same to reuse the same code.");
+ // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning).
+ static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+ "Field LDR must be 1 instruction (4B) before the return address label; "
+ " 2 instructions (8B) for heap poisoning.");
+ __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
+ // Introduce a dependency on the lock_word including rb_state,
+ // to prevent load-load reordering, and without using
+ // a memory barrier (which would be more expensive).
+ __ Add(base_reg, base_reg, Operand(ip0, LSR, 32));
+ __ Br(lr); // And return back to the function.
+ // Note: The fake dependency is unnecessary for the slow path.
+}
+
+// Load the read barrier introspection entrypoint in register `entrypoint`.
+static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler,
+ vixl::aarch64::Register entrypoint) {
+ // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection.
+ DCHECK_EQ(ip0.GetCode(), 16u);
+ const int32_t entry_point_offset =
+ Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode());
+ __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
+}
+
+void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
+ uint32_t encoded_data,
+ /*out*/ std::string* debug_name) {
+ BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
+ switch (kind) {
+ case BakerReadBarrierKind::kField: {
+ // Check if the holder is gray and, if not, add fake dependency to the base register
+ // and return to the LDR instruction to load the reference. Otherwise, use introspection
+ // to load the reference and call the entrypoint (in IP1) that performs further checks
+ // on the reference and marks it if needed.
+ auto base_reg =
+ Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
+ CheckValidReg(base_reg.GetCode());
+ auto holder_reg =
+ Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data));
+ CheckValidReg(holder_reg.GetCode());
+ UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+ temps.Exclude(ip0, ip1);
+ // If base_reg differs from holder_reg, the offset was too large and we must have
+ // emitted an explicit null check before the load. Otherwise, we need to null-check
+ // the holder as we do not necessarily do that check before going to the thunk.
+ vixl::aarch64::Label throw_npe;
+ if (holder_reg.Is(base_reg)) {
+ __ Cbz(holder_reg.W(), &throw_npe);
+ }
+ vixl::aarch64::Label slow_path;
+ MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
+ EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
+ __ Bind(&slow_path);
+ MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
+ __ Ldr(ip0.W(), ldr_address); // Load the LDR (immediate) unsigned offset.
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+ __ Ubfx(ip0.W(), ip0.W(), 10, 12); // Extract the offset.
+ __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2)); // Load the reference.
+ // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
+ __ Br(ip1); // Jump to the entrypoint.
+ if (holder_reg.Is(base_reg)) {
+ // Add null check slow path. The stack map is at the address pointed to by LR.
+ __ Bind(&throw_npe);
+ int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value();
+ __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset));
+ __ Br(ip0);
+ }
+ break;
+ }
+ case BakerReadBarrierKind::kArray: {
+ auto base_reg =
+ Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
+ CheckValidReg(base_reg.GetCode());
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+ temps.Exclude(ip0, ip1);
+ vixl::aarch64::Label slow_path;
+ int32_t data_offset =
+ mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
+ MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
+ DCHECK_LT(lock_word.GetOffset(), 0);
+ EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
+ __ Bind(&slow_path);
+ MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
+ __ Ldr(ip0.W(), ldr_address); // Load the LDR (register) unsigned offset.
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+ __ Ubfx(ip0, ip0, 16, 6); // Extract the index register, plus 32 (bit 21 is set).
+ __ Bfi(ip1, ip0, 3, 6); // Insert ip0 to the entrypoint address to create
+ // a switch case target based on the index register.
+ __ Mov(ip0, base_reg); // Move the base register to ip0.
+ __ Br(ip1); // Jump to the entrypoint's array switch case.
+ break;
+ }
+ case BakerReadBarrierKind::kGcRoot: {
+ // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
+ // and it does not have a forwarding address), call the correct introspection entrypoint;
+ // otherwise return the reference (or the extracted forwarding address).
+ // There is no gray bit check for GC roots.
+ auto root_reg =
+ Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
+ CheckValidReg(root_reg.GetCode());
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+ temps.Exclude(ip0, ip1);
+ vixl::aarch64::Label return_label, not_marked, forwarding_address;
+ __ Cbz(root_reg, &return_label);
+ MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value());
+ __ Ldr(ip0.W(), lock_word);
+ __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, &not_marked);
+ __ Bind(&return_label);
+ __ Br(lr);
+ __ Bind(&not_marked);
+ __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1));
+ __ B(&forwarding_address, mi);
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+ // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to
+ // art_quick_read_barrier_mark_introspection_gc_roots.
+ __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET));
+ __ Mov(ip0.W(), root_reg);
+ __ Br(ip1);
+ __ Bind(&forwarding_address);
+ __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift);
+ __ Br(lr);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
+ UNREACHABLE();
+ }
+
+ if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+ std::ostringstream oss;
+ oss << "BakerReadBarrierThunk";
+ switch (kind) {
+ case BakerReadBarrierKind::kField:
+ oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
+ << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
+ break;
+ case BakerReadBarrierKind::kArray:
+ oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ break;
+ case BakerReadBarrierKind::kGcRoot:
+ oss << "GcRoot_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ break;
+ }
+ *debug_name = oss.str();
+ }
+}
+
+#undef __
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 6a52eecbd3..aa343b1185 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
#include "arch/arm64/quick_method_frame_info_arm64.h"
+#include "base/bit_field.h"
#include "code_generator.h"
#include "common_arm64.h"
#include "dex/dex_file_types.h"
@@ -36,6 +37,11 @@
#pragma GCC diagnostic pop
namespace art {
+
+namespace linker {
+class Arm64RelativePatcherTest;
+} // namespace linker
+
namespace arm64 {
class CodeGeneratorARM64;
@@ -309,17 +315,6 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
uint32_t offset,
Location maybe_temp,
ReadBarrierOption read_barrier_option);
- // Generate a GC root reference load:
- //
- // root <- *(obj + offset)
- //
- // while honoring read barriers based on read_barrier_option.
- void GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- vixl::aarch64::Register obj,
- uint32_t offset,
- vixl::aarch64::Label* fixup_label,
- ReadBarrierOption read_barrier_option);
// Generate a floating-point comparison.
void GenerateFcmp(HInstruction* instruction);
@@ -641,9 +636,24 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Register base);
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+ bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+ void EmitThunkCode(const linker::LinkerPatch& patch,
+ /*out*/ ArenaVector<uint8_t>* code,
+ /*out*/ std::string* debug_name) OVERRIDE;
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ // Generate a GC root reference load:
+ //
+ // root <- *(obj + offset)
+ //
+ // while honoring read barriers based on read_barrier_option.
+ void GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ vixl::aarch64::Register obj,
+ uint32_t offset,
+ vixl::aarch64::Label* fixup_label,
+ ReadBarrierOption read_barrier_option);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -778,6 +788,62 @@ class CodeGeneratorARM64 : public CodeGenerator {
void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
private:
+ // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
+
+ enum class BakerReadBarrierKind : uint8_t {
+ kField, // Field get or array get with constant offset (i.e. constant index).
+ kArray, // Array get with index in register.
+ kGcRoot, // GC root load.
+ kLast = kGcRoot
+ };
+
+ static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* sp/zr is invalid */ 31u;
+
+ static constexpr size_t kBitsForBakerReadBarrierKind =
+ MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
+ static constexpr size_t kBakerReadBarrierBitsForRegister =
+ MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg);
+ using BakerReadBarrierKindField =
+ BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
+ using BakerReadBarrierFirstRegField =
+ BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>;
+ using BakerReadBarrierSecondRegField =
+ BitField<uint32_t,
+ kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister,
+ kBakerReadBarrierBitsForRegister>;
+
+ static void CheckValidReg(uint32_t reg) {
+ DCHECK(reg < vixl::aarch64::lr.GetCode() &&
+ reg != vixl::aarch64::ip0.GetCode() &&
+ reg != vixl::aarch64::ip1.GetCode()) << reg;
+ }
+
+ static inline uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) {
+ CheckValidReg(base_reg);
+ CheckValidReg(holder_reg);
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
+ BakerReadBarrierFirstRegField::Encode(base_reg) |
+ BakerReadBarrierSecondRegField::Encode(holder_reg);
+ }
+
+ static inline uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
+ CheckValidReg(base_reg);
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
+ BakerReadBarrierFirstRegField::Encode(base_reg) |
+ BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg);
+ }
+
+ static inline uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) {
+ CheckValidReg(root_reg);
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
+ BakerReadBarrierFirstRegField::Encode(root_reg) |
+ BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg);
+ }
+
+ void CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
+ uint32_t encoded_data,
+ /*out*/ std::string* debug_name);
+
using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch64::Literal<uint32_t>*>;
using StringToLiteralMap = ArenaSafeMap<StringReference,
@@ -854,6 +920,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Patches for class literals in JIT compiled code.
TypeToLiteralMap jit_class_patches_;
+ friend class linker::Arm64RelativePatcherTest;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b38a006305..15d952608d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -94,9 +94,6 @@ constexpr bool kBakerReadBarrierLinkTimeThunksEnableForFields = true;
constexpr bool kBakerReadBarrierLinkTimeThunksEnableForArrays = true;
constexpr bool kBakerReadBarrierLinkTimeThunksEnableForGcRoots = true;
-// The reserved entrypoint register for link-time generated thunks.
-const vixl32::Register kBakerCcEntrypointRegister = r4;
-
// Using a base helps identify when we hit Marking Register check breakpoints.
constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;
@@ -116,8 +113,6 @@ static inline void ExcludeIPAndBakerCcEntrypointRegister(UseScratchRegisterScope
DCHECK(temps->IsAvailable(ip));
temps->Exclude(ip);
DCHECK(!temps->IsAvailable(kBakerCcEntrypointRegister));
- DCHECK_EQ(kBakerCcEntrypointRegister.GetCode(),
- linker::Thumb2RelativePatcher::kBakerCcEntrypointRegister);
DCHECK_NE(instruction->GetLocations()->GetTempCount(), 0u);
DCHECK(RegisterFrom(instruction->GetLocations()->GetTemp(
instruction->GetLocations()->GetTempCount() - 1u)).Is(kBakerCcEntrypointRegister));
@@ -2422,6 +2417,80 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
FixJumpTables();
GetAssembler()->FinalizeCode();
CodeGenerator::Finalize(allocator);
+
+ // Verify Baker read barrier linker patches.
+ if (kIsDebugBuild) {
+ ArrayRef<const uint8_t> code = allocator->GetMemory();
+ for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
+ DCHECK(info.label.IsBound());
+ uint32_t literal_offset = info.label.GetLocation();
+ DCHECK_ALIGNED(literal_offset, 2u);
+
+ auto GetInsn16 = [&code](uint32_t offset) {
+ DCHECK_ALIGNED(offset, 2u);
+ return (static_cast<uint32_t>(code[offset + 0]) << 0) +
+ (static_cast<uint32_t>(code[offset + 1]) << 8);
+ };
+ auto GetInsn32 = [=](uint32_t offset) {
+ return (GetInsn16(offset) << 16) + (GetInsn16(offset + 2u) << 0);
+ };
+
+ uint32_t encoded_data = info.custom_data;
+ BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
+ // Check that the next instruction matches the expected LDR.
+ switch (kind) {
+ case BakerReadBarrierKind::kField: {
+ BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
+ if (width == BakerReadBarrierWidth::kWide) {
+ DCHECK_GE(code.size() - literal_offset, 8u);
+ uint32_t next_insn = GetInsn32(literal_offset + 4u);
+ // LDR (immediate), encoding T3, with correct base_reg.
+ CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register.
+ const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(next_insn & 0xffff0000u, 0xf8d00000u | (base_reg << 16));
+ } else {
+ DCHECK_GE(code.size() - literal_offset, 6u);
+ uint32_t next_insn = GetInsn16(literal_offset + 4u);
+ // LDR (immediate), encoding T1, with correct base_reg.
+ CheckValidReg(next_insn & 0x7u); // Check destination register.
+ const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(next_insn & 0xf838u, 0x6800u | (base_reg << 3));
+ }
+ break;
+ }
+ case BakerReadBarrierKind::kArray: {
+ DCHECK_GE(code.size() - literal_offset, 8u);
+ uint32_t next_insn = GetInsn32(literal_offset + 4u);
+ // LDR (register) with correct base_reg, S=1 and option=011 (LDR Wt, [Xn, Xm, LSL #2]).
+ CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register.
+ const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(next_insn & 0xffff0ff0u, 0xf8500020u | (base_reg << 16));
+          CheckValidReg(next_insn & 0xf);  // Check index register.
+ break;
+ }
+ case BakerReadBarrierKind::kGcRoot: {
+ BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
+ if (width == BakerReadBarrierWidth::kWide) {
+ DCHECK_GE(literal_offset, 4u);
+ uint32_t prev_insn = GetInsn32(literal_offset - 4u);
+ // LDR (immediate), encoding T3, with correct root_reg.
+ const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(prev_insn & 0xfff0f000u, 0xf8d00000u | (root_reg << 12));
+ } else {
+ DCHECK_GE(literal_offset, 2u);
+ uint32_t prev_insn = GetInsn16(literal_offset - 2u);
+ // LDR (immediate), encoding T1, with correct root_reg.
+ const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(prev_insn & 0xf807u, 0x6800u | root_reg);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
+ UNREACHABLE();
+ }
+ }
+ }
}
void CodeGeneratorARMVIXL::SetupBlockedRegisters() const {
@@ -7413,11 +7482,11 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
DCHECK(!cls->MustGenerateClinitCheck());
// /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
vixl32::Register current_method = InputRegisterAt(cls, 0);
- GenerateGcRootFieldLoad(cls,
- out_loc,
- current_method,
- ArtMethod::DeclaringClassOffset().Int32Value(),
- read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls,
+ out_loc,
+ current_method,
+ ArtMethod::DeclaringClassOffset().Int32Value(),
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
@@ -7448,7 +7517,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
- GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7457,7 +7526,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
cls->GetTypeIndex(),
cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
- GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kRuntimeCall:
@@ -7665,7 +7734,8 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
- GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ codegen_->GenerateGcRootFieldLoad(
+ load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
@@ -7679,7 +7749,8 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
load->GetStringIndex(),
load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
- GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ codegen_->GenerateGcRootFieldLoad(
+ load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
return;
}
default:
@@ -8730,7 +8801,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters(
}
}
-void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
+void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
HInstruction* instruction,
Location root,
vixl32::Register obj,
@@ -8761,9 +8832,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
UseScratchRegisterScope temps(GetVIXLAssembler());
ExcludeIPAndBakerCcEntrypointRegister(&temps, instruction);
bool narrow = CanEmitNarrowLdr(root_reg, obj, offset);
- uint32_t custom_data = linker::Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(
- root_reg.GetCode(), narrow);
- vixl32::Label* bne_label = codegen_->NewBakerReadBarrierPatch(custom_data);
+ uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow);
+ vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes);
vixl32::Label return_address;
@@ -8774,7 +8844,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
DCHECK_LT(offset, kReferenceLoadMinFarOffset);
ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
__ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset));
- EmitPlaceholderBne(codegen_, bne_label);
+ EmitPlaceholderBne(this, bne_label);
__ Bind(&return_address);
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET
@@ -8794,8 +8864,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
- codegen_->AddSlowPath(slow_path);
+ new (GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
+ AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
GetAssembler()->LoadFromOffset(kLoadWord, root_reg, obj, offset);
@@ -8816,7 +8886,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// /* GcRoot<mirror::Object>* */ root = obj + offset
__ Add(root_reg, obj, offset);
// /* mirror::Object* */ root = root->Read()
- codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+ GenerateReadBarrierForRootSlow(instruction, root, root);
}
} else {
// Plain GC root load with no read barrier.
@@ -8825,7 +8895,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+ MaybeGenerateMarkingRegisterCheck(/* code */ 18);
}
void CodeGeneratorARMVIXL::MaybeAddBakerCcEntrypointTempForFields(LocationSummary* locations) {
@@ -8886,8 +8956,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
}
UseScratchRegisterScope temps(GetVIXLAssembler());
ExcludeIPAndBakerCcEntrypointRegister(&temps, instruction);
- uint32_t custom_data = linker::Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(
- base.GetCode(), obj.GetCode(), narrow);
+ uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode(), narrow);
vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
{
@@ -8973,8 +9042,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(HInstruction* i
UseScratchRegisterScope temps(GetVIXLAssembler());
ExcludeIPAndBakerCcEntrypointRegister(&temps, instruction);
- uint32_t custom_data =
- linker::Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(data_reg.GetCode());
+ uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode());
vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
__ Add(data_reg, obj, Operand(data_offset));
@@ -9111,7 +9179,7 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction
void CodeGeneratorARMVIXL::GenerateRawReferenceLoad(HInstruction* instruction,
Location ref,
- vixl::aarch32::Register obj,
+ vixl32::Register obj,
uint32_t offset,
Location index,
ScaleFactor scale_factor,
@@ -9451,7 +9519,7 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePa
return &patches->back();
}
-vixl::aarch32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_data) {
+vixl32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_data) {
baker_read_barrier_patches_.emplace_back(custom_data);
return &baker_read_barrier_patches_.back().label;
}
@@ -9548,6 +9616,45 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* l
DCHECK_EQ(size, linker_patches->size());
}
+bool CodeGeneratorARMVIXL::NeedsThunkCode(const linker::LinkerPatch& patch) const {
+ return patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
+ patch.GetType() == linker::LinkerPatch::Type::kCallRelative;
+}
+
+void CodeGeneratorARMVIXL::EmitThunkCode(const linker::LinkerPatch& patch,
+ /*out*/ ArenaVector<uint8_t>* code,
+ /*out*/ std::string* debug_name) {
+ arm::ArmVIXLAssembler assembler(GetGraph()->GetAllocator());
+ switch (patch.GetType()) {
+ case linker::LinkerPatch::Type::kCallRelative:
+ // The thunk just uses the entry point in the ArtMethod. This works even for calls
+ // to the generic JNI and interpreter trampolines.
+ assembler.LoadFromOffset(
+ arm::kLoadWord,
+ vixl32::pc,
+ vixl32::r0,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
+ assembler.GetVIXLAssembler()->Bkpt(0);
+ if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+ *debug_name = "MethodCallThunk";
+ }
+ break;
+ case linker::LinkerPatch::Type::kBakerReadBarrierBranch:
+ DCHECK_EQ(patch.GetBakerCustomValue2(), 0u);
+ CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected patch type " << patch.GetType();
+ UNREACHABLE();
+ }
+
+ // Ensure we emit the literal pool if any.
+ assembler.FinalizeCode();
+ code->resize(assembler.CodeSize());
+ MemoryRegion code_region(code->data(), code->size());
+ assembler.FinalizeInstructions(code_region);
+}
+
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
uint32_t value,
Uint32ToLiteralMap* map) {
@@ -9792,5 +9899,210 @@ void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder(
#undef QUICK_ENTRY_POINT
#undef TODO_VIXL32
+#define __ assembler.GetVIXLAssembler()->
+
+static void EmitGrayCheckAndFastPath(ArmVIXLAssembler& assembler,
+ vixl32::Register base_reg,
+ vixl32::MemOperand& lock_word,
+ vixl32::Label* slow_path,
+ int32_t raw_ldr_offset) {
+ // Load the lock word containing the rb_state.
+ __ Ldr(ip, lock_word);
+ // Given the numeric representation, it's enough to check the low bit of the rb_state.
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+ __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
+ __ B(ne, slow_path, /* is_far_target */ false);
+ __ Add(lr, lr, raw_ldr_offset);
+ // Introduce a dependency on the lock_word including rb_state,
+ // to prevent load-load reordering, and without using
+ // a memory barrier (which would be more expensive).
+ __ Add(base_reg, base_reg, Operand(ip, LSR, 32));
+ __ Bx(lr); // And return back to the function.
+ // Note: The fake dependency is unnecessary for the slow path.
+}
+
+// Load the read barrier introspection entrypoint in register `entrypoint`.
+static void LoadReadBarrierMarkIntrospectionEntrypoint(ArmVIXLAssembler& assembler,
+ vixl32::Register entrypoint) {
+ // The register where the read barrier introspection entrypoint is loaded
+ // is fixed: `Thumb2RelativePatcher::kBakerCcEntrypointRegister` (R4).
+ DCHECK(entrypoint.Is(kBakerCcEntrypointRegister));
+ // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection.
+ DCHECK_EQ(ip.GetCode(), 12u);
+ const int32_t entry_point_offset =
+ Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ip.GetCode());
+ __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
+}
+
+void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
+ uint32_t encoded_data,
+ /*out*/ std::string* debug_name) {
+ BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
+ switch (kind) {
+ case BakerReadBarrierKind::kField: {
+ // Check if the holder is gray and, if not, add fake dependency to the base register
+ // and return to the LDR instruction to load the reference. Otherwise, use introspection
+ // to load the reference and call the entrypoint (in kBakerCcEntrypointRegister)
+ // that performs further checks on the reference and marks it if needed.
+ vixl32::Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
+ CheckValidReg(base_reg.GetCode());
+ vixl32::Register holder_reg(BakerReadBarrierSecondRegField::Decode(encoded_data));
+ CheckValidReg(holder_reg.GetCode());
+ BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
+ UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+ temps.Exclude(ip);
+ // If base_reg differs from holder_reg, the offset was too large and we must have
+ // emitted an explicit null check before the load. Otherwise, we need to null-check
+ // the holder as we do not necessarily do that check before going to the thunk.
+ vixl32::Label throw_npe;
+ if (holder_reg.Is(base_reg)) {
+ __ CompareAndBranchIfZero(holder_reg, &throw_npe, /* is_far_target */ false);
+ }
+ vixl32::Label slow_path;
+ MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
+ const int32_t raw_ldr_offset = (width == BakerReadBarrierWidth::kWide)
+ ? BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET
+ : BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET;
+ EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
+ __ Bind(&slow_path);
+ const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
+ raw_ldr_offset;
+ vixl32::Register ep_reg(kBakerCcEntrypointRegister);
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
+ if (width == BakerReadBarrierWidth::kWide) {
+ MemOperand ldr_half_address(lr, ldr_offset + 2);
+ __ Ldrh(ip, ldr_half_address); // Load the LDR immediate half-word with "Rt | imm12".
+ __ Ubfx(ip, ip, 0, 12); // Extract the offset imm12.
+ __ Ldr(ip, MemOperand(base_reg, ip)); // Load the reference.
+ } else {
+ MemOperand ldr_address(lr, ldr_offset);
+ __ Ldrh(ip, ldr_address); // Load the LDR immediate, encoding T1.
+ __ Add(ep_reg, // Adjust the entrypoint address to the entrypoint
+ ep_reg, // for narrow LDR.
+ Operand(BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET));
+ __ Ubfx(ip, ip, 6, 5); // Extract the imm5, i.e. offset / 4.
+ __ Ldr(ip, MemOperand(base_reg, ip, LSL, 2)); // Load the reference.
+ }
+ // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
+ __ Bx(ep_reg); // Jump to the entrypoint.
+ if (holder_reg.Is(base_reg)) {
+ // Add null check slow path. The stack map is at the address pointed to by LR.
+ __ Bind(&throw_npe);
+ int32_t offset = GetThreadOffset<kArmPointerSize>(kQuickThrowNullPointer).Int32Value();
+ __ Ldr(ip, MemOperand(/* Thread* */ vixl32::r9, offset));
+ __ Bx(ip);
+ }
+ break;
+ }
+ case BakerReadBarrierKind::kArray: {
+ vixl32::Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
+ CheckValidReg(base_reg.GetCode());
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide);
+ UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+ temps.Exclude(ip);
+ vixl32::Label slow_path;
+ int32_t data_offset =
+ mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
+ MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
+ DCHECK_LT(lock_word.GetOffsetImmediate(), 0);
+ const int32_t raw_ldr_offset = BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET;
+ EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
+ __ Bind(&slow_path);
+ const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
+ raw_ldr_offset;
+ MemOperand ldr_address(lr, ldr_offset + 2);
+ __ Ldrb(ip, ldr_address); // Load the LDR (register) byte with "00 | imm2 | Rm",
+ // i.e. Rm+32 because the scale in imm2 is 2.
+ vixl32::Register ep_reg(kBakerCcEntrypointRegister);
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
+ __ Bfi(ep_reg, ip, 3, 6); // Insert ip to the entrypoint address to create
+ // a switch case target based on the index register.
+ __ Mov(ip, base_reg); // Move the base register to ip.
+ __ Bx(ep_reg); // Jump to the entrypoint's array switch case.
+ break;
+ }
+ case BakerReadBarrierKind::kGcRoot: {
+ // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
+ // and it does not have a forwarding address), call the correct introspection entrypoint;
+ // otherwise return the reference (or the extracted forwarding address).
+ // There is no gray bit check for GC roots.
+ vixl32::Register root_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
+ CheckValidReg(root_reg.GetCode());
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
+ UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+ temps.Exclude(ip);
+ vixl32::Label return_label, not_marked, forwarding_address;
+ __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
+ MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
+ __ Ldr(ip, lock_word);
+ __ Tst(ip, LockWord::kMarkBitStateMaskShifted);
+ __ B(eq, &not_marked);
+ __ Bind(&return_label);
+ __ Bx(lr);
+ __ Bind(&not_marked);
+ static_assert(LockWord::kStateShift == 30 && LockWord::kStateForwardingAddress == 3,
+ "To use 'CMP ip, #modified-immediate; BHS', we need the lock word state in "
+ " the highest bits and the 'forwarding address' state to have all bits set");
+ __ Cmp(ip, Operand(0xc0000000));
+ __ B(hs, &forwarding_address);
+ vixl32::Register ep_reg(kBakerCcEntrypointRegister);
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
+ // Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister
+ // to art_quick_read_barrier_mark_introspection_gc_roots.
+ int32_t entrypoint_offset = (width == BakerReadBarrierWidth::kWide)
+ ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET
+ : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET;
+ __ Add(ep_reg, ep_reg, Operand(entrypoint_offset));
+ __ Mov(ip, root_reg);
+ __ Bx(ep_reg);
+ __ Bind(&forwarding_address);
+ __ Lsl(root_reg, ip, LockWord::kForwardingAddressShift);
+ __ Bx(lr);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
+ UNREACHABLE();
+ }
+
+ if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+ std::ostringstream oss;
+ oss << "BakerReadBarrierThunk";
+ switch (kind) {
+ case BakerReadBarrierKind::kField:
+ oss << "Field";
+ if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) {
+ oss << "Wide";
+ }
+ oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
+ << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
+ break;
+ case BakerReadBarrierKind::kArray:
+ oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide);
+ break;
+ case BakerReadBarrierKind::kGcRoot:
+ oss << "GcRoot";
+ if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) {
+ oss << "Wide";
+ }
+ oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ break;
+ }
+ *debug_name = oss.str();
+ }
+}
+
+#undef __
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 2114ea1ba1..6b9919ab15 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -36,6 +36,11 @@
#pragma GCC diagnostic pop
namespace art {
+
+namespace linker {
+class Thumb2RelativePatcherTest;
+} // namespace linker
+
namespace arm {
// This constant is used as an approximate margin when emission of veneer and literal pools
@@ -108,6 +113,9 @@ static const vixl::aarch32::SRegister kRuntimeParameterFpuRegistersVIXL[] = {
static const size_t kRuntimeParameterFpuRegistersLengthVIXL =
arraysize(kRuntimeParameterFpuRegistersVIXL);
+// The reserved entrypoint register for link-time generated thunks.
+const vixl::aarch32::Register kBakerCcEntrypointRegister = vixl32::r4;
+
class LoadClassSlowPathARMVIXL;
class CodeGeneratorARMVIXL;
@@ -388,16 +396,6 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
uint32_t offset,
Location maybe_temp,
ReadBarrierOption read_barrier_option);
- // Generate a GC root reference load:
- //
- // root <- *(obj + offset)
- //
- // while honoring read barriers based on read_barrier_option.
- void GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- vixl::aarch32::Register obj,
- uint32_t offset,
- ReadBarrierOption read_barrier_option);
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
vixl::aarch32::Label* true_target,
@@ -606,6 +604,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
Handle<mirror::Class> handle);
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+ bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+ void EmitThunkCode(const linker::LinkerPatch& patch,
+ /*out*/ ArenaVector<uint8_t>* code,
+ /*out*/ std::string* debug_name) OVERRIDE;
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
@@ -613,6 +615,16 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// is added only for AOT compilation if link-time generated thunks for fields are enabled.
void MaybeAddBakerCcEntrypointTempForFields(LocationSummary* locations);
+ // Generate a GC root reference load:
+ //
+ // root <- *(obj + offset)
+ //
+ // while honoring read barriers based on read_barrier_option.
+ void GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ vixl::aarch32::Register obj,
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -767,6 +779,83 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl::aarch32::Register temp = vixl32::Register());
private:
+ // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
+
+ enum class BakerReadBarrierKind : uint8_t {
+ kField, // Field get or array get with constant offset (i.e. constant index).
+ kArray, // Array get with index in register.
+ kGcRoot, // GC root load.
+ kLast = kGcRoot
+ };
+
+ enum class BakerReadBarrierWidth : uint8_t {
+ kWide, // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled).
+ kNarrow, // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled).
+ kLast = kNarrow
+ };
+
+ static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* pc is invalid */ 15u;
+
+ static constexpr size_t kBitsForBakerReadBarrierKind =
+ MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
+ static constexpr size_t kBakerReadBarrierBitsForRegister =
+ MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg);
+ using BakerReadBarrierKindField =
+ BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
+ using BakerReadBarrierFirstRegField =
+ BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>;
+ using BakerReadBarrierSecondRegField =
+ BitField<uint32_t,
+ kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister,
+ kBakerReadBarrierBitsForRegister>;
+ static constexpr size_t kBitsForBakerReadBarrierWidth =
+ MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierWidth::kLast));
+ using BakerReadBarrierWidthField =
+ BitField<BakerReadBarrierWidth,
+ kBitsForBakerReadBarrierKind + 2 * kBakerReadBarrierBitsForRegister,
+ kBitsForBakerReadBarrierWidth>;
+
+ static void CheckValidReg(uint32_t reg) {
+ DCHECK(reg < vixl::aarch32::ip.GetCode() && reg != kBakerCcEntrypointRegister.GetCode()) << reg;
+ }
+
+ static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg,
+ uint32_t holder_reg,
+ bool narrow) {
+ CheckValidReg(base_reg);
+ CheckValidReg(holder_reg);
+ DCHECK(!narrow || base_reg < 8u) << base_reg;
+ BakerReadBarrierWidth width =
+ narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
+ BakerReadBarrierFirstRegField::Encode(base_reg) |
+ BakerReadBarrierSecondRegField::Encode(holder_reg) |
+ BakerReadBarrierWidthField::Encode(width);
+ }
+
+ static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
+ CheckValidReg(base_reg);
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
+ BakerReadBarrierFirstRegField::Encode(base_reg) |
+ BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
+ BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
+ }
+
+ static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
+ CheckValidReg(root_reg);
+ DCHECK(!narrow || root_reg < 8u) << root_reg;
+ BakerReadBarrierWidth width =
+ narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
+ BakerReadBarrierFirstRegField::Encode(root_reg) |
+ BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
+ BakerReadBarrierWidthField::Encode(width);
+ }
+
+ void CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
+ uint32_t encoded_data,
+ /*out*/ std::string* debug_name);
+
vixl::aarch32::Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
vixl::aarch32::Register temp);
@@ -829,6 +918,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// Patches for class literals in JIT compiled code.
TypeToLiteralMap jit_class_patches_;
+ friend class linker::Thumb2RelativePatcherTest;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
};
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index c41c290c8b..792cfb539a 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -195,7 +195,9 @@ class InternalCodeAllocator : public CodeAllocator {
}
size_t GetSize() const { return size_; }
- uint8_t* GetMemory() const { return memory_.get(); }
+ ArrayRef<const uint8_t> GetMemory() const OVERRIDE {
+ return ArrayRef<const uint8_t>(memory_.get(), size_);
+ }
private:
size_t size_;
@@ -269,8 +271,8 @@ static void Run(const InternalCodeAllocator& allocator,
InstructionSet target_isa = codegen.GetInstructionSet();
typedef Expected (*fptr)();
- CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
- fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
+ CommonCompilerTest::MakeExecutable(allocator.GetMemory().data(), allocator.GetMemory().size());
+ fptr f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(allocator.GetMemory().data()));
if (target_isa == InstructionSet::kThumb2) {
// For thumb we need the bottom bit set.
f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index d20b681b49..2e189fdd14 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -105,15 +105,15 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
const std::vector<uint8_t>& expected_asm,
const std::vector<uint8_t>& expected_cfi) {
// Get the outputs.
- const std::vector<uint8_t>& actual_asm = code_allocator_.GetMemory();
+ ArrayRef<const uint8_t> actual_asm = code_allocator_.GetMemory();
Assembler* opt_asm = code_gen_->GetAssembler();
- const std::vector<uint8_t>& actual_cfi = *(opt_asm->cfi().data());
+ ArrayRef<const uint8_t> actual_cfi(*(opt_asm->cfi().data()));
if (kGenerateExpected) {
GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
} else {
- EXPECT_EQ(expected_asm, actual_asm);
- EXPECT_EQ(expected_cfi, actual_cfi);
+ EXPECT_EQ(ArrayRef<const uint8_t>(expected_asm), actual_asm);
+ EXPECT_EQ(ArrayRef<const uint8_t>(expected_cfi), actual_cfi);
}
}
@@ -140,7 +140,7 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
return memory_.data();
}
- const std::vector<uint8_t>& GetMemory() { return memory_; }
+ ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
private:
std::vector<uint8_t> memory_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index e42dfc10ba..79165826d1 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -75,22 +75,18 @@ static constexpr const char* kPassNameSeparator = "$";
class CodeVectorAllocator FINAL : public CodeAllocator {
public:
explicit CodeVectorAllocator(ArenaAllocator* allocator)
- : memory_(allocator->Adapter(kArenaAllocCodeBuffer)),
- size_(0) {}
+ : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
virtual uint8_t* Allocate(size_t size) {
- size_ = size;
memory_.resize(size);
return &memory_[0];
}
- size_t GetSize() const { return size_; }
- const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
+ ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
uint8_t* GetData() { return memory_.data(); }
private:
ArenaVector<uint8_t> memory_;
- size_t size_;
DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};
@@ -719,7 +715,7 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
GetCompilerDriver(),
codegen->GetInstructionSet(),
- ArrayRef<const uint8_t>(code_allocator->GetMemory()),
+ code_allocator->GetMemory(),
// Follow Quick's behavior and set the frame size to zero if it is
// considered "empty" (see the definition of
// art::CodeGenerator::HasEmptyFrame).
@@ -731,6 +727,16 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
ArrayRef<const linker::LinkerPatch>(linker_patches));
+ CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
+ for (const linker::LinkerPatch& patch : linker_patches) {
+ if (codegen->NeedsThunkCode(patch) && storage->GetThunkCode(patch).empty()) {
+ ArenaVector<uint8_t> code(allocator->Adapter());
+ std::string debug_name;
+ codegen->EmitThunkCode(patch, &code, &debug_name);
+ storage->SetThunkCode(patch, ArrayRef<const uint8_t>(code), debug_name);
+ }
+ }
+
return compiled_method;
}
@@ -1339,7 +1345,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
code_allocator.GetMemory().data(),
- code_allocator.GetSize(),
+ code_allocator.GetMemory().size(),
data_size,
osr,
roots,
@@ -1369,7 +1375,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.is_optimized = true;
info.is_code_address_text_relative = false;
info.code_address = code_address;
- info.code_size = code_allocator.GetSize();
+ info.code_size = code_allocator.GetMemory().size();
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
@@ -1378,7 +1384,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
if (jit_logger != nullptr) {
- jit_logger->WriteLog(code, code_allocator.GetSize(), method);
+ jit_logger->WriteLog(code, code_allocator.GetMemory().size(), method);
}
if (kArenaAllocatorCountAllocations) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 6950b93e51..88dc6d44b6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -404,6 +404,7 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" Example: --very-large-app-threshold=100000000");
UsageError("");
UsageError(" --app-image-fd=<file-descriptor>: specify output file descriptor for app image.");
+ UsageError(" The image is non-empty only if a profile is passed in.");
UsageError(" Example: --app-image-fd=10");
UsageError("");
UsageError(" --app-image-file=<file-name>: specify a file name for app image.");
@@ -1479,9 +1480,15 @@ class Dex2Oat FINAL {
}
void LoadClassProfileDescriptors() {
- if (profile_compilation_info_ != nullptr && IsImage()) {
- Runtime* runtime = Runtime::Current();
- CHECK(runtime != nullptr);
+ if (!IsImage()) {
+ return;
+ }
+ // If we don't have a profile, treat it as an empty set of classes. b/77340429
+ if (image_classes_ == nullptr) {
+ // May be non-null when --image-classes is passed in, in that case avoid clearing the list.
+ image_classes_.reset(new std::unordered_set<std::string>());
+ }
+ if (profile_compilation_info_ != nullptr) {
// Filter out class path classes since we don't want to include these in the image.
image_classes_.reset(
new std::unordered_set<std::string>(
@@ -2061,7 +2068,9 @@ class Dex2Oat FINAL {
{
TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
- linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
+ linker::MultiOatRelativePatcher patcher(instruction_set_,
+ instruction_set_features_.get(),
+ driver_->GetCompiledMethodStorage());
for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
std::unique_ptr<linker::ElfWriter>& elf_writer = elf_writers_[i];
std::unique_ptr<linker::OatWriter>& oat_writer = oat_writers_[i];
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 0cd39ac11b..c890f8bef0 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -2093,4 +2093,36 @@ TEST_F(Dex2oatTest, CompactDexInZip) {
ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
}
+TEST_F(Dex2oatTest, AppImageNoProfile) {
+ ScratchFile app_image_file;
+ const std::string out_dir = GetScratchDir();
+ const std::string odex_location = out_dir + "/base.odex";
+ GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+ odex_location,
+ CompilerFilter::Filter::kSpeedProfile,
+ { "--app-image-fd=" + std::to_string(app_image_file.GetFd()) },
+ true, // expect_success
+ false, // use_fd
+ [](const OatFile&) {});
+ // Open our generated oat file.
+ std::string error_msg;
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ odex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file != nullptr);
+ ImageHeader header = {};
+ ASSERT_TRUE(app_image_file.GetFile()->PreadFully(
+ reinterpret_cast<void*>(&header),
+ sizeof(header),
+ /*offset*/ 0u)) << app_image_file.GetFile()->GetLength();
+ EXPECT_GT(header.GetImageSection(ImageHeader::kSectionObjects).Size(), 0u);
+ EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtMethods).Size(), 0u);
+ EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtFields).Size(), 0u);
+}
+
} // namespace art
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 7449191984..476a843821 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -299,7 +299,8 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
DCHECK_EQ(vdex_files.size(), oat_files.size());
for (size_t i = 0, size = oat_files.size(); i != size; ++i) {
MultiOatRelativePatcher patcher(driver->GetInstructionSet(),
- driver->GetInstructionSetFeatures());
+ driver->GetInstructionSetFeatures(),
+ driver->GetCompiledMethodStorage());
OatWriter* const oat_writer = oat_writers[i].get();
ElfWriter* const elf_writer = elf_writers[i].get();
std::vector<const DexFile*> cur_dex_files(1u, class_path[i]);
diff --git a/dex2oat/linker/multi_oat_relative_patcher.cc b/dex2oat/linker/multi_oat_relative_patcher.cc
index 1abaf7dfd1..1449d478f9 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher.cc
@@ -20,14 +20,28 @@
#include "base/bit_utils.h"
#include "globals.h"
+#include "driver/compiled_method_storage.h"
namespace art {
namespace linker {
+void MultiOatRelativePatcher::ThunkProvider::GetThunkCode(const LinkerPatch& patch,
+ /*out*/ ArrayRef<const uint8_t>* code,
+ /*out*/ std::string* debug_name) {
+ *code = storage_->GetThunkCode(patch, debug_name);
+ DCHECK(!code->empty());
+}
+
+
MultiOatRelativePatcher::MultiOatRelativePatcher(InstructionSet instruction_set,
- const InstructionSetFeatures* features)
- : method_offset_map_(),
- relative_patcher_(RelativePatcher::Create(instruction_set, features, &method_offset_map_)),
+ const InstructionSetFeatures* features,
+ CompiledMethodStorage* storage)
+ : thunk_provider_(storage),
+ method_offset_map_(),
+ relative_patcher_(RelativePatcher::Create(instruction_set,
+ features,
+ &thunk_provider_,
+ &method_offset_map_)),
adjustment_(0u),
instruction_set_(instruction_set),
start_size_code_alignment_(0u),
diff --git a/dex2oat/linker/multi_oat_relative_patcher.h b/dex2oat/linker/multi_oat_relative_patcher.h
index bd33b95318..60fcfe8b58 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.h
+++ b/dex2oat/linker/multi_oat_relative_patcher.h
@@ -26,6 +26,7 @@
namespace art {
class CompiledMethod;
+class CompiledMethodStorage;
class InstructionSetFeatures;
namespace linker {
@@ -38,7 +39,9 @@ class MultiOatRelativePatcher FINAL {
public:
using const_iterator = SafeMap<MethodReference, uint32_t>::const_iterator;
- MultiOatRelativePatcher(InstructionSet instruction_set, const InstructionSetFeatures* features);
+ MultiOatRelativePatcher(InstructionSet instruction_set,
+ const InstructionSetFeatures* features,
+ CompiledMethodStorage* storage);
// Mark the start of a new oat file (for statistics retrieval) and set the
// adjustment for a new oat file to apply to all relative offsets that are
@@ -129,6 +132,19 @@ class MultiOatRelativePatcher FINAL {
uint32_t MiscThunksSize() const;
private:
+ class ThunkProvider : public RelativePatcherThunkProvider {
+ public:
+ explicit ThunkProvider(CompiledMethodStorage* storage)
+ : storage_(storage) {}
+
+ void GetThunkCode(const LinkerPatch& patch,
+ /*out*/ ArrayRef<const uint8_t>* code,
+ /*out*/ std::string* debug_name) OVERRIDE;
+
+ private:
+ CompiledMethodStorage* storage_;
+ };
+
// Map method reference to assigned offset.
// Wrap the map in a class implementing RelativePatcherTargetProvider.
class MethodOffsetMap : public RelativePatcherTargetProvider {
@@ -137,6 +153,7 @@ class MultiOatRelativePatcher FINAL {
SafeMap<MethodReference, uint32_t> map;
};
+ ThunkProvider thunk_provider_;
MethodOffsetMap method_offset_map_;
std::unique_ptr<RelativePatcher> relative_patcher_;
uint32_t adjustment_;
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index ca9c5f1e84..05fe36a590 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -122,7 +122,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
MultiOatRelativePatcherTest()
: instruction_set_features_(InstructionSetFeatures::FromCppDefines()),
- patcher_(kRuntimeISA, instruction_set_features_.get()) {
+ patcher_(kRuntimeISA, instruction_set_features_.get(), /* storage */ nullptr) {
std::unique_ptr<MockPatcher> mock(new MockPatcher());
mock_ = mock.get();
patcher_.relative_patcher_ = std::move(mock);
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 6e95393e80..ea4e210b74 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -213,7 +213,8 @@ class OatTest : public CommonCompilerTest {
class_linker->RegisterDexFile(*dex_file, nullptr);
}
MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
- instruction_set_features_.get());
+ instruction_set_features_.get(),
+ compiler_driver_->GetCompiledMethodStorage());
oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
oat_writer.PrepareLayout(&patcher);
elf_writer->PrepareDynamicSection(oat_writer.GetOatHeader().GetExecutableOffset(),
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index 6d84ffa53f..a6f1207f34 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -53,20 +53,29 @@
namespace openjdkjvmti {
// TODO We should make this much more selective in the future so we only return true when we
-// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
-// we can just assume that we care we are loaded at all.
-//
-// Even if we don't keep track of this at the method level we might want to keep track of it at the
-// level of enabled capabilities.
-bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(
- art::ArtMethod* method ATTRIBUTE_UNUSED) {
- return true;
+// actually care about the method at this time (ie active frames had locals changed). For now we
+// just assume that if anything has changed any frame's locals we care about all methods. If nothing
+// has, we only care about methods with active breakpoints on them. In the future we should probably
+// rewrite all of this to instead do this at the ShadowFrame or thread granularity.
+bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(art::ArtMethod* method) {
+ // Non-java-debuggable runtimes we need to assume that any method might not be debuggable and
+ // therefore potentially being inspected (due to inlines). If we are debuggable we rely hard on
+ // inlining not being done since we don't keep track of which methods get inlined where and simply
+ // look to see if the method is breakpointed.
+ return !art::Runtime::Current()->IsJavaDebuggable() ||
+ manager_->HaveLocalsChanged() ||
+ manager_->MethodHasBreakpoints(method);
}
bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
return !manager_->MethodHasBreakpoints(method);
}
+bool JvmtiMethodInspectionCallback::MethodNeedsDebugVersion(
+ art::ArtMethod* method ATTRIBUTE_UNUSED) {
+ return true;
+}
+
DeoptManager::DeoptManager()
: deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock",
static_cast<art::LockLevel>(
@@ -75,7 +84,10 @@ DeoptManager::DeoptManager()
performing_deoptimization_(false),
global_deopt_count_(0),
deopter_count_(0),
- inspection_callback_(this) { }
+ breakpoint_status_lock_("JVMTI_BreakpointStatusLock",
+ static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1)),
+ inspection_callback_(this),
+ set_local_variable_called_(false) { }
void DeoptManager::Setup() {
art::ScopedThreadStateChange stsc(art::Thread::Current(),
@@ -121,14 +133,11 @@ void DeoptManager::FinishSetup() {
}
bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
- art::MutexLock lk(art::Thread::Current(), deoptimization_status_lock_);
+ art::MutexLock lk(art::Thread::Current(), breakpoint_status_lock_);
return MethodHasBreakpointsLocked(method);
}
bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
- if (deopter_count_ == 0) {
- return false;
- }
auto elem = breakpoint_status_.find(method);
return elem != breakpoint_status_.end() && elem->second != 0;
}
@@ -158,18 +167,23 @@ void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
art::ScopedThreadSuspension sts(self, art::kSuspended);
deoptimization_status_lock_.ExclusiveLock(self);
-
- DCHECK_GT(deopter_count_, 0u) << "unexpected deotpimization request";
-
- if (MethodHasBreakpointsLocked(method)) {
- // Don't need to do anything extra.
- breakpoint_status_[method]++;
- // Another thread might be deoptimizing the very method we just added new breakpoints for. Wait
- // for any deopts to finish before moving on.
- WaitForDeoptimizationToFinish(self);
- return;
+ {
+ breakpoint_status_lock_.ExclusiveLock(self);
+
+    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+
+ if (MethodHasBreakpointsLocked(method)) {
+ // Don't need to do anything extra.
+ breakpoint_status_[method]++;
+ // Another thread might be deoptimizing the very method we just added new breakpoints for.
+ // Wait for any deopts to finish before moving on.
+ breakpoint_status_lock_.ExclusiveUnlock(self);
+ WaitForDeoptimizationToFinish(self);
+ return;
+ }
+ breakpoint_status_[method] = 1;
+ breakpoint_status_lock_.ExclusiveUnlock(self);
}
- breakpoint_status_[method] = 1;
auto instrumentation = art::Runtime::Current()->GetInstrumentation();
if (instrumentation->IsForcedInterpretOnly()) {
// We are already interpreting everything so no need to do anything.
@@ -196,17 +210,22 @@ void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) {
// need but since that is very heavy we will instead just use a condition variable to make sure we
// don't race with ourselves.
deoptimization_status_lock_.ExclusiveLock(self);
-
- DCHECK_GT(deopter_count_, 0u) << "unexpected deotpimization request";
- DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
- << "breakpoints present!";
+ bool is_last_breakpoint;
+ {
+ art::MutexLock mu(self, breakpoint_status_lock_);
+
+    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+ DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
+ << "breakpoints present!";
+ breakpoint_status_[method] -= 1;
+ is_last_breakpoint = (breakpoint_status_[method] == 0);
+ }
auto instrumentation = art::Runtime::Current()->GetInstrumentation();
- breakpoint_status_[method] -= 1;
if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) {
// We don't need to do anything since we are interpreting everything anyway.
deoptimization_status_lock_.ExclusiveUnlock(self);
return;
- } else if (breakpoint_status_[method] == 0) {
+ } else if (is_last_breakpoint) {
if (UNLIKELY(is_default)) {
RemoveDeoptimizeAllMethodsLocked(self);
} else {
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index a495b6835c..6e991dee3d 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -32,6 +32,7 @@
#ifndef ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
#define ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
+#include <atomic>
#include <unordered_map>
#include "jni.h"
@@ -62,6 +63,9 @@ struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback {
bool IsMethodSafeToJit(art::ArtMethod* method)
OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool MethodNeedsDebugVersion(art::ArtMethod* method)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
private:
DeoptManager* manager_;
};
@@ -107,9 +111,17 @@ class DeoptManager {
static DeoptManager* Get();
+ bool HaveLocalsChanged() const {
+ return set_local_variable_called_.load();
+ }
+
+ void SetLocalsUpdated() {
+ set_local_variable_called_.store(true);
+ }
+
private:
bool MethodHasBreakpointsLocked(art::ArtMethod* method)
- REQUIRES(deoptimization_status_lock_);
+ REQUIRES(breakpoint_status_lock_);
// Wait until nothing is currently in the middle of deoptimizing/undeoptimizing something. This is
// needed to ensure that everything is synchronized since threads need to drop the
@@ -156,13 +168,20 @@ class DeoptManager {
// Number of users of deoptimization there currently are.
uint32_t deopter_count_ GUARDED_BY(deoptimization_status_lock_);
+ // A mutex that just protects the breakpoint-status map. This mutex should always be at the
+ // bottom of the lock hierarchy. Nothing more should be locked if we hold this.
+ art::Mutex breakpoint_status_lock_ ACQUIRED_BEFORE(art::Locks::abort_lock_);
// A map from methods to the number of breakpoints in them from all envs.
std::unordered_map<art::ArtMethod*, uint32_t> breakpoint_status_
- GUARDED_BY(deoptimization_status_lock_);
+ GUARDED_BY(breakpoint_status_lock_);
// The MethodInspectionCallback we use to tell the runtime if we care about particular methods.
JvmtiMethodInspectionCallback inspection_callback_;
+ // Set to true if anything calls SetLocalVariables on any thread since we need to be careful about
+ // OSR after this.
+ std::atomic<bool> set_local_variable_called_;
+
// Helper for setting up/tearing-down for deoptimization.
friend class ScopedDeoptimizationContext;
};
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index bf2e6cd104..b83310dc85 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -915,6 +915,9 @@ jvmtiError MethodUtil::SetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
if (depth < 0) {
return ERR(ILLEGAL_ARGUMENT);
}
+ // Make sure that we know not to do any OSR anymore.
+ // TODO We should really keep track of this at the Frame granularity.
+ DeoptManager::Get()->SetLocalsUpdated();
art::Thread* self = art::Thread::Current();
// Suspend JIT since it can get confused if we deoptimize methods getting jitted.
art::jit::ScopedJitSuspend suspend_jit;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 4a9449640b..28659cb11d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -364,6 +364,11 @@ bool DebuggerActiveMethodInspectionCallback::IsMethodSafeToJit(ArtMethod* m) {
return !Dbg::MethodHasAnyBreakpoints(m);
}
+bool DebuggerActiveMethodInspectionCallback::MethodNeedsDebugVersion(
+ ArtMethod* m ATTRIBUTE_UNUSED) {
+ return Dbg::IsDebuggerActive();
+}
+
void InternalDebuggerControlCallback::StartDebugger() {
// Release the mutator lock.
ScopedThreadStateChange stsc(art::Thread::Current(), kNative);
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 74018137a0..e1de991812 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -56,6 +56,7 @@ class Thread;
struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
bool IsMethodBeingInspected(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMethodSafeToJit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool MethodNeedsDebugVersion(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
struct DebuggerDdmCallback : public DdmCallback {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 3015b10103..671079b128 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -37,7 +37,9 @@ namespace art {
// Static fault manger object accessed by signal handler.
FaultManager fault_manager;
-extern "C" __attribute__((visibility("default"))) void art_sigsegv_fault() {
+// This needs to be NO_INLINE since some debuggers do not read the inline-info to set a breakpoint
+// if the function is inlined.
+extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigsegv_fault() {
// Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
VLOG(signals)<< "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 84a148f21c..d7f33d5e43 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -139,10 +139,13 @@ static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- return Runtime::Current()->IsJavaDebuggable() &&
+ art::Runtime* runtime = Runtime::Current();
+ // If anything says we need the debug version or we are debuggable we will need the debug version
+ // of the method.
+ return (runtime->GetRuntimeCallbacks()->MethodNeedsDebugVersion(method) ||
+ runtime->IsJavaDebuggable()) &&
!method->IsNative() &&
- !method->IsProxyMethod() &&
- Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method);
+ !method->IsProxyMethod();
}
void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index cd3c0b7c88..758917cf7e 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -106,6 +106,15 @@ bool RuntimeCallbacks::IsMethodBeingInspected(ArtMethod* m) {
return false;
}
+bool RuntimeCallbacks::MethodNeedsDebugVersion(ArtMethod* m) {
+ for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
+ if (cb->MethodNeedsDebugVersion(m)) {
+ return true;
+ }
+ }
+ return false;
+}
+
void RuntimeCallbacks::AddThreadLifecycleCallback(ThreadLifecycleCallback* cb) {
thread_callbacks_.push_back(cb);
}
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index 24386ba14a..9f0410d102 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -130,6 +130,10 @@ class MethodInspectionCallback {
// Note that '!IsMethodSafeToJit(m) implies IsMethodBeingInspected(m)'. That is that if this
// method returns false IsMethodBeingInspected must return true.
virtual bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+ // Returns true if we expect the method to be debuggable but are not doing anything unusual with
+ // it currently.
+ virtual bool MethodNeedsDebugVersion(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
class RuntimeCallbacks {
@@ -198,6 +202,11 @@ class RuntimeCallbacks {
// entrypoint should not be changed to JITed code.
bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns true if some MethodInspectionCallback indicates the method needs to use a debug
+ // version. This allows later code to set breakpoints or perform other actions that could be
+ // broken by some optimizations.
+ bool MethodNeedsDebugVersion(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+
void AddMethodInspectionCallback(MethodInspectionCallback* cb)
REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveMethodInspectionCallback(MethodInspectionCallback* cb)
diff --git a/test/1935-get-set-current-frame-jit/expected.txt b/test/1935-get-set-current-frame-jit/expected.txt
index cdb8f6a825..a685891775 100644
--- a/test/1935-get-set-current-frame-jit/expected.txt
+++ b/test/1935-get-set-current-frame-jit/expected.txt
@@ -1,7 +1,5 @@
JNI_OnLoad called
From GetLocalInt(), value is 42
-isInOsrCode? false
Value is '42'
Setting TARGET to 1337
-isInOsrCode? false
Value is '1337'
diff --git a/test/1935-get-set-current-frame-jit/run b/test/1935-get-set-current-frame-jit/run
index 51875a7e86..e569d08ffd 100755
--- a/test/1935-get-set-current-frame-jit/run
+++ b/test/1935-get-set-current-frame-jit/run
@@ -14,5 +14,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Ask for stack traces to be dumped to a file rather than to stdout.
-./default-run "$@" --jvmti
+# Ensure the test is not subject to code collection
+./default-run "$@" --jvmti --runtime-option -Xjitinitialsize:32M
diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java
index 714a98aaf3..378aaf7a94 100644
--- a/test/1935-get-set-current-frame-jit/src/Main.java
+++ b/test/1935-get-set-current-frame-jit/src/Main.java
@@ -21,6 +21,7 @@ import java.lang.reflect.Constructor;
import java.lang.reflect.Executable;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
+import java.time.Instant;
import java.util.concurrent.Semaphore;
import java.util.Arrays;
import java.util.Collection;
@@ -49,9 +50,11 @@ public class Main {
public static class IntRunner implements Runnable {
private volatile boolean continueBusyLoop;
private volatile boolean inBusyLoop;
- public IntRunner() {
+ private final boolean expectOsr;
+ public IntRunner(boolean expectOsr) {
this.continueBusyLoop = true;
this.inBusyLoop = false;
+ this.expectOsr = expectOsr;
}
public void run() {
int TARGET = 42;
@@ -59,14 +62,23 @@ public class Main {
while (continueBusyLoop) {
inBusyLoop = true;
}
- int i = 0;
- while (Main.isInterpreted() && i < 10000) {
- Main.ensureJitCompiled(IntRunner.class, "run");
- i++;
- }
- // We shouldn't be doing OSR since we are using JVMTI and the get/set prevents OSR.
+      // Wait up to 600 seconds for OSR to kick in if we expect it. If we don't, give up after only
+      // 3 seconds.
+ Instant osrDeadline = Instant.now().plusSeconds(expectOsr ? 600 : 3);
+ do {
+ // Don't actually do anything here.
+ inBusyLoop = true;
+ } while (hasJit() && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
+ // We shouldn't be doing OSR since we are using JVMTI and the set prevents OSR.
// Set local will also push us to interpreter but the get local may remain in compiled code.
- System.out.println("isInOsrCode? " + (hasJit() && Main.isInOsrCode("run")));
+ if (hasJit()) {
+ boolean inOsr = Main.isInOsrCode("run");
+ if (expectOsr && !inOsr) {
+ throw new Error("Expected to be in OSR but was not.");
+ } else if (!expectOsr && inOsr) {
+ throw new Error("Expected not to be in OSR but was.");
+ }
+ }
reportValue(TARGET);
}
public void waitForBusyLoopStart() { while (!inBusyLoop) {} }
@@ -78,7 +90,7 @@ public class Main {
public static void runGet() throws Exception {
Method target = IntRunner.class.getDeclaredMethod("run");
// Get Int
- IntRunner int_runner = new IntRunner();
+ IntRunner int_runner = new IntRunner(true);
Thread target_get = new Thread(int_runner, "GetLocalInt - Target");
target_get.start();
int_runner.waitForBusyLoopStart();
@@ -108,7 +120,7 @@ public class Main {
public static void runSet() throws Exception {
Method target = IntRunner.class.getDeclaredMethod("run");
// Set Int
- IntRunner int_runner = new IntRunner();
+ IntRunner int_runner = new IntRunner(false);
Thread target_set = new Thread(int_runner, "SetLocalInt - Target");
target_set.start();
int_runner.waitForBusyLoopStart();
@@ -157,7 +169,6 @@ public class Main {
throw new Error("Unable to find stack frame in method " + target + " on thread " + thr);
}
- public static native void ensureJitCompiled(Class k, String f);
public static native boolean isInterpreted();
public static native boolean isInOsrCode(String methodName);
public static native boolean hasJit();
diff --git a/tools/libcore_network_failures.txt b/tools/libcore_network_failures.txt
new file mode 100644
index 0000000000..e7e31dbe67
--- /dev/null
+++ b/tools/libcore_network_failures.txt
@@ -0,0 +1,92 @@
+/*
+ * This file contains extra expectations for ART's buildbot regarding network tests.
+ * The script that uses this file is art/tools/run-libcore-tests.sh.
+ */
+
+[
+{
+ description: "Ignore failure of network-related tests on new devices running Android O",
+ result: EXEC_FAILED,
+ bug: 74725685,
+ modes: [device],
+ names: ["libcore.libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet",
+ "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithFtpURLConnection",
+ "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection",
+ "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarFtpURLConnection",
+ "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarHttpURLConnection",
+ "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithLoggingSocketHandler",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_40555",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_File",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_FileLjava_lang_String",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_InputStream",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_InputStreamLjava_lang_String",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_lang_Readable",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_lang_String",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_channels_ReadableByteChannel",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_channels_ReadableByteChannelLjava_lang_String",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_Path",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_PathLjava_lang_String",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_PathLjava_lang_String_Exception",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_Path_Exception",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_close",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_delimiter",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_findInLine_LPattern",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_findInLine_LString",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_findInLine_LString_NPEs",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_findWithinHorizon_LPatternI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNext",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigDecimal",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigInteger",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigIntegerI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigIntegerI_cache",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBoolean",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextByte",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextByteI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextByteI_cache",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextDouble",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextFloat",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextInt",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextIntI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextIntI_cache",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLPattern",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLString",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLine",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLine_sequence",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLong",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLongI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLongI_cache",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextShort",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextShortI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextShortI_cache",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_ioException",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_locale",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_match",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_next",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextBigDecimal",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextBigInteger",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextBigIntegerI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextBoolean",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextByte",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextByteI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextDouble",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextFloat",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextInt",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextIntI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextLPattern",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextLString",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextLine",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextLong",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextLongI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextShort",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_nextShortI",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_radix",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_remove",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_skip_LPattern",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_skip_LString",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_toString",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_useDelimiter_LPattern",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_useDelimiter_String",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_useLocale_LLocale",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_useRadix_I"]
+}
+]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index de07a47df7..21ddcbc062 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -19,6 +19,16 @@ if [ ! -d libcore ]; then
exit 1
fi
+# Prevent JDWP tests from running on the following devices running
+# Android O (they are failing because of a network-related issue), as
+# a workaround for b/74725685:
+# - FA7BN1A04406 (walleye device testing configuration aosp-poison/volantis-armv7-poison-debug)
+# - FA7BN1A04412 (walleye device testing configuration aosp-poison/volantis-armv8-poison-ndebug)
+# - FA7BN1A04433 (walleye device testing configuration aosp-poison/volantis-armv8-poison-debug)
+case "$ANDROID_SERIAL" in
+ (FA7BN1A04406|FA7BN1A04412|FA7BN1A04433) exit 0;;
+esac
+
source build/envsetup.sh >&/dev/null # for get_build_var, setpaths
setpaths # include platform prebuilt java, javac, etc in $PATH.
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 2b7c624a3a..7f0383d55d 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -161,6 +161,16 @@ if [[ $gcstress ]]; then
fi
fi
+# Disable network-related libcore tests that are failing on the following
+# devices running Android O, as a workaround for b/74725685:
+# - FA7BN1A04406 (walleye device testing configuration aosp-poison/volantis-armv7-poison-debug)
+# - FA7BN1A04412 (walleye device testing configuration aosp-poison/volantis-armv8-poison-ndebug)
+# - FA7BN1A04433 (walleye device testing configuration aosp-poison/volantis-armv8-poison-debug)
+case "$ANDROID_SERIAL" in
+ (FA7BN1A04406|FA7BN1A04412|FA7BN1A04433)
+ expectations="$expectations --expectations art/tools/libcore_network_failures.txt";;
+esac
+
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 9373c69bf8..5ce7f5244e 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -57,14 +57,19 @@ echo -e "${green}Setting local loopback${nc}"
adb shell ifconfig lo up
adb shell ifconfig
-# When netd is running, some libcore and JDWP tests fail with this
-# exception (b/74725685):
+# Ensure netd is running, as otherwise the logcat would be spammed
+# with the following messages on devices running Android O:
#
-# android.system.ErrnoException: connect failed: EBADMSG (Not a data message)
+# E NetdConnector: Communications error: java.io.IOException: No such file or directory
+# E mDnsConnector: Communications error: java.io.IOException: No such file or directory
#
-# Turn it off to make these tests pass.
-echo -e "${green}Turning off netd${nc}"
-adb shell stop netd
+# Netd was initially disabled as an attempt to solve issues with
+# network-related libcore and JDWP tests failing on devices running
+# Android O (MR1) (see b/74725685). These tests are currently
+# disabled. When a better solution has been found, we should remove
+# the following lines.
+echo -e "${green}Turning on netd${nc}"
+adb shell start netd
adb shell getprop init.svc.netd
echo -e "${green}List properties${nc}"
diff --git a/tools/veridex/hidden_api.cc b/tools/veridex/hidden_api.cc
index 93f921a25f..17fa1b8513 100644
--- a/tools/veridex/hidden_api.cc
+++ b/tools/veridex/hidden_api.cc
@@ -61,6 +61,11 @@ void HiddenApi::FillList(const char* filename, std::set<std::string>& entries) {
// Add the class->method name (so stripping the signature).
entries.insert(str.substr(0, pos));
}
+ pos = str.find(':');
+ if (pos != std::string::npos) {
+ // Add the class->field name (so stripping the type).
+ entries.insert(str.substr(0, pos));
+ }
}
}
}
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index d611f78eed..4885e02769 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -58,6 +58,16 @@ void HiddenApiFinder::CheckField(uint32_t field_id,
void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
const DexFile& dex_file = resolver->GetDexFile();
+ // Look at all types referenced in this dex file. Any of these
+ // types can lead to being used through reflection.
+ for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) {
+ std::string name(dex_file.StringByTypeIdx(dex::TypeIndex(i)));
+ if (hidden_api_.IsInRestrictionList(name)) {
+ classes_.insert(name);
+ }
+ }
+ // Note: we collect strings constants only referenced in code items as the string table
+ // contains other kind of strings (eg types).
size_t class_def_count = dex_file.NumClassDefs();
for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -76,15 +86,6 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
CodeItemDataAccessor code_item_accessor(dex_file, code_item);
for (const DexInstructionPcPair& inst : code_item_accessor) {
switch (inst->Opcode()) {
- case Instruction::CONST_CLASS: {
- dex::TypeIndex type_index(inst->VRegB_21c());
- std::string name = dex_file.StringByTypeIdx(type_index);
- // Only keep classes that are in a restriction list.
- if (hidden_api_.IsInRestrictionList(name)) {
- classes_.insert(name);
- }
- break;
- }
case Instruction::CONST_STRING: {
dex::StringIndex string_index(inst->VRegB_21c());
std::string name = std::string(dex_file.StringDataByIdx(string_index));