Merge "Revert "ART: Compiler support for invoke-polymorphic.""
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 402eeee..f00648f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1378,28 +1378,21 @@
void CodeGenerator::EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache) {
+ const uint8_t* roots_data) {
DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
- StackHandleScope<1> hs(Thread::Current());
- MutableHandle<mirror::DexCache> h_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
size_t index = 0;
for (auto& entry : jit_string_roots_) {
- const DexFile& entry_dex_file = *entry.first.dex_file;
- // Avoid the expensive FindDexCache call by checking if the string is
- // in the compiled method's dex file.
- h_dex_cache.Assign(IsSameDexFile(*outer_dex_cache->GetDexFile(), entry_dex_file)
- ? outer_dex_cache.Get()
- : class_linker->FindDexCache(hs.Self(), entry_dex_file));
- mirror::String* string = class_linker->LookupString(
- entry_dex_file, entry.first.string_index, h_dex_cache);
- DCHECK(string != nullptr) << "JIT roots require strings to have been loaded";
+ // Update `roots` with the string, and replace the temporarily stored
+ // address with the index into the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
+ entry.second = index;
// Ensure the string is strongly interned. This is a requirement on how the JIT
// handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(string);
- roots->Set(index, string);
- entry.second = index;
+ class_linker->GetInternTable()->InternStrong(
+ reinterpret_cast<mirror::String*>(roots->Get(index)));
++index;
}
for (auto& entry : jit_class_roots_) {
@@ -1407,6 +1400,7 @@
// stored to the index in the table.
uint64_t address = entry.second;
roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
entry.second = index;
++index;
}
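For context, a simplified sketch of the two-phase protocol this hunk relies on, using only the identifiers visible in this change (not the actual method bodies). The code generators record the address of a Handle's StackReference when they create a JIT string literal; EmitJitRoots then dereferences that address, publishes the string as a GC root, and overwrites the map value with the table index used for patching:

    // Phase 1, at code generation time (see DeduplicateJitStringLiteral below):
    // remember where the compiler-visible Handle keeps the string.
    jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
                                reinterpret_cast64<uint64_t>(handle.GetReference()));

    // Phase 2, in EmitJitRoots: resolve the stored address back to the object,
    // publish it in `roots`, and keep the slot index for literal patching.
    uint64_t address = entry.second;
    mirror::String* str =
        reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr();
    roots->Set(index, str);                             // GC root for the JIT-compiled code
    entry.second = index;                               // later consumed when patching literals
    class_linker->GetInternTable()->InternStrong(str);  // the JIT requires strong interning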
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2e2c3c0..6366b98 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -351,8 +351,7 @@
// Also emits literal patches.
void EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache)
+ const uint8_t* roots_data)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLeafMethod() const {
@@ -713,9 +712,9 @@
const ArenaVector<HBasicBlock*>* block_order_;
// Maps a StringReference (dex_file, string_index) to the index in the literal table.
- // Entries are intially added with a 0 index, and `EmitJitRoots` will compute all the
- // indices.
- ArenaSafeMap<StringReference, uint32_t, StringReferenceValueComparator> jit_string_roots_;
+ // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
// Maps a ClassReference (dex_file, type_index) to the index in the literal table.
// Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 1dd526f..541a1c5 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5936,7 +5936,9 @@
}
}
-void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -5961,8 +5963,9 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
@@ -5986,7 +5989,8 @@
}
case HLoadString::LoadKind::kJitTableAddress: {
__ LoadLiteral(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
return;
@@ -7316,8 +7320,10 @@
}
Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 6435851..d5968e0 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -489,7 +489,9 @@
dex::StringIndex string_index);
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
- Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index);
+ Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
uint64_t address);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 240e39d..9aaeadb 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4137,8 +4137,9 @@
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
- const DexFile& dex_file, dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+ const DexFile& dex_file, dex::StringIndex string_index, Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4527,7 +4528,9 @@
}
}
-void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
Register out = OutputRegister(load);
Location out_loc = load->GetLocations()->Out();
@@ -4550,8 +4553,10 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK(load->GetAddress() != 0u && IsUint<32>(load->GetAddress()));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress()));
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBssEntry: {
@@ -4582,7 +4587,8 @@
}
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
GenerateGcRootFieldLoad(load,
out_loc,
out.X(),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 8f33b6b..d6a5f9d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -567,7 +567,8 @@
dex::TypeIndex type_index);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex string_index,
uint64_t address);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index cf4d94d..c769dec 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -6021,7 +6021,9 @@
}
}
-void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(load);
@@ -6041,8 +6043,9 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
@@ -6062,7 +6065,8 @@
}
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
return;
@@ -7443,9 +7447,12 @@
return DeduplicateUint32Literal(address, &uint32_literals_);
}
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
+ const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 297d63c..200a463 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -573,7 +573,8 @@
VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
uint64_t address);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 29f8b2a..bc62854 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5625,7 +5625,9 @@
}
}
-void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
@@ -5660,8 +5662,9 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageAddressLiteral(address));
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index dd3f0fe..1b9c6da 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3628,7 +3628,9 @@
}
}
-void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
@@ -3650,8 +3652,9 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageAddressLiteral(address));
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 786bc50..a9b717d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6231,15 +6231,19 @@
}
Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(
+ StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -6257,8 +6261,9 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
@@ -6279,7 +6284,7 @@
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootStringPatch(
- load->GetDexFile(), load->GetStringIndex());
+ load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
return;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1af6850..dd1628c 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -415,7 +415,9 @@
void RecordTypePatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
+ Label* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 06b48c4..2614735 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5630,15 +5630,19 @@
}
Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(
+ StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
@@ -5650,8 +5654,9 @@
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
@@ -5672,8 +5677,8 @@
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
- Label* fixup_label =
- codegen_->NewJitRootStringPatch(load->GetDexFile(), load->GetStringIndex());
+ Label* fixup_label = codegen_->NewJitRootStringPatch(
+ load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
return;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f827e79..32d006c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -412,7 +412,9 @@
void RecordTypePatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
+ Label* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index a599c2a..d45fa11 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2498,6 +2498,17 @@
}
}
+// Helper for InstructionDataEquals to fetch the mirror String out
+// from a kJitTableAddress LoadString kind.
+// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
+// mirrors, they are stored in a variable size handle scope which is always
+// visited during a pause. Also, the only caller of this helper
+// only uses the mirror for pointer comparison.
+static inline mirror::String* AsMirrorInternal(Handle<mirror::String> handle)
+ NO_THREAD_SAFETY_ANALYSIS {
+ return handle.Get();
+}
+
bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
const HLoadString* other_load_string = other->AsLoadString();
// TODO: To allow GVN for HLoadString from different dex files, we should compare the strings
@@ -2506,16 +2517,16 @@
GetPackedFields() != other_load_string->GetPackedFields()) {
return false;
}
- LoadKind load_kind = GetLoadKind();
- if (HasAddress(load_kind)) {
- return GetAddress() == other_load_string->GetAddress();
- } else {
- DCHECK(HasStringReference(load_kind)) << load_kind;
- return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
+ switch (GetLoadKind()) {
+ case LoadKind::kBootImageAddress:
+ case LoadKind::kJitTableAddress:
+ return AsMirrorInternal(GetString()) == AsMirrorInternal(other_load_string->GetString());
+ default:
+ return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
}
}
-void HLoadString::SetLoadKindInternal(LoadKind load_kind) {
+void HLoadString::SetLoadKind(LoadKind load_kind) {
// Once sharpened, the load kind should not be changed again.
DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
SetPackedField<LoadKindField>(load_kind);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index db1b277..ea9a94c 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5787,39 +5787,31 @@
uint32_t dex_pc)
: HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc),
special_input_(HUserRecord<HInstruction*>(current_method)),
- string_index_(string_index) {
+ string_index_(string_index),
+ dex_file_(dex_file) {
SetPackedField<LoadKindField>(LoadKind::kDexCacheViaMethod);
- load_data_.dex_file_ = &dex_file;
}
- void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
- DCHECK(HasAddress(load_kind));
- load_data_.address = address;
- SetLoadKindInternal(load_kind);
- }
-
- void SetLoadKindWithStringReference(LoadKind load_kind,
- const DexFile& dex_file,
- dex::StringIndex string_index) {
- DCHECK(HasStringReference(load_kind));
- load_data_.dex_file_ = &dex_file;
- string_index_ = string_index;
- SetLoadKindInternal(load_kind);
- }
+ void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
}
- const DexFile& GetDexFile() const;
+ const DexFile& GetDexFile() const {
+ return dex_file_;
+ }
dex::StringIndex GetStringIndex() const {
return string_index_;
}
- uint64_t GetAddress() const {
- DCHECK(HasAddress(GetLoadKind()));
- return load_data_.address;
+ Handle<mirror::String> GetString() const {
+ return string_;
+ }
+
+ void SetString(Handle<mirror::String> str) {
+ string_ = str;
}
bool CanBeMoved() const OVERRIDE { return true; }
@@ -5874,18 +5866,6 @@
static_assert(kNumberOfLoadStringPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
- static bool HasStringReference(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageLinkTimeAddress ||
- load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kBssEntry ||
- load_kind == LoadKind::kDexCacheViaMethod ||
- load_kind == LoadKind::kJitTableAddress;
- }
-
- static bool HasAddress(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageAddress;
- }
-
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod.
@@ -5893,26 +5873,16 @@
// for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
- // String index serves also as the hash code and it's also needed for slow-paths,
- // so it must not be overwritten with other load data.
dex::StringIndex string_index_;
+ const DexFile& dex_file_;
- union {
- const DexFile* dex_file_; // For string reference.
- uint64_t address; // Up to 64-bit, needed for kDexCacheAddress on 64-bit targets.
- } load_data_;
+ Handle<mirror::String> string_;
DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs);
// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
-inline const DexFile& HLoadString::GetDexFile() const {
- DCHECK(HasStringReference(GetLoadKind())) << GetLoadKind();
- return *load_data_.dex_file_;
-}
-
-// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4bf5b08..297500b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1205,7 +1205,7 @@
}
MaybeRecordStat(MethodCompilationStat::kCompiled);
codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
- codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data, dex_cache);
+ codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
const void* code = code_cache->CommitCode(
self,
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index ca26c30..dc8ee23 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -275,7 +275,6 @@
dex::StringIndex string_index = load_string->GetStringIndex();
HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
- uint64_t address = 0u; // String or dex cache element address.
{
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -284,12 +283,13 @@
Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile())
? compilation_unit_.GetDexCache()
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
+ mirror::String* string = nullptr;
if (codegen_->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed, to ensure
// the string will be added to the boot image.
DCHECK(!runtime->UseJitCompilation());
- mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
+ string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
if (compiler_driver_->GetSupportBootImageFixup()) {
DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
@@ -303,43 +303,32 @@
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache);
if (string != nullptr) {
if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(string);
} else {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
}
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache);
if (string != nullptr &&
runtime->GetHeap()->ObjectIsInBootImageSpace(string) &&
!codegen_->GetCompilerOptions().GetCompilePic()) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(string);
} else {
desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
}
+ if (string != nullptr) {
+ load_string->SetString(handles_->NewHandle(string));
+ }
}
HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind);
- switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBssEntry:
- case HLoadString::LoadKind::kDexCacheViaMethod:
- case HLoadString::LoadKind::kJitTableAddress:
- load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index);
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- DCHECK_NE(address, 0u);
- load_string->SetLoadKindWithAddress(load_kind, address);
- break;
- }
+ load_string->SetLoadKind(load_kind);
}
} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index e47aa67..6c2c815 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -41,59 +41,54 @@
const std::string& variant, std::string* error_msg) {
// Look for variants that have divide support.
static const char* arm_variants_with_div[] = {
- "cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57",
- "cortex-a53.a57", "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
- "cyclone", "denver", "krait", "swift" };
+ "cortex-a7",
+ "cortex-a12",
+ "cortex-a15",
+ "cortex-a17",
+ "cortex-a53",
+ "cortex-a53.a57",
+ "cortex-a57",
+ "denver",
+ "krait",
+ };
- bool has_div = FindVariantInArray(arm_variants_with_div, arraysize(arm_variants_with_div),
+ bool has_div = FindVariantInArray(arm_variants_with_div,
+ arraysize(arm_variants_with_div),
variant);
// Look for variants that have LPAE support.
static const char* arm_variants_with_lpae[] = {
- "cortex-a7", "cortex-a15", "krait", "denver", "cortex-a53", "cortex-a57", "cortex-a53.a57"
+ "cortex-a7",
+ "cortex-a12",
+ "cortex-a15",
+ "cortex-a17",
+ "cortex-a53",
+ "cortex-a53.a57",
+ "cortex-a57",
+ "denver",
+ "krait",
};
- bool has_lpae = FindVariantInArray(arm_variants_with_lpae, arraysize(arm_variants_with_lpae),
+ bool has_lpae = FindVariantInArray(arm_variants_with_lpae,
+ arraysize(arm_variants_with_lpae),
variant);
if (has_div == false && has_lpae == false) {
- // Avoid unsupported variants.
- static const char* unsupported_arm_variants[] = {
- // ARM processors that aren't ARMv7 compatible aren't supported.
- "arm2", "arm250", "arm3", "arm6", "arm60", "arm600", "arm610", "arm620",
- "cortex-m0", "cortex-m0plus", "cortex-m1",
- "fa526", "fa626", "fa606te", "fa626te", "fmp626", "fa726te",
- "iwmmxt", "iwmmxt2",
- "strongarm", "strongarm110", "strongarm1100", "strongarm1110",
- "xscale"
- };
- if (FindVariantInArray(unsupported_arm_variants, arraysize(unsupported_arm_variants),
- variant)) {
- *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str());
- return ArmFeaturesUniquePtr();
- }
- // Warn if the variant is unknown.
- // TODO: some of the variants below may have feature support, but that support is currently
- // unknown so we'll choose conservative (sub-optimal) defaults without warning.
- // TODO: some of the architectures may not support all features required by ART and should be
- // moved to unsupported_arm_variants[] above.
- static const char* arm_variants_without_known_features[] = {
+ static const char* arm_variants_with_default_features[] = {
+ "cortex-a5",
+ "cortex-a8",
+ "cortex-a9",
+ "cortex-a9-mp",
"default",
- "arm7", "arm7m", "arm7d", "arm7dm", "arm7di", "arm7dmi", "arm70", "arm700", "arm700i",
- "arm710", "arm710c", "arm7100", "arm720", "arm7500", "arm7500fe", "arm7tdmi", "arm7tdmi-s",
- "arm710t", "arm720t", "arm740t",
- "arm8", "arm810",
- "arm9", "arm9e", "arm920", "arm920t", "arm922t", "arm946e-s", "arm966e-s", "arm968e-s",
- "arm926ej-s", "arm940t", "arm9tdmi",
- "arm10tdmi", "arm1020t", "arm1026ej-s", "arm10e", "arm1020e", "arm1022e",
- "arm1136j-s", "arm1136jf-s",
- "arm1156t2-s", "arm1156t2f-s", "arm1176jz-s", "arm1176jzf-s",
- "cortex-a5", "cortex-a8", "cortex-a9", "cortex-a9-mp", "cortex-r4f",
- "marvell-pj4", "mpcore", "mpcorenovfp"
+ "generic"
};
- if (!FindVariantInArray(arm_variants_without_known_features,
- arraysize(arm_variants_without_known_features),
+ if (!FindVariantInArray(arm_variants_with_default_features,
+ arraysize(arm_variants_with_default_features),
variant)) {
- LOG(WARNING) << "Unknown instruction set features for ARM CPU variant (" << variant
+ *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str());
+ return nullptr;
+ } else {
+ // Warn if we use the default features.
+ LOG(WARNING) << "Using default instruction set features for ARM CPU variant (" << variant
<< ") using conservative defaults";
}
}
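With this hunk, a variant that appears in none of the lists above now makes feature detection fail with an error rather than fall back with a warning. A hedged usage sketch (mirroring the updated test below; "arm7" is one of the variants that is no longer accepted):

    std::string error_msg;
    std::unique_ptr<const InstructionSetFeatures> features(
        InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
    if (features == nullptr) {
      // With this change, error_msg reports the unsupported variant.
      LOG(ERROR) << error_msg;
    }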
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 7abe53c..697ca90 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -48,17 +48,17 @@
EXPECT_EQ(denver_features->AsBitmap(), 3U);
// Build features for a 32-bit ARMv7 processor.
- std::unique_ptr<const InstructionSetFeatures> arm7_features(
- InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
- ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+ std::unique_ptr<const InstructionSetFeatures> generic_features(
+ InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg));
+ ASSERT_TRUE(generic_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
- EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str());
- EXPECT_EQ(arm7_features->AsBitmap(), 0U);
+ EXPECT_TRUE(generic_features->Equals(generic_features.get()));
+ EXPECT_FALSE(generic_features->Equals(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(generic_features.get()));
+ EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str());
+ EXPECT_EQ(generic_features->AsBitmap(), 0U);
// ARM6 is not a supported architecture variant.
std::unique_ptr<const InstructionSetFeatures> arm6_features(
@@ -70,7 +70,7 @@
TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> base_features(
- InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
+ InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg));
ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
// Build features for a 32-bit ARM with LPAE and div processor.
@@ -99,17 +99,17 @@
EXPECT_EQ(denver_features->AsBitmap(), 3U);
// Build features for a 32-bit default ARM processor.
- std::unique_ptr<const InstructionSetFeatures> arm7_features(
+ std::unique_ptr<const InstructionSetFeatures> generic_features(
base_features->AddFeaturesFromString("default", &error_msg));
- ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+ ASSERT_TRUE(generic_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
- EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str());
- EXPECT_EQ(arm7_features->AsBitmap(), 0U);
+ EXPECT_TRUE(generic_features->Equals(generic_features.get()));
+ EXPECT_FALSE(generic_features->Equals(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(generic_features.get()));
+ EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str());
+ EXPECT_EQ(generic_features->AsBitmap(), 0U);
}
} // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index b38508b..11dcc35 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -95,18 +95,20 @@
// This setter guarantees atomicity.
void AddAccessFlags(uint32_t flag) {
- uint32_t old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ uint32_t old_access_flags;
uint32_t new_access_flags;
do {
+ old_access_flags = access_flags_.load(std::memory_order_relaxed);
new_access_flags = old_access_flags | flag;
} while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
}
// This setter guarantees atomicity.
void ClearAccessFlags(uint32_t flag) {
- uint32_t old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ uint32_t old_access_flags;
uint32_t new_access_flags;
do {
+ old_access_flags = access_flags_.load(std::memory_order_relaxed);
new_access_flags = old_access_flags & ~flag;
} while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
}
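The change moves the initial load of access_flags_ inside the retry loop so every attempt starts from a freshly read value. A minimal standalone sketch of the same read-modify-write shape using plain std::atomic (illustration only; ART's field and flag values differ):

    #include <atomic>
    #include <cstdint>

    // Atomically OR `flag` into `flags` using a compare-exchange retry loop.
    void AddFlag(std::atomic<uint32_t>& flags, uint32_t flag) {
      uint32_t old_value;
      uint32_t new_value;
      do {
        old_value = flags.load(std::memory_order_relaxed);  // re-read on every attempt
        new_value = old_value | flag;
      } while (!flags.compare_exchange_weak(old_value, new_value));
    }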
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 1dca428..55b4306 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -26,7 +26,7 @@
// Headers for LogMessage::LogLine.
#ifdef ART_TARGET_ANDROID
-#include <android/log.h>
+#include <log/log.h>
#else
#include <sys/types.h>
#include <unistd.h>
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index bf1d4ea..a3e5b55 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -705,7 +705,7 @@
QuickExceptionHandler::DumpFramesWithType(self, true);
}
- mirror::Throwable* pending_exception = nullptr;
+ ObjPtr<mirror::Throwable> pending_exception;
bool from_code = false;
self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index a81458f..b809c3e 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -140,7 +140,7 @@
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
-void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
+void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
DCHECK(!is_deoptimization_);
if (kDebugExceptionDelivery) {
mirror::String* msg = exception->GetDetailMessage();
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 5592126..3ead7db 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -46,7 +46,7 @@
}
// Find the catch handler for the given exception.
- void FindCatch(mirror::Throwable* exception) REQUIRES_SHARED(Locks::mutator_lock_);
+ void FindCatch(ObjPtr<mirror::Throwable> exception) REQUIRES_SHARED(Locks::mutator_lock_);
// Deoptimize the stack to the upcall/some code that's not deoptimizeable. For
// every compiled frame, we create a "copy" shadow frame that will be executed
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 33c6a40..bdd4ca6 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -154,18 +154,18 @@
DeoptimizationContextRecord(const JValue& ret_val,
bool is_reference,
bool from_code,
- mirror::Throwable* pending_exception,
+ ObjPtr<mirror::Throwable> pending_exception,
DeoptimizationContextRecord* link)
: ret_val_(ret_val),
is_reference_(is_reference),
from_code_(from_code),
- pending_exception_(pending_exception),
+ pending_exception_(pending_exception.Ptr()),
link_(link) {}
JValue GetReturnValue() const { return ret_val_; }
bool IsReference() const { return is_reference_; }
bool GetFromCode() const { return from_code_; }
- mirror::Throwable* GetPendingException() const { return pending_exception_; }
+ ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; }
DeoptimizationContextRecord* GetLink() const { return link_; }
mirror::Object** GetReturnValueAsGCRoot() {
DCHECK(is_reference_);
@@ -219,7 +219,7 @@
void Thread::PushDeoptimizationContext(const JValue& return_value,
bool is_reference,
bool from_code,
- mirror::Throwable* exception) {
+ ObjPtr<mirror::Throwable> exception) {
DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
return_value,
is_reference,
@@ -230,7 +230,7 @@
}
void Thread::PopDeoptimizationContext(JValue* result,
- mirror::Throwable** exception,
+ ObjPtr<mirror::Throwable>* exception,
bool* from_code) {
AssertHasDeoptimizationContext();
DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
@@ -434,7 +434,7 @@
Dbg::PostThreadStart(self);
// Invoke the 'run' method of our java.lang.Thread.
- mirror::Object* receiver = self->tlsPtr_.opeer;
+ ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
jmethodID mid = WellKnownClasses::java_lang_Thread_run;
ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
@@ -446,7 +446,7 @@
}
Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::Object* thread_peer) {
+ ObjPtr<mirror::Object> thread_peer) {
ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
// Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
@@ -1573,8 +1573,8 @@
}
m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
const int kMaxRepetition = 3;
- mirror::Class* c = m->GetDeclaringClass();
- mirror::DexCache* dex_cache = c->GetDexCache();
+ ObjPtr<mirror::Class> c = m->GetDeclaringClass();
+ ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache();
int line_number = -1;
if (dex_cache != nullptr) { // be tolerant of bad input
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -1860,17 +1860,15 @@
void Thread::AssertNoPendingException() const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
- mirror::Throwable* exception = GetException();
- LOG(FATAL) << "No pending exception expected: " << exception->Dump();
+ LOG(FATAL) << "No pending exception expected: " << GetException()->Dump();
}
}
void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
- mirror::Throwable* exception = GetException();
LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
- << exception->Dump();
+ << GetException()->Dump();
}
}
@@ -2213,7 +2211,7 @@
// class of the ArtMethod pointers.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
StackHandleScope<1> hs(self_);
- mirror::Class* array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass);
+ ObjPtr<mirror::Class> array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass);
// The first element is the methods and dex pc array, the other elements are declaring classes
// for the methods to ensure classes in the stack trace don't get unloaded.
Handle<mirror::ObjectArray<mirror::Object>> trace(
@@ -2225,7 +2223,8 @@
self_->AssertPendingOOMException();
return false;
}
- mirror::PointerArray* methods_and_pcs = class_linker->AllocPointerArray(self_, depth * 2);
+ ObjPtr<mirror::PointerArray> methods_and_pcs =
+ class_linker->AllocPointerArray(self_, depth * 2);
const char* last_no_suspend_cause =
self_->StartAssertNoThreadSuspension("Building internal stack trace");
if (methods_and_pcs == nullptr) {
@@ -2255,7 +2254,7 @@
if (m->IsRuntimeMethod()) {
return true; // Ignore runtime frames (in particular callee save).
}
- mirror::PointerArray* trace_methods_and_pcs = GetTraceMethodsAndPCs();
+ ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs();
trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, m, pointer_size_);
trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(
trace_methods_and_pcs->GetLength() / 2 + count_,
@@ -2268,8 +2267,8 @@
return true;
}
- mirror::PointerArray* GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
- return down_cast<mirror::PointerArray*>(trace_->Get(0));
+ ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(trace_->Get(0)));
}
mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
@@ -2311,7 +2310,7 @@
build_trace_visitor.WalkStack();
mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
if (kIsDebugBuild) {
- mirror::PointerArray* trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
+ ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
// Second half of trace_methods is dex PCs.
for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
@@ -2326,7 +2325,7 @@
template jobject Thread::CreateInternalStackTrace<true>(
const ScopedObjectAccessAlreadyRunnable& soa) const;
-bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const {
+bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const {
CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
count_visitor.WalkStack();
return count_visitor.GetDepth() == exception->GetStackDepth();
@@ -2368,12 +2367,12 @@
}
for (int32_t i = 0; i < depth; ++i) {
- mirror::ObjectArray<mirror::Object>* decoded_traces =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces =
soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>();
// Methods and dex PC trace is element 0.
DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
- mirror::PointerArray* const method_trace =
- down_cast<mirror::PointerArray*>(decoded_traces->Get(0));
+ ObjPtr<mirror::PointerArray> const method_trace =
+ ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(decoded_traces->Get(0)));
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
@@ -2415,8 +2414,11 @@
if (method_name_object.Get() == nullptr) {
return nullptr;
}
- mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
- soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
+ ObjPtr<mirror::StackTraceElement> obj = mirror::StackTraceElement::Alloc(soa.Self(),
+ class_name_object,
+ method_name_object,
+ source_name_object,
+ line_number);
if (obj == nullptr) {
return nullptr;
}
@@ -2447,7 +2449,7 @@
ThrowNewWrappedException(exception_class_descriptor, msg);
}
-static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
+static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = self->GetCurrentMethod(nullptr);
return method != nullptr
@@ -2793,7 +2795,7 @@
void Thread::QuickDeliverException() {
// Get exception from thread.
- mirror::Throwable* exception = GetException();
+ ObjPtr<mirror::Throwable> exception = GetException();
CHECK(exception != nullptr);
if (exception == GetDeoptimizationException()) {
artDeoptimize(this);
@@ -2806,8 +2808,8 @@
IsExceptionThrownByCurrentMethod(exception)) {
// Instrumentation may cause GC so keep the exception object safe.
StackHandleScope<1> hs(this);
- HandleWrapper<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
- instrumentation->ExceptionCaughtEvent(this, exception);
+ HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
+ instrumentation->ExceptionCaughtEvent(this, exception.Ptr());
}
// Does instrumentation need to deoptimize the stack?
// Note: we do this *after* reporting the exception to instrumentation in case it
@@ -2869,7 +2871,7 @@
dex_pc_ = GetDexPc(abort_on_error_);
return false;
}
- mirror::Object* this_object_;
+ ObjPtr<mirror::Object> this_object_;
ArtMethod* method_;
uint32_t dex_pc_;
const bool abort_on_error_;
@@ -2884,11 +2886,8 @@
return visitor.method_;
}
-bool Thread::HoldsLock(mirror::Object* object) const {
- if (object == nullptr) {
- return false;
- }
- return object->GetLockOwnerThreadId() == GetThreadId();
+bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
+ return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId();
}
// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
@@ -2944,7 +2943,7 @@
void VisitDeclaringClass(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS {
- mirror::Class* klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
+ ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
// klass can be null for runtime methods.
if (klass != nullptr) {
if (kVerifyImageObjectsMarked) {
@@ -2953,10 +2952,10 @@
/*fail_ok*/true);
if (space != nullptr && space->IsImageSpace()) {
bool failed = false;
- if (!space->GetLiveBitmap()->Test(klass)) {
+ if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
failed = true;
LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
- } else if (!heap->GetLiveBitmap()->Test(klass)) {
+ } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) {
failed = true;
LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
}
@@ -2964,17 +2963,17 @@
GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
- << " klass@" << klass;
+ << " klass@" << klass.Ptr();
// Pretty info last in case it crashes.
LOG(FATAL) << "Method " << method->PrettyMethod() << " klass "
<< klass->PrettyClass();
}
}
}
- mirror::Object* new_ref = klass;
+ mirror::Object* new_ref = klass.Ptr();
visitor_(&new_ref, -1, this);
if (new_ref != klass) {
- method->CASDeclaringClass(klass, new_ref->AsClass());
+ method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
}
}
}
@@ -3366,7 +3365,7 @@
ClearException();
ShadowFrame* shadow_frame =
PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
- mirror::Throwable* pending_exception = nullptr;
+ ObjPtr<mirror::Throwable> pending_exception;
bool from_code = false;
PopDeoptimizationContext(result, &pending_exception, &from_code);
SetTopOfStack(nullptr);
diff --git a/runtime/thread.h b/runtime/thread.h
index 6308851..a3ef9bc 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -177,7 +177,7 @@
void CheckEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
- mirror::Object* thread_peer)
+ ObjPtr<mirror::Object> thread_peer)
REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
@@ -312,7 +312,7 @@
size_t NumberOfHeldMutexes() const;
- bool HoldsLock(mirror::Object*) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Changes the priority of this thread to match that of the java.lang.Thread object.
@@ -413,7 +413,7 @@
// Returns whether the given exception was thrown by the current Java method being executed
// (Note that this includes native Java methods).
- bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
+ bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
REQUIRES_SHARED(Locks::mutator_lock_);
void SetTopOfStack(ArtMethod** top_method) {
@@ -925,9 +925,11 @@
void PushDeoptimizationContext(const JValue& return_value,
bool is_reference,
bool from_code,
- mirror::Throwable* exception)
+ ObjPtr<mirror::Throwable> exception)
REQUIRES_SHARED(Locks::mutator_lock_);
- void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception, bool* from_code)
+ void PopDeoptimizationContext(JValue* result,
+ ObjPtr<mirror::Throwable>* exception,
+ bool* from_code)
REQUIRES_SHARED(Locks::mutator_lock_);
void AssertHasDeoptimizationContext()
REQUIRES_SHARED(Locks::mutator_lock_);
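Most of the thread.cc/thread.h changes replace raw mirror pointers with ObjPtr<> in signatures and locals; a default-constructed ObjPtr compares equal to nullptr, which is what the removal of the explicit "= nullptr" initializers above relies on. A hedged usage sketch of the resulting calling convention, assuming `self` and `instrumentation` are in scope as in QuickDeliverException above:

    // ObjPtr<T> wraps a mirror pointer by value; raw pointers convert implicitly,
    // and .Ptr() converts back where a raw pointer is still expected.
    ObjPtr<mirror::Throwable> exception = self->GetException();
    if (exception != nullptr) {
      instrumentation->ExceptionCaughtEvent(self, exception.Ptr());
    }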