Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc                    26
-rw-r--r--  compiler/optimizing/code_generator.h                      9
-rw-r--r--  compiler/optimizing/code_generator_arm.cc                21
-rw-r--r--  compiler/optimizing/code_generator_arm.h                  4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc              21
-rw-r--r--  compiler/optimizing/code_generator_arm64.h                3
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc           24
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h             3
-rw-r--r--  compiler/optimizing/code_generator_mips.cc               12
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc             12
-rw-r--r--  compiler/optimizing/code_generator_x86.cc                20
-rw-r--r--  compiler/optimizing/code_generator_x86.h                  4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc             22
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h               4
-rw-r--r--  compiler/optimizing/codegen_test.cc                       5
-rw-r--r--  compiler/optimizing/gvn_test.cc                          38
-rw-r--r--  compiler/optimizing/inliner.cc                           26
-rw-r--r--  compiler/optimizing/instruction_builder.cc               19
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc            67
-rw-r--r--  compiler/optimizing/licm_test.cc                         11
-rw-r--r--  compiler/optimizing/nodes.cc                             25
-rw-r--r--  compiler/optimizing/nodes.h                             138
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc                2
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc   33
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h     1
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc        273
-rw-r--r--  compiler/optimizing/register_allocator_test.cc           10
-rw-r--r--  compiler/optimizing/sharpening.cc                        27
28 files changed, 484 insertions, 376 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 402eeee65f..f00648f570 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1378,28 +1378,21 @@ uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
void CodeGenerator::EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache) {
+ const uint8_t* roots_data) {
DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
- StackHandleScope<1> hs(Thread::Current());
- MutableHandle<mirror::DexCache> h_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
size_t index = 0;
for (auto& entry : jit_string_roots_) {
- const DexFile& entry_dex_file = *entry.first.dex_file;
- // Avoid the expensive FindDexCache call by checking if the string is
- // in the compiled method's dex file.
- h_dex_cache.Assign(IsSameDexFile(*outer_dex_cache->GetDexFile(), entry_dex_file)
- ? outer_dex_cache.Get()
- : class_linker->FindDexCache(hs.Self(), entry_dex_file));
- mirror::String* string = class_linker->LookupString(
- entry_dex_file, entry.first.string_index, h_dex_cache);
- DCHECK(string != nullptr) << "JIT roots require strings to have been loaded";
+ // Update the `roots` with the string, and replace the address temporarily
+ // stored in the map entry with the index into the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
+ entry.second = index;
// Ensure the string is strongly interned. This is a requirement on how the JIT
// handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(string);
- roots->Set(index, string);
- entry.second = index;
+ class_linker->GetInternTable()->InternStrong(
+ reinterpret_cast<mirror::String*>(roots->Get(index)));
++index;
}
for (auto& entry : jit_class_roots_) {
@@ -1407,6 +1400,7 @@ void CodeGenerator::EmitJitRoots(uint8_t* code,
// stored in the map entry with the index into the table.
uint64_t address = entry.second;
roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
entry.second = index;
++index;
}
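
The reworked loop relies on a two-phase use of the map value: during compilation the 64-bit slot carries the address of a GC-visible reference slot, and at emission time it is overwritten with the literal-table index. A minimal sketch of the pattern with standard containers — std::string standing in for mirror::String, a raw pointer standing in for the handle-scope StackReference; not ART code:

  #include <cstdint>
  #include <map>
  #include <string>
  #include <vector>

  // Phase 1 (compilation): remember the address of a stable reference slot.
  void RegisterRoot(std::map<std::string, uint64_t>& jit_roots,
                    const std::string& key,
                    const std::string* ref) {
    jit_roots[key] = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ref));
  }

  // Phase 2 (emission): read the reference back, publish it in `roots`, and
  // overwrite the map slot with the table index used for later patching.
  void EmitRoots(std::map<std::string, uint64_t>& jit_roots,
                 std::vector<const std::string*>& roots) {
    uint64_t index = 0;
    for (auto& entry : jit_roots) {
      auto* ref = reinterpret_cast<const std::string*>(
          static_cast<uintptr_t>(entry.second));
      roots.push_back(ref);   // the object itself goes into the root table
      entry.second = index;   // the slot now holds the index, not the address
      ++index;
    }
  }
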
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2e2c3c00af..6366b9838f 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -351,8 +351,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// Also emits literal patches.
void EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache)
+ const uint8_t* roots_data)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLeafMethod() const {
@@ -713,9 +712,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
const ArenaVector<HBasicBlock*>* block_order_;
// Maps a StringReference (dex_file, string_index) to the index in the literal table.
- // Entries are intially added with a 0 index, and `EmitJitRoots` will compute all the
- // indices.
- ArenaSafeMap<StringReference, uint32_t, StringReferenceValueComparator> jit_string_roots_;
+ // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
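
The value type widens from uint32_t to uint64_t because the slot must first hold a pointer (via reinterpret_cast64) before being reused for the index. The round-trip itself is plain pointer/integer casting; a sketch, independent of ART:

  #include <cstdint>

  // Pack a pointer into a 64-bit map slot and recover it later. Going
  // through uintptr_t keeps the conversion valid on 32- and 64-bit hosts.
  inline uint64_t PackPointer(const void* p) {
    return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(p));
  }

  template <typename T>
  inline T* UnpackPointer(uint64_t v) {
    return reinterpret_cast<T*>(static_cast<uintptr_t>(v));
  }
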
// Maps a ClassReference (dex_file, type_index) to the index in the literal table.
// Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8a7f6d3a33..541a1c5b8f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3936,7 +3936,6 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(R0));
}
@@ -3954,7 +3953,7 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -5937,7 +5936,9 @@ void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -5962,8 +5963,9 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
@@ -5987,7 +5989,8 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
}
case HLoadString::LoadKind::kJitTableAddress: {
__ LoadLiteral(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
return;
@@ -7317,8 +7320,10 @@ Literal* CodeGeneratorARM::DeduplicateBootImageAddressLiteral(uint32_t address)
}
Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
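
DeduplicateJitStringLiteral pairs the map overwrite with a get-or-create lookup so that every load of the same (dex file, string index) shares one patchable literal. The get-or-create idiom, sketched with standard containers and hypothetical Key/Literal types:

  #include <cstdint>
  #include <functional>
  #include <map>
  #include <utility>

  // Hypothetical key: (dex file identity, string index).
  using Key = std::pair<const void*, uint32_t>;

  // Returns the literal already cached for `key`, or creates one via
  // `factory`; all call sites for the same string then share a single
  // patchable literal.
  template <typename Literal>
  Literal* GetOrCreate(std::map<Key, Literal*>& cache,
                       const Key& key,
                       const std::function<Literal*()>& factory) {
    auto it = cache.find(key);
    if (it == cache.end()) {
      it = cache.emplace(key, factory()).first;
    }
    return it->second;
  }
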
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 6435851320..d5968e0764 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -489,7 +489,9 @@ class CodeGeneratorARM : public CodeGenerator {
dex::StringIndex string_index);
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
- Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index);
+ Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
uint64_t address);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5c33fe1a7d..9aaeadb44a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4137,8 +4137,9 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddres
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
- const DexFile& dex_file, dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+ const DexFile& dex_file, dex::StringIndex string_index, Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4527,7 +4528,9 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
Register out = OutputRegister(load);
Location out_loc = load->GetLocations()->Out();
@@ -4550,8 +4553,10 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK(load->GetAddress() != 0u && IsUint<32>(load->GetAddress()));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress()));
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBssEntry: {
@@ -4582,7 +4587,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
}
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
GenerateGcRootFieldLoad(load,
out_loc,
out.X(),
@@ -4738,7 +4744,6 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(LocationFrom(kArtMethodRegister));
} else {
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -4756,7 +4761,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 8f33b6becf..d6a5f9d1fa 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -567,7 +567,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
dex::TypeIndex type_index);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex string_index,
uint64_t address);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 00ad3e34b7..c769decaa0 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3948,7 +3948,6 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(LocationFrom(r0));
}
@@ -3970,7 +3969,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -6022,7 +6021,9 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(load);
@@ -6042,8 +6043,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
@@ -6063,7 +6065,8 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
}
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
return;
@@ -7444,9 +7447,12 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateDexCacheAddressLiteral(uint3
return DeduplicateUint32Literal(address, &uint32_literals_);
}
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
+ const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 297d63cefd..200a463c75 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -573,7 +573,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
uint64_t address);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 01e0dac33e..bc62854e5d 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5625,7 +5625,9 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
@@ -5660,8 +5662,9 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -5900,7 +5903,6 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -5917,7 +5919,7 @@ void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 36690c0569..1b9c6da460 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3628,7 +3628,9 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
@@ -3650,8 +3652,9 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -3841,7 +3844,6 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -3859,7 +3861,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0abe85540c..a9b717db4f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4150,7 +4150,6 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
}
@@ -4166,7 +4165,7 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -6232,15 +6231,19 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(
+ StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -6258,8 +6261,9 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
@@ -6280,7 +6284,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootStringPatch(
- load->GetDexFile(), load->GetStringIndex());
+ load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
return;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1af685087c..dd1628c867 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -415,7 +415,9 @@ class CodeGeneratorX86 : public CodeGenerator {
void RecordTypePatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
+ Label* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 903844fcdb..261473505f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4038,7 +4038,6 @@ void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(RAX));
}
@@ -4055,7 +4054,7 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -5631,15 +5630,19 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(
+ StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
@@ -5651,8 +5654,9 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
@@ -5673,8 +5677,8 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
- Label* fixup_label =
- codegen_->NewJitRootStringPatch(load->GetDexFile(), load->GetStringIndex());
+ Label* fixup_label = codegen_->NewJitRootStringPatch(
+ load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
return;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f827e79a94..32d006c5f3 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -412,7 +412,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordTypePatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
+ Label* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 879b4ce59e..e3f3df0ff5 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -15,6 +15,7 @@
*/
#include <functional>
+#include <memory>
#include "arch/instruction_set.h"
#include "arch/arm/instruction_set_features_arm.h"
@@ -299,8 +300,8 @@ static void RunCode(CodegenTargetConfig target_config,
bool has_result,
Expected expected) {
CompilerOptions compiler_options;
- CodeGenerator* codegen = target_config.CreateCodeGenerator(graph, compiler_options);
- RunCode(codegen, graph, hook_before_codegen, has_result, expected);
+ std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options));
+ RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected);
}
#ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 437d35ccb7..f8d37bd714 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -28,7 +28,6 @@ class GVNTest : public CommonCompilerTest {};
TEST_F(GVNTest, LocalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -45,53 +44,53 @@ TEST_F(GVNTest, LocalFieldElimination) {
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* to_remove = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(43),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* use_after_kill = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HExit());
@@ -113,7 +112,6 @@ TEST_F(GVNTest, LocalFieldElimination) {
TEST_F(GVNTest, GlobalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -129,13 +127,13 @@ TEST_F(GVNTest, GlobalFieldElimination) {
graph->AddBlock(block);
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -152,33 +150,33 @@ TEST_F(GVNTest, GlobalFieldElimination) {
else_->AddSuccessor(join);
then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
then->AddInstruction(new (&allocator) HGoto());
else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
else_->AddInstruction(new (&allocator) HGoto());
join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
join->AddInstruction(new (&allocator) HExit());
@@ -196,7 +194,6 @@ TEST_F(GVNTest, GlobalFieldElimination) {
TEST_F(GVNTest, LoopFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -213,13 +210,13 @@ TEST_F(GVNTest, LoopFieldElimination) {
graph->AddBlock(block);
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HGoto());
@@ -236,13 +233,13 @@ TEST_F(GVNTest, LoopFieldElimination) {
loop_body->AddSuccessor(loop_header);
loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -251,35 +248,35 @@ TEST_F(GVNTest, LoopFieldElimination) {
// and the body to be GVN'ed.
loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_set = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HGoto());
exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
exit->AddInstruction(new (&allocator) HExit());
@@ -319,7 +316,6 @@ TEST_F(GVNTest, LoopFieldElimination) {
TEST_F(GVNTest, LoopSideEffects) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
@@ -376,13 +372,13 @@ TEST_F(GVNTest, LoopSideEffects) {
// Make one block with a side effect.
entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
SideEffectsAnalysis side_effects(graph);
@@ -401,13 +397,13 @@ TEST_F(GVNTest, LoopSideEffects) {
outer_loop_body->InsertInstructionBefore(
new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0),
outer_loop_body->GetLastInstruction());
@@ -427,13 +423,13 @@ TEST_F(GVNTest, LoopSideEffects) {
inner_loop_body->InsertInstructionBefore(
new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0),
inner_loop_body->GetLastInstruction());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3b83e95071..c970e5cbba 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -429,13 +429,13 @@ HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker,
DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet(
receiver,
+ field,
Primitive::kPrimNot,
field->GetOffset(),
field->IsVolatile(),
field->GetDexFieldIndex(),
field->GetDeclaringClass()->GetDexClassDefIndex(),
*field->GetDexFile(),
- handles_->NewHandle(field->GetDexCache()),
dex_pc);
// The class of a field is effectively final, and does not have any memory dependencies.
result->SetSideEffects(SideEffects::None());
@@ -618,6 +618,9 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
} else {
one_target_inlined = true;
+ VLOG(compiler) << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " has inlined " << ArtMethod::PrettyMethod(method);
+
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
bool deoptimize = all_targets_inlined &&
@@ -655,6 +658,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
<< " of its targets could be inlined";
return false;
}
+
MaybeRecordStat(kInlinedPolymorphicCall);
// Run type propagation to get the guards typed.
@@ -1161,13 +1165,13 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
obj,
+ resolved_field,
resolved_field->GetTypeAsPrimitiveType(),
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
*dex_cache->GetDexFile(),
- dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
@@ -1190,13 +1194,13 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex
HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
obj,
value,
+ resolved_field,
resolved_field->GetTypeAsPrimitiveType(),
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
*dex_cache->GetDexFile(),
- dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
@@ -1424,15 +1428,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
return false;
}
- if (current->IsNewInstance() &&
- (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an entrypoint"
- << " with access checks";
- // Allocation entrypoint does not handle inlined frames.
- return false;
- }
-
if (current->IsNewArray() &&
(current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
@@ -1579,6 +1574,13 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction,
/* declared_can_be_null */ true,
return_replacement)) {
return true;
+ } else if (return_replacement->IsInstanceFieldGet()) {
+ HInstanceFieldGet* field_get = return_replacement->AsInstanceFieldGet();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (field_get->GetFieldInfo().GetField() ==
+ class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0)) {
+ return true;
+ }
}
} else if (return_replacement->IsInstanceOf()) {
// Inlining InstanceOf into an If may put a tighter bound on reference types.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index af8e2c8a7c..009d549547 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -917,11 +917,11 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d
bool finalizable;
bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable);
- // Only the non-resolved entrypoint handles the finalizable class case. If we
+ // Only the access check entrypoint handles the finalizable class case. If we
// need access checks, then we haven't resolved the method and the class may
// again be finalizable.
QuickEntrypointEnum entrypoint = (finalizable || needs_access_check)
- ? kQuickAllocObject
+ ? kQuickAllocObjectWithChecks
: kQuickAllocObjectInitialized;
if (outer_dex_cache.Get() != dex_cache.Get()) {
@@ -946,7 +946,6 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d
AppendInstruction(new (arena_) HNewInstance(
cls,
- graph_->GetCurrentMethod(),
dex_pc,
type_index,
*dex_compilation_unit_->GetDexFile(),
@@ -1235,13 +1234,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_set = new (arena_) HInstanceFieldSet(object,
value,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache(),
dex_pc);
}
AppendInstruction(field_set);
@@ -1256,13 +1255,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_get = new (arena_) HInstanceFieldGet(object,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache(),
dex_pc);
}
AppendInstruction(field_get);
@@ -1311,9 +1310,9 @@ bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) c
}
void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
- uint32_t dex_pc,
- bool is_put,
- Primitive::Type field_type) {
+ uint32_t dex_pc,
+ bool is_put,
+ Primitive::Type field_type) {
uint32_t source_or_dest_reg = instruction.VRegA_21c();
uint16_t field_index = instruction.VRegB_21c();
@@ -1400,23 +1399,23 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type));
AppendInstruction(new (arena_) HStaticFieldSet(cls,
value,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_cache_,
dex_pc));
} else {
AppendInstruction(new (arena_) HStaticFieldGet(cls,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_cache_,
dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 439e3b66db..911bfb9cc6 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1118,7 +1118,66 @@ void InstructionSimplifierVisitor::VisitAboveOrEqual(HAboveOrEqual* condition) {
VisitCondition(condition);
}
+// Recognize the following pattern:
+// obj.getClass() ==/!= Foo.class
+// And replace it with a constant value if the type of `obj` is statically known.
+static bool RecognizeAndSimplifyClassCheck(HCondition* condition) {
+ HInstruction* input_one = condition->InputAt(0);
+ HInstruction* input_two = condition->InputAt(1);
+ HLoadClass* load_class = input_one->IsLoadClass()
+ ? input_one->AsLoadClass()
+ : input_two->AsLoadClass();
+ if (load_class == nullptr) {
+ return false;
+ }
+
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ if (!class_rti.IsValid()) {
+ // Unresolved class.
+ return false;
+ }
+
+ HInstanceFieldGet* field_get = (load_class == input_one)
+ ? input_two->AsInstanceFieldGet()
+ : input_one->AsInstanceFieldGet();
+ if (field_get == nullptr) {
+ return false;
+ }
+
+ HInstruction* receiver = field_get->InputAt(0);
+ ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo();
+ if (!receiver_type.IsExact()) {
+ return false;
+ }
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
+ DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
+ if (field_get->GetFieldInfo().GetField() != field) {
+ return false;
+ }
+
+ // We can replace the compare.
+ int value = 0;
+ if (receiver_type.IsEqual(class_rti)) {
+ value = condition->IsEqual() ? 1 : 0;
+ } else {
+ value = condition->IsNotEqual() ? 1 : 0;
+ }
+ condition->ReplaceWith(condition->GetBlock()->GetGraph()->GetIntConstant(value));
+ return true;
+ }
+}
+
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
+ if (condition->IsEqual() || condition->IsNotEqual()) {
+ if (RecognizeAndSimplifyClassCheck(condition)) {
+ return;
+ }
+ }
+
// Reverse condition if left is constant. Our code generators prefer constant
// on the right hand side.
if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) {
@@ -1843,11 +1902,11 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) {
// so create the HArrayLength, HBoundsCheck and HArrayGet.
HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true);
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
- HBoundsCheck* bounds_check =
- new (arena) HBoundsCheck(index, length, dex_pc, invoke->GetDexMethodIndex());
+ HBoundsCheck* bounds_check = new (arena) HBoundsCheck(
+ index, length, dex_pc, invoke->GetDexMethodIndex());
invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
- HArrayGet* array_get =
- new (arena) HArrayGet(str, index, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true);
+ HArrayGet* array_get = new (arena) HArrayGet(
+ str, bounds_check, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
GetGraph()->SetHasBoundsChecks(true);
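
For reference, the new VisitCondition path constant-folds a class-identity test once the receiver's type is statically exact. Reduced to plain values, the fold computes the following (hypothetical helper, not the ART code):

  // Source pattern:            if (obj.getClass() == Foo.class) { ... }
  // With obj's exact type known, the whole condition folds to a constant.
  int FoldClassCheck(bool types_equal, bool is_equal_condition) {
    // IsEqual folds to 1 iff the types match; IsNotEqual to the opposite.
    return (types_equal == is_equal_condition) ? 1 : 0;
  }
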
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 8c34dc6a86..5bcfa4c98b 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -111,20 +111,19 @@ TEST_F(LICMTest, FieldHoisting) {
BuildLoop();
// Populate the loop with instructions: set/get field with different types.
- ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
- parameter_, int_constant_, Primitive::kPrimInt, MemberOffset(20),
- false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0);
+ parameter_, int_constant_, nullptr, Primitive::kPrimInt, MemberOffset(20),
+ false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -140,24 +139,24 @@ TEST_F(LICMTest, NoFieldHoisting) {
// Populate the loop with instructions: set/get field with same types.
ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
get_field,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index a599c2aa84..d45fa11534 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2498,6 +2498,17 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
}
}
+// Helper for InstructionDataEquals to fetch the mirror String out
+// from a kJitTableAddress LoadString kind.
+// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
+// mirrors, they are stored in a variable size handle scope which is always
+// visited during a pause. Also, the only caller of this helper
+// only uses the mirror for pointer comparison.
+static inline mirror::String* AsMirrorInternal(Handle<mirror::String> handle)
+ NO_THREAD_SAFETY_ANALYSIS {
+ return handle.Get();
+}
+
bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
const HLoadString* other_load_string = other->AsLoadString();
// TODO: To allow GVN for HLoadString from different dex files, we should compare the strings
@@ -2506,16 +2517,16 @@ bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
GetPackedFields() != other_load_string->GetPackedFields()) {
return false;
}
- LoadKind load_kind = GetLoadKind();
- if (HasAddress(load_kind)) {
- return GetAddress() == other_load_string->GetAddress();
- } else {
- DCHECK(HasStringReference(load_kind)) << load_kind;
- return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
+ switch (GetLoadKind()) {
+ case LoadKind::kBootImageAddress:
+ case LoadKind::kJitTableAddress:
+ return AsMirrorInternal(GetString()) == AsMirrorInternal(other_load_string->GetString());
+ default:
+ return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
}
}
-void HLoadString::SetLoadKindInternal(LoadKind load_kind) {
+void HLoadString::SetLoadKind(LoadKind load_kind) {
// Once sharpened, the load kind should not be changed again.
DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
SetPackedField<LoadKindField>(load_kind);
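
With GetAddress() gone, equality for kBootImageAddress and kJitTableAddress loads is decided by the identity of the referenced mirror object. With a stand-in handle type (hypothetical, not ART's Handle), the comparison reduces to:

  // Stand-in for art::Handle<T>: an indirection slot whose referent is kept
  // alive and only moves during GC pauses, so pointer identity is stable
  // for the duration of the comparison.
  template <typename T>
  struct HandleLike {
    T* reference;
    T* Get() const { return reference; }
  };

  // Two loads are GVN-equal when they resolve to the same object, not when
  // the handles themselves live at the same address.
  template <typename T>
  bool SameReferent(HandleLike<T> a, HandleLike<T> b) {
    return a.Get() == b.Get();
  }
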
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8c64d25aee..ea9a94c420 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,6 +171,7 @@ class HInstructionList : public ValueObject {
friend class HGraph;
friend class HInstruction;
friend class HInstructionIterator;
+ friend class HInstructionIteratorHandleChanges;
friend class HBackwardInstructionIterator;
DISALLOW_COPY_AND_ASSIGN(HInstructionList);
@@ -2312,6 +2313,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
};
std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
+// Iterates over the instructions, while preserving the next instruction
+// in case the current instruction gets removed from the list by the user
+// of this iterator.
class HInstructionIterator : public ValueObject {
public:
explicit HInstructionIterator(const HInstructionList& instructions)
@@ -2333,6 +2337,28 @@ class HInstructionIterator : public ValueObject {
DISALLOW_COPY_AND_ASSIGN(HInstructionIterator);
};
+// Iterates over the instructions without saving the next instruction,
+// therefore handling changes in the graph potentially made by the user
+// of this iterator.
+class HInstructionIteratorHandleChanges : public ValueObject {
+ public:
+ explicit HInstructionIteratorHandleChanges(const HInstructionList& instructions)
+ : instruction_(instructions.first_instruction_) {
+ }
+
+ bool Done() const { return instruction_ == nullptr; }
+ HInstruction* Current() const { return instruction_; }
+ void Advance() {
+ instruction_ = instruction_->GetNext();
+ }
+
+ private:
+ HInstruction* instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstructionIteratorHandleChanges);
+};
+
+
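The two iterators differ only in whether Advance() reads a pointer cached before the loop body ran or chases the live list. A standalone sketch over a minimal singly linked list (hypothetical Node type, not the HInstruction list):

  struct Node { Node* next = nullptr; };

  // Mirrors HInstructionIterator: the next node is captured up front, so
  // the loop body may unlink the *current* node, but nodes inserted right
  // after it before Advance() are skipped.
  class CachingIterator {
   public:
    explicit CachingIterator(Node* first)
        : current_(first),
          next_(first != nullptr ? first->next : nullptr) {}
    bool Done() const { return current_ == nullptr; }
    Node* Current() const { return current_; }
    void Advance() {
      current_ = next_;
      next_ = (current_ != nullptr) ? current_->next : nullptr;
    }
   private:
    Node* current_;
    Node* next_;
  };

  // Mirrors HInstructionIteratorHandleChanges: Advance() reads the live
  // list, so insertions after the current node are visited, but the body
  // must not remove the current node itself.
  class LiveIterator {
   public:
    explicit LiveIterator(Node* first) : current_(first) {}
    bool Done() const { return current_ == nullptr; }
    Node* Current() const { return current_; }
    void Advance() { current_ = current_->next; }
   private:
    Node* current_;
  };
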
class HBackwardInstructionIterator : public ValueObject {
public:
explicit HBackwardInstructionIterator(const HInstructionList& instructions)
@@ -3748,10 +3774,9 @@ class HCompare FINAL : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
-class HNewInstance FINAL : public HExpression<2> {
+class HNewInstance FINAL : public HExpression<1> {
public:
HNewInstance(HInstruction* cls,
- HCurrentMethod* current_method,
uint32_t dex_pc,
dex::TypeIndex type_index,
const DexFile& dex_file,
@@ -3765,7 +3790,6 @@ class HNewInstance FINAL : public HExpression<2> {
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagFinalizable>(finalizable);
SetRawInputAt(0, cls);
- SetRawInputAt(1, current_method);
}
dex::TypeIndex GetTypeIndex() const { return type_index_; }
@@ -5056,60 +5080,62 @@ class HNullCheck FINAL : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HNullCheck);
};
+// Embeds an ArtField and all the information required by the compiler. We cache
+// that information to avoid requiring the mutator lock every time we need it.
class FieldInfo : public ValueObject {
public:
- FieldInfo(MemberOffset field_offset,
+ FieldInfo(ArtField* field,
+ MemberOffset field_offset,
Primitive::Type field_type,
bool is_volatile,
uint32_t index,
uint16_t declaring_class_def_index,
- const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
- : field_offset_(field_offset),
+ const DexFile& dex_file)
+ : field_(field),
+ field_offset_(field_offset),
field_type_(field_type),
is_volatile_(is_volatile),
index_(index),
declaring_class_def_index_(declaring_class_def_index),
- dex_file_(dex_file),
- dex_cache_(dex_cache) {}
+ dex_file_(dex_file) {}
+ ArtField* GetField() const { return field_; }
MemberOffset GetFieldOffset() const { return field_offset_; }
Primitive::Type GetFieldType() const { return field_type_; }
uint32_t GetFieldIndex() const { return index_; }
uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_;}
const DexFile& GetDexFile() const { return dex_file_; }
bool IsVolatile() const { return is_volatile_; }
- Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; }
private:
+ ArtField* const field_;
const MemberOffset field_offset_;
const Primitive::Type field_type_;
const bool is_volatile_;
const uint32_t index_;
const uint16_t declaring_class_def_index_;
const DexFile& dex_file_;
- const Handle<mirror::DexCache> dex_cache_;
};
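
Storing the ArtField pointer alongside the by-value attributes means the common queries stay lock-free; only code that dereferences the field would need the mutator lock. The cache-at-construction pattern, sketched with stand-in types:

  #include <cstdint>

  struct RuntimeField;  // stand-in for ArtField; dereferencing needs the lock

  class CachedFieldInfo {
   public:
    CachedFieldInfo(RuntimeField* field, uint32_t offset, bool is_volatile)
        : field_(field), offset_(offset), is_volatile_(is_volatile) {}

    // Lock-free: the answers were captured by value at construction time.
    uint32_t GetOffset() const { return offset_; }
    bool IsVolatile() const { return is_volatile_; }

    // Identity queries (pointer comparison) are also lock-free.
    RuntimeField* GetField() const { return field_; }

   private:
    RuntimeField* const field_;
    const uint32_t offset_;
    const bool is_volatile_;
  };
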
class HInstanceFieldGet FINAL : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetRawInputAt(0, value);
}
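
A hedged construction sketch with the new leading `ArtField*` argument (values illustrative; tests may pass `nullptr` for the field, as the register_allocator_test.cc hunk below does). `HInstanceFieldSet` and the static variants follow the same shape:

```cpp
HInstanceFieldGet* get = new (allocator) HInstanceFieldGet(
    object,                    // the receiver being read from
    art_field,                 // ArtField*; nullptr only in tests
    Primitive::kPrimInt,
    MemberOffset(42),
    /* is_volatile */ false,
    field_idx,
    class_def_index,
    dex_file,
    dex_pc);
```
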
@@ -5145,22 +5171,22 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
public:
HInstanceFieldSet(HInstruction* object,
HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
SetRawInputAt(0, object);
SetRawInputAt(1, value);
@@ -5761,39 +5787,31 @@ class HLoadString FINAL : public HInstruction {
uint32_t dex_pc)
: HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc),
special_input_(HUserRecord<HInstruction*>(current_method)),
- string_index_(string_index) {
+ string_index_(string_index),
+ dex_file_(dex_file) {
SetPackedField<LoadKindField>(LoadKind::kDexCacheViaMethod);
- load_data_.dex_file_ = &dex_file;
}
- void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
- DCHECK(HasAddress(load_kind));
- load_data_.address = address;
- SetLoadKindInternal(load_kind);
- }
-
- void SetLoadKindWithStringReference(LoadKind load_kind,
- const DexFile& dex_file,
- dex::StringIndex string_index) {
- DCHECK(HasStringReference(load_kind));
- load_data_.dex_file_ = &dex_file;
- string_index_ = string_index;
- SetLoadKindInternal(load_kind);
- }
+ void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
}
- const DexFile& GetDexFile() const;
+ const DexFile& GetDexFile() const {
+ return dex_file_;
+ }
dex::StringIndex GetStringIndex() const {
return string_index_;
}
- uint64_t GetAddress() const {
- DCHECK(HasAddress(GetLoadKind()));
- return load_data_.address;
+ Handle<mirror::String> GetString() const {
+ return string_;
+ }
+
+ void SetString(Handle<mirror::String> str) {
+ string_ = str;
}
bool CanBeMoved() const OVERRIDE { return true; }
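
With the union and the two kind-specific setters gone, the call sequence boils down to the sketch below, mirroring the sharpening.cc hunk at the end of this change (`handles`, `codegen`, `desired_load_kind` and `string` are assumed locals; `string` is the looked-up mirror::String):

```cpp
// The dex file and string index are always valid now, whatever the kind.
const DexFile& dex_file = load_string->GetDexFile();
dex::StringIndex string_index = load_string->GetStringIndex();

// The resolved string, when known, travels as a Handle rather than as a
// raw address stashed in the instruction.
if (string != nullptr) {
  load_string->SetString(handles->NewHandle(string));
}
load_string->SetLoadKind(codegen->GetSupportedLoadStringKind(desired_load_kind));
```
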
@@ -5848,18 +5866,6 @@ class HLoadString FINAL : public HInstruction {
static_assert(kNumberOfLoadStringPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
- static bool HasStringReference(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageLinkTimeAddress ||
- load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kBssEntry ||
- load_kind == LoadKind::kDexCacheViaMethod ||
- load_kind == LoadKind::kJitTableAddress;
- }
-
- static bool HasAddress(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageAddress;
- }
-
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod.
@@ -5867,26 +5873,16 @@ class HLoadString FINAL : public HInstruction {
// for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
- // String index serves also as the hash code and it's also needed for slow-paths,
- // so it must not be overwritten with other load data.
dex::StringIndex string_index_;
+ const DexFile& dex_file_;
- union {
- const DexFile* dex_file_; // For string reference.
- uint64_t address; // Up to 64-bit, needed for kDexCacheAddress on 64-bit targets.
- } load_data_;
+ Handle<mirror::String> string_;
DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs);
// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
-inline const DexFile& HLoadString::GetDexFile() const {
- DCHECK(HasStringReference(GetLoadKind())) << GetLoadKind();
- return *load_data_.dex_file_;
-}
-
-// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
@@ -5937,22 +5933,22 @@ class HClinitCheck FINAL : public HExpression<1> {
class HStaticFieldGet FINAL : public HExpression<1> {
public:
HStaticFieldGet(HInstruction* cls,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetRawInputAt(0, cls);
}
@@ -5985,22 +5981,22 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
public:
HStaticFieldSet(HInstruction* cls,
HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
SetRawInputAt(0, cls);
SetRawInputAt(1, value);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4bf5b080a7..297500b12f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1205,7 +1205,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
MaybeRecordStat(MethodCompilationStat::kCompiled);
codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
- codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data, dex_cache);
+ codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
const void* code = code_cache->CommitCode(
self,
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index f9ac3a0f72..db7c1fbb06 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -134,39 +134,6 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
}
}
-void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
- HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
- const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
- // Change the entrypoint to kQuickAllocObject if either:
- // - the class is finalizable (only kQuickAllocObject handles finalizable classes),
- // - the class needs access checks (we do not know if it's finalizable),
- // - or the load class has only one use.
- if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObject);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex().index_), 0);
- if (has_only_one_use) {
- // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass,
- // do it manually if possible.
- if (!load_class->CanThrow()) {
- // If the load class can not throw, it has no side effects and can be removed if there is
- // only one use.
- load_class->GetBlock()->RemoveInstruction(load_class);
- } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() &&
- CanMoveClinitCheck(load_class, instruction)) {
- // The allocation entry point that deals with access checks does not work with inlined
- // methods, so we need to check whether this allocation comes from an inlined method.
- // We also need to make the same check as for moving clinit check, whether the HLoadClass
- // has the clinit check responsibility or not (HLoadClass can throw anyway).
- // If it needed access checks, we delegate the access check to the allocation.
- if (load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
- }
- load_class->GetBlock()->RemoveInstruction(load_class);
- }
- }
- }
-}
-
bool PrepareForRegisterAllocation::CanEmitConditionAt(HCondition* condition,
HInstruction* user) const {
if (condition->GetNext() != user) {
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a6791482a7..c128227654 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -44,7 +44,6 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
void VisitCondition(HCondition* condition) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitNewInstance(HNewInstance* instruction) OVERRIDE;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 33b3875e3b..f8a4469712 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -76,6 +76,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
worklist_(worklist),
is_first_run_(is_first_run) {}
+ void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
@@ -151,38 +152,6 @@ void ReferenceTypePropagation::Visit(HInstruction* instruction) {
instruction->Accept(&visitor);
}
-void ReferenceTypePropagation::Run() {
- worklist_.reserve(kDefaultWorklistSize);
-
- // To properly propagate type info we need to visit in the dominator-based order.
- // Reverse post order guarantees a node's dominators are visited first.
- // We take advantage of this order in `VisitBasicBlock`.
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- VisitBasicBlock(block);
- }
-
- ProcessWorklist();
- ValidateTypes();
-}
-
-void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
- // Handle Phis first as there might be instructions in the same block who depend on them.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- VisitPhi(it.Current()->AsPhi());
- }
-
- // Handle instructions.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- instr->Accept(&visitor);
- }
-
- // Add extra nodes to bound types.
- BoundTypeForIfNotNull(block);
- BoundTypeForIfInstanceOf(block);
-}
-
// Check if we should create a bound type for the given object at the specified
// position. Because of inlining, and because we run RTP more than once, we
// might already have an HBoundType; if we do, we should not create a new one.
@@ -225,6 +194,153 @@ static bool ShouldCreateBoundType(HInstruction* position,
return false;
}
+// Helper method to bound the type of `receiver` for all instructions dominated
+// by `start_block`, or `start_instruction` if `start_block` is null. The new
+// bound type will have its upper bound be `class_rti`.
+static void BoundTypeIn(HInstruction* receiver,
+ HBasicBlock* start_block,
+ HInstruction* start_instruction,
+ const ReferenceTypeInfo& class_rti) {
+ // We only need to bound the type if we have uses in the relevant block.
+ // So start with null and create the HBoundType lazily, only if it's needed.
+ HBoundType* bound_type = nullptr;
+ DCHECK(!receiver->IsLoadClass()) << "We should not replace HLoadClass instructions";
+ const HUseList<HInstruction*>& uses = receiver->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
+ ++it;
+ bool dominates = (start_instruction != nullptr)
+ ? start_instruction->StrictlyDominates(user)
+ : start_block->Dominates(user->GetBlock());
+ if (!dominates) {
+ continue;
+ }
+ if (bound_type == nullptr) {
+ ScopedObjectAccess soa(Thread::Current());
+ HInstruction* insert_point = (start_instruction != nullptr)
+ ? start_instruction->GetNext()
+ : start_block->GetFirstInstruction();
+ if (ShouldCreateBoundType(
+ insert_point, receiver, class_rti, start_instruction, start_block)) {
+ bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver);
+ bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+ start_block->InsertInstructionBefore(bound_type, insert_point);
+ // To comply with the RTP algorithm, don't type the bound type just yet; it
+ // will be handled in RTPVisitor::VisitBoundType.
+ } else {
+ // We already have a bound type on the position we would need to insert
+ // the new one. The existing bound type should dominate all the users
+ // (dchecked) so there's no need to continue.
+ break;
+ }
+ }
+ user->ReplaceInput(bound_type, index);
+ }
+ // If the receiver is a null check, also bound the type of the actual
+ // receiver.
+ if (receiver->IsNullCheck()) {
+ BoundTypeIn(receiver->InputAt(0), start_block, start_instruction, class_rti);
+ }
+}
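
For reference, the two call shapes `BoundTypeIn` supports, as used by the rewritten helpers below (names illustrative):

```cpp
// 1. Block-based: bound the type in everything dominated by a block,
//    e.g. the true successor of an if.
BoundTypeIn(receiver, true_block, /* start_instruction */ nullptr, class_rti);

// 2. Instruction-based: bound the type in everything strictly dominated
//    by an instruction, e.g. the code following a deoptimizing check.
BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
```
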
+
+// Recognize the patterns:
+// if (obj.shadow$_klass_ == Foo.class) ...
+// deoptimize if (obj.shadow$_klass_ == Foo.class)
+static void BoundTypeForClassCheck(HInstruction* check) {
+ if (!check->IsIf() && !check->IsDeoptimize()) {
+ return;
+ }
+ HInstruction* compare = check->InputAt(0);
+ if (!compare->IsEqual() && !compare->IsNotEqual()) {
+ return;
+ }
+ HInstruction* input_one = compare->InputAt(0);
+ HInstruction* input_two = compare->InputAt(1);
+ HLoadClass* load_class = input_one->IsLoadClass()
+ ? input_one->AsLoadClass()
+ : input_two->AsLoadClass();
+ if (load_class == nullptr) {
+ return;
+ }
+
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ if (!class_rti.IsValid()) {
+ // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
+ }
+
+ HInstanceFieldGet* field_get = (load_class == input_one)
+ ? input_two->AsInstanceFieldGet()
+ : input_one->AsInstanceFieldGet();
+ if (field_get == nullptr) {
+ return;
+ }
+ HInstruction* receiver = field_get->InputAt(0);
+ ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo();
+ if (receiver_type.IsExact()) {
+ // If we already know the receiver type, don't bother updating its users.
+ return;
+ }
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
+ DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
+ if (field_get->GetFieldInfo().GetField() != field) {
+ return;
+ }
+ }
+
+ if (check->IsIf()) {
+ HBasicBlock* trueBlock = compare->IsEqual()
+ ? check->AsIf()->IfTrueSuccessor()
+ : check->AsIf()->IfFalseSuccessor();
+ BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+ } else {
+ DCHECK(check->IsDeoptimize());
+ if (compare->IsEqual()) {
+ BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
+ }
+ }
+}
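
To make the recognized shape concrete, a sketch of the IR pattern in comment form (names illustrative; `shadow$_klass_` is the hidden class field of `java.lang.Object` dchecked above):

```cpp
// klass = InstanceFieldGet(receiver)   // must read shadow$_klass_
// foo   = LoadClass(Foo)               // must have valid, resolved RTI
// cond  = Equal(klass, foo)            // or NotEqual
// If(cond) / Deoptimize(cond)          // `check` in the code above
// => wherever the compare is known true, `receiver` gets upper bound Foo.
```
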
+
+void ReferenceTypePropagation::Run() {
+ worklist_.reserve(kDefaultWorklistSize);
+
+ // To properly propagate type info we need to visit in the dominator-based order.
+ // Reverse post order guarantees a node's dominators are visited first.
+ // We take advantage of this order in `VisitBasicBlock`.
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ VisitBasicBlock(block);
+ }
+
+ ProcessWorklist();
+ ValidateTypes();
+}
+
+void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
+ RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ // Handle Phis first, as there might be instructions in the same block that depend on them.
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ VisitPhi(it.Current()->AsPhi());
+ }
+
+ // Handle instructions. Since RTP may add HBoundType instructions just after the
+ // last visited instruction, use the `HInstructionIteratorHandleChanges` iterator.
+ for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ instr->Accept(&visitor);
+ }
+
+ // Add extra nodes to bound types.
+ BoundTypeForIfNotNull(block);
+ BoundTypeForIfInstanceOf(block);
+ BoundTypeForClassCheck(block->GetLastInstruction());
+}
+
void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
HIf* ifInstruction = block->GetLastInstruction()->AsIf();
if (ifInstruction == nullptr) {
@@ -254,40 +370,14 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
// We only need to bound the type if we have uses in the relevant block.
// So start with null and create the HBoundType lazily, only if it's needed.
- HBoundType* bound_type = nullptr;
HBasicBlock* notNullBlock = ifInput->IsNotEqual()
? ifInstruction->IfTrueSuccessor()
: ifInstruction->IfFalseSuccessor();
- const HUseList<HInstruction*>& uses = obj->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
- ++it;
- if (notNullBlock->Dominates(user->GetBlock())) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HInstruction* insert_point = notNullBlock->GetFirstInstruction();
- ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
- if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(obj);
- bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false);
- if (obj->GetReferenceTypeInfo().IsValid()) {
- bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo());
- }
- notNullBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, index);
- }
- }
+ ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
+ handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
+
+ BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
}
// Returns true if one of the patterns below has been recognized. If so, the
@@ -378,15 +468,10 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- {
- if (!class_rti.IsValid()) {
- // He have loaded an unresolved class. Don't bother bounding the type.
- return;
- }
+ if (!class_rti.IsValid()) {
+ // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
}
- // We only need to bound the type if we have uses in the relevant block.
- // So start with null and create the HBoundType lazily, only if it's needed.
- HBoundType* bound_type = nullptr;
HInstruction* obj = instanceOf->InputAt(0);
if (obj->GetReferenceTypeInfo().IsExact() && !obj->IsPhi()) {
@@ -398,33 +483,14 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
// input.
return;
}
- DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions";
- const HUseList<HInstruction*>& uses = obj->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
- ++it;
- if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
- if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(obj);
- bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
- bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact),
- /* InstanceOf fails for null. */ false);
- instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, index);
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
+ class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
}
}
+ BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
}
void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -464,6 +530,10 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
}
}
+void ReferenceTypePropagation::RTPVisitor::VisitDeoptimize(HDeoptimize* instr) {
+ BoundTypeForClassCheck(instr);
+}
+
void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr,
dex::TypeIndex type_idx,
const DexFile& dex_file,
@@ -515,16 +585,9 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> klass;
- // The field index is unknown only during tests.
- if (info.GetFieldIndex() != kUnknownFieldIndex) {
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- ArtField* field = cl->GetResolvedField(info.GetFieldIndex(),
- MakeObjPtr(info.GetDexCache().Get()));
- // TODO: There are certain cases where we can't resolve the field.
- // b/21914925 is open to keep track of a repro case for this issue.
- if (field != nullptr) {
- klass = field->GetType<false>();
- }
+ // The field is unknown only during tests.
+ if (info.GetField() != nullptr) {
+ klass = info.GetField()->GetType<false>();
}
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 559f40923b..2227872f76 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -492,7 +492,6 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
HInstruction** input2) {
HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
- ScopedNullHandle<mirror::DexCache> dex_cache;
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(
@@ -504,13 +503,13 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
entry->AddSuccessor(block);
HInstruction* test = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(22),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
block->AddInstruction(test);
block->AddInstruction(new (allocator) HIf(test));
@@ -531,22 +530,22 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
*phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
join->AddPhi(*phi);
*input1 = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
*input2 = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
@@ -654,7 +653,6 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
HInstruction** field,
HInstruction** ret) {
HGraph* graph = CreateGraph(allocator);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -667,13 +665,13 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
entry->AddSuccessor(block);
*field = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
block->AddInstruction(*field);
*ret = new (allocator) HReturn(*field);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index ca26c30dcf..dc8ee23ba4 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -275,7 +275,6 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
dex::StringIndex string_index = load_string->GetStringIndex();
HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
- uint64_t address = 0u; // String or dex cache element address.
{
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -284,12 +283,13 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile())
? compilation_unit_.GetDexCache()
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
+ mirror::String* string = nullptr;
if (codegen_->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed, to ensure
// the string will be added to the boot image.
DCHECK(!runtime->UseJitCompilation());
- mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
+ string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
if (compiler_driver_->GetSupportBootImageFixup()) {
DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
@@ -303,43 +303,32 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache);
if (string != nullptr) {
if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(string);
} else {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
}
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache);
if (string != nullptr &&
runtime->GetHeap()->ObjectIsInBootImageSpace(string) &&
!codegen_->GetCompilerOptions().GetCompilePic()) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(string);
} else {
desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
}
+ if (string != nullptr) {
+ load_string->SetString(handles_->NewHandle(string));
+ }
}
HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind);
- switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBssEntry:
- case HLoadString::LoadKind::kDexCacheViaMethod:
- case HLoadString::LoadKind::kJitTableAddress:
- load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index);
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- DCHECK_NE(address, 0u);
- load_string->SetLoadKindWithAddress(load_kind, address);
- break;
- }
+ load_string->SetLoadKind(load_kind);
}
} // namespace art