Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc      101
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc    86
-rw-r--r--  compiler/optimizing/code_generator_mips.cc       131
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc     131
-rw-r--r--  compiler/optimizing/code_generator_x86.cc         17
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc      15
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc    240
-rw-r--r--  compiler/optimizing/induction_var_analysis.h      13
8 files changed, 280 insertions(+), 454 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a0cb43ee01..5054a299d3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -311,40 +311,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
LoadClassSlowPathARM64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- vixl::aarch64::Register bss_entry_temp = vixl::aarch64::Register(),
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr)
+ bool do_clinit)
: SlowPathCodeARM64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_entry_temp_(bss_entry_temp),
- bss_entry_adrp_label_(bss_entry_adrp_label) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0_ip0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, the page address of the entry is in a temp
- // register, make sure it's not clobbered by the call or by saving/restoring registers.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- if (is_load_class_bss_entry) {
- DCHECK(bss_entry_temp_.IsValid());
- DCHECK(!bss_entry_temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(
- !UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(bss_entry_temp_));
- }
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -363,26 +346,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- DCHECK(out.IsValid());
- const DexFile& dex_file = cls_->GetDexFile();
- if (call_saves_everything_except_r0_ip0) {
- // The class entry page address was preserved in bss_entry_temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the class entry page.
- bss_entry_adrp_label_ = arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
- arm64_codegen->EmitAdrpPlaceholder(bss_entry_adrp_label_, bss_entry_temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(bss_entry_temp_, /* offset placeholder */ 0));
- }
- }
__ B(GetExitLabel());
}
@@ -398,34 +361,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
// Whether to initialize the class.
const bool do_clinit_;
- // For HLoadClass/kBssEntry, the temp register and the label of the ADRP where it was loaded.
- vixl::aarch64::Register bss_entry_temp_;
- vixl::aarch64::Label* bss_entry_adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
public:
- LoadStringSlowPathARM64(HLoadString* instruction, Register temp, vixl::aarch64::Label* adrp_label)
- : SlowPathCodeARM64(instruction),
- temp_(temp),
- adrp_label_(adrp_label) {}
+ explicit LoadStringSlowPathARM64(HLoadString* instruction)
+ : SlowPathCodeARM64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // Make sure `temp_` is not clobbered by the call or by saving/restoring registers.
- DCHECK(temp_.IsValid());
- DCHECK(!temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(!UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(temp_));
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
@@ -435,33 +387,12 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- const DexFile& dex_file = instruction_->AsLoadString()->GetDexFile();
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // The string entry page address was preserved in temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry page.
- adrp_label_ = arm64_codegen->NewStringBssEntryPatch(dex_file, string_index);
- arm64_codegen->EmitAdrpPlaceholder(adrp_label_, temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewStringBssEntryPatch(dex_file, string_index, adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(temp_, /* offset placeholder */ 0));
- }
-
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
private:
- const Register temp_;
- vixl::aarch64::Label* adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};
@@ -4883,7 +4814,6 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -4910,8 +4840,6 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
- Register bss_entry_temp;
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr;
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
@@ -4975,16 +4903,16 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
// Add ADRP with its PC-relative Class .bss entry patch.
const DexFile& dex_file = cls->GetDexFile();
dex::TypeIndex type_index = cls->GetTypeIndex();
- bss_entry_temp = XRegisterFrom(cls->GetLocations()->GetTemp(0));
- bss_entry_adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
- codegen_->EmitAdrpPlaceholder(bss_entry_adrp_label, bss_entry_temp);
+ vixl::aarch64::Register temp = XRegisterFrom(out_loc);
+ vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
+ codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its PC-relative Class patch.
vixl::aarch64::Label* ldr_label =
- codegen_->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label);
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls,
out_loc,
- bss_entry_temp,
+ temp,
/* offset placeholder */ 0u,
ldr_label,
read_barrier_option);
@@ -5013,7 +4941,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
- cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
+ cls, cls, cls->GetDexPc(), do_clinit);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Cbz(out, slow_path->GetEntryLabel());
@@ -5078,7 +5006,6 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -5138,7 +5065,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
const DexFile& dex_file = load->GetDexFile();
const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- Register temp = XRegisterFrom(load->GetLocations()->GetTemp(0));
+ Register temp = XRegisterFrom(out_loc);
vixl::aarch64::Label* adrp_label = codegen_->NewStringBssEntryPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its .bss entry String patch.
@@ -5152,7 +5079,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
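The pattern above repeats in every backend below: the kBssEntry slow path no
longer stores the resolved Class/String back into the .bss slot, and the fixed
temp that kept the slot address live across the runtime call is gone; the fast
path simply reuses the output register as the address base. That is only sound
if the slot is now filled on the runtime side of kQuickResolveString /
kQuickInitializeType / kQuickInitializeStaticStorage, which is what the removed
code implies. A toy model of the resulting fast/slow-path split (a plain C++
sketch, not ART code; bss_slot, resolve_slow_path and the literal are
illustrative names, and the real slots hold linker-patched GC roots):

#include <cstdio>

static const char* bss_slot = nullptr;          // the .bss entry, initially null

static const char* resolve_slow_path() {        // kQuickResolveString analogue
  const char* s = "resolved";                   // resolve the object...
  bss_slot = s;                                 // ...and store it to the slot here,
  return s;                                     // so compiled code no longer needs
}                                               // the slot address after the call

static const char* load_string() {              // the inlined fast path
  const char* s = bss_slot;                     // ADRP+LDR from the .bss slot
  if (s == nullptr) {                           // CBZ -> slow path
    s = resolve_slow_path();
  }
  return s;
}

int main() {
  std::printf("%s\n", load_string());           // first call takes the slow path
  std::printf("%s\n", load_string());           // second call hits the slot
}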
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 9e7455d488..3f8f0c44f3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -532,29 +532,12 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- vixl32::Register entry_address;
- if (is_load_class_bss_entry && call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- bool temp_is_r0 = temp.Is(calling_convention.GetRegisterAt(0));
- entry_address = temp_is_r0 ? RegisterFrom(out) : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -566,22 +549,6 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- if (call_saves_everything_except_r0) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the class entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
- }
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -616,48 +583,17 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- vixl32::Register out = OutputRegister(load);
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- vixl32::Register entry_address;
- if (call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- bool temp_is_r0 = (temp.Is(calling_convention.GetRegisterAt(0)));
- entry_address = temp_is_r0 ? out : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
-
__ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved String to the .bss entry.
- if (call_saves_everything_except_r0) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
-
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
RestoreLiveRegisters(codegen, locations);
@@ -7104,9 +7040,6 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7189,13 +7122,10 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(cls, out_loc, temp, /* offset */ 0, read_barrier_option);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7296,9 +7226,6 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need, including temps.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7348,13 +7275,10 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
}
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ddec0cc453..d6922d2f3f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -220,13 +220,11 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
LoadClassSlowPathMIPS(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -234,28 +232,11 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<Register>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -267,18 +248,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -289,21 +258,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
@@ -319,92 +273,41 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};
class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit LoadStringSlowPathMIPS(HLoadString* instruction,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS(HLoadString* instruction)
+ : SlowPathCodeMIPS(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- Register out = locations->Out().AsRegister<Register>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
DataType::Type type = instruction_->GetType();
mips_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out, TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
};
@@ -7736,8 +7639,6 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -7786,7 +7687,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -7845,17 +7745,16 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -7887,7 +7786,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqz(out, slow_path->GetEntryLabel());
@@ -7960,8 +7859,6 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -8041,19 +7938,17 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
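The MIPS code above splits every .bss slot address into a high/low patch pair
(info_high/info_low) because a 32-bit address cannot be materialized in one
instruction; 0x5678 is merely the placeholder that the low-half relocation
overwrites (R6 uses PC-relative variants, but the two-part scheme is the same).
A runnable sketch of the standard %hi/%lo split this encodes, including the
+0x8000 carry adjustment needed because the low half acts as a signed 16-bit
offset (hi16/lo16 are illustrative names, not ART functions):

#include <cassert>
#include <cstdint>

uint32_t hi16(uint32_t addr) {                  // lui-style immediate (info_high)
  return (addr + 0x8000u) >> 16;                // +0x8000 compensates for the
}                                               // sign-extended low half
int32_t lo16(uint32_t addr) {                   // lw/sw offset (info_low)
  return static_cast<int16_t>(addr & 0xFFFFu);
}

int main() {
  for (uint32_t addr : {0x12345678u, 0x0000FFFFu, 0xFFFF8000u}) {
    uint32_t rebuilt = (hi16(addr) << 16) + static_cast<uint32_t>(lo16(addr));
    assert(rebuilt == addr);                    // lui + signed offset round-trips
  }
}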
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 0a6d9159d1..ee33b3f335 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -175,13 +175,11 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
LoadClassSlowPathMIPS64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -189,28 +187,11 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<GpuRegister>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -222,19 +203,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -245,17 +213,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out.AsRegister<GpuRegister>(), TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
@@ -271,46 +228,25 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};
class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit LoadStringSlowPathMIPS64(HLoadString* instruction,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS64(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
+ : SlowPathCodeMIPS64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
@@ -318,47 +254,18 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(),
- string_index,
- bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
DataType::Type type = instruction_->GetType();
mips64_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};
@@ -5979,8 +5886,6 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6014,7 +5919,6 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
DCHECK(!cls->CanCallRuntime());
@@ -6064,17 +5968,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, out);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -6098,7 +5999,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -6146,8 +6047,6 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6203,19 +6102,15 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ad0e71aaf4..2e8170ecc4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -240,13 +240,6 @@ class LoadStringSlowPathX86 : public SlowPathCode {
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
@@ -293,16 +286,6 @@ class LoadClassSlowPathX86 : public SlowPathCode {
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d64a49704e..e25688c9a3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -273,15 +273,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
@@ -323,12 +314,6 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
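The x86 backends never needed an extra temp for the deleted stores: x86-64
reached the slot RIP-relatively (Address::Absolute with no_rip = false) and
32-bit x86 anchored it on the method-address register in InAt(0), with
kDummy32BitOffset standing in for the displacement until the Label fixup
patches it. A toy model of that placeholder-then-patch mechanism (a hedged C++
sketch of the idea, not ART's assembler; the placeholder value is illustrative,
and the two bytes encode movl %eax, disp32(%ebx)):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  std::vector<uint8_t> code;
  const uint32_t kDummy32BitOffset = 0x12345678;   // placeholder (value illustrative)
  code.push_back(0x89);                            // MOV r/m32, r32 opcode
  code.push_back(0x83);                            // ModRM: disp32(%ebx), reg=%eax
  size_t fixup = code.size();                      // "Label": where to patch
  for (int i = 0; i < 4; ++i) {
    code.push_back(static_cast<uint8_t>(kDummy32BitOffset >> (8 * i)));
  }
  uint32_t real_offset = 0xCAFE;                   // known at link/patch time
  std::memcpy(code.data() + fixup, &real_offset, 4);  // apply the fixup
  uint32_t stored;
  std::memcpy(&stored, code.data() + fixup, 4);
  assert(stored == real_offset);
}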
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index ad29ba56ab..d270c6a28e 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -93,6 +93,136 @@ static DataType::Type ImplicitConversion(DataType::Type type) {
}
}
+/**
+ * Returns true if loop is guarded by "a cmp b" on entry.
+ */
+static bool IsGuardedBy(HLoopInformation* loop,
+ IfCondition cmp,
+ HInstruction* a,
+ HInstruction* b) {
+ // Chase back through straightline code to the first potential
+ // block that has a control dependence.
+ // guard: if (x) bypass
+ // |
+ // entry: straightline code
+ // |
+ // preheader
+ // |
+ // header
+ HBasicBlock* guard = loop->GetPreHeader();
+ HBasicBlock* entry = loop->GetHeader();
+ while (guard->GetPredecessors().size() == 1 &&
+ guard->GetSuccessors().size() == 1) {
+ entry = guard;
+ guard = guard->GetSinglePredecessor();
+ }
+ // Find guard.
+ HInstruction* control = guard->GetLastInstruction();
+ if (!control->IsIf()) {
+ return false;
+ }
+ HIf* ifs = control->AsIf();
+ HInstruction* if_expr = ifs->InputAt(0);
+ if (if_expr->IsCondition()) {
+ IfCondition other_cmp = ifs->IfTrueSuccessor() == entry
+ ? if_expr->AsCondition()->GetCondition()
+ : if_expr->AsCondition()->GetOppositeCondition();
+ if (if_expr->InputAt(0) == a && if_expr->InputAt(1) == b) {
+ return cmp == other_cmp;
+ } else if (if_expr->InputAt(1) == a && if_expr->InputAt(0) == b) {
+ switch (cmp) {
+ case kCondLT: return other_cmp == kCondGT;
+ case kCondLE: return other_cmp == kCondGE;
+ case kCondGT: return other_cmp == kCondLT;
+ case kCondGE: return other_cmp == kCondLE;
+ default: LOG(FATAL) << "unexpected cmp: " << cmp;
+ }
+ }
+ }
+ return false;
+}
+
+/** Finds first loop header phi use. */
+HInstruction* FindFirstLoopHeaderPhiUse(HLoopInformation* loop, HInstruction* instruction) {
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ if (use.GetUser()->GetBlock() == loop->GetHeader() &&
+ use.GetUser()->IsPhi() &&
+ use.GetUser()->InputAt(1) == instruction) {
+ return use.GetUser();
+ }
+ }
+ return nullptr;
+}
+
+/**
+ * Relinks the Phi structure after break-loop rewriting.
+ */
+bool FixOutsideUse(HLoopInformation* loop,
+ HInstruction* instruction,
+ HInstruction* replacement,
+ bool rewrite) {
+ // Deal with regular uses.
+ const HUseList<HInstruction*>& uses = instruction->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; ) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->ReplaceInput(replacement, index);
+ }
+ }
+ }
+ // Deal with environment uses.
+ const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
+ for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetHolder()->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->RemoveAsUserOfInput(index);
+ user->SetRawEnvAt(index, replacement);
+ replacement->AddEnvUseAt(user, index);
+ }
+ }
+ }
+ return true;
+}
+
+/**
+ * Test and rewrite the loop body of a break-loop. Returns true on success.
+ */
+bool RewriteBreakLoopBody(HLoopInformation* loop,
+ HBasicBlock* body,
+ HInstruction* cond,
+ HInstruction* index,
+ HInstruction* upper,
+ bool rewrite) {
+ // Deal with Phis. Outside use prohibited, except for index (which gets exit value).
+ for (HInstructionIterator it(loop->GetHeader()->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* exit_value = it.Current() == index ? upper : nullptr;
+ if (!FixOutsideUse(loop, it.Current(), exit_value, rewrite)) {
+ return false;
+ }
+ }
+ // Deal with other statements in header.
+ for (HInstruction* m = cond->GetPrevious(), *p = nullptr; m && !m->IsSuspendCheck(); m = p) {
+ p = m->GetPrevious();
+ if (rewrite) {
+ m->MoveBefore(body->GetFirstInstruction(), false);
+ }
+ if (!FixOutsideUse(loop, m, FindFirstLoopHeaderPhiUse(loop, m), rewrite)) {
+ return false;
+ }
+ }
+ return true;
+}
+
//
// Class methods.
//
@@ -754,6 +884,10 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveConversion(
return nullptr;
}
+//
+// Loop trip count analysis methods.
+//
+
void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
HInstruction* control = loop->GetHeader()->GetLastInstruction();
if (control->IsIf()) {
@@ -774,15 +908,16 @@ void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
if (a == nullptr || b == nullptr) {
return; // Loop control is not a sequence.
} else if (if_true->GetLoopInformation() != loop && if_false->GetLoopInformation() == loop) {
- VisitCondition(loop, a, b, type, condition->GetOppositeCondition());
+ VisitCondition(loop, if_false, a, b, type, condition->GetOppositeCondition());
} else if (if_true->GetLoopInformation() == loop && if_false->GetLoopInformation() != loop) {
- VisitCondition(loop, a, b, type, condition->GetCondition());
+ VisitCondition(loop, if_true, a, b, type, condition->GetCondition());
}
}
}
}
void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -790,11 +925,11 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
if (a->induction_class == kInvariant && b->induction_class == kLinear) {
// Swap condition if induction is at right-hand-side (e.g. U > i is same as i < U).
switch (cmp) {
- case kCondLT: VisitCondition(loop, b, a, type, kCondGT); break;
- case kCondLE: VisitCondition(loop, b, a, type, kCondGE); break;
- case kCondGT: VisitCondition(loop, b, a, type, kCondLT); break;
- case kCondGE: VisitCondition(loop, b, a, type, kCondLE); break;
- case kCondNE: VisitCondition(loop, b, a, type, kCondNE); break;
+ case kCondLT: VisitCondition(loop, body, b, a, type, kCondGT); break;
+ case kCondLE: VisitCondition(loop, body, b, a, type, kCondGE); break;
+ case kCondGT: VisitCondition(loop, body, b, a, type, kCondLT); break;
+ case kCondGE: VisitCondition(loop, body, b, a, type, kCondLE); break;
+ case kCondNE: VisitCondition(loop, body, b, a, type, kCondNE); break;
default: break;
}
} else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
@@ -802,24 +937,30 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
InductionInfo* lower_expr = a->op_b;
InductionInfo* upper_expr = b;
InductionInfo* stride_expr = a->op_a;
- // Constant stride?
+ // Test for constant stride and integral condition.
int64_t stride_value = 0;
if (!IsExact(stride_expr, &stride_value)) {
- return;
+ return; // unknown stride
+ } else if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
+ return; // not integral
}
- // Rewrite condition i != U into strict end condition i < U or i > U if this end condition
- // is reached exactly (tested by verifying if the loop has a unit stride and the non-strict
- // condition would be always taken).
+ // Since loops with an i != U condition will not be normalized by the method below, first
+ // try to rewrite a break-loop with terminating condition i != U into an equivalent loop
+ // with non-strict end condition i <= U or i >= U if such a rewriting is possible and safe.
+ if (cmp == kCondNE && RewriteBreakLoop(loop, body, stride_value, type)) {
+ cmp = stride_value > 0 ? kCondLE : kCondGE;
+ }
+ // If this rewriting failed, try to rewrite condition i != U into strict end condition i < U
+ // or i > U if this end condition is reached exactly (tested by verifying if the loop has a
+ // unit stride and the non-strict condition would be always taken).
if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLE)) ||
(stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
cmp = stride_value > 0 ? kCondLT : kCondGT;
}
- // Only accept integral condition. A mismatch between the type of condition and the induction
- // is only allowed if the, necessarily narrower, induction range fits the narrower control.
- if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
- return; // not integral
- } else if (type != a->type &&
- !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
+ // A mismatch between the type of condition and the induction is only allowed if the,
+ // necessarily narrower, induction range fits the narrower control.
+ if (type != a->type &&
+ !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
return; // mismatched type
}
// Normalize a linear loop control with a nonzero stride:
@@ -984,6 +1125,69 @@ bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr,
IsAtMost(upper_expr, &value) && value <= max;
}
+bool HInductionVarAnalysis::RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type) {
+ // Only accept unit stride.
+ if (std::abs(stride_value) != 1) {
+ return false;
+ }
+ // Simple terminating i != U condition, used nowhere else.
+ HIf* ifs = loop->GetHeader()->GetLastInstruction()->AsIf();
+ HInstruction* cond = ifs->InputAt(0);
+ if (ifs->GetPrevious() != cond || !cond->HasOnlyOneNonEnvironmentUse()) {
+ return false;
+ }
+ int c = LookupInfo(loop, cond->InputAt(0))->induction_class == kLinear ? 0 : 1;
+ HInstruction* index = cond->InputAt(c);
+ HInstruction* upper = cond->InputAt(1 - c);
+ // Safe to rewrite into i <= U?
+ IfCondition cmp = stride_value > 0 ? kCondLE : kCondGE;
+ if (!index->IsPhi() || !IsFinite(LookupInfo(loop, upper), stride_value, type, cmp)) {
+ return false;
+ }
+ // Body consists of update to index i only, used nowhere else.
+ if (body->GetSuccessors().size() != 1 ||
+ body->GetSingleSuccessor() != loop->GetHeader() ||
+ !body->GetPhis().IsEmpty() ||
+ body->GetInstructions().IsEmpty() ||
+ body->GetFirstInstruction() != index->InputAt(1) ||
+ !body->GetFirstInstruction()->HasOnlyOneNonEnvironmentUse() ||
+ !body->GetFirstInstruction()->GetNext()->IsGoto()) {
+ return false;
+ }
+ // Always taken or guarded by enclosing condition.
+ if (!IsTaken(LookupInfo(loop, index)->op_b, LookupInfo(loop, upper), cmp) &&
+ !IsGuardedBy(loop, cmp, index->InputAt(0), upper)) {
+ return false;
+ }
+ // Test if the break-loop body can be rewritten, and do so on success.
+ if (RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ false)) {
+ RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ true);
+ } else {
+ return false;
+ }
+ // Rewrite condition in HIR.
+ if (ifs->IfTrueSuccessor() != body) {
+ cmp = (cmp == kCondLE) ? kCondGT : kCondLT;
+ }
+ HInstruction* rep = nullptr;
+ switch (cmp) {
+ case kCondLT: rep = new (graph_->GetAllocator()) HLessThan(index, upper); break;
+ case kCondGT: rep = new (graph_->GetAllocator()) HGreaterThan(index, upper); break;
+ case kCondLE: rep = new (graph_->GetAllocator()) HLessThanOrEqual(index, upper); break;
+ case kCondGE: rep = new (graph_->GetAllocator()) HGreaterThanOrEqual(index, upper); break;
+ default: LOG(FATAL) << cmp; UNREACHABLE();
+ }
+ loop->GetHeader()->ReplaceAndRemoveInstructionWith(cond, rep);
+ return true;
+}
+
+//
+// Helper methods.
+//
+
void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
HInstruction* instruction,
InductionInfo* info) {
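At the source level, the new RewriteBreakLoop turns a loop whose only exit is
an i != U test into a counted loop with a non-strict bound. A minimal runnable
sketch of the before/after shapes (plain C++, not HIR; lo, hi and use are
illustrative): the rewritten body executes once more (i ends at hi + 1), which
is unobservable because the body is just the index update and FixOutsideUse
replaces every outside use of the index with the exit value, while IsFinite has
already ruled out an unreachable hi + 1 (e.g. hi == INT_MAX) for the <= form:

#include <cassert>

static int use(int v) { return v; }

static int original(int lo, int hi) {   // requires lo <= hi (IsTaken/IsGuardedBy)
  int i = lo;
  while (i != hi) {                     // break-loop: terminating i != U test
    i++;                                // body: sole update of the index
  }
  return use(i);                        // outside use observes i == hi
}

static int rewritten(int lo, int hi) {
  int i = lo;
  while (i <= hi) {                     // non-strict end condition i <= U
    i++;                                // runs once more; i ends at hi + 1
  }
  return use(hi);                       // outside uses of i replaced by U
}

int main() {
  for (int lo = 0; lo <= 3; ++lo) {
    for (int hi = lo; hi <= 5; ++hi) {
      assert(original(lo, hi) == rewritten(lo, hi));
    }
  }
}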
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 8737b890d9..acad77d35f 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -195,9 +195,14 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* entry_phi,
HTypeConversion* conversion);
+ //
+ // Loop trip count analysis methods.
+ //
+
// Trip count information.
void VisitControl(HLoopInformation* loop);
void VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -219,6 +224,14 @@ class HInductionVarAnalysis : public HOptimization {
int64_t stride_value,
DataType::Type type,
IfCondition cmp);
+ bool RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type);
+
+ //
+ // Helper methods.
+ //
// Assign and lookup.
void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
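Why RewriteBreakLoop also demands that the first test be provably taken
(IsTaken) or that the loop sit behind an enclosing guard (IsGuardedBy): when
lo > hi, the i != U and i <= U forms disagree, because the != loop wraps around
on overflow (well-defined for Java ints) while the <= loop exits immediately.
A runnable sketch at 8-bit width so the wrap-around stays short (32-bit behaves
the same, just with ~2^32 iterations; lo and hi are illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t lo = 3, hi = 1;                    // unguarded case: lo > hi
  unsigned trips_ne = 0, trips_le = 0;
  for (uint8_t i = lo; i != hi; ++i) ++trips_ne;   // wraps past 255: 254 trips
  for (uint8_t i = lo; i <= hi; ++i) ++trips_le;   // exits at once: 0 trips
  std::printf("i != U: %u trips, i <= U: %u trips\n", trips_ne, trips_le);
}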