Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm.cc    | 171
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  | 245
-rw-r--r--  compiler/optimizing/code_generator_mips.cc   | 233
-rw-r--r--  compiler/optimizing/code_generator_mips.h    |   4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc    | 196
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 200
-rw-r--r--  compiler/optimizing/common_arm64.h           |  74
-rw-r--r--  compiler/optimizing/sharpening.cc            |  10
8 files changed, 438 insertions(+), 695 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 5301a6bb3e..3cc2598f8f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4598,7 +4598,6 @@ void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4716,127 +4715,42 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
__ Bind(&non_zero);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorARM::GenerateReadBarrierSlow:
- //
- // __ Mov(temp2, temp1);
- // // /* HeapReference<Class> */ temp1 = temp1->component_type_
- // __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp1_loc, temp1_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = value->klass_
- // __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp1_loc);
- //
- // __ cmp(temp1, ShifterOperand(temp2));
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ b(slow_path->GetEntryLabel());
- } else {
- Register temp3 = IP;
- Location temp3_loc = Location::RegisterLocation(temp3);
-
- // Note: `temp3` (scratch register IP) cannot be used as
- // `ref` argument of GenerateFieldLoadWithBakerReadBarrier
- // calls below (see ReadBarrierMarkSlowPathARM for more
- // details).
-
- // /* HeapReference<Class> */ temp1 = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp1_loc,
- array,
- class_offset,
- temp3_loc,
- /* needs_null_check */ true);
-
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp1_loc,
- temp1,
- component_offset,
- temp3_loc,
- /* needs_null_check */ false);
- // Register `temp1` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such as `temp1`.
- // /* HeapReference<Class> */ temp2 = value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp2_loc,
- value,
- class_offset,
- temp3_loc,
- /* needs_null_check */ false);
- // If heap poisoning is enabled, `temp1` and `temp2` have
- // been unpoisoned by the previous calls to
- // CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier.
- __ cmp(temp1, ShifterOperand(temp2));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- Label do_put;
- __ b(&do_put, EQ);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp1` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards.
- // /* HeapReference<Class> */ temp1 = temp1->super_class_
- __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ b(slow_path->GetEntryLabel(), NE);
- }
- }
- } else {
- // Non read barrier code.
+ // Note that when read barriers are enabled, the type checks
+ // are performed without read barriers. This is fine, even in
+ // the case where a class object is in the from-space after
+ // the flip, as a comparison involving such a type would not
+ // produce a false positive; it may of course produce a false
+ // negative, in which case we would take the ArraySet slow
+ // path.
- // /* HeapReference<Class> */ temp1 = array->klass_
- __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // /* HeapReference<Class> */ temp1 = array->klass_
+ __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp1);
+
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ // /* HeapReference<Class> */ temp2 = value->klass_
+ __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
+ // If heap poisoning is enabled, no need to unpoison `temp1`
+ // nor `temp2`, as we are comparing two poisoned references.
+ __ cmp(temp1, ShifterOperand(temp2));
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ Label do_put;
+ __ b(&do_put, EQ);
+ // If heap poisoning is enabled, the `temp1` reference has
+ // not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
- // /* HeapReference<Class> */ temp2 = value->klass_
- __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
- // If heap poisoning is enabled, no need to unpoison `temp1`
- // nor `temp2`, as we are comparing two poisoned references.
- __ cmp(temp1, ShifterOperand(temp2));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- Label do_put;
- __ b(&do_put, EQ);
- // If heap poisoning is enabled, the `temp1` reference has
- // not been unpoisoned yet; unpoison it now.
- __ MaybeUnpoisonHeapReference(temp1);
-
- // /* HeapReference<Class> */ temp1 = temp1->super_class_
- __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
- // If heap poisoning is enabled, no need to unpoison
- // `temp1`, as we are comparing against null below.
- __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ b(slow_path->GetEntryLabel(), NE);
- }
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp1`, as we are comparing against null below.
+ __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ b(slow_path->GetEntryLabel(), NE);
}
}
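
The fast-path type check above — repeated in the arm64, x86 and x86-64 hunks below — reduces to a few dependent loads and one pointer comparison. A minimal standalone sketch of the logic the emitted assembly implements, using illustrative stand-in types rather than ART's mirror classes:

```cpp
// Sketch only: `Class` and `Object` stand in for ART's mirror types.
// Returning false corresponds to branching to the ArraySet slow path,
// which redoes the check exactly (with read barriers) in the runtime.
struct Class {
  Class* component_type_;
  Class* super_class_;  // null only for java.lang.Object
};
struct Object {
  Class* klass_;
};

bool FastPathStoreCheck(Object* array, Object* value,
                        bool static_type_is_object_array) {
  Class* component = array->klass_->component_type_;
  if (component == value->klass_) {
    return true;  // exact class match: the store is definitely safe
  }
  if (static_type_is_object_array && component->super_class_ == nullptr) {
    return true;  // component is java.lang.Object: anything may be stored
  }
  return false;   // possibly unsafe: take the slow path
}
```

With a concurrent-copying collector, either loaded class pointer may still be a from-space reference after the flip. Two stale pointers to the same class remain equal (no false positive), while a from-space/to-space mix merely fails the equality and falls into the slow path (a false negative) — which is exactly the argument the new comment makes.
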
@@ -5508,17 +5422,6 @@ void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
HLoadString::LoadKind CodeGeneratorARM::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadString::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_string_load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
DCHECK(!GetCompilerOptions().GetCompilePic());
@@ -5571,13 +5474,11 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
load->GetStringIndex()));
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
@@ -5589,7 +5490,6 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
DCHECK_NE(load->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -5845,7 +5745,6 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -5854,7 +5753,6 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -5864,9 +5762,6 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
- }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// Note that TypeCheckSlowPathARM uses this "temp" register too.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 36f7b4d914..179bf76f5b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -336,36 +336,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
-class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
- public:
- explicit LoadStringSlowPathARM64(HLoadString* instruction) : SlowPathCodeARM64(instruction) {}
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
-
- __ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
-
- InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
- __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
- arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- Primitive::Type type = instruction_->GetType();
- arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
-
- RestoreLiveRegisters(codegen, locations);
- __ B(GetExitLabel());
- }
-
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
-};
-
class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
@@ -2178,11 +2148,6 @@ void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
} else {
locations->SetInAt(2, Location::RequiresRegister());
}
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && (value_type == Primitive::kPrimNot)) {
- // Additional temporary registers for a Baker read barrier.
- locations->AddTemp(Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- }
}
void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
@@ -2269,144 +2234,44 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
__ Bind(&non_zero);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorARM64::GenerateReadBarrierSlow:
- //
- // __ Mov(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ Ldr(temp, HeapOperand(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = value->klass_
- // __ Ldr(temp2, HeapOperand(Register(value), class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp_loc);
- //
- // __ Cmp(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ B(slow_path->GetEntryLabel());
- } else {
- // Note that we cannot use `temps` (instance of VIXL's
- // UseScratchRegisterScope) to allocate `temp2` because
- // the Baker read barriers generated by
- // GenerateFieldLoadWithBakerReadBarrier below also use
- // that facility to allocate a temporary register, thus
- // making VIXL's scratch register pool empty.
- Location temp2_loc = locations->GetTemp(0);
- Register temp2 = WRegisterFrom(temp2_loc);
-
- // Note: Because it is acquired from VIXL's scratch register
- // pool, `temp` might be IP0, and thus cannot be used as
- // `ref` argument of GenerateFieldLoadWithBakerReadBarrier
- // calls below (see ReadBarrierMarkSlowPathARM64 for more
- // details).
-
- // /* HeapReference<Class> */ temp2 = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp2_loc,
- array,
- class_offset,
- temp,
- /* needs_null_check */ true,
- /* use_load_acquire */ false);
-
- // /* HeapReference<Class> */ temp2 = temp2->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp2_loc,
- temp2,
- component_offset,
- temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
- // For the same reason that we request `temp2` from the
- // register allocator above, we cannot get `temp3` from
- // VIXL's scratch register pool.
- Location temp3_loc = locations->GetTemp(1);
- Register temp3 = WRegisterFrom(temp3_loc);
- // Register `temp2` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such as `temp2`.
- // /* HeapReference<Class> */ temp3 = register_value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp3_loc,
- value.W(),
- class_offset,
- temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
- // If heap poisoning is enabled, `temp2` and `temp3` have
- // been unpoisoned by the previous calls to
- // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
- __ Cmp(temp2, temp3);
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- vixl::aarch64::Label do_put;
- __ B(eq, &do_put);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp2` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards.
- // /* HeapReference<Class> */ temp = temp2->super_class_
- __ Ldr(temp, HeapOperand(temp2, super_offset));
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ Cbnz(temp, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ B(ne, slow_path->GetEntryLabel());
- }
- }
- } else {
- // Non read barrier code.
+ // Note that when Baker read barriers are enabled, the type
+ // checks are performed without read barriers. This is fine,
+ // even in the case where a class object is in the from-space
+ // after the flip, as a comparison involving such a type would
+ // not produce a false positive; it may of course produce a
+ // false negative, in which case we would take the ArraySet
+ // slow path.
- Register temp2 = temps.AcquireSameSizeAs(array);
- // /* HeapReference<Class> */ temp = array->klass_
- __ Ldr(temp, HeapOperand(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ Register temp2 = temps.AcquireSameSizeAs(array);
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ Ldr(temp, HeapOperand(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
+
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ Ldr(temp, HeapOperand(temp, component_offset));
+ // /* HeapReference<Class> */ temp2 = value->klass_
+ __ Ldr(temp2, HeapOperand(Register(value), class_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor `temp2`, as we are comparing two poisoned references.
+ __ Cmp(temp, temp2);
+ temps.Release(temp2);
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ vixl::aarch64::Label do_put;
+ __ B(eq, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
GetAssembler()->MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->component_type_
- __ Ldr(temp, HeapOperand(temp, component_offset));
- // /* HeapReference<Class> */ temp2 = value->klass_
- __ Ldr(temp2, HeapOperand(Register(value), class_offset));
- // If heap poisoning is enabled, no need to unpoison `temp`
- // nor `temp2`, as we are comparing two poisoned references.
- __ Cmp(temp, temp2);
- temps.Release(temp2);
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- vixl::aarch64::Label do_put;
- __ B(eq, &do_put);
- // If heap poisoning is enabled, the `temp` reference has
- // not been unpoisoned yet; unpoison it now.
- GetAssembler()->MaybeUnpoisonHeapReference(temp);
-
- // /* HeapReference<Class> */ temp = temp->super_class_
- __ Ldr(temp, HeapOperand(temp, super_offset));
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ Cbnz(temp, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ B(ne, slow_path->GetEntryLabel());
- }
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ Ldr(temp, HeapOperand(temp, super_offset));
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp`, as we are comparing against null below.
+ __ Cbnz(temp, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ B(ne, slow_path->GetEntryLabel());
}
}
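
The restored arm64 code can again lean on VIXL's scratch-register pool for `temp2` (the deleted comments explain why the Baker-barrier variant could not: the barrier emission itself drained the pool). A sketch of the facility, assuming the vixl::aarch64 API used throughout this file:

```cpp
#include "aarch64/macro-assembler-aarch64.h"

// UseScratchRegisterScope hands out registers from the MacroAssembler's
// scratch pool (ip0/ip1 by default) and returns them when the scope ends;
// Release() just gives one back early, as VisitArraySet does above.
void ScratchRegisterSketch(vixl::aarch64::MacroAssembler* masm,
                           const vixl::aarch64::Register& array) {
  vixl::aarch64::UseScratchRegisterScope temps(masm);
  vixl::aarch64::Register temp2 = temps.AcquireSameSizeAs(array);
  // ... emit code that uses temp2 as a short-lived temporary ...
  temps.Release(temp2);
}
```
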
@@ -3385,7 +3250,6 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -3394,7 +3258,6 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -3404,9 +3267,6 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
- }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// Note that TypeCheckSlowPathARM64 uses this "temp" register too.
@@ -4259,17 +4119,6 @@ void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear A
HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadString::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_string_load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
DCHECK(!GetCompilerOptions().GetCompilePic());
@@ -4292,18 +4141,17 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
}
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
+ LocationSummary::CallKind call_kind = load->NeedsEnvironment()
+ ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
- if (kUseBakerReadBarrier && !load->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
- }
-
if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
locations->SetInAt(0, Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
+ } else {
+ locations->SetOut(Location::RequiresRegister());
}
- locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
@@ -4311,12 +4159,10 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
__ Ldr(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
load->GetStringIndex()));
return; // No dex cache slow path.
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
uint32_t string_index = load->GetStringIndex();
@@ -4337,7 +4183,6 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
DCHECK(load->GetAddress() != 0u && IsUint<32>(load->GetAddress()));
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress()));
return; // No dex cache slow path.
@@ -4347,10 +4192,10 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
- codegen_->AddSlowPath(slow_path);
- __ B(slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex());
+ codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
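
The deleted LoadStringSlowPathARM64 (and its x86/x86-64 counterparts below) is replaced by this main-path runtime call, which is why the locations code above pins the output to the calling convention's return register and uses kCallOnMainOnly. A hedged sketch of the call contract — simplified, since the real quick entrypoint also receives the current Thread*:

```cpp
#include <cstdint>

// CheckEntrypointTypes<kQuickResolveString, void*, uint32_t> statically
// asserts this shape: a uint32_t string index in, a pointer result out.
using ResolveStringEntrypoint = void* (*)(uint32_t string_index);

// The generated code moves the index into the first argument register and
// calls the entrypoint; the resolved java.lang.String* comes back in the
// return register, which is exactly the HLoadString output location, so
// no extra move is needed afterwards.
void* LoadStringViaRuntime(ResolveStringEntrypoint resolve, uint32_t index) {
  return resolve(index);
}
```

Declaring the call kCallOnMainOnly also drops the SaveLiveRegisters/RestoreLiveRegisters pair the slow path needed: the register allocator now treats the instruction itself as a call site.
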
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 92e9cd9067..f07f8a0d91 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2378,13 +2378,8 @@ void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- // TODO: don't use branches.
- GenerateFpCompareAndBranch(instruction->GetCondition(),
- instruction->IsGtBias(),
- type,
- locations,
- &true_label);
- break;
+ GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
+ return;
}
// Convert the branches into the result.
@@ -3177,6 +3172,230 @@ void InstructionCodeGeneratorMIPS::GenerateLongCompareAndBranch(IfCondition cond
}
}
+void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* locations) {
+ Register dst = locations->Out().AsRegister<Register>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+ if (type == Primitive::kPrimFloat) {
+ if (isR6) {
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqS(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondNE:
+ __ CmpEqS(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Addiu(dst, dst, 1);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtS(FTMP, lhs, rhs);
+ } else {
+ __ CmpUltS(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeS(FTMP, lhs, rhs);
+ } else {
+ __ CmpUleS(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltS(FTMP, rhs, lhs);
+ } else {
+ __ CmpLtS(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleS(FTMP, rhs, lhs);
+ } else {
+ __ CmpLeS(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+ UNREACHABLE();
+ }
+ } else {
+ switch (cond) {
+ case kCondEQ:
+ __ CeqS(0, lhs, rhs);
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondNE:
+ __ CeqS(0, lhs, rhs);
+ __ LoadConst32(dst, 1);
+ __ Movt(dst, ZERO, 0);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ ColtS(0, lhs, rhs);
+ } else {
+ __ CultS(0, lhs, rhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ ColeS(0, lhs, rhs);
+ } else {
+ __ CuleS(0, lhs, rhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CultS(0, rhs, lhs);
+ } else {
+ __ ColtS(0, rhs, lhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CuleS(0, rhs, lhs);
+ } else {
+ __ ColeS(0, rhs, lhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+ UNREACHABLE();
+ }
+ }
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ if (isR6) {
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqD(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondNE:
+ __ CmpEqD(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Addiu(dst, dst, 1);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtD(FTMP, lhs, rhs);
+ } else {
+ __ CmpUltD(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeD(FTMP, lhs, rhs);
+ } else {
+ __ CmpUleD(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltD(FTMP, rhs, lhs);
+ } else {
+ __ CmpLtD(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleD(FTMP, rhs, lhs);
+ } else {
+ __ CmpLeD(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+ UNREACHABLE();
+ }
+ } else {
+ switch (cond) {
+ case kCondEQ:
+ __ CeqD(0, lhs, rhs);
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondNE:
+ __ CeqD(0, lhs, rhs);
+ __ LoadConst32(dst, 1);
+ __ Movt(dst, ZERO, 0);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ ColtD(0, lhs, rhs);
+ } else {
+ __ CultD(0, lhs, rhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ ColeD(0, lhs, rhs);
+ } else {
+ __ CuleD(0, lhs, rhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CultD(0, rhs, lhs);
+ } else {
+ __ ColtD(0, rhs, lhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CuleD(0, rhs, lhs);
+ } else {
+ __ ColeD(0, rhs, lhs);
+ }
+ __ LoadConst32(dst, 1);
+ __ Movf(dst, ZERO, 0);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+ UNREACHABLE();
+ }
+ }
+ }
+}
+
void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond,
bool gt_bias,
Primitive::Type type,
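
The R6 arms of GenerateFpCompare rely on cmp.cond.fmt writing all ones to the destination FPR when the predicate holds and all zeros otherwise; after Mfc1, `Andi dst, dst, 1` materializes 1/0 directly, and kCondNE reuses the equality compare and flips it with `Addiu dst, dst, 1`, since 0xFFFFFFFF + 1 wraps to 0. (The gt_bias flag picks the ordered CmpLt/CmpLe versus unordered CmpUlt/CmpUle predicates, so a NaN operand lands on the side the bytecode's comparison bias requires.) A self-contained model of the bit trick:

```cpp
#include <cassert>
#include <cstdint>

// Models the GPR value after CmpEqS/CmpEqD + Mfc1 on MIPS R6: all ones
// when the comparison held, all zeros otherwise.
uint32_t AfterMfc1(bool cmp_held) { return cmp_held ? 0xFFFFFFFFu : 0u; }

int main() {
  // kCondEQ: Andi dst, dst, 1 keeps only the low bit -> 1 or 0.
  assert((AfterMfc1(true) & 1u) == 1u);
  assert((AfterMfc1(false) & 1u) == 0u);
  // kCondNE: Addiu dst, dst, 1 maps all-ones (-1) to 0 and 0 to 1.
  assert(AfterMfc1(true) + 1u == 0u);
  assert(AfterMfc1(false) + 1u == 1u);
  return 0;
}
```

The pre-R6 arms use the classic c.cond.fmt / movf / movt idiom on condition flag 0 instead: load the constant 1, then conditionally overwrite it with ZERO depending on the flag.
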
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 7ba6c0da0c..003998129e 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -243,6 +243,10 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
void GenerateLongCompareAndBranch(IfCondition cond,
LocationSummary* locations,
MipsLabel* label);
+ void GenerateFpCompare(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* locations);
void GenerateFpCompareAndBranch(IfCondition cond,
bool gt_bias,
Primitive::Type type,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4689ccb05c..e18b366411 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -213,35 +213,6 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
};
-class LoadStringSlowPathX86 : public SlowPathCode {
- public:
- explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {}
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
-
- CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
- __ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
-
- InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
- __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
- x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
- RestoreLiveRegisters(codegen, locations);
-
- __ jmp(GetExitLabel());
- }
-
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
-};
-
class LoadClassSlowPathX86 : public SlowPathCode {
public:
LoadClassSlowPathX86(HLoadClass* cls,
@@ -5238,7 +5209,6 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
// Ensure the card is in a byte register.
locations->AddTemp(Location::RegisterLocation(ECX));
@@ -5328,105 +5298,40 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
__ Bind(&not_null);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorX86::GenerateReadBarrierSlow:
- //
- // __ movl(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = register_value->klass_
- // __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
- //
- // __ cmpl(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ jmp(slow_path->GetEntryLabel());
- } else {
- Location temp2_loc = locations->GetTemp(1);
- Register temp2 = temp2_loc.AsRegister<Register>();
- // /* HeapReference<Class> */ temp = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, array, class_offset, /* needs_null_check */ true);
-
- // /* HeapReference<Class> */ temp = temp->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, temp, component_offset, /* needs_null_check */ false);
- // Register `temp` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such as `temp`.
- // /* HeapReference<Class> */ temp2 = register_value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp2_loc, register_value, class_offset, /* needs_null_check */ false);
- // If heap poisoning is enabled, `temp` and `temp2` have
- // been unpoisoned by the previous calls to
- // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier.
- __ cmpl(temp, temp2);
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards. Also, if heap poisoning is
- // enabled, there is no need to unpoison that heap
- // reference for the same reason (comparison with null).
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
- }
- } else {
- // Non read barrier code.
+ // Note that when Baker read barriers are enabled, the type
+ // checks are performed without read barriers. This is fine,
+ // even in the case where a class object is in the from-space
+ // after the flip, as a comparison involving such a type would
+ // not produce a false positive; it may of course produce a
+ // false negative, in which case we would take the ArraySet
+ // slow path.
- // /* HeapReference<Class> */ temp = array->klass_
- __ movl(temp, Address(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
+
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ movl(temp, Address(temp, component_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor the object reference in `register_value->klass`, as
+ // we are comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->component_type_
- __ movl(temp, Address(temp, component_offset));
- // If heap poisoning is enabled, no need to unpoison `temp`
- // nor the object reference in `register_value->klass`, as
- // we are comparing two poisoned references.
- __ cmpl(temp, Address(register_value, class_offset));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // If heap poisoning is enabled, the `temp` reference has
- // not been unpoisoned yet; unpoison it now.
- __ MaybeUnpoisonHeapReference(temp);
-
- // If heap poisoning is enabled, no need to unpoison the
- // heap reference loaded below, as it is only used for a
- // comparison with null.
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
+ // If heap poisoning is enabled, no need to unpoison the
+ // heap reference loaded below, as it is only used for a
+ // comparison with null.
+ __ cmpl(Address(temp, super_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
}
}
@@ -6160,17 +6065,6 @@ void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadString::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_string_load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
DCHECK(!GetCompilerOptions().GetCompilePic());
@@ -6201,20 +6095,20 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
+ ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
- if (kUseBakerReadBarrier && !load->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
- }
-
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod ||
load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
load_kind == HLoadString::LoadKind::kDexCachePcRelative) {
locations->SetInAt(0, Location::RequiresRegister());
}
- locations->SetOut(Location::RequiresRegister());
+ if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+ locations->SetOut(Location::RegisterLocation(EAX));
+ } else {
+ locations->SetOut(Location::RequiresRegister());
+ }
}
void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
@@ -6224,20 +6118,17 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
__ movl(out, Immediate(/* placeholder */ 0));
codegen_->RecordStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
codegen_->RecordStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
DCHECK_NE(load->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
__ movl(out, Immediate(address));
@@ -6249,10 +6140,10 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
- codegen_->AddSlowPath(slow_path);
- __ jmp(slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(load->GetStringIndex()));
+ codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
static Address GetExceptionTlsAddress() {
@@ -6518,7 +6409,6 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -6527,7 +6417,6 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -6536,9 +6425,6 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
break;
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
- }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
// Note that TypeCheckSlowPathX86 uses this "temp" register too.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a21a09ee8a..15307fe50c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -288,37 +288,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
};
-class LoadStringSlowPathX86_64 : public SlowPathCode {
- public:
- explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
-
- CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
- __ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
-
- InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
- __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(string_index));
- x86_64_codegen->InvokeRuntime(kQuickResolveString,
- instruction_,
- instruction_->GetDexPc(),
- this);
- CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
- RestoreLiveRegisters(codegen, locations);
- __ jmp(GetExitLabel());
- }
-
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
-};
-
class TypeCheckSlowPathX86_64 : public SlowPathCode {
public:
TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
@@ -4732,7 +4701,6 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4822,105 +4790,40 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
__ Bind(&not_null);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorX86_64::GenerateReadBarrierSlow:
- //
- // __ movl(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = register_value->klass_
- // __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
- //
- // __ cmpl(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ jmp(slow_path->GetEntryLabel());
- } else {
- Location temp2_loc = locations->GetTemp(1);
- CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
- // /* HeapReference<Class> */ temp = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, array, class_offset, /* needs_null_check */ true);
-
- // /* HeapReference<Class> */ temp = temp->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, temp, component_offset, /* needs_null_check */ false);
- // Register `temp` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such as `temp`.
- // /* HeapReference<Class> */ temp2 = register_value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp2_loc, register_value, class_offset, /* needs_null_check */ false);
- // If heap poisoning is enabled, `temp` and `temp2` have
- // been unpoisoned by the previous calls to
- // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier.
- __ cmpl(temp, temp2);
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards. Also, if heap poisoning is
- // enabled, there is no need to unpoison that heap
- // reference for the same reason (comparison with null).
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
- }
- } else {
- // Non read barrier code.
+ // Note that when Baker read barriers are enabled, the type
+ // checks are performed without read barriers. This is fine,
+ // even in the case where a class object is in the from-space
+ // after the flip, as a comparison involving such a type would
+ // not produce a false positive; it may of course produce a
+ // false negative, in which case we would take the ArraySet
+ // slow path.
- // /* HeapReference<Class> */ temp = array->klass_
- __ movl(temp, Address(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
+
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ movl(temp, Address(temp, component_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor the object reference in `register_value->klass`, as
+ // we are comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->component_type_
- __ movl(temp, Address(temp, component_offset));
- // If heap poisoning is enabled, no need to unpoison `temp`
- // nor the object reference in `register_value->klass`, as
- // we are comparing two poisoned references.
- __ cmpl(temp, Address(register_value, class_offset));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // If heap poisoning is enabled, the `temp` reference has
- // not been unpoisoned yet; unpoison it now.
- __ MaybeUnpoisonHeapReference(temp);
-
- // If heap poisoning is enabled, no need to unpoison the
- // heap reference loaded below, as it is only used for a
- // comparison with null.
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
+ // If heap poisoning is enabled, no need to unpoison the
+ // heap reference loaded below, as it is only used for a
+ // comparison with null.
+ __ cmpl(Address(temp, super_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
}
}
@@ -5592,17 +5495,6 @@ void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadString::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_string_load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
DCHECK(!GetCompilerOptions().GetCompilePic());
@@ -5626,18 +5518,16 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
}
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
+ LocationSummary::CallKind call_kind = load->NeedsEnvironment()
+ ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
- if (kUseBakerReadBarrier && !load->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
- }
-
if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RegisterLocation(RAX));
+ } else {
+ locations->SetOut(Location::RequiresRegister());
}
- locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
@@ -5647,13 +5537,11 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
DCHECK_NE(load->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
__ movl(out, Immediate(address)); // Zero-extended.
@@ -5665,10 +5553,13 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
- codegen_->AddSlowPath(slow_path);
- __ jmp(slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
+ Immediate(load->GetStringIndex()));
+ codegen_->InvokeRuntime(kQuickResolveString,
+ load,
+ load->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
static Address GetExceptionTlsAddress() {
@@ -5940,7 +5831,6 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -5949,7 +5839,6 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -5958,9 +5847,6 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
break;
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
- }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
// Note that TypeCheckSlowPathX86_64 uses this "temp" register too.
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index cea4a7e1a6..eda0971ecc 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -38,7 +38,7 @@ namespace helpers {
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
"Unexpected values for register codes.");
-static inline int VIXLRegCodeFromART(int code) {
+inline int VIXLRegCodeFromART(int code) {
if (code == SP) {
return vixl::aarch64::kSPRegInternalCode;
}
@@ -48,7 +48,7 @@ static inline int VIXLRegCodeFromART(int code) {
return code;
}
-static inline int ARTRegCodeFromVIXL(int code) {
+inline int ARTRegCodeFromVIXL(int code) {
if (code == vixl::aarch64::kSPRegInternalCode) {
return SP;
}
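
The rest of this file is a mechanical s/static inline/inline/ pass. In a header included from many translation units, `static inline` gives each TU its own internal-linkage copy of the function (plus -Wunused-function noise in TUs that never call it); plain `inline` has external linkage, and the one-definition rule lets the linker fold the duplicate definitions into a single entity. A two-line illustration:

```cpp
// common.h -- included from many .cc files
inline int Twice(int x) { return 2 * x; }          // external linkage: all
                                                   // definitions merge into one
static inline int Thrice(int x) { return 3 * x; }  // internal linkage: a
                                                   // separate copy per TU
```
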
@@ -58,85 +58,85 @@ static inline int ARTRegCodeFromVIXL(int code) {
return code;
}
-static inline vixl::aarch64::Register XRegisterFrom(Location location) {
+inline vixl::aarch64::Register XRegisterFrom(Location location) {
DCHECK(location.IsRegister()) << location;
return vixl::aarch64::Register::GetXRegFromCode(VIXLRegCodeFromART(location.reg()));
}
-static inline vixl::aarch64::Register WRegisterFrom(Location location) {
+inline vixl::aarch64::Register WRegisterFrom(Location location) {
DCHECK(location.IsRegister()) << location;
return vixl::aarch64::Register::GetWRegFromCode(VIXLRegCodeFromART(location.reg()));
}
-static inline vixl::aarch64::Register RegisterFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::Register RegisterFrom(Location location, Primitive::Type type) {
DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type;
return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
}
-static inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
+inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}
-static inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
return RegisterFrom(instr->GetLocations()->InAt(input_index),
instr->InputAt(input_index)->GetType());
}
-static inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
+inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
DCHECK(location.IsFpuRegister()) << location;
return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
}
-static inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
+inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
DCHECK(location.IsFpuRegister()) << location;
return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
}
-static inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
DCHECK(Primitive::IsFloatingPointType(type)) << type;
return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
}
-static inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
+inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}
-static inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
instr->InputAt(input_index)->GetType());
}
-static inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
return Primitive::IsFloatingPointType(type)
? vixl::aarch64::CPURegister(FPRegisterFrom(location, type))
: vixl::aarch64::CPURegister(RegisterFrom(location, type));
}
-static inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
+inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
return Primitive::IsFloatingPointType(instr->GetType())
? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr))
: static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr));
}
-static inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
+inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
return Primitive::IsFloatingPointType(instr->InputAt(index)->GetType())
? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index))
: static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index));
}
-static inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr,
+inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr,
int index) {
HInstruction* input = instr->InputAt(index);
Primitive::Type input_type = input->GetType();
if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
return (Primitive::ComponentSize(input_type) >= vixl::aarch64::kXRegSizeInBytes)
- ? vixl::aarch64::xzr
+ ? vixl::aarch64::xzr
: vixl::aarch64::wzr;
}
return InputCPURegisterAt(instr, index);
}
-static inline int64_t Int64ConstantFrom(Location location) {
+inline int64_t Int64ConstantFrom(Location location) {
HConstant* instr = location.GetConstant();
if (instr->IsIntConstant()) {
return instr->AsIntConstant()->GetValue();
@@ -148,7 +148,7 @@ static inline int64_t Int64ConstantFrom(Location location) {
}
}
-static inline vixl::aarch64::Operand OperandFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::Operand OperandFrom(Location location, Primitive::Type type) {
if (location.IsRegister()) {
return vixl::aarch64::Operand(RegisterFrom(location, type));
} else {
@@ -156,23 +156,23 @@ static inline vixl::aarch64::Operand OperandFrom(Location location, Primitive::T
}
}
-static inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
return OperandFrom(instr->GetLocations()->InAt(input_index),
instr->InputAt(input_index)->GetType());
}
-static inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
+inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex());
}
-static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
size_t offset = 0) {
// A heap reference must be 32 bits, so it fits in a W register.
DCHECK(base.IsW());
return vixl::aarch64::MemOperand(base.X(), offset);
}
-static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
const vixl::aarch64::Register& regoffset,
vixl::aarch64::Shift shift = vixl::aarch64::LSL,
unsigned shift_amount = 0) {
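Both HeapOperand overloads above rest on the same invariant: on arm64, ART heap references are 32-bit values, so they are carried in W registers, while the address arithmetic for the actual memory access uses the 64-bit X view of the same register, hence the DCHECK(base.IsW()) followed by base.X(). A hedged plain-C++ sketch of that widening, not ART code:

#include <cstdint>

// Widen the 32-bit reference before adding the field offset; this mirrors
// building MemOperand(base.X(), offset) from a W-sized base register.
inline uint64_t HeapFieldAddress(uint32_t object_ref, uint64_t field_offset) {
  return static_cast<uint64_t>(object_ref) + field_offset;
}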
@@ -181,24 +181,24 @@ static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Registe
return vixl::aarch64::MemOperand(base.X(), regoffset, shift, shift_amount);
}
-static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
Offset offset) {
return HeapOperand(base, offset.SizeValue());
}
-static inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
+inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
}
-static inline Location LocationFrom(const vixl::aarch64::Register& reg) {
+inline Location LocationFrom(const vixl::aarch64::Register& reg) {
return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
}
-static inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
+inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
return Location::FpuRegisterLocation(fpreg.GetCode());
}
-static inline vixl::aarch64::Operand OperandFromMemOperand(
+inline vixl::aarch64::Operand OperandFromMemOperand(
const vixl::aarch64::MemOperand& mem_op) {
if (mem_op.IsImmediateOffset()) {
return vixl::aarch64::Operand(mem_op.GetOffset());
@@ -219,7 +219,7 @@ static inline vixl::aarch64::Operand OperandFromMemOperand(
}
}
-static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
+inline bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
DCHECK(constant->IsIntConstant() || constant->IsLongConstant() || constant->IsNullConstant())
<< constant->DebugName();
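CanEncodeConstantAsImmediate decides whether a constant operand can be folded into the instruction encoding rather than materialized in a register; on A64, arithmetic immediates are 12-bit unsigned values, optionally shifted left by 12. A sketch of just that one rule (the real helper also has to handle logical bitmask immediates and per-instruction cases):

#include <cstdint>

inline bool IsAddSubImmediate(uint64_t value) {
  return (value & ~UINT64_C(0xfff)) == 0 ||        // fits imm12
         (value & ~(UINT64_C(0xfff) << 12)) == 0;  // fits imm12, LSL #12
}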
@@ -258,7 +258,7 @@ static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst
}
}
-static inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
+inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
HInstruction* instr) {
if (constant->IsConstant()
&& CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
@@ -272,7 +272,7 @@ static inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
// codes are the same, we can initialize a vixl register list simply from the register masks.
// Currently, only SP/WSP and XZR/WZR codes are different between art and vixl.
// Note: This function is only used for debug checks.
-static inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
+inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
size_t num_core,
uint32_t art_fpu_registers,
size_t num_fpu) {
@@ -290,7 +290,7 @@ static inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
return true;
}
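ArtVixlRegCodeCoherentForRegSet is the debug-only guard for the shortcut described in the comment above: because the two numbering schemes agree for every register that may appear in a mask, a VIXL register list can be initialized straight from an ART register mask. A self-contained sketch of such a bit-walking check (simplified signature, not the ART one):

#include <cstdint>

// Returns true if every register named in `mask` has the same code under
// both numbering schemes, where `remap` translates one scheme to the other.
inline bool CodesAgreeForMask(uint32_t mask, int (*remap)(int)) {
  for (int code = 0; mask != 0u; ++code, mask >>= 1) {
    if ((mask & 1u) != 0u && remap(code) != code) {
      return false;
    }
  }
  return true;
}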
-static inline vixl::aarch64::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
+inline vixl::aarch64::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
switch (op_kind) {
case HArm64DataProcWithShifterOp::kASR: return vixl::aarch64::ASR;
case HArm64DataProcWithShifterOp::kLSL: return vixl::aarch64::LSL;
@@ -302,7 +302,7 @@ static inline vixl::aarch64::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::
}
}
-static inline vixl::aarch64::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
+inline vixl::aarch64::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
switch (op_kind) {
case HArm64DataProcWithShifterOp::kUXTB: return vixl::aarch64::UXTB;
case HArm64DataProcWithShifterOp::kUXTH: return vixl::aarch64::UXTH;
@@ -317,7 +317,7 @@ static inline vixl::aarch64::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp
}
}
-static inline bool CanFitInShifterOperand(HInstruction* instruction) {
+inline bool CanFitInShifterOperand(HInstruction* instruction) {
if (instruction->IsTypeConversion()) {
HTypeConversion* conversion = instruction->AsTypeConversion();
Primitive::Type result_type = conversion->GetResultType();
@@ -332,7 +332,7 @@ static inline bool CanFitInShifterOperand(HInstruction* instruction) {
}
}
-static inline bool HasShifterOperand(HInstruction* instr) {
+inline bool HasShifterOperand(HInstruction* instr) {
// A `neg` instruction is an alias of `sub` using the zero register as the
// first register input.
bool res = instr->IsAdd() || instr->IsAnd() || instr->IsNeg() ||
@@ -340,7 +340,7 @@ static inline bool HasShifterOperand(HInstruction* instr) {
return res;
}
-static inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
+inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
DCHECK(HasShifterOperand(instruction));
// Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
// does *not* support extension. This is because the `extended register` form
@@ -351,7 +351,7 @@ static inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
return instruction->IsAdd() || instruction->IsSub();
}
-static inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
+inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
}
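The change repeated throughout this header, dropping static from static inline, swaps internal linkage for external linkage: a static inline function defined in a header gives every including translation unit its own private copy (and its own unused-function warning), while a plain inline function is a single entity program-wide whose duplicate definitions the linker folds under the one-definition rule. A minimal illustration, not ART code:

// example.h -- illustration only.
#ifndef EXAMPLE_H_
#define EXAMPLE_H_

// External linkage: every translation unit shares one logical definition;
// the ODR requires identical bodies and the linker deduplicates them.
inline int TwiceShared(int x) { return 2 * x; }

// Internal linkage: each translation unit that includes this header gets its
// own private copy, which bloats code and trips -Wunused-function in any TU
// that never calls it.
static inline int TwicePerTu(int x) { return 2 * x; }

#endif  // EXAMPLE_H_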
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 8d4d143696..b8e1379ef9 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -297,7 +297,15 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
- // TODO: In follow up CL, add PcRelative and Address back in.
+ if (compiler_driver_->GetSupportBootImageFixup()) {
+ DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
+ desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
+ ? HLoadString::LoadKind::kBootImageLinkTimePcRelative
+ : HLoadString::LoadKind::kBootImageLinkTimeAddress;
+ } else {
+    // MIPS64 (no boot image fixup support) or compiler_driver_test. Do not sharpen.
+ DCHECK_EQ(desired_load_kind, HLoadString::LoadKind::kDexCacheViaMethod);
+ }
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
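The added branch restores the two boot-image load kinds for AOT-compiled code: PC-relative when compiling position-independent code, and an absolute link-time address otherwise, keeping the dex-cache path for targets without boot-image fixup support. Restated as a small self-contained decision function (enum values abbreviated from HLoadString::LoadKind; a sketch, not ART code):

enum class StringLoadKind {
  kBootImageLinkTimePcRelative,  // PIC: PC-relative reference, fixed up at link time
  kBootImageLinkTimeAddress,     // non-PIC: absolute address patched at link time
  kDexCacheViaMethod,            // unsharpened fallback: runtime dex-cache lookup
};

inline StringLoadKind SelectBootImageStringLoadKind(bool supports_boot_image_fixup,
                                                    bool compile_pic) {
  if (!supports_boot_image_fixup) {
    // e.g. MIPS64 at the time of this change: keep the default load kind.
    return StringLoadKind::kDexCacheViaMethod;
  }
  return compile_pic ? StringLoadKind::kBootImageLinkTimePcRelative
                     : StringLoadKind::kBootImageLinkTimeAddress;
}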