Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  1248
1 file changed, 624 insertions, 624 deletions
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 1c6e56470f..f1d11354fa 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -725,110 +725,6 @@ void CodeGeneratorARMVIXL::GenerateInvokeRuntime(int32_t entry_point_offset) {
__ Blx(lr);
}
-void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
- locations->SetInAt(0, Location::RequiresRegister());
- if (check->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
- // We assume the class is not null.
- LoadClassSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
- check,
- check->GetDexPc(),
- /* do_clinit */ true);
- codegen_->AddSlowPath(slow_path);
- GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
-}
-
-void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
- LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- GetAssembler()->LoadFromOffset(kLoadWord,
- temp,
- class_reg,
- mirror::Class::StatusOffset().Int32Value());
- __ Cmp(temp, mirror::Class::kStatusInitialized);
- __ B(lt, slow_path->GetEntryLabel());
- // Even if the initialized flag is set, we may be in a situation where caches are not synced
- // properly. Therefore, we do a memory fence.
- __ Dmb(ISH);
- __ Bind(slow_path->GetExitLabel());
-}
-
-// Check if the desired_string_load_kind is supported. If it is, return it;
-// otherwise return a fall-back kind that should be used instead.
-HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind ATTRIBUTE_UNUSED) {
- // TODO(VIXL): Implement optimized code paths. For now we always use the simpler fallback code.
- return HLoadString::LoadKind::kDexCacheViaMethod;
-}
-
-void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = load->NeedsEnvironment()
- ? LocationSummary::kCallOnMainOnly
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
-
- // TODO(VIXL): Implement optimized code paths.
- // See InstructionCodeGeneratorARMVIXL::VisitLoadString.
- HLoadString::LoadKind load_kind = load->GetLoadKind();
- if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
- locations->SetInAt(0, Location::RequiresRegister());
- // TODO(VIXL): Use InvokeRuntimeCallingConventionARMVIXL instead.
- locations->SetOut(LocationFrom(r0));
- } else {
- locations->SetOut(Location::RequiresRegister());
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
- // TODO(VIXL): Implement optimized code paths.
- // We implemented the simplest solution to get the first ART tests passing; the
- // optimized path is deferred for now and should be implemented using the ARM64
- // implementation as a reference. The same applies to
- // LocationsBuilderARMVIXL::VisitLoadString.
-
- // TODO: Re-add the compiler code to do string dex cache lookup again.
- DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex());
- codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
- CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-}
-
-// Check if the desired_class_load_kind is supported. If it is, return it;
-// otherwise return a fall-back kind that should be used instead.
-HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind ATTRIBUTE_UNUSED) {
- // TODO(VIXL): Implement optimized code paths.
- return HLoadClass::LoadKind::kDexCacheViaMethod;
-}
-
-// Check if the desired_dispatch_info is supported. If it is, return it;
-// otherwise return a fall-back info that should be used instead.
-HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
- const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info ATTRIBUTE_UNUSED,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
- // TODO(VIXL): Implement optimized code paths.
- return {
- HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u,
- 0u
- };
-}
-
-// Copy the result of a call into the given target.
-void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
- Primitive::Type type ATTRIBUTE_UNUSED) {
- TODO_VIXL32(FATAL);
-}
-
void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
HBasicBlock* block = got->GetBlock();
@@ -2151,23 +2047,400 @@ void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) {
}
}
-void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
+void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ Location second = instruction->GetLocations()->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ DCHECK(imm == 1 || imm == -1);
+
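+ // x % 1 and x % -1 are always 0. x / 1 is x itself; x / -1 is -x,
+ // computed as 0 - x (Rsb), whose two's-complement wrap for INT_MIN
+ // matches Java's defined overflow behaviour for division by -1.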
+ if (instruction->IsRem()) {
+ __ Mov(out, 0);
+ } else {
+ if (imm == 1) {
+ __ Mov(out, dividend);
+ } else {
+ __ Rsb(out, dividend, 0);
+ }
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
+ int ctz_imm = CTZ(abs_imm);
+
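+ // Signed division by 2^k rounds toward zero, so a correction of
+ // 2^k - 1 must be added to a negative dividend before shifting. The
+ // correction is built from the sign: asr #31 yields all-ones for a
+ // negative dividend, and lsr #(32 - k) keeps its low k bits, i.e.
+ // 2^k - 1 (for k == 1, lsr #31 of the dividend gives the 0-or-1
+ // correction directly). Worked example for imm == 4 (k == 2):
+ // dividend -7 gets correction 3, and (-7 + 3) asr 2 == -1 as required.
+ // For Rem, Ubfx keeps the low k bits of the corrected value and the
+ // correction is then subtracted back out.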
+ if (ctz_imm == 1) {
+ __ Lsr(temp, dividend, 32 - ctz_imm);
+ } else {
+ __ Asr(temp, dividend, 31);
+ __ Lsr(temp, temp, 32 - ctz_imm);
+ }
+ __ Add(out, temp, dividend);
+
+ if (instruction->IsDiv()) {
+ __ Asr(out, out, ctz_imm);
+ if (imm < 0) {
+ __ Rsb(out, out, 0);
+ }
+ } else {
+ __ Ubfx(out, out, 0, ctz_imm);
+ __ Sub(out, out, temp);
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ vixl32::Register temp1 = RegisterFrom(locations->GetTemp(0));
+ vixl32::Register temp2 = RegisterFrom(locations->GetTemp(1));
+ int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+
+ int64_t magic;
+ int shift;
+ CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+
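+ // Classic magic-number division (Granlund & Montgomery, as in Hacker's
+ // Delight): the quotient is the high 32 bits of dividend * magic,
+ // adjusted by +/- dividend when the signs of magic and imm disagree,
+ // arithmetically shifted, then corrected by +1 for negative results
+ // (the Sub of the value's own sign bit below). Worked example for
+ // imm == 3: magic == 0x55555556 with shift == 0, and for dividend == 9
+ // the product is 0x300000006, whose high word is the quotient 3.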
+ __ Mov(temp1, magic);
+ __ Smull(temp2, temp1, dividend, temp1);
+
+ if (imm > 0 && magic < 0) {
+ __ Add(temp1, temp1, dividend);
+ } else if (imm < 0 && magic > 0) {
+ __ Sub(temp1, temp1, dividend);
+ }
+
+ if (shift != 0) {
+ __ Asr(temp1, temp1, shift);
+ }
+
+ if (instruction->IsDiv()) {
+ __ Sub(out, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
+ } else {
+ __ Sub(temp1, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
+ // TODO: Strength reduction for mls.
+ __ Mov(temp2, imm);
+ __ Mls(out, temp1, temp2, dividend);
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral(
+ HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ Location second = instruction->GetLocations()->InAt(1);
+ DCHECK(second.IsConstant());
+
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ if (imm == 0) {
+ // Do not generate anything. DivZeroCheck would prevent any code from being executed.
+ } else if (imm == 1 || imm == -1) {
+ DivRemOneOrMinusOne(instruction);
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ DivRemByPowerOfTwo(instruction);
+ } else {
+ DCHECK(imm <= -2 || imm >= 2);
+ GenerateDivRemWithAnyConstant(instruction);
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) {
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if (div->GetResultType() == Primitive::kPrimLong) {
+ // pLdiv runtime call.
+ call_kind = LocationSummary::kCallOnMainOnly;
+ } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
+ // sdiv will be replaced by another instruction sequence.
+ } else if (div->GetResultType() == Primitive::kPrimInt &&
+ !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ // pIdivmod runtime call.
+ call_kind = LocationSummary::kCallOnMainOnly;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (div->InputAt(1)->IsConstant()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ int32_t value = div->InputAt(1)->AsIntConstant()->GetValue();
+ if (value == 1 || value == 0 || value == -1) {
+ // No temp register required.
+ } else {
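+ // DivRemByPowerOfTwo needs one temp for the sign correction;
+ // GenerateDivRemWithAnyConstant needs a second one for the magic
+ // constant and the Mls operand.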
+ locations->AddTemp(Location::RequiresRegister());
+ if (!IsPowerOfTwo(AbsOrMin(value))) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ }
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ TODO_VIXL32(FATAL);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) {
+ Location rhs = div->GetLocations()->InAt(1);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (rhs.IsConstant()) {
+ GenerateDivRemConstantIntegral(div);
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
+ } else {
+ TODO_VIXL32(FATAL);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ __ Vdiv(OutputVRegister(div), InputVRegisterAt(div, 0), InputVRegisterAt(div, 1));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ DivZeroCheckSlowPathARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
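+ // A single flag-setting OR of the two halves sets Z only if the full
+ // 64-bit value is zero, saving a separate compare and branch.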
+ __ Orrs(temp, LowRegisterFrom(value), HighRegisterFrom(value));
+ __ B(eq, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::HandleIntegerRotate(HRor* ror) {
+ LocationSummary* locations = ror->GetLocations();
+ vixl32::Register in = InputRegisterAt(ror, 0);
+ Location rhs = locations->InAt(1);
+ vixl32::Register out = OutputRegister(ror);
+
+ if (rhs.IsConstant()) {
+ // Arm32 and Thumb2 assemblers require a rotation on the interval [1,31],
+ // so map all rotations to a positive equivalent in that range.
+ // (e.g. left *or* right by -2 bits == 30 bits in the same direction.)
+ uint32_t rot = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()) & 0x1F;
+ if (rot) {
+ // Rotate, mapping left rotations to right equivalents if necessary.
+ // (e.g. left by 2 bits == right by 30.)
+ __ Ror(out, in, rot);
+ } else if (!out.Is(in)) {
+ __ Mov(out, in);
+ }
+ } else {
+ __ Ror(out, in, RegisterFrom(rhs));
+ }
+}
+
+// Gain some speed by mapping all Long rotates onto equivalent pairs of Integer
+// rotates by swapping input regs (effectively rotating by the first 32 bits of
+// a larger rotation) or flipping direction (thus treating larger right/left
+// rotations as sub-word-sized rotations in the other direction) as appropriate.
+void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) {
+ LocationSummary* locations = ror->GetLocations();
+ vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
+ vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
+ Location rhs = locations->InAt(1);
+ vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out());
+ vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out());
+
+ if (rhs.IsConstant()) {
+ uint64_t rot = CodeGenerator::GetInt64ValueOf(rhs.GetConstant());
+ // Map all rotations to positive equivalents on the interval [0,63].
+ rot &= kMaxLongShiftDistance;
+ // For rotates over a word in size, 'pre-rotate' by 32 bits to keep the
+ // rotate logic below to a simple pair of binary orrs.
+ // (e.g. 34 bits == in_reg swap + 2 bits right.)
+ if (rot >= kArmBitsPerWord) {
+ rot -= kArmBitsPerWord;
+ std::swap(in_reg_hi, in_reg_lo);
+ }
+ // Rotate, or mov to out for zero or word size rotations.
+ if (rot != 0u) {
+ __ Lsr(out_reg_hi, in_reg_hi, rot);
+ __ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot));
+ __ Lsr(out_reg_lo, in_reg_lo, rot);
+ __ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot));
+ } else {
+ __ Mov(out_reg_lo, in_reg_lo);
+ __ Mov(out_reg_hi, in_reg_hi);
+ }
+ } else {
+ vixl32::Register shift_right = RegisterFrom(locations->GetTemp(0));
+ vixl32::Register shift_left = RegisterFrom(locations->GetTemp(1));
+ vixl32::Label end;
+ vixl32::Label shift_by_32_plus_shift_right;
+
+ __ And(shift_right, RegisterFrom(rhs), 0x1F);
+ __ Lsrs(shift_left, RegisterFrom(rhs), 6);
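+ // The Lsrs result is discarded; it is emitted only to move bit 5 of
+ // the shift amount into the carry flag, telling the B(cc) below
+ // whether the effective rotation is 32 bits or more. The Rsb in
+ // between must therefore not touch the flags.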
+ // TODO(VIXL): Check that the flags are preserved once "vixl32::LeaveFlags" is enabled.
+ __ Rsb(shift_left, shift_right, kArmBitsPerWord);
+ __ B(cc, &shift_by_32_plus_shift_right);
+
+ // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
+ // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
+ __ Lsl(out_reg_hi, in_reg_hi, shift_left);
+ __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+ __ Add(out_reg_hi, out_reg_hi, out_reg_lo);
+ __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+ __ Lsr(shift_left, in_reg_hi, shift_right);
+ __ Add(out_reg_lo, out_reg_lo, shift_left);
+ __ B(&end);
+
+ __ Bind(&shift_by_32_plus_shift_right); // Shift by 32+shift_right.
+ // out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
+ // out_reg_lo = (reg_lo >> shift_right) | (reg_hi << shift_left).
+ __ Lsr(out_reg_hi, in_reg_hi, shift_right);
+ __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+ __ Add(out_reg_hi, out_reg_hi, out_reg_lo);
+ __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+ __ Lsl(shift_right, in_reg_hi, shift_left);
+ __ Add(out_reg_lo, out_reg_lo, shift_right);
+
+ __ Bind(&end);
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetOut(LocationFrom(r0));
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
+ new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ switch (ror->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(ror->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (ror->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(ror->InputAt(1)->AsConstant()));
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected operation type " << ror->GetResultType();
+ }
}
-void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+void InstructionCodeGeneratorARMVIXL::VisitRor(HRor* ror) {
+ Primitive::Type type = ror->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt: {
+ HandleIntegerRotate(ror);
+ break;
+ }
+ case Primitive::kPrimLong: {
+ HandleLongRotate(ror);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected operation type " << type;
+ UNREACHABLE();
+ }
}
void LocationsBuilderARMVIXL::HandleShift(HBinaryOperation* op) {
@@ -2436,6 +2709,25 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
}
}
+void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetOut(LocationFrom(r0));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+}
+
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -2645,403 +2937,6 @@ void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register a
__ Cbnz(temp1, &fail);
}
-void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv() || instruction->IsRem());
- DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
-
- Location second = instruction->GetLocations()->InAt(1);
- DCHECK(second.IsConstant());
-
- vixl32::Register out = OutputRegister(instruction);
- vixl32::Register dividend = InputRegisterAt(instruction, 0);
- int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
- DCHECK(imm == 1 || imm == -1);
-
- if (instruction->IsRem()) {
- __ Mov(out, 0);
- } else {
- if (imm == 1) {
- __ Mov(out, dividend);
- } else {
- __ Rsb(out, dividend, 0);
- }
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv() || instruction->IsRem());
- DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
-
- LocationSummary* locations = instruction->GetLocations();
- Location second = locations->InAt(1);
- DCHECK(second.IsConstant());
-
- vixl32::Register out = OutputRegister(instruction);
- vixl32::Register dividend = InputRegisterAt(instruction, 0);
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
- uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
- int ctz_imm = CTZ(abs_imm);
-
- if (ctz_imm == 1) {
- __ Lsr(temp, dividend, 32 - ctz_imm);
- } else {
- __ Asr(temp, dividend, 31);
- __ Lsr(temp, temp, 32 - ctz_imm);
- }
- __ Add(out, temp, dividend);
-
- if (instruction->IsDiv()) {
- __ Asr(out, out, ctz_imm);
- if (imm < 0) {
- __ Rsb(out, out, 0);
- }
- } else {
- __ Ubfx(out, out, 0, ctz_imm);
- __ Sub(out, out, temp);
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv() || instruction->IsRem());
- DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
-
- LocationSummary* locations = instruction->GetLocations();
- Location second = locations->InAt(1);
- DCHECK(second.IsConstant());
-
- vixl32::Register out = OutputRegister(instruction);
- vixl32::Register dividend = InputRegisterAt(instruction, 0);
- vixl32::Register temp1 = RegisterFrom(locations->GetTemp(0));
- vixl32::Register temp2 = RegisterFrom(locations->GetTemp(1));
- int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
-
- int64_t magic;
- int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
-
- __ Mov(temp1, magic);
- __ Smull(temp2, temp1, dividend, temp1);
-
- if (imm > 0 && magic < 0) {
- __ Add(temp1, temp1, dividend);
- } else if (imm < 0 && magic > 0) {
- __ Sub(temp1, temp1, dividend);
- }
-
- if (shift != 0) {
- __ Asr(temp1, temp1, shift);
- }
-
- if (instruction->IsDiv()) {
- __ Sub(out, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
- } else {
- __ Sub(temp1, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
- // TODO: Strength reduction for mls.
- __ Mov(temp2, imm);
- __ Mls(out, temp1, temp2, dividend);
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral(
- HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv() || instruction->IsRem());
- DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
-
- Location second = instruction->GetLocations()->InAt(1);
- DCHECK(second.IsConstant());
-
- int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
- if (imm == 0) {
- // Do not generate anything. DivZeroCheck would prevent any code from being executed.
- } else if (imm == 1 || imm == -1) {
- DivRemOneOrMinusOne(instruction);
- } else if (IsPowerOfTwo(AbsOrMin(imm))) {
- DivRemByPowerOfTwo(instruction);
- } else {
- DCHECK(imm <= -2 || imm >= 2);
- GenerateDivRemWithAnyConstant(instruction);
- }
-}
-
-void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- if (div->GetResultType() == Primitive::kPrimLong) {
- // pLdiv runtime call.
- call_kind = LocationSummary::kCallOnMainOnly;
- } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
- // sdiv will be replaced by another instruction sequence.
- } else if (div->GetResultType() == Primitive::kPrimInt &&
- !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
- // pIdivmod runtime call.
- call_kind = LocationSummary::kCallOnMainOnly;
- }
-
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
-
- switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
- if (div->InputAt(1)->IsConstant()) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- int32_t value = div->InputAt(1)->AsIntConstant()->GetValue();
- if (value == 1 || value == 0 || value == -1) {
- // No temp register required.
- } else {
- locations->AddTemp(Location::RequiresRegister());
- if (!IsPowerOfTwo(AbsOrMin(value))) {
- locations->AddTemp(Location::RequiresRegister());
- }
- }
- } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- } else {
- TODO_VIXL32(FATAL);
- }
- break;
- }
- case Primitive::kPrimLong: {
- TODO_VIXL32(FATAL);
- break;
- }
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
- break;
- }
-
- default:
- LOG(FATAL) << "Unexpected div type " << div->GetResultType();
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) {
- Location rhs = div->GetLocations()->InAt(1);
-
- switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
- if (rhs.IsConstant()) {
- GenerateDivRemConstantIntegral(div);
- } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
- __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
- } else {
- TODO_VIXL32(FATAL);
- }
- break;
- }
-
- case Primitive::kPrimLong: {
- TODO_VIXL32(FATAL);
- break;
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- __ Vdiv(OutputVRegister(div), InputVRegisterAt(div, 0), InputVRegisterAt(div, 1));
- break;
-
- default:
- LOG(FATAL) << "Unexpected div type " << div->GetResultType();
- }
-}
-
-void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- DivZeroCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction);
- codegen_->AddSlowPath(slow_path);
-
- LocationSummary* locations = instruction->GetLocations();
- Location value = locations->InAt(0);
-
- switch (instruction->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt: {
- if (value.IsRegister()) {
- __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ B(slow_path->GetEntryLabel());
- }
- }
- break;
- }
- case Primitive::kPrimLong: {
- if (value.IsRegisterPair()) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- __ Orrs(temp, LowRegisterFrom(value), HighRegisterFrom(value));
- __ B(eq, slow_path->GetEntryLabel());
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
- __ B(slow_path->GetEntryLabel());
- }
- }
- break;
- }
- default:
- LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::HandleIntegerRotate(HRor* ror) {
- LocationSummary* locations = ror->GetLocations();
- vixl32::Register in = InputRegisterAt(ror, 0);
- Location rhs = locations->InAt(1);
- vixl32::Register out = OutputRegister(ror);
-
- if (rhs.IsConstant()) {
- // Arm32 and Thumb2 assemblers require a rotation on the interval [1,31],
- // so map all rotations to a positive equivalent in that range.
- // (e.g. left *or* right by -2 bits == 30 bits in the same direction.)
- uint32_t rot = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()) & 0x1F;
- if (rot) {
- // Rotate, mapping left rotations to right equivalents if necessary.
- // (e.g. left by 2 bits == right by 30.)
- __ Ror(out, in, rot);
- } else if (!out.Is(in)) {
- __ Mov(out, in);
- }
- } else {
- __ Ror(out, in, RegisterFrom(rhs));
- }
-}
-
-// Gain some speed by mapping all Long rotates onto equivalent pairs of Integer
-// rotates by swapping input regs (effectively rotating by the first 32 bits of
-// a larger rotation) or flipping direction (thus treating larger right/left
-// rotations as sub-word-sized rotations in the other direction) as appropriate.
-void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) {
- LocationSummary* locations = ror->GetLocations();
- vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
- vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
- Location rhs = locations->InAt(1);
- vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out());
- vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out());
-
- if (rhs.IsConstant()) {
- uint64_t rot = CodeGenerator::GetInt64ValueOf(rhs.GetConstant());
- // Map all rotations to positive equivalents on the interval [0,63].
- rot &= kMaxLongShiftDistance;
- // For rotates over a word in size, 'pre-rotate' by 32 bits to keep the
- // rotate logic below to a simple pair of binary orrs.
- // (e.g. 34 bits == in_reg swap + 2 bits right.)
- if (rot >= kArmBitsPerWord) {
- rot -= kArmBitsPerWord;
- std::swap(in_reg_hi, in_reg_lo);
- }
- // Rotate, or mov to out for zero or word size rotations.
- if (rot != 0u) {
- __ Lsr(out_reg_hi, in_reg_hi, rot);
- __ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot));
- __ Lsr(out_reg_lo, in_reg_lo, rot);
- __ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot));
- } else {
- __ Mov(out_reg_lo, in_reg_lo);
- __ Mov(out_reg_hi, in_reg_hi);
- }
- } else {
- vixl32::Register shift_right = RegisterFrom(locations->GetTemp(0));
- vixl32::Register shift_left = RegisterFrom(locations->GetTemp(1));
- vixl32::Label end;
- vixl32::Label shift_by_32_plus_shift_right;
-
- __ And(shift_right, RegisterFrom(rhs), 0x1F);
- __ Lsrs(shift_left, RegisterFrom(rhs), 6);
- // TODO(VIXL): Check that the flags are preserved once "vixl32::LeaveFlags" is enabled.
- __ Rsb(shift_left, shift_right, kArmBitsPerWord);
- __ B(cc, &shift_by_32_plus_shift_right);
-
- // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
- // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
- __ Lsl(out_reg_hi, in_reg_hi, shift_left);
- __ Lsr(out_reg_lo, in_reg_lo, shift_right);
- __ Add(out_reg_hi, out_reg_hi, out_reg_lo);
- __ Lsl(out_reg_lo, in_reg_lo, shift_left);
- __ Lsr(shift_left, in_reg_hi, shift_right);
- __ Add(out_reg_lo, out_reg_lo, shift_left);
- __ B(&end);
-
- __ Bind(&shift_by_32_plus_shift_right); // Shift by 32+shift_right.
- // out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
- // out_reg_lo = (reg_lo >> shift_right) | (reg_hi << shift_left).
- __ Lsr(out_reg_hi, in_reg_hi, shift_right);
- __ Lsl(out_reg_lo, in_reg_lo, shift_left);
- __ Add(out_reg_hi, out_reg_hi, out_reg_lo);
- __ Lsr(out_reg_lo, in_reg_lo, shift_right);
- __ Lsl(shift_right, in_reg_hi, shift_left);
- __ Add(out_reg_lo, out_reg_lo, shift_right);
-
- __ Bind(&end);
- }
-}
-
-void LocationsBuilderARMVIXL::VisitRor(HRor* ror) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
- switch (ror->GetResultType()) {
- case Primitive::kPrimInt: {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(ror->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
- }
- case Primitive::kPrimLong: {
- locations->SetInAt(0, Location::RequiresRegister());
- if (ror->InputAt(1)->IsConstant()) {
- locations->SetInAt(1, Location::ConstantLocation(ror->InputAt(1)->AsConstant()));
- } else {
- locations->SetInAt(1, Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- }
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
- break;
- }
- default:
- LOG(FATAL) << "Unexpected operation type " << ror->GetResultType();
- }
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitRor(HRor* ror) {
- Primitive::Type type = ror->GetResultType();
- switch (type) {
- case Primitive::kPrimInt: {
- HandleIntegerRotate(ror);
- break;
- }
- case Primitive::kPrimLong: {
- HandleLongRotate(ror);
- break;
- }
- default:
- LOG(FATAL) << "Unexpected operation type " << type;
- UNREACHABLE();
- }
-}
-
-
void LocationsBuilderARMVIXL::HandleFieldSet(
HInstruction* instruction, const FieldInfo& field_info) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
@@ -3200,6 +3095,65 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
}
}
+void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
+ const FieldInfo& field_info) {
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+
+ bool object_field_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (field_info.GetFieldType() == Primitive::kPrimNot);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction,
+ object_field_get_with_read_barrier ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ }
+ locations->SetInAt(0, Location::RequiresRegister());
+
+ bool volatile_for_double = field_info.IsVolatile()
+ && (field_info.GetFieldType() == Primitive::kPrimDouble)
+ && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
+ // The output overlaps in case of volatile long: we don't want the
+ // code generated by GenerateWideAtomicLoad to overwrite the
+ // object's location. Likewise, in the case of an object field get
+ // with read barriers enabled, we do not want the load to overwrite
+ // the object's location, as we need it to emit the read barrier.
+ bool overlap = (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) ||
+ object_field_get_with_read_barrier;
+
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister(),
+ (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
+ }
+ if (volatile_for_double) {
+ // ARM encoding has some additional constraints for ldrexd/strexd:
+ // - registers need to be consecutive
+ // - the first register should be even but not R14.
+ // We don't test for ARM yet, and the assertion makes sure that we
+ // revisit this if we ever enable ARM encoding.
+ DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ } else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
+ }
+}
+
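+// VFP compare instructions can encode 0.0 as an immediate (vcmp with
+// #0.0), and -0.0 compares equal to it, so an arithmetic zero constant
+// does not need to occupy an FPU register.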
+Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* input) {
+ DCHECK(Primitive::IsFloatingPointType(input->GetType())) << input->GetType();
+ if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) ||
+ (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) {
+ return Location::ConstantLocation(input->AsConstant());
+ } else {
+ return Location::RequiresFpuRegister();
+ }
+}
+
Location LocationsBuilderARMVIXL::ArmEncodableConstantOrRegister(HInstruction* constant,
Opcode opcode) {
DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
@@ -3262,65 +3216,6 @@ bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(uint32_t value,
return assembler->ShifterOperandCanHold(neg_opcode, value, set_cc);
}
-Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* input) {
- DCHECK(Primitive::IsFloatingPointType(input->GetType())) << input->GetType();
- if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) ||
- (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) {
- return Location::ConstantLocation(input->AsConstant());
- } else {
- return Location::RequiresFpuRegister();
- }
-}
-
-void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
- const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
-
- bool object_field_get_with_read_barrier =
- kEmitCompilerReadBarrier && (field_info.GetFieldType() == Primitive::kPrimNot);
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
- if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
- }
- locations->SetInAt(0, Location::RequiresRegister());
-
- bool volatile_for_double = field_info.IsVolatile()
- && (field_info.GetFieldType() == Primitive::kPrimDouble)
- && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
- // The output overlaps in case of volatile long: we don't want the
- // code generated by GenerateWideAtomicLoad to overwrite the
- // object's location. Likewise, in the case of an object field get
- // with read barriers enabled, we do not want the load to overwrite
- // the object's location, as we need it to emit the read barrier.
- bool overlap = (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) ||
- object_field_get_with_read_barrier;
-
- if (Primitive::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(Location::RequiresFpuRegister());
- } else {
- locations->SetOut(Location::RequiresRegister(),
- (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
- }
- if (volatile_for_double) {
- // ARM encoding has some additional constraints for ldrexd/strexd:
- // - registers need to be consecutive
- // - the first register should be even but not R14.
- // We don't test for ARM yet, and the assertion makes sure that we
- // revisit this if we ever enable ARM encoding.
- DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
- locations->AddTemp(Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- } else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- // We need a temporary register for the read barrier marking slow
- // path in CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier.
- locations->AddTemp(Location::RequiresRegister());
- }
-}
-
void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
@@ -3800,6 +3695,14 @@ void ParallelMoveResolverARMVIXL::RestoreScratch(int reg ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
}
+// Check if the desired_class_load_kind is supported. If it is, return it;
+// otherwise return a fall-back kind that should be used instead.
+HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind ATTRIBUTE_UNUSED) {
+ // TODO(VIXL): Implement optimized code paths.
+ return HLoadClass::LoadKind::kDexCacheViaMethod;
+}
+
void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
if (cls->NeedsAccessCheck()) {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -3885,6 +3788,121 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
}
}
+void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ LoadClassSlowPathARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ /* do_clinit */ true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
+ LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ GetAssembler()->LoadFromOffset(kLoadWord,
+ temp,
+ class_reg,
+ mirror::Class::StatusOffset().Int32Value());
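+ // mirror::Class status values are ordered, so a signed "less than
+ // kStatusInitialized" compare catches every not-yet-initialized state
+ // (including the negative error states) in a single test.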
+ __ Cmp(temp, mirror::Class::kStatusInitialized);
+ __ B(lt, slow_path->GetEntryLabel());
+ // Even if the initialized flag is set, we may be in a situation where caches are not synced
+ // properly. Therefore, we do a memory fence.
+ __ Dmb(ISH);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+// Check if the desired_string_load_kind is supported. If it is, return it;
+// otherwise return a fall-back kind that should be used instead.
+HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind ATTRIBUTE_UNUSED) {
+ // TODO(VIXL): Implement optimized code paths. For now we always use the simpler fallback code.
+ return HLoadString::LoadKind::kDexCacheViaMethod;
+}
+
+void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
+ LocationSummary::CallKind call_kind = load->NeedsEnvironment()
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+
+ // TODO(VIXL): Implement optimized code paths.
+ // See InstructionCodeGeneratorARMVIXL::VisitLoadString.
+ HLoadString::LoadKind load_kind = load->GetLoadKind();
+ if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ // TODO(VIXL): Use InvokeRuntimeCallingConventionARMVIXL instead.
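+ // The string-resolution entrypoint returns its result in r0, so the
+ // output is pinned there.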
+ locations->SetOut(LocationFrom(r0));
+ } else {
+ locations->SetOut(Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
+ // TODO(VIXL): Implement optimized code paths.
+ // We implemented the simplest solution to get the first ART tests passing; the
+ // optimized path is deferred for now and should be implemented using the ARM64
+ // implementation as a reference. The same applies to
+ // LocationsBuilderARMVIXL::VisitLoadString.
+
+ // TODO: Re-add the compiler code to do string dex cache lookup again.
+ DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+ codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+}
+
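+// The pending exception is held in a field of the current Thread; the
+// loads and stores below address it at a fixed offset from tr, the
+// reserved thread register.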
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kArmPointerSize>().Int32Value();
+}
+
+void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLoadException(HLoadException* load) {
+ vixl32::Register out = OutputRegister(load);
+ GetAssembler()->LoadFromOffset(kLoadWord, out, tr, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Mov(temp, 0);
+ GetAssembler()->StoreToOffset(kStoreWord, temp, tr, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+}
+
void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
HandleBitwiseOperation(instruction, AND);
}
@@ -4074,6 +4092,34 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
}
}
+void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instruction ATTRIBUTE_UNUSED,
+ Location out,
+ Location ref ATTRIBUTE_UNUSED,
+ Location obj ATTRIBUTE_UNUSED,
+ uint32_t offset ATTRIBUTE_UNUSED,
+ Location index ATTRIBUTE_UNUSED) {
+ if (kEmitCompilerReadBarrier) {
+ DCHECK(!kUseBakerReadBarrier);
+ TODO_VIXL32(FATAL);
+ } else if (kPoisonHeapReferences) {
+ GetAssembler()->UnpoisonHeapReference(RegisterFrom(out));
+ }
+}
+
+// Check if the desired_dispatch_info is supported. If it is, return it;
+// otherwise return a fall-back info that should be used instead.
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info ATTRIBUTE_UNUSED,
+ HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ // TODO(VIXL): Implement optimized code paths.
+ return {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+}
+
vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter(
HInvokeStaticOrDirect* invoke, vixl32::Register temp) {
DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
@@ -4189,56 +4235,10 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location
__ Blx(lr);
}
-static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kArmPointerSize>().Int32Value();
-}
-
-void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
- locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitLoadException(HLoadException* load) {
- vixl32::Register out = OutputRegister(load);
- GetAssembler()->LoadFromOffset(kLoadWord, out, tr, GetExceptionTlsOffset());
-}
-
-void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- __ Mov(temp, 0);
- GetAssembler()->StoreToOffset(kStoreWord, temp, tr, GetExceptionTlsOffset());
-}
-
-void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) {
- codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
-}
-
-void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instruction ATTRIBUTE_UNUSED,
- Location out,
- Location ref ATTRIBUTE_UNUSED,
- Location obj ATTRIBUTE_UNUSED,
- uint32_t offset ATTRIBUTE_UNUSED,
- Location index ATTRIBUTE_UNUSED) {
- if (kEmitCompilerReadBarrier) {
- DCHECK(!kUseBakerReadBarrier);
- TODO_VIXL32(FATAL);
- } else if (kPoisonHeapReferences) {
- GetAssembler()->UnpoisonHeapReference(RegisterFrom(out));
- }
+// Copy the result of a call into the given target.
+void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
+ Primitive::Type type ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
}
#undef __