Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc           | 18
-rw-r--r--  compiler/optimizing/code_generator.h            |  2
-rw-r--r--  compiler/optimizing/code_generator_arm.h        |  4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     | 15
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  | 20
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h   |  4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    |  4
-rw-r--r--  compiler/optimizing/common_arm64.h              |  4
-rw-r--r--  compiler/optimizing/inliner.cc                  |  3
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc      | 26
10 files changed, 76 insertions, 24 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 99427f05da..d68aa51b1b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1417,4 +1417,22 @@ void CodeGenerator::EmitJitRoots(uint8_t* code,
EmitJitRootPatches(code, roots_data);
}
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
+ ScopedObjectAccess soa(Thread::Current());
+ if (array_klass.Get() == nullptr) {
+ // This can only happen for non-primitive arrays, as primitive arrays can always
+ // be resolved.
+ return kQuickAllocArrayResolved32;
+ }
+
+ switch (array_klass->GetComponentSize()) {
+ case 1: return kQuickAllocArrayResolved8;
+ case 2: return kQuickAllocArrayResolved16;
+ case 4: return kQuickAllocArrayResolved32;
+ case 8: return kQuickAllocArrayResolved64;
+ }
+ LOG(FATAL) << "Unreachable";
+ return kQuickAllocArrayResolved;
+}
+
} // namespace art
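The new CodeGenerator::GetArrayAllocationEntrypoint() helper maps the component size of the (resolved) array class to a size-specialized allocation entrypoint; an unresolved class can only be a non-primitive array, and ART heap references are 32 bits wide, hence the kQuickAllocArrayResolved32 fallback. A minimal standalone sketch of the same dispatch, using hypothetical enum names rather than the real Quick entrypoints:

#include <cstddef>

enum class AllocEntrypoint { kResolved8, kResolved16, kResolved32, kResolved64 };

// Hypothetical stand-in for the switch above: 1-byte components (boolean[],
// byte[]) take the 8-bit entrypoint, 2-byte (char[], short[]) the 16-bit one,
// 4-byte (int[], float[], reference arrays) the 32-bit one, and 8-byte
// (long[], double[]) the 64-bit one.
AllocEntrypoint SelectAllocEntrypoint(size_t component_size) {
  switch (component_size) {
    case 1: return AllocEntrypoint::kResolved8;
    case 2: return AllocEntrypoint::kResolved16;
    case 4: return AllocEntrypoint::kResolved32;
    case 8: return AllocEntrypoint::kResolved64;
    default: return AllocEntrypoint::kResolved32;  // Unresolved => reference array.
  }
}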
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2d129aff22..b912672792 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -573,6 +573,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
uint32_t GetReferenceSlowFlagOffset() const;
uint32_t GetReferenceDisableFlagOffset() const;
+ static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
+
protected:
// Patch info used for recording locations of required linker patches and their targets,
// i.e. target method, string, type or code identified by their dex file and index.
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index c1b3a40743..df2dbc74ab 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -128,9 +128,7 @@ class FieldAccessCallingConventionARM : public FieldAccessCallingConvention {
}
Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
return Primitive::Is64BitType(type)
- ? (is_instance
- ? Location::RegisterPairLocation(R2, R3)
- : Location::RegisterPairLocation(R1, R2))
+ ? Location::RegisterPairLocation(R2, R3)
: (is_instance
? Location::RegisterLocation(R2)
: Location::RegisterLocation(R1));
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9762ee81b1..cf824a14e3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1533,7 +1533,9 @@ void CodeGeneratorARM64::MoveLocation(Location destination,
HConstant* src_cst = source.GetConstant();
CPURegister temp;
if (src_cst->IsZeroBitPattern()) {
- temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant()) ? xzr : wzr;
+ temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant())
+ ? Register(xzr)
+ : Register(wzr);
} else {
if (src_cst->IsIntConstant()) {
temp = temps.AcquireW();
@@ -1903,6 +1905,9 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
}
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -1930,11 +1935,9 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
if (field_type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object FieldGet with Baker's read barrier case.
- MacroAssembler* masm = GetVIXLAssembler();
- UseScratchRegisterScope temps(masm);
// /* HeapReference<Object> */ out = *(base + offset)
Register base = RegisterFrom(base_loc, Primitive::kPrimNot);
- Register temp = temps.AcquireW();
+ Register temp = WRegisterFrom(locations->GetTemp(0));
// Note that potential implicit null checks are handled in this
// CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -4762,7 +4765,9 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 893e465392..f4d3ec54ee 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7679,15 +7679,21 @@ void InstructionCodeGeneratorARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_in
vixl32::Register jump_offset = temps.Acquire();
// Load jump offset from the table.
- __ Adr(table_base, jump_table->GetTableStartLabel());
- __ Ldr(jump_offset, MemOperand(table_base, key_reg, vixl32::LSL, 2));
+ {
+ const size_t jump_size = switch_instr->GetNumEntries() * sizeof(int32_t);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ (vixl32::kMaxInstructionSizeInBytes * 4) + jump_size,
+ CodeBufferCheckScope::kMaximumSize);
+ __ adr(table_base, jump_table->GetTableStartLabel());
+ __ ldr(jump_offset, MemOperand(table_base, key_reg, vixl32::LSL, 2));
- // Jump to target block by branching to table_base(pc related) + offset.
- vixl32::Register target_address = table_base;
- __ Add(target_address, table_base, jump_offset);
- __ Bx(target_address);
+ // Jump to the target block by branching to table_base (PC-relative) + offset.
+ vixl32::Register target_address = table_base;
+ __ add(target_address, table_base, jump_offset);
+ __ bx(target_address);
- jump_table->EmitTable(codegen_);
+ jump_table->EmitTable(codegen_);
+ }
}
}
void LocationsBuilderARMVIXL::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
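Switching from the MacroAssembler forms (Adr, Ldr, Add, Bx) to the raw lowercase mnemonics goes hand in hand with the new ExactAssemblyScope: inside the scope VIXL may not interleave literal-pool or veneer emission, so the adr-computed table address and the PC-relative offsets stay valid, and the reserved size covers the four single instructions plus the table itself, since EmitTable() now runs inside the scope. A hedged sketch of the general pattern, assuming VIXL's aarch32 API:

#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl::aarch32;

// Sketch only: emit a fixed-size PC-relative sequence whose layout must not
// be disturbed by pool emission. kMaximumSize permits using up to the
// reserved bytes; the lowercase mnemonics each emit exactly one instruction.
void EmitPcRelativeLoad(MacroAssembler* masm, Register dst, Label* table) {
  vixl::ExactAssemblyScope scope(masm,
                                 kMaxInstructionSizeInBytes * 2,
                                 vixl::CodeBufferCheckScope::kMaximumSize);
  masm->adr(dst, table);             // dst = address of the table (PC-relative).
  masm->ldr(dst, MemOperand(dst));   // dst = first 32-bit entry of the table.
}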
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 91051185fc..8ae3b7dc39 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -199,9 +199,7 @@ class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention
}
Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
return Primitive::Is64BitType(type)
- ? (is_instance
- ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
- : helpers::LocationFrom(vixl::aarch32::r1, vixl::aarch32::r2))
+ ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
: (is_instance
? helpers::LocationFrom(vixl::aarch32::r2)
: helpers::LocationFrom(vixl::aarch32::r1));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index c4caf4bf9d..abd8246325 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4096,7 +4096,9 @@ void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
}
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 776a483d43..93ea090583 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -130,8 +130,8 @@ inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* inst
Primitive::Type input_type = input->GetType();
if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
return (Primitive::ComponentSize(input_type) >= vixl::aarch64::kXRegSizeInBytes)
- ? vixl::aarch64::xzr
- : vixl::aarch64::wzr;
+ ? vixl::aarch64::Register(vixl::aarch64::xzr)
+ : vixl::aarch64::Register(vixl::aarch64::wzr);
}
return InputCPURegisterAt(instr, index);
}
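The common_arm64.h change (and the matching one in MoveLocation above) wraps xzr/wzr in an explicit Register() conversion so that both arms of the conditional operator have the same type. A standalone illustration of the underlying C++ rule, with toy types rather than VIXL's:

struct WReg {};
struct XReg {};
struct Reg {
  Reg(WReg) {}  // Both operand types convert to Reg, but the conditional
  Reg(XReg) {}  // operator is typed before the assignment is considered.
};

int main() {
  bool wide = true;
  // Reg r = wide ? XReg{} : WReg{};        // Error: no common type for the ?:.
  Reg r = wide ? Reg(XReg{}) : Reg(WReg{});  // OK: both arms are already Reg.
  (void)r;
  return 0;
}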
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 5d40f75618..7772e8f973 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -304,7 +304,8 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
// We do not support HDeoptimize in OSR methods.
return nullptr;
}
- return resolved_method->GetSingleImplementation();
+ PointerSize pointer_size = caller_compilation_unit_.GetClassLinker()->GetImagePointerSize();
+ return resolved_method->GetSingleImplementation(pointer_size);
}
bool HInliner::TryInline(HInvoke* invoke_instruction) {
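GetSingleImplementation() now takes the image pointer size explicitly: dex2oat may target a different word size than the host (for example, a 64-bit compiler producing a 32-bit boot image), and the offsets of pointer-sized fields inside ArtMethod depend on the target's pointer size rather than on sizeof(void*). A hypothetical layout sketch (not ART's actual ArtMethod layout), borrowing the shape of ART's PointerSize enum:

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Hypothetical: the byte offset of a field that follows one pointer-sized
// slot depends on the *target* pointer size, so accessors must be handed
// the size instead of deriving it from the compiling host.
constexpr size_t FieldOffsetAfterOnePointer(size_t header_bytes, PointerSize ps) {
  return header_bytes + static_cast<size_t>(ps);
}

static_assert(FieldOffsetAfterOnePointer(16, PointerSize::k32) == 20, "32-bit target");
static_assert(FieldOffsetAfterOnePointer(16, PointerSize::k64) == 24, "64-bit target");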
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 68c2d2e36e..91d9c56d10 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2742,14 +2742,36 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
+ if (features_.HasARMv8AInstructions()) {
+ CreateFPToFPLocations(arena_, invoke);
+ }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
+ ArmVIXLAssembler* assembler = GetAssembler();
+ DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
+ __ Vrintp(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
+ if (features_.HasARMv8AInstructions()) {
+ CreateFPToFPLocations(arena_, invoke);
+ }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
+ ArmVIXLAssembler* assembler = GetAssembler();
+ DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
+ __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+}
+
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinLongLong)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxLongLong)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathCeil) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathFloor) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRint)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
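On ARMv8 AArch32, VRINTP rounds toward plus infinity and VRINTM toward minus infinity, which are precisely the Math.ceil and Math.floor contracts, so each intrinsic becomes a single instruction instead of a runtime call. For finite inputs, std::ceil and std::floor compute the same results, shown here for reference:

#include <cmath>
#include <cstdio>

int main() {
  // VRINTP semantics: round toward +infinity (Math.ceil).
  std::printf("ceil(-1.5)  = %.1f\n", std::ceil(-1.5));   // -1.0
  std::printf("ceil(2.25)  = %.1f\n", std::ceil(2.25));   //  3.0
  // VRINTM semantics: round toward -infinity (Math.floor).
  std::printf("floor(-1.5) = %.1f\n", std::floor(-1.5));  // -2.0
  std::printf("floor(2.25) = %.1f\n", std::floor(2.25));  //  2.0
  return 0;
}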