Implement Integer.valueOf() intrinsic for boot image.
Also generate only one "boot image live objects" array rather
than one array per boot*.art file.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: testrunner.py --target --optimizing
Bug: 71526895
Change-Id: I23af7f47fea5150805f801cd2512f2d152ee5b73
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index b4890e4..4b2bcc8 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2791,28 +2791,27 @@
}
void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
+ IntrinsicVisitor::IntegerValueOfInfo info =
+ IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
MacroAssembler* masm = GetVIXLAssembler();
Register out = RegisterFrom(locations->Out(), DataType::Type::kReference);
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
- InvokeRuntimeCallingConvention calling_convention;
- Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (info.value_boot_image_offset != 0u) {
+ if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
+ DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ Mov(temp.W(), value);
__ Str(temp.W(), HeapOperand(out.W(), info.value_offset));
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
@@ -2828,7 +2827,7 @@
vixl::aarch64::Label allocate, done;
__ B(&allocate, hs);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_offset);
+ codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
MemOperand source = HeapOperand(
temp, out.X(), LSL, DataType::SizeShift(DataType::Type::kReference));
codegen_->Load(DataType::Type::kReference, out, source);
@@ -2836,9 +2835,8 @@
__ B(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ Str(in.W(), HeapOperand(out.W(), info.value_offset));
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
// one.