MIPS: java.lang.Integer.valueOf intrinsic.
Test: run-test --64 --optimizing 640-checker-integer-valueof
Test: run-test --64 640-checker-integer-valueof
Test: run-test --64 --no-prebuild --optimizing 640-checker-integer-valueof
Test: run-test --64 --no-prebuild 640-checker-integer-valueof
Test: run-test --optimizing 640-checker-integer-valueof
Test: run-test 640-checker-integer-valueof
Test: run-test --no-prebuild --optimizing 640-checker-integer-valueof
Test: run-test --no-prebuild 640-checker-integer-valueof
Test: mma test-art-host
Test: mma test-art-target
Booted on both MIPS32 and MIPS64 emulators.
Change-Id: I5b2f21cf2334c392080cff9654150504207f4c01
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index b57b41f..c5e1160 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -32,7 +32,7 @@
namespace mips64 {
IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen)
- : arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
}
Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
@@ -2564,6 +2564,84 @@
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
+// java.lang.Integer java.lang.Integer.valueOf(int)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
+  InvokeRuntimeCallingConvention calling_convention;
+  IntrinsicVisitor::ComputeIntegerValueOfLocations(
+      invoke,
+      codegen_,
+      calling_convention.GetReturnLocation(Primitive::kPrimNot),
+      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
+  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+  LocationSummary* locations = invoke->GetLocations();
+  Mips64Assembler* assembler = GetAssembler();
+  InstructionCodeGeneratorMIPS64* icodegen =
+      down_cast<InstructionCodeGeneratorMIPS64*>(codegen_->GetInstructionVisitor());
+
+  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+  InvokeRuntimeCallingConvention calling_convention;
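+  // When the argument is a compile-time constant, the cache bounds check is
+  // resolved here at compile time; otherwise it is emitted as code below.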
+  if (invoke->InputAt(0)->IsConstant()) {
+    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+    if (value >= info.low && value <= info.high) {
+      // Just embed the j.l.Integer in the code.
+      ScopedObjectAccess soa(Thread::Current());
+      mirror::Object* boxed = info.cache->Get(value + (-info.low));
+      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
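+      // The cached j.l.Integer lives in the boot image, so its address is
+      // constant and can be embedded directly in the generated code.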
+      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+      __ LoadConst64(out, address);
+    } else {
+      // Allocate and initialize a new j.l.Integer.
+      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+      // JIT object table.
+      uint32_t address =
+          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+      __ LoadConst64(calling_convention.GetRegisterAt(0), address);
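+      // info.integer is the boot-image java.lang.Integer class, passed to the
+      // allocation entrypoint in the first argument register; the new object
+      // comes back in `out`, the runtime return register.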
+      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+      __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
+      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+      // one.
+      icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+    }
+  } else {
+    GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
+    Mips64Label allocate, done;
+    int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
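+    // count is the number of boxed values in the IntegerCache array,
+    // i.e. info.high - info.low + 1 (256 for the default -128..127 range).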
+
+    // Is (info.low <= in) && (in <= info.high)?
+    __ Addiu32(out, in, -info.low);
+    // As unsigned quantities, is out < (info.high - info.low + 1)?
+    __ LoadConst32(AT, count);
+    // Branch if out >= (info.high - info.low + 1).
+    // This means that "in" is outside of the range [info.low, info.high].
+    __ Bgeuc(out, AT, &allocate);
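+    // A single unsigned comparison covers both bounds: if in < info.low, the
+    // subtraction above wraps around to a large unsigned value and the branch
+    // to the slow path is taken as well.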
+
+    // If the value is within the bounds, load the j.l.Integer directly from the array.
+    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+    __ LoadConst64(TMP, data_offset + address);
+    __ Dlsa(out, out, TMP, TIMES_4);
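+    // Scale the cache index in `out` by the 4-byte heap reference size and add
+    // the address of the cache array's data, then load the reference.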
+    __ Lwu(out, out, 0);
+    __ MaybeUnpoisonHeapReference(out);
+    __ Bc(&done);
+
+    __ Bind(&allocate);
+    // Otherwise allocate and initialize a new j.l.Integer.
+    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+    __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+    __ StoreToOffset(kStoreWord, in, out, info.value_offset);
+    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+    // one.
+    icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+    __ Bind(&done);
+  }
+}
+
UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
@@ -2583,8 +2661,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf)
-
UNREACHABLE_INTRINSICS(MIPS64)
#undef __