MIPS: java.lang.Integer.valueOf intrinsic.
Test: run-test --64 --optimizing 640-checker-integer-valueof
Test: run-test --64 640-checker-integer-valueof
Test: run-test --64 --no-prebuild --optimizing 640-checker-integer-valueof
Test: run-test --64 --no-prebuild 640-checker-integer-valueof
Test: run-test --optimizing 640-checker-integer-valueof
Test: run-test 640-checker-integer-valueof
Test: run-test --no-prebuild --optimizing 640-checker-integer-valueof
Test: run-test --no-prebuild 640-checker-integer-valueof
Test: mma test-art-host
Test: mma test-art-target
Booted on both MIPS32 and MIPS64 emulators.
Change-Id: I5b2f21cf2334c392080cff9654150504207f4c01
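
For reviewers unfamiliar with the intrinsic, a minimal C++ sketch of the Java-level behavior being inlined follows. java.lang.Integer.valueOf(int) returns a pre-allocated boxed object for values in the IntegerCache range and allocates a fresh object otherwise; the names below (BoxedInteger, kLow, kHigh, cache, ValueOf) are illustrative stand-ins, not ART types.

    #include <cstdint>

    // Illustrative stand-ins for mirror::Object and the boot-image cache;
    // kLow/kHigh match the default IntegerCache bounds.
    struct BoxedInteger { int32_t value; };

    constexpr int32_t kLow = -128;
    constexpr int32_t kHigh = 127;
    BoxedInteger* cache[kHigh - kLow + 1];  // one boxed object per cached value

    BoxedInteger* ValueOf(int32_t v) {
      if (v >= kLow && v <= kHigh) {
        return cache[v - kLow];      // fast path: reuse the cached object
      }
      return new BoxedInteger{v};    // slow path: allocate and initialize
    }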
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 3875c4b..03939e3 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -229,9 +229,10 @@
// We switch to the table-based method starting with 7 cases.
static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+
private:
void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
- void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* operation);
void HandleCondition(HCondition* instruction);
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index fd1a174..200e884 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -226,9 +226,10 @@
// We switch to the table-based method starting with 7 cases.
static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+
private:
void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
- void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* operation);
void HandleCondition(HCondition* instruction);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 41df56b..bfe04f5 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -32,7 +32,7 @@
namespace mips {
IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen)
- : arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
}
MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
@@ -3133,6 +3133,89 @@
__ Bind(slow_path->GetExitLabel());
}
+// java.lang.Integer java.lang.Integer.valueOf(int)
+void IntrinsicLocationsBuilderMIPS::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ calling_convention.GetReturnLocation(Primitive::kPrimNot),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ MipsAssembler* assembler = GetAssembler();
+ InstructionCodeGeneratorMIPS* icodegen =
+ down_cast<InstructionCodeGeneratorMIPS*>(codegen_->GetInstructionVisitor());
+
+ Register out = locations->Out().AsRegister<Register>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ LoadConst32(out, address);
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ MipsLabel allocate, done;
+ int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
+
+ // Is (info.low <= in) && (in <= info.high)?
+ __ Addiu32(out, in, -info.low);
+ // As unsigned quantities, is out < (info.high - info.low + 1)?
+ if (IsInt<16>(count)) {
+ __ Sltiu(AT, out, count);
+ } else {
+ __ LoadConst32(AT, count);
+ __ Sltu(AT, out, AT);
+ }
+ // Branch if out >= (info.high - info.low + 1).
+ // This means that "in" is outside of the range [info.low, info.high].
+ __ Beqz(AT, &allocate);
+
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ LoadConst32(TMP, data_offset + address);
+ __ ShiftAndAdd(out, out, TMP, TIMES_4);
+ __ Lw(out, out, 0);
+ __ MaybeUnpoisonHeapReference(out);
+ __ B(&done);
+
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ StoreToOffset(kStoreWord, in, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
// Unimplemented intrinsics.
UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
@@ -3162,8 +3245,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, IntegerValueOf)
-
UNREACHABLE_INTRINSICS(MIPS)
#undef __
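
The bounds check in the MIPS32 code above (Addiu32 followed by Sltiu or Sltu, then Beqz) uses the standard trick of folding a two-sided signed range test into a single unsigned comparison. A hedged C++ sketch of the same idea, assuming high - low + 1 fits in a uint32_t as it does for the cache:

    #include <cstdint>

    // (low <= v && v <= high) as one unsigned compare: after subtracting
    // low, any v below low wraps to a large unsigned value, so a single
    // comparison against the cache size covers both bounds.
    bool InCacheRange(int32_t v, int32_t low, int32_t high) {
      uint32_t index = static_cast<uint32_t>(v) - static_cast<uint32_t>(low);
      uint32_t count = static_cast<uint32_t>(high) - static_cast<uint32_t>(low) + 1;
      return index < count;  // Addiu32 + Sltiu/Sltu + Beqz in the patch
    }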
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index e134cb8..eaadad2 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -49,6 +49,7 @@
bool TryDispatch(HInvoke* invoke);
private:
+ CodeGeneratorMIPS* codegen_;
ArenaAllocator* arena_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index b57b41f..c5e1160 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -32,7 +32,7 @@
namespace mips64 {
IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen)
- : arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
}
Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
@@ -2564,6 +2564,84 @@
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
+// java.lang.Integer java.lang.Integer.valueOf(int)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ calling_convention.GetReturnLocation(Primitive::kPrimNot),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ Mips64Assembler* assembler = GetAssembler();
+ InstructionCodeGeneratorMIPS64* icodegen =
+ down_cast<InstructionCodeGeneratorMIPS64*>(codegen_->GetInstructionVisitor());
+
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ LoadConst64(out, address);
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
+ Mips64Label allocate, done;
+ int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
+
+ // Is (info.low <= in) && (in <= info.high)?
+ __ Addiu32(out, in, -info.low);
+ // As unsigned quantities, is out < (info.high - info.low + 1)?
+ __ LoadConst32(AT, count);
+ // Branch if out >= (info.high - info.low + 1).
+ // This means that "in" is outside of the range [info.low, info.high].
+ __ Bgeuc(out, AT, &allocate);
+
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ LoadConst64(TMP, data_offset + address);
+ __ Dlsa(out, out, TMP, TIMES_4);
+ __ Lwu(out, out, 0);
+ __ MaybeUnpoisonHeapReference(out);
+ __ Bc(&done);
+
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ StoreToOffset(kStoreWord, in, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
@@ -2583,8 +2661,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf)
-
UNREACHABLE_INTRINSICS(MIPS64)
#undef __
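
In the cache load above, the array header offset is folded into the base constant so the fast path is one shift-and-add (Dlsa on MIPS64, ShiftAndAdd on MIPS32) plus a single load. A sketch of that address arithmetic, assuming 4-byte heap references as the TIMES_4 scale implies:

    #include <cstdint>

    // Address of cache element `index`: data_offset (the array header) is
    // added to the base up front, mirroring LoadConst64(TMP, data_offset +
    // address) followed by Dlsa(out, out, TMP, TIMES_4) and Lwu(out, out, 0).
    uintptr_t ElementAddress(uintptr_t cache_base, uint32_t data_offset,
                             uint32_t index) {
      uintptr_t base = cache_base + data_offset;
      return base + (static_cast<uintptr_t>(index) << 2);  // index * 4
    }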
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 5b95c26..179627a 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -49,6 +49,7 @@
bool TryDispatch(HInvoke* invoke);
private:
+ CodeGeneratorMIPS64* codegen_;
ArenaAllocator* arena_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
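
Both allocation paths end with GenerateMemoryBarrier(MemBarrierKind::kStoreStore) because Integer.value is a final field: the store of the value must become visible before the new reference can escape to another thread. A minimal C++ analogue of that publication pattern, using a release fence as a stand-in for the sync-based barrier the MIPS backends emit:

    #include <atomic>
    #include <cstdint>

    struct BoxedInteger { int32_t value; };
    std::atomic<BoxedInteger*> published{nullptr};

    void Publish(int32_t v) {
      BoxedInteger* obj = new BoxedInteger;
      obj->value = v;                                       // StoreToOffset in the patch
      std::atomic_thread_fence(std::memory_order_release);  // kStoreStore barrier
      published.store(obj, std::memory_order_relaxed);      // reference escapes
    }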