diff options
Diffstat (limited to 'compiler/optimizing/intrinsics_mips64.cc')
-rw-r--r-- | compiler/optimizing/intrinsics_mips64.cc | 82 |
1 file changed, 79 insertions, 3 deletions
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index b57b41f686..c5e116046f 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -32,7 +32,7 @@ namespace art { namespace mips64 { IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen) - : arena_(codegen->GetGraph()->GetArena()) { + : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) { } Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() { @@ -2564,6 +2564,84 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathTanh(HInvoke* invoke) { GenFPToFPCall(invoke, codegen_, kQuickTanh); } +// static java.lang.Integer valueOf(int) +void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) { + InvokeRuntimeCallingConvention calling_convention; + IntrinsicVisitor::ComputeIntegerValueOfLocations( + invoke, + codegen_, + calling_convention.GetReturnLocation(Primitive::kPrimNot), + Location::RegisterLocation(calling_convention.GetRegisterAt(0))); +} + +void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) { + IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(); + LocationSummary* locations = invoke->GetLocations(); + Mips64Assembler* assembler = GetAssembler(); + InstructionCodeGeneratorMIPS64* icodegen = + down_cast<InstructionCodeGeneratorMIPS64*>(codegen_->GetInstructionVisitor()); + + GpuRegister out = locations->Out().AsRegister<GpuRegister>(); + InvokeRuntimeCallingConvention calling_convention; + if (invoke->InputAt(0)->IsConstant()) { + int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue(); + if (value >= info.low && value <= info.high) { + // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current()); + mirror::Object* boxed = info.cache->Get(value + (-info.low)); + DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed)); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed)); + __ LoadConst64(out, address); + } else { + // Allocate and initialize a new j.l.Integer. + // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the + // JIT object table. + uint32_t address = + dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ LoadConst64(calling_convention.GetRegisterAt(0), address); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + } + } else { + GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); + Mips64Label allocate, done; + int32_t count = static_cast<uint32_t>(info.high) - info.low + 1; + + // Is (info.low <= in) && (in <= info.high)? + __ Addiu32(out, in, -info.low); + // As unsigned quantities is out < (info.high - info.low + 1)? + __ LoadConst32(AT, count); + // Branch if out >= (info.high - info.low + 1). + // This means that "in" is outside of the range [info.low, info.high]. + __ Bgeuc(out, AT, &allocate); + + // If the value is within the bounds, load the j.l.Integer directly from the array. 
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); + __ LoadConst64(TMP, data_offset + address); + __ Dlsa(out, out, TMP, TIMES_4); + __ Lwu(out, out, 0); + __ MaybeUnpoisonHeapReference(out); + __ Bc(&done); + + __ Bind(&allocate); + // Otherwise allocate and initialize a new j.l.Integer. + address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ LoadConst64(calling_convention.GetRegisterAt(0), address); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ StoreToOffset(kStoreWord, in, out, info.value_offset); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + __ Bind(&done); + } +} + UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent) UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy) @@ -2583,8 +2661,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt) UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong) UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject) -UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf) - UNREACHABLE_INTRINSICS(MIPS64) #undef __ |