author Vladimir Marko <vmarko@google.com> 2023-09-04 14:18:33 +0000
committer Vladimír Marko <vmarko@google.com> 2023-09-05 13:11:44 +0000
commit 09e2c93fbfa821bf1c1db6dbc5293e6b4b1f680b (patch)
tree c87f1bf9825f9eefb0aeb870948a7442b62a4e7f /compiler/optimizing
parent 844df0d9e3f74f17acbe266032ece08a405c223e (diff)
riscv64: Implement `art_quick_update_inline_cache`.
Also change the codegen to put the inline cache in the right register
and fix the codegen's `LoadMethod()`.

Test: # Cherry-pick
      # https://android-review.googlesource.com/2727976 PS1,
      # then testrunner.py --target --64 --jit -t 004-InvokeInterface
Bug: 283082089
Change-Id: I4e188d21240536a0efdf039ff9911566f2a75360
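For context, here is a minimal, self-contained C++ sketch of the monomorphic inline-cache check that the generated code in the diff below implements. It is an illustration only, assuming a 5-slot cache; the names ObjClass, InlineCacheSketch and UpdateInlineCacheSlowPath are made up rather than ART's runtime API, and the slow-path function merely stands in for the art_quick_update_inline_cache entrypoint, which on riscv64 expects the cache pointer in T5.

// Illustrative sketch only; not ART code.
#include <cstddef>

struct ObjClass {};

struct InlineCacheSketch {
  static constexpr size_t kSlots = 5;   // assumed cache size, for illustration
  ObjClass* classes[kSlots] = {};       // classes[0] is the monomorphic slot
};

// Stand-in for the art_quick_update_inline_cache entrypoint: record the
// newly seen receiver class in the cache (megamorphic handling omitted).
void UpdateInlineCacheSlowPath(InlineCacheSketch* cache, ObjClass* klass) {
  for (ObjClass*& slot : cache->classes) {
    if (slot == nullptr || slot == klass) {
      slot = klass;
      return;
    }
  }
}

// Conceptual shape of the emitted check: load classes[0], compare it with the
// receiver's class, and skip the slow path on a hit (the Beq in the diff).
void InlineCacheCheck(InlineCacheSketch* cache, ObjClass* klass) {
  if (cache->classes[0] == klass) {
    return;  // fast path: monomorphic hit, fall through to the call
  }
  UpdateInlineCacheSlowPath(cache, klass);  // cache pointer expected in T5
}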
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc  19
1 file changed, 12 insertions, 7 deletions
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 9423e7593e..fa5f65fc62 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -4105,9 +4105,8 @@ void CodeGeneratorRISCV64::LoadMethod(MethodLoadKind load_kind, Location temp, H
break;
}
case MethodLoadKind::kJitDirectAddress: {
- __ Li(temp.AsFpuRegister<XRegister>(),
- reinterpret_cast<uint64_t>(invoke->GetResolvedMethod()));
- __ Ld(temp.AsRegister<XRegister>(), temp.AsFpuRegister<XRegister>(), 0);
+ __ LoadConst64(temp.AsRegister<XRegister>(),
+ reinterpret_cast<uint64_t>(invoke->GetResolvedMethod()));
break;
}
case MethodLoadKind::kRuntimeCall: {
@@ -4212,11 +4211,17 @@ void CodeGeneratorRISCV64::MaybeGenerateInlineCacheCheck(HInstruction* instructi
InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
uint64_t address = reinterpret_cast64<uint64_t>(cache);
Riscv64Label done;
+ // The `art_quick_update_inline_cache` expects the inline cache in T5.
+ XRegister ic_reg = T5;
+ ScratchRegisterScope srs(GetAssembler());
+ DCHECK_EQ(srs.AvailableXRegisters(), 2u);
+ srs.ExcludeXRegister(ic_reg);
+ DCHECK_EQ(srs.AvailableXRegisters(), 1u);
+ __ LoadConst64(ic_reg, address);
{
- ScratchRegisterScope srs(GetAssembler());
- XRegister tmp = srs.AllocateXRegister();
- __ LoadConst64(tmp, address);
- __ Loadd(tmp, tmp, InlineCache::ClassesOffset().Int32Value());
+ ScratchRegisterScope srs2(GetAssembler());
+ XRegister tmp = srs2.AllocateXRegister();
+ __ Loadd(tmp, ic_reg, InlineCache::ClassesOffset().Int32Value());
// Fast path for a monomorphic cache.
__ Beq(klass, tmp, &done);
}
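The nested scratch scopes above work because T5 is excluded from the shared pool before the inner scope allocates its temporary, so the temporary can never alias the inline cache pointer. Below is a toy, self-contained C++ model of that reservation pattern; it is not ART's ScratchRegisterScope API, and all names in it are illustrative.

#include <cassert>

// Toy model of a two-register scratch pool (T5, T6) shared by nested scopes.
enum XRegister { T5, T6, kNoRegister };

struct ScratchPool {
  bool free[2] = {true, true};  // index 0 = T5, index 1 = T6
};

class ToyScratchScope {
 public:
  explicit ToyScratchScope(ScratchPool* pool) : pool_(pool) {}

  size_t Available() const {
    return (pool_->free[T5] ? 1u : 0u) + (pool_->free[T6] ? 1u : 0u);
  }

  // Reserve `reg` so that no scope can hand it out as a temporary.
  void Exclude(XRegister reg) { pool_->free[reg] = false; }

  // Hand out any remaining free scratch register.
  XRegister Allocate() {
    for (int r = T5; r <= T6; ++r) {
      if (pool_->free[r]) {
        pool_->free[r] = false;
        return static_cast<XRegister>(r);
      }
    }
    return kNoRegister;
  }

 private:
  ScratchPool* pool_;
};

int main() {
  ScratchPool pool;
  ToyScratchScope srs(&pool);
  assert(srs.Available() == 2u);
  srs.Exclude(T5);               // T5 now holds the inline cache pointer
  assert(srs.Available() == 1u);
  ToyScratchScope srs2(&pool);   // nested scope shares the same pool
  XRegister tmp = srs2.Allocate();
  assert(tmp == T6);             // the temporary can never clobber T5
  return 0;
}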