author Nicolas Geoffray <ngeoffray@google.com> 2019-11-28 16:15:00 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2019-11-28 17:16:57 +0000
commit 20036d80f246b564331e0943aa07ec3b50fc15d9 (patch)
tree   68c421f9da0c7ff7453ba5093203b94f9ec283c6 /compiler/optimizing
parent 36ec598a4d887746291d003c97c2cb28b5987768 (diff)
JIT baseline: don't update inline caches for intrinsics.
We already know the target.

Bug: 119800099
Test: test.py --baseline
Change-Id: I14cdafe233fec83a1f69e307326858c591309c34
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     |  6
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  |  6
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    | 15
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h     |  2
4 files changed, 21 insertions, 8 deletions
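
Each backend below gains the same three-part guard before emitting the inline-cache update. As a minimal, self-contained sketch of that decision (the Invoke and CompilerState types and the ShouldRecordInlineCache helper are hypothetical stand-ins; only the condition itself mirrors the MaybeGenerateInlineCacheCheck changes in the hunks):

    // Sketch only: plain structs stand in for ART's HInstruction, LocationSummary,
    // CompilerOptions and Runtime state referenced in the diff below.
    #include <cstdint>
    #include <iostream>

    struct Invoke {
      bool intrinsified;  // stands in for instruction->GetLocations()->Intrinsified()
      uint32_t dex_pc;    // stands in for instruction->GetDexPc()
    };

    struct CompilerState {
      bool is_baseline;      // GetCompilerOptions().IsBaseline()
      bool is_aot_compiler;  // Runtime::Current()->IsAotCompiler()
    };

    // Emit an inline-cache update only for non-intrinsic calls compiled by the
    // baseline JIT: an intrinsic's target is statically known, so profiling it
    // would add code and a cache slot for no benefit.
    bool ShouldRecordInlineCache(const Invoke& invoke, const CompilerState& state) {
      return !invoke.intrinsified && state.is_baseline && !state.is_aot_compiler;
    }

    int main() {
      CompilerState baseline_jit{/*is_baseline=*/true, /*is_aot_compiler=*/false};
      Invoke intrinsic_call{/*intrinsified=*/true, /*dex_pc=*/42};
      Invoke regular_call{/*intrinsified=*/false, /*dex_pc=*/7};
      std::cout << ShouldRecordInlineCache(intrinsic_call, baseline_jit) << "\n";  // 0
      std::cout << ShouldRecordInlineCache(regular_call, baseline_jit) << "\n";    // 1
      return 0;
    }

Because the check now also needs the invoke's locations, the x86-64 helper stops taking a raw dex_pc and takes the HInstruction* instead, reading the dex pc from the instruction when it looks up the inline cache; the ARM backends already had that signature.
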
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0162311de6..47c62f9366 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4044,7 +4044,11 @@ void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
Register klass) {
DCHECK_EQ(klass.GetCode(), 0u);
- if (GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ // We know the destination of an intrinsic, so no need to record inline
+ // caches.
+ if (!instruction->GetLocations()->Intrinsified() &&
+ GetCompilerOptions().IsBaseline() &&
+ !Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
ScopedObjectAccess soa(Thread::Current());
ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 68e2dfaed9..9100c6c547 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3301,7 +3301,11 @@ void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
vixl32::Register klass) {
DCHECK_EQ(r0.GetCode(), klass.GetCode());
- if (GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ // We know the destination of an intrinsic, so no need to record inline
+ // caches.
+ if (!instruction->GetLocations()->Intrinsified() &&
+ GetCompilerOptions().IsBaseline() &&
+ !Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
ScopedObjectAccess soa(Thread::Current());
ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 100a86b6a2..48a3d90f6f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1071,7 +1071,7 @@ void CodeGeneratorX86_64::GenerateVirtualCall(
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
- MaybeGenerateInlineCacheCheck(invoke->GetDexPc(), temp);
+ MaybeGenerateInlineCacheCheck(invoke, temp);
// temp = temp->GetMethodAt(method_offset);
__ movq(temp, Address(temp, method_offset));
@@ -2525,12 +2525,17 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX));
}
-void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(uint32_t dex_pc, CpuRegister klass) {
+void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
+ CpuRegister klass) {
DCHECK_EQ(RDI, klass.AsRegister());
- if (GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ // We know the destination of an intrinsic, so no need to record inline
+ // caches.
+ if (!instruction->GetLocations()->Intrinsified() &&
+ GetCompilerOptions().IsBaseline() &&
+ !Runtime::Current()->IsAotCompiler()) {
ScopedObjectAccess soa(Thread::Current());
ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
- InlineCache* cache = info->GetInlineCache(dex_pc);
+ InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
uint64_t address = reinterpret_cast64<uint64_t>(cache);
NearLabel done;
__ movq(CpuRegister(TMP), Immediate(address));
@@ -2569,7 +2574,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
- codegen_->MaybeGenerateInlineCacheCheck(invoke->GetDexPc(), temp);
+ codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
// Set the hidden argument. This is safe to do this here, as RAX
// won't be modified thereafter, before the `call` instruction.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 20db423643..5537a4a4d9 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -600,7 +600,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void GenerateNop() override;
void GenerateImplicitNullCheck(HNullCheck* instruction) override;
void GenerateExplicitNullCheck(HNullCheck* instruction) override;
- void MaybeGenerateInlineCacheCheck(uint32_t dex_pc, CpuRegister cls);
+ void MaybeGenerateInlineCacheCheck(HInstruction* instruction, CpuRegister cls);
// When we don't know the proper offset for the value, we use kDummy32BitOffset.