JIT baseline: don't update inline caches for intrinsics.

For intrinsified invokes the target is already statically known, so
recording inline cache data for them is pointless. Skip the check in
all three baseline backends, and change the x86-64
MaybeGenerateInlineCacheCheck to take the HInstruction* rather than a
dex_pc so it can query the invoke's locations, matching arm and arm64.

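The new guard, common to all three backends, can be modeled as a
standalone predicate (illustrative sketch only; the names below are
hypothetical, not ART's real API):

  // Returns true when baseline JIT code should record inline cache
  // data for a virtual/interface invoke.
  static bool ShouldRecordInlineCache(bool is_intrinsified,
                                      bool is_baseline,
                                      bool is_aot_compiler) {
    // Intrinsified invokes have a statically known target, so an
    // inline cache would carry no useful profiling information.
    // Inline caches are also only recorded when JITing baseline code,
    // never when compiling ahead of time.
    return !is_intrinsified && is_baseline && !is_aot_compiler;
  }
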
Bug: 119800099
Test: test.py --baseline
Change-Id: I14cdafe233fec83a1f69e307326858c591309c34
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0162311..47c62f9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4044,7 +4044,11 @@
 void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
                                                        Register klass) {
   DCHECK_EQ(klass.GetCode(), 0u);
-  if (GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+  // We know the destination of an intrinsic, so no need to record inline
+  // caches.
+  if (!instruction->GetLocations()->Intrinsified() &&
+      GetCompilerOptions().IsBaseline() &&
+      !Runtime::Current()->IsAotCompiler()) {
     DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 68e2dfa..9100c6c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3301,7 +3301,11 @@
 void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
                                                          vixl32::Register klass) {
   DCHECK_EQ(r0.GetCode(), klass.GetCode());
-  if (GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+  // We know the destination of an intrinsic, so no need to record inline
+  // caches.
+  if (!instruction->GetLocations()->Intrinsified() &&
+      GetCompilerOptions().IsBaseline() &&
+      !Runtime::Current()->IsAotCompiler()) {
     DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 100a86b..48a3d90 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1071,7 +1071,7 @@
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
 
-  MaybeGenerateInlineCacheCheck(invoke->GetDexPc(), temp);
+  MaybeGenerateInlineCacheCheck(invoke, temp);
 
   // temp = temp->GetMethodAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
@@ -2525,12 +2525,17 @@
   invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX));
 }
 
-void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(uint32_t dex_pc, CpuRegister klass) {
+void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
+                                                        CpuRegister klass) {
   DCHECK_EQ(RDI, klass.AsRegister());
-  if (GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+  // We know the destination of an intrinsic, so no need to record inline
+  // caches.
+  if (!instruction->GetLocations()->Intrinsified() &&
+      GetCompilerOptions().IsBaseline() &&
+      !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    InlineCache* cache = info->GetInlineCache(dex_pc);
+    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
     uint64_t address = reinterpret_cast64<uint64_t>(cache);
     NearLabel done;
     __ movq(CpuRegister(TMP), Immediate(address));
@@ -2569,7 +2574,7 @@
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
 
-  codegen_->MaybeGenerateInlineCacheCheck(invoke->GetDexPc(), temp);
+  codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
 
   // Set the hidden argument. This is safe to do this here, as RAX
   // won't be modified thereafter, before the `call` instruction.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 20db423..5537a4a 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -600,7 +600,7 @@
   void GenerateNop() override;
   void GenerateImplicitNullCheck(HNullCheck* instruction) override;
   void GenerateExplicitNullCheck(HNullCheck* instruction) override;
-  void MaybeGenerateInlineCacheCheck(uint32_t dex_pc, CpuRegister cls);
+  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, CpuRegister cls);
 
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.