[baseline] Check that the profiling info is not null.

The zygote cannot allocate profiling infos, so when the baseline
compiler JITs a method in the zygote, GetProfilingInfo() may return
null. Guard the hotness counter update and the inline cache fast path
with an explicit null check in the arm64, arm, x86 and x86-64 code
generators, and emit nothing when the info is missing.
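
All four backends apply the same shape of fix. Schematically (names as
in the hunks below; the emitted-code details are elided):

    ProfilingInfo* info =
        GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
    if (info != nullptr) {
      // Only now is it safe to bake the address of the info (or of one
      // of its inline caches) into the generated code.
      ...
    }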

Test: 689-zygote-jit-deopt
Change-Id: I85e8b7f16b81ba4de435a5417dbb2588c34414b0
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 64ec987..5ef7404 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1080,41 +1080,43 @@
   if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    uint32_t address = reinterpret_cast32<uint32_t>(info);
-    vixl::aarch64::Label done;
-    UseScratchRegisterScope temps(masm);
-    Register temp = temps.AcquireX();
-    Register counter = temps.AcquireW();
-    __ Mov(temp, address);
-    __ Ldrh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
-    __ Add(counter, counter, 1);
-    __ Strh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
-    __ Tst(counter, 0xffff);
-    __ B(ne, &done);
-    if (is_frame_entry) {
-      if (HasEmptyFrame()) {
-        // The entyrpoint expects the method at the bottom of the stack. We
-        // claim stack space necessary for alignment.
-        __ Claim(kStackAlignment);
-        __ Stp(kArtMethodRegister, lr, MemOperand(sp, 0));
-      } else if (!RequiresCurrentMethod()) {
-        __ Str(kArtMethodRegister, MemOperand(sp, 0));
+    if (info != nullptr) {
+      uint32_t address = reinterpret_cast32<uint32_t>(info);
+      vixl::aarch64::Label done;
+      UseScratchRegisterScope temps(masm);
+      Register temp = temps.AcquireX();
+      Register counter = temps.AcquireW();
+      __ Mov(temp, address);
+      __ Ldrh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      __ Add(counter, counter, 1);
+      __ Strh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      __ Tst(counter, 0xffff);
+      __ B(ne, &done);
+      if (is_frame_entry) {
+        if (HasEmptyFrame()) {
+          // The entrypoint expects the method at the bottom of the stack. We
+          // claim stack space necessary for alignment.
+          __ Claim(kStackAlignment);
+          __ Stp(kArtMethodRegister, lr, MemOperand(sp, 0));
+        } else if (!RequiresCurrentMethod()) {
+          __ Str(kArtMethodRegister, MemOperand(sp, 0));
+        }
+      } else {
+        CHECK(RequiresCurrentMethod());
       }
-    } else {
-      CHECK(RequiresCurrentMethod());
+      uint32_t entrypoint_offset =
+          GetThreadOffset<kArm64PointerSize>(kQuickCompileOptimized).Int32Value();
+      __ Ldr(lr, MemOperand(tr, entrypoint_offset));
+      // Note: we don't record the call here (and therefore don't generate a stack
+      // map), as the entrypoint should never be suspended.
+      __ Blr(lr);
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        __ Ldr(lr, MemOperand(sp, 8));
+        __ Drop(kStackAlignment);
+      }
+      __ Bind(&done);
     }
-    uint32_t entrypoint_offset =
-        GetThreadOffset<kArm64PointerSize>(kQuickCompileOptimized).Int32Value();
-    __ Ldr(lr, MemOperand(tr, entrypoint_offset));
-    // Note: we don't record the call here (and therefore don't generate a stack
-    // map), as the entrypoint should never be suspended.
-    __ Blr(lr);
-    if (HasEmptyFrame()) {
-      CHECK(is_frame_entry);
-      __ Ldr(lr, MemOperand(sp, 8));
-      __ Drop(kStackAlignment);
-    }
-    __ Bind(&done);
   }
 }
 
@@ -4089,16 +4091,18 @@
     DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint64_t address = reinterpret_cast64<uint64_t>(cache);
-    vixl::aarch64::Label done;
-    __ Mov(x8, address);
-    __ Ldr(x9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value()));
-    // Fast path for a monomorphic cache.
-    __ Cmp(klass, x9);
-    __ B(eq, &done);
-    InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
-    __ Bind(&done);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      vixl::aarch64::Label done;
+      __ Mov(x8, address);
+      __ Ldr(x9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value()));
+      // Fast path for a monomorphic cache.
+      __ Cmp(klass, x9);
+      __ B(eq, &done);
+      InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+      __ Bind(&done);
+    }
   }
 }
 
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d4a41f7..3a2cf40 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2103,43 +2103,45 @@
   if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    uint32_t address = reinterpret_cast32<uint32_t>(info);
-    vixl::aarch32::Label done;
-    UseScratchRegisterScope temps(GetVIXLAssembler());
-    temps.Exclude(ip);
-    if (!is_frame_entry) {
-      __ Push(r4);  // Will be used as temporary. For frame entry, r4 is always available.
-    }
-    __ Mov(r4, address);
-    __ Ldrh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
-    __ Add(ip, ip, 1);
-    __ Strh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
-    if (!is_frame_entry) {
-      __ Pop(r4);
-    }
-    __ Lsls(ip, ip, 16);
-    __ B(ne, &done);
-    uint32_t entry_point_offset =
-        GetThreadOffset<kArmPointerSize>(kQuickCompileOptimized).Int32Value();
-    if (HasEmptyFrame()) {
-      CHECK(is_frame_entry);
-      // For leaf methods, we need to spill lr and r0. Also spill r1 and r2 for
-      // alignment.
-      uint32_t core_spill_mask =
-          (1 << lr.GetCode()) | (1 << r0.GetCode()) | (1 << r1.GetCode()) | (1 << r2.GetCode());
-      __ Push(RegisterList(core_spill_mask));
+    if (info != nullptr) {
+      uint32_t address = reinterpret_cast32<uint32_t>(info);
+      vixl::aarch32::Label done;
+      UseScratchRegisterScope temps(GetVIXLAssembler());
+      temps.Exclude(ip);
+      if (!is_frame_entry) {
+        __ Push(r4);  // Will be used as temporary. For frame entry, r4 is always available.
+      }
+      __ Mov(r4, address);
+      __ Ldrh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      __ Add(ip, ip, 1);
+      __ Strh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      if (!is_frame_entry) {
+        __ Pop(r4);
+      }
+      __ Lsls(ip, ip, 16);
+      __ B(ne, &done);
+      uint32_t entry_point_offset =
+          GetThreadOffset<kArmPointerSize>(kQuickCompileOptimized).Int32Value();
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        // For leaf methods, we need to spill lr and r0. Also spill r1 and r2 for
+        // alignment.
+        uint32_t core_spill_mask =
+            (1 << lr.GetCode()) | (1 << r0.GetCode()) | (1 << r1.GetCode()) | (1 << r2.GetCode());
+        __ Push(RegisterList(core_spill_mask));
+        __ Ldr(lr, MemOperand(tr, entry_point_offset));
+        __ Blx(lr);
+        __ Pop(RegisterList(core_spill_mask));
+      } else {
+        if (!RequiresCurrentMethod()) {
+          CHECK(is_frame_entry);
+          GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
+        }
-      __ Ldr(lr, MemOperand(tr, entry_point_offset));
-      __ Blx(lr);
-      __ Pop(RegisterList(core_spill_mask));
-    } else {
-      if (!RequiresCurrentMethod()) {
-        CHECK(is_frame_entry);
-        GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
-      }
-      __ Ldr(lr, MemOperand(tr, entry_point_offset));
-      __ Blx(lr);
-    }
-    __ Bind(&done);
+        __ Ldr(lr, MemOperand(tr, entry_point_offset));
+        __ Blx(lr);
+      }
+      __ Bind(&done);
+    }
   }
 }
 
@@ -3364,18 +3366,20 @@
     DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint32_t address = reinterpret_cast32<uint32_t>(cache);
-    vixl32::Label done;
-    UseScratchRegisterScope temps(GetVIXLAssembler());
-    temps.Exclude(ip);
-    __ Mov(r4, address);
-    __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value()));
-    // Fast path for a monomorphic cache.
-    __ Cmp(klass, ip);
-    __ B(eq, &done, /* is_far_target= */ false);
-    InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
-    __ Bind(&done);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint32_t address = reinterpret_cast32<uint32_t>(cache);
+      vixl32::Label done;
+      UseScratchRegisterScope temps(GetVIXLAssembler());
+      temps.Exclude(ip);
+      __ Mov(r4, address);
+      __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value()));
+      // Fast path for a monomorphic cache.
+      __ Cmp(klass, ip);
+      __ B(eq, &done, /* is_far_target= */ false);
+      InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+      __ Bind(&done);
+    }
   }
 }
 
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f02ab26..ed1a536 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1096,44 +1096,48 @@
   if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    uint32_t address = reinterpret_cast32<uint32_t>(info);
-    NearLabel done;
-    if (HasEmptyFrame()) {
-      CHECK(is_frame_entry);
-      // Alignment
-      __ subl(ESP, Immediate(8));
-      __ cfi().AdjustCFAOffset(8);
-      // We need a temporary. The stub also expects the method at bottom of stack.
-      __ pushl(EAX);
-      __ cfi().AdjustCFAOffset(4);
-      __ movl(EAX, Immediate(address));
-      __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()), Immediate(1));
-      __ j(kCarryClear, &done);
-      GenerateInvokeRuntime(
-          GetThreadOffset<kX86PointerSize>(kQuickCompileOptimized).Int32Value());
-      __ Bind(&done);
-      // We don't strictly require to restore EAX, but this makes the generated
-      // code easier to reason about.
-      __ popl(EAX);
-      __ cfi().AdjustCFAOffset(-4);
-      __ addl(ESP, Immediate(8));
-      __ cfi().AdjustCFAOffset(-8);
-    } else {
-      if (!RequiresCurrentMethod()) {
+    if (info != nullptr) {
+      uint32_t address = reinterpret_cast32<uint32_t>(info);
+      NearLabel done;
+      if (HasEmptyFrame()) {
         CHECK(is_frame_entry);
-        __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+        // Alignment
+        __ subl(ESP, Immediate(8));
+        __ cfi().AdjustCFAOffset(8);
+        // We need a temporary. The stub also expects the method at bottom of stack.
+        __ pushl(EAX);
+        __ cfi().AdjustCFAOffset(4);
+        __ movl(EAX, Immediate(address));
+        __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
+                Immediate(1));
+        __ j(kCarryClear, &done);
+        GenerateInvokeRuntime(
+            GetThreadOffset<kX86PointerSize>(kQuickCompileOptimized).Int32Value());
+        __ Bind(&done);
+        // We don't strictly need to restore EAX, but this makes the generated
+        // code easier to reason about.
+        __ popl(EAX);
+        __ cfi().AdjustCFAOffset(-4);
+        __ addl(ESP, Immediate(8));
+        __ cfi().AdjustCFAOffset(-8);
+      } else {
+        if (!RequiresCurrentMethod()) {
+          CHECK(is_frame_entry);
+          __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+        }
+        // We need a temporary.
+        __ pushl(EAX);
+        __ cfi().AdjustCFAOffset(4);
+        __ movl(EAX, Immediate(address));
+        __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
+                Immediate(1));
+        __ popl(EAX);  // Put stack as expected before exiting or calling stub.
+        __ cfi().AdjustCFAOffset(-4);
+        __ j(kCarryClear, &done);
+        GenerateInvokeRuntime(
+            GetThreadOffset<kX86PointerSize>(kQuickCompileOptimized).Int32Value());
+        __ Bind(&done);
       }
-      // We need a temporary.
-      __ pushl(EAX);
-      __ cfi().AdjustCFAOffset(4);
-      __ movl(EAX, Immediate(address));
-      __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()), Immediate(1));
-      __ popl(EAX);  // Put stack as expected before exiting or calling stub.
-      __ cfi().AdjustCFAOffset(-4);
-      __ j(kCarryClear, &done);
-      GenerateInvokeRuntime(
-          GetThreadOffset<kX86PointerSize>(kQuickCompileOptimized).Int32Value());
-      __ Bind(&done);
     }
   }
 }
@@ -2366,20 +2370,22 @@
     DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint32_t address = reinterpret_cast32<uint32_t>(cache);
-    if (kIsDebugBuild) {
-      uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u;
-      CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>());
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint32_t address = reinterpret_cast32<uint32_t>(cache);
+      if (kIsDebugBuild) {
+        uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u;
+        CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>());
+      }
+      Register temp = EBP;
+      NearLabel done;
+      __ movl(temp, Immediate(address));
+      // Fast path for a monomorphic cache.
+      __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value()));
+      __ j(kEqual, &done);
+      GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value());
+      __ Bind(&done);
     }
-    Register temp = EBP;
-    NearLabel done;
-    __ movl(temp, Immediate(address));
-    // Fast path for a monomorphic cache.
-    __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value()));
-    __ j(kEqual, &done);
-    GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value());
-    __ Bind(&done);
   }
 }
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1172776..8518b6d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1366,30 +1366,32 @@
   if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    uint64_t address = reinterpret_cast64<uint64_t>(info);
-    NearLabel done;
-    __ movq(CpuRegister(TMP), Immediate(address));
-    __ addw(Address(CpuRegister(TMP), ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
-            Immediate(1));
-    __ j(kCarryClear, &done);
-    if (HasEmptyFrame()) {
-      CHECK(is_frame_entry);
-      // Frame alignment, and the stub expects the method on the stack.
-      __ pushq(CpuRegister(RDI));
-      __ cfi().AdjustCFAOffset(kX86_64WordSize);
-      __ cfi().RelOffset(DWARFReg(RDI), 0);
-    } else if (!RequiresCurrentMethod()) {
-      CHECK(is_frame_entry);
-      __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+    if (info != nullptr) {
+      uint64_t address = reinterpret_cast64<uint64_t>(info);
+      NearLabel done;
+      __ movq(CpuRegister(TMP), Immediate(address));
+      __ addw(Address(CpuRegister(TMP), ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
+              Immediate(1));
+      __ j(kCarryClear, &done);
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        // Frame alignment, and the stub expects the method on the stack.
+        __ pushq(CpuRegister(RDI));
+        __ cfi().AdjustCFAOffset(kX86_64WordSize);
+        __ cfi().RelOffset(DWARFReg(RDI), 0);
+      } else if (!RequiresCurrentMethod()) {
+        CHECK(is_frame_entry);
+        __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+      }
+      GenerateInvokeRuntime(
+          GetThreadOffset<kX86_64PointerSize>(kQuickCompileOptimized).Int32Value());
+      if (HasEmptyFrame()) {
+        __ popq(CpuRegister(RDI));
+        __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
+        __ cfi().Restore(DWARFReg(RDI));
+      }
+      __ Bind(&done);
     }
-    GenerateInvokeRuntime(
-        GetThreadOffset<kX86_64PointerSize>(kQuickCompileOptimized).Int32Value());
-    if (HasEmptyFrame()) {
-      __ popq(CpuRegister(RDI));
-      __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
-      __ cfi().Restore(DWARFReg(RDI));
-    }
-    __ Bind(&done);
   }
 }
 
@@ -2577,16 +2579,18 @@
       !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint64_t address = reinterpret_cast64<uint64_t>(cache);
-    NearLabel done;
-    __ movq(CpuRegister(TMP), Immediate(address));
-    // Fast path for a monomorphic cache.
-    __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
-    __ j(kEqual, &done);
-    GenerateInvokeRuntime(
-        GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
-    __ Bind(&done);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      NearLabel done;
+      __ movq(CpuRegister(TMP), Immediate(address));
+      // Fast path for a monomorphic cache.
+      __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
+      __ j(kEqual, &done);
+      GenerateInvokeRuntime(
+          GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
+      __ Bind(&done);
+    }
   }
 }