[baseline] Check that the profiling info is not null.

The zygote cannot allocate profiling infos, so methods compiled in the
zygote may have a null ProfilingInfo; skip the hotness-counter and
inline-cache instrumentation in that case instead of dereferencing null.

Test: 689-zygote-jit-deopt
Change-Id: I85e8b7f16b81ba4de435a5417dbb2588c34414b0
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1172776..8518b6d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1366,30 +1366,32 @@
   if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    uint64_t address = reinterpret_cast64<uint64_t>(info);
-    NearLabel done;
-    __ movq(CpuRegister(TMP), Immediate(address));
-    __ addw(Address(CpuRegister(TMP), ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
-            Immediate(1));
-    __ j(kCarryClear, &done);
-    if (HasEmptyFrame()) {
-      CHECK(is_frame_entry);
-      // Frame alignment, and the stub expects the method on the stack.
-      __ pushq(CpuRegister(RDI));
-      __ cfi().AdjustCFAOffset(kX86_64WordSize);
-      __ cfi().RelOffset(DWARFReg(RDI), 0);
-    } else if (!RequiresCurrentMethod()) {
-      CHECK(is_frame_entry);
-      __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+    if (info != nullptr) {
+      uint64_t address = reinterpret_cast64<uint64_t>(info);
+      NearLabel done;
+      __ movq(CpuRegister(TMP), Immediate(address));
+      __ addw(Address(CpuRegister(TMP), ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
+              Immediate(1));
+      __ j(kCarryClear, &done);
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        // Frame alignment, and the stub expects the method on the stack.
+        __ pushq(CpuRegister(RDI));
+        __ cfi().AdjustCFAOffset(kX86_64WordSize);
+        __ cfi().RelOffset(DWARFReg(RDI), 0);
+      } else if (!RequiresCurrentMethod()) {
+        CHECK(is_frame_entry);
+        __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+      }
+      GenerateInvokeRuntime(
+          GetThreadOffset<kX86_64PointerSize>(kQuickCompileOptimized).Int32Value());
+      if (HasEmptyFrame()) {
+        __ popq(CpuRegister(RDI));
+        __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
+        __ cfi().Restore(DWARFReg(RDI));
+      }
+      __ Bind(&done);
     }
-    GenerateInvokeRuntime(
-        GetThreadOffset<kX86_64PointerSize>(kQuickCompileOptimized).Int32Value());
-    if (HasEmptyFrame()) {
-      __ popq(CpuRegister(RDI));
-      __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
-      __ cfi().Restore(DWARFReg(RDI));
-    }
-    __ Bind(&done);
   }
 }
 
@@ -2577,16 +2579,18 @@
       !Runtime::Current()->IsAotCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint64_t address = reinterpret_cast64<uint64_t>(cache);
-    NearLabel done;
-    __ movq(CpuRegister(TMP), Immediate(address));
-    // Fast path for a monomorphic cache.
-    __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
-    __ j(kEqual, &done);
-    GenerateInvokeRuntime(
-        GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
-    __ Bind(&done);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      NearLabel done;
+      __ movq(CpuRegister(TMP), Immediate(address));
+      // Fast path for a monomorphic cache.
+      __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
+      __ j(kEqual, &done);
+      GenerateInvokeRuntime(
+          GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
+      __ Bind(&done);
+    }
   }
 }