Revert^2 "JIT: Separate code allocation and initialization."

This reverts commit 63b0c26aae3e7237166dd781eb7a15fbc7c091c2.
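
This splits JIT compilation memory management into three steps:
Reserve() allocates space for the code and data up front, the
compiler writes into that reservation, and Commit() publishes the
result. On failure, Commit() no longer frees the memory itself; the
reservation stays owned by the caller until it calls Free(). A rough
sketch of the intended calling sequence, using the signatures below
(the local names here are illustrative, not the actual caller code):

  ArrayRef<const uint8_t> reserved_code;
  ArrayRef<const uint8_t> reserved_data;
  if (!code_cache->Reserve(self, region, code_size, stack_map_size,
                           roots.size(), method,
                           /*out*/ &reserved_code,
                           /*out*/ &reserved_data)) {
    return false;  // Out of space, even after retrying with a GC.
  }
  // ... emit code and stack maps into the reservation ...
  if (!code_cache->Commit(self, region, method, reserved_code, code,
                          reserved_data, roots, stack_map, osr,
                          has_should_deoptimize_flag,
                          cha_single_implementation_list)) {
    code_cache->Free(self, region, reserved_code.data(),
                     reserved_data.data());
    return false;
  }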

Test: ./art/test.py -b -r --host --all-gc -t 708
Reason for revert: Reland after bug fix.
Change-Id: Ic13e2799bf4bdd8ca468f72cc0f3b72f224f2b08
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6ab811b..ff23385 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -323,53 +323,6 @@
   return nullptr;
 }
 
-uint8_t* JitCodeCache::CommitCode(Thread* self,
-                                  JitMemoryRegion* region,
-                                  ArtMethod* method,
-                                  const uint8_t* code,
-                                  size_t code_size,
-                                  const uint8_t* stack_map,
-                                  size_t stack_map_size,
-                                  const uint8_t* roots_data,
-                                  const std::vector<Handle<mirror::Object>>& roots,
-                                  bool osr,
-                                  bool has_should_deoptimize_flag,
-                                  const ArenaSet<ArtMethod*>& cha_single_implementation_list,
-                                  const std::function<void(const uint8_t* code)>&
-                                      generate_debug_info) {
-  uint8_t* result = CommitCodeInternal(self,
-                                       region,
-                                       method,
-                                       code,
-                                       code_size,
-                                       stack_map,
-                                       stack_map_size,
-                                       roots_data,
-                                       roots,
-                                       osr,
-                                       has_should_deoptimize_flag,
-                                       cha_single_implementation_list,
-                                       generate_debug_info);
-  if (result == nullptr) {
-    // Retry.
-    GarbageCollectCache(self);
-    result = CommitCodeInternal(self,
-                                region,
-                                method,
-                                code,
-                                code_size,
-                                stack_map,
-                                stack_map_size,
-                                roots_data,
-                                roots,
-                                osr,
-                                has_should_deoptimize_flag,
-                                cha_single_implementation_list,
-                                generate_debug_info);
-  }
-  return result;
-}
-
 bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
   bool in_collection = false;
   while (collection_in_progress_) {
@@ -672,21 +625,17 @@
   }
 }
 
-uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
-                                          JitMemoryRegion* region,
-                                          ArtMethod* method,
-                                          const uint8_t* code,
-                                          size_t code_size,
-                                          const uint8_t* stack_map,
-                                          size_t stack_map_size,
-                                          const uint8_t* roots_data,
-                                          const std::vector<Handle<mirror::Object>>& roots,
-                                          bool osr,
-                                          bool has_should_deoptimize_flag,
-                                          const ArenaSet<ArtMethod*>&
-                                              cha_single_implementation_list,
-                                          const std::function<void(const uint8_t* code)>&
-                                              generate_debug_info) {
+bool JitCodeCache::Commit(Thread* self,
+                          JitMemoryRegion* region,
+                          ArtMethod* method,
+                          ArrayRef<const uint8_t> reserved_code,
+                          ArrayRef<const uint8_t> code,
+                          ArrayRef<const uint8_t> reserved_data,
+                          const std::vector<Handle<mirror::Object>>& roots,
+                          ArrayRef<const uint8_t> stack_map,
+                          bool osr,
+                          bool has_should_deoptimize_flag,
+                          const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
   DCHECK(!method->IsNative() || !osr);
 
   if (!method->IsNative()) {
@@ -695,6 +644,7 @@
     DCheckRootsAreValid(roots, IsSharedRegion(*region));
   }
 
+  const uint8_t* roots_data = reserved_data.data();
   size_t root_table_size = ComputeRootTableSize(roots.size());
   const uint8_t* stack_map_data = roots_data + root_table_size;
 
@@ -702,26 +652,20 @@
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
   // finish.
   WaitForPotentialCollectionToCompleteRunnable(self);
-  const uint8_t* code_ptr = region->AllocateCode(
-      code, code_size, stack_map_data, has_should_deoptimize_flag);
+  const uint8_t* code_ptr = region->CommitCode(
+      reserved_code, code, stack_map_data, has_should_deoptimize_flag);
   if (code_ptr == nullptr) {
-    return nullptr;
+    return false;
   }
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
 
   // Commit roots and stack maps before updating the entry point.
-  if (!region->CommitData(roots_data, roots, stack_map, stack_map_size)) {
-    ScopedCodeCacheWrite ccw(*region);
-    uintptr_t allocation = FromCodeToAllocation(code_ptr);
-    region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
-    return nullptr;
+  if (!region->CommitData(reserved_data, roots, stack_map)) {
+    return false;
   }
 
   number_of_compilations_++;
 
-  // Add debug info after we know the code location but before we update entry-point.
-  generate_debug_info(code_ptr);
-
   // We need to update the entry point in the runnable state for the instrumentation.
   {
     // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
@@ -743,10 +687,7 @@
     // Discard the code if any single-implementation assumptions are now invalid.
     if (UNLIKELY(!single_impl_still_valid)) {
       VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
-      ScopedCodeCacheWrite ccw(*region);
-      uintptr_t allocation = FromCodeToAllocation(code_ptr);
-      region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
-      return nullptr;
+      return false;
     }
     DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
         << "Should not be using cha on debuggable apps/runs!";
@@ -805,16 +746,9 @@
         << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
         << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                          method_header->GetCodeSize());
-    histogram_code_memory_use_.AddValue(code_size);
-    if (code_size > kCodeSizeLogThreshold) {
-      LOG(INFO) << "JIT allocated "
-                << PrettySize(code_size)
-                << " for compiled code of "
-                << ArtMethod::PrettyMethod(method);
-    }
   }
 
-  return reinterpret_cast<uint8_t*>(method_header);
+  return true;
 }
 
 size_t JitCodeCache::CodeCacheSize() {
@@ -966,38 +900,76 @@
   return GetCurrentRegion()->GetUsedMemoryForData();
 }
 
-void JitCodeCache::ClearData(Thread* self,
-                             JitMemoryRegion* region,
-                             const uint8_t* roots_data) {
-  MutexLock mu(self, *Locks::jit_lock_);
-  region->FreeData(roots_data);
-}
+bool JitCodeCache::Reserve(Thread* self,
+                           JitMemoryRegion* region,
+                           size_t code_size,
+                           size_t stack_map_size,
+                           size_t number_of_roots,
+                           ArtMethod* method,
+                           /*out*/ArrayRef<const uint8_t>* reserved_code,
+                           /*out*/ArrayRef<const uint8_t>* reserved_data) {
+  code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
+  size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));
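+  // The extra code_size covers the OatQuickMethodHeader placed in front of
+  // the code; the data region holds the GC roots table followed by the
+  // stack map, rounded up to pointer alignment.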
 
-const uint8_t* JitCodeCache::ReserveData(Thread* self,
-                                         JitMemoryRegion* region,
-                                         size_t stack_map_size,
-                                         size_t number_of_roots,
-                                         ArtMethod* method) {
-  size_t table_size = ComputeRootTableSize(number_of_roots);
-  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
-  const uint8_t* result = nullptr;
-
-  {
-    ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, *Locks::jit_lock_);
-    WaitForPotentialCollectionToComplete(self);
-    result = region->AllocateData(size);
+  const uint8_t* code;
+  const uint8_t* data;
+  // We might need to try the allocation twice (with GC in between to free up memory).
+  for (int i = 0; i < 2; i++) {
+    {
+      ScopedThreadSuspension sts(self, kSuspended);
+      MutexLock mu(self, *Locks::jit_lock_);
+      WaitForPotentialCollectionToComplete(self);
+      ScopedCodeCacheWrite ccw(*region);
+      code = region->AllocateCode(code_size);
+      data = region->AllocateData(data_size);
+    }
+    if (code == nullptr || data == nullptr) {
+      Free(self, region, code, data);
+      if (i == 0) {
+        GarbageCollectCache(self);
+        continue;  // Retry after GC.
+      } else {
+        return false;  // Fail.
+      }
+    }
+    break;  // Success.
   }
+  *reserved_code = ArrayRef<const uint8_t>(code, code_size);
+  *reserved_data = ArrayRef<const uint8_t>(data, data_size);
 
   MutexLock mu(self, *Locks::jit_lock_);
-  histogram_stack_map_memory_use_.AddValue(size);
-  if (size > kStackMapSizeLogThreshold) {
+  histogram_code_memory_use_.AddValue(code_size);
+  if (code_size > kCodeSizeLogThreshold) {
     LOG(INFO) << "JIT allocated "
-              << PrettySize(size)
+              << PrettySize(code_size)
+              << " for compiled code of "
+              << ArtMethod::PrettyMethod(method);
+  }
+  histogram_stack_map_memory_use_.AddValue(data_size);
+  if (data_size > kStackMapSizeLogThreshold) {
+    LOG(INFO) << "JIT allocated "
+              << PrettySize(data_size)
               << " for stack maps of "
               << ArtMethod::PrettyMethod(method);
   }
-  return result;
+  return true;
+}
+
+void JitCodeCache::Free(Thread* self,
+                        JitMemoryRegion* region,
+                        const uint8_t* code,
+                        const uint8_t* data) {
+  MutexLock mu(self, *Locks::jit_lock_);
+  ScopedCodeCacheWrite ccw(*region);
+  if (code != nullptr) {
+    region->FreeCode(code);
+  }
+  if (data != nullptr) {
+    region->FreeData(data);
+  }
 }
 
 class MarkCodeClosure final : public Closure {
@@ -1685,7 +1654,7 @@
     if (UNLIKELY(!data->IsCompiled())) {
       // Failed to compile; the JNI compiler never fails, but the cache may be full.
       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
-    }  // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
+    }  // else Commit() updated entrypoints of all methods in the JniStubData.
   } else {
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (info != nullptr) {