Revert^2 "Fix race in CommitCodeInternal and cleanup"

This reverts commit 8dde74eb7bec8e989f34d86a01c26b0f5c7e6443.

The original change was missing a null check in cha.cc. Due to
compiler optimizations the code happened to run without error on
non-ASan builds, so the bug slipped past testing.

Reason for revert: Fixed the missing null check that caused the ASan failure (sketched below).
Test: run asan tests

Change-Id: Ic5492c69b7735555108d8b18c8e687ce510c4549
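
For context, the cha.cc fix itself is not part of this diff. The sketch
below is a hypothetical reduction of the bug the message describes; the
names (MethodHeader, dependents_map, RemoveDependents) are illustrative
and are not the actual cha.cc code.

  #include <cstdint>
  #include <unordered_map>
  #include <vector>

  // Hypothetical stand-ins for the real CHA data structures.
  struct MethodHeader {};
  std::unordered_map<uint64_t, std::vector<MethodHeader*>*> dependents_map;

  void RemoveDependents(uint64_t method_id) {
    auto it = dependents_map.find(method_id);
    if (it == dependents_map.end()) {
      return;
    }
    std::vector<MethodHeader*>* dependents = it->second;
    // The kind of check that was missing: the stored pointer may
    // legitimately be null. On non-ASan builds the optimizer happened to
    // elide the bad dereference, so only the ASan bots caught it.
    if (dependents == nullptr) {
      return;
    }
    delete dependents;
    dependents_map.erase(it);
  }
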
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b92affa..a8692a0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -608,17 +608,17 @@
 
 void JitCodeCache::FreeAllMethodHeaders(
     const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
-  {
-    MutexLock mu(Thread::Current(), *Locks::cha_lock_);
-    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
-        ->RemoveDependentsWithMethodHeaders(method_headers);
-  }
-
   // We need to remove entries in method_headers from CHA dependencies
   // first since once we do FreeCode() below, the memory can be reused
   // so it's possible for the same method_header to start representing
   different compiled code.
   MutexLock mu(Thread::Current(), lock_);
+  {
+    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
+    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
+        ->RemoveDependentsWithMethodHeaders(method_headers);
+  }
+
   ScopedCodeCacheWrite scc(this);
   for (const OatQuickMethodHeader* method_header : method_headers) {
     FreeCodeAndData(method_header->GetCode());
@@ -742,6 +742,18 @@
   method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
 }
 
+void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
+  while (collection_in_progress_) {
+    lock_.Unlock(self);
+    {
+      ScopedThreadSuspension sts(self, kSuspended);
+      MutexLock mu(self, lock_);
+      WaitForPotentialCollectionToComplete(self);
+    }
+    lock_.Lock(self);
+  }
+}
+
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
@@ -755,6 +767,13 @@
                                           const ArenaSet<ArtMethod*>&
                                               cha_single_implementation_list) {
   DCHECK(!method->IsNative() || !osr);
+
+  if (!method->IsNative()) {
+    // We need to do this before grabbing the lock_ because it needs to be able to see the string
+    // InternTable. Native methods do not have roots.
+    DCheckRootsAreValid(roots);
+  }
+
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   // Ensure the header ends up at expected instruction alignment.
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -763,44 +782,45 @@
   OatQuickMethodHeader* method_header = nullptr;
   uint8_t* code_ptr = nullptr;
   uint8_t* memory = nullptr;
+  MutexLock mu(self, lock_);
+  // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
+  // finish.
+  WaitForPotentialCollectionToCompleteRunnable(self);
   {
-    ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
-    WaitForPotentialCollectionToComplete(self);
-    {
-      ScopedCodeCacheWrite scc(this);
-      memory = AllocateCode(total_size);
-      if (memory == nullptr) {
-        return nullptr;
-      }
-      code_ptr = memory + header_size;
+    ScopedCodeCacheWrite scc(this);
+    memory = AllocateCode(total_size);
+    if (memory == nullptr) {
+      return nullptr;
+    }
+    code_ptr = memory + header_size;
 
-      std::copy(code, code + code_size, code_ptr);
-      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-      new (method_header) OatQuickMethodHeader(
-          (stack_map != nullptr) ? code_ptr - stack_map : 0u,
-          code_size);
-      // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
-      // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
-      // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
-      // 6P) stop being supported or their kernels are fixed.
-      //
-      // For reference, this behavior is caused by this commit:
-      // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
-      FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
-                            reinterpret_cast<char*>(code_ptr + code_size));
-      DCHECK(!Runtime::Current()->IsAotCompiler());
-      if (has_should_deoptimize_flag) {
-        method_header->SetHasShouldDeoptimizeFlag();
-      }
+    std::copy(code, code + code_size, code_ptr);
+    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+    new (method_header) OatQuickMethodHeader(
+        (stack_map != nullptr) ? code_ptr - stack_map : 0u,
+        code_size);
+    // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+    // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+    // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+    // 6P) stop being supported or their kernels are fixed.
+    //
+    // For reference, this behavior is caused by this commit:
+    // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+    FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+                          reinterpret_cast<char*>(code_ptr + code_size));
+    DCHECK(!Runtime::Current()->IsAotCompiler());
+    if (has_should_deoptimize_flag) {
+      method_header->SetHasShouldDeoptimizeFlag();
     }
 
     number_of_compilations_++;
   }
   // We need to update the entry point in the runnable state for the instrumentation.
   {
-    // Need cha_lock_ for checking all single-implementation flags and register
-    // dependencies.
+    // The following needs to be guarded by cha_lock_ as well. Otherwise it's possible that the
+    // compiled code is considered invalidated by some class linking, but below we still make the
+    // compiled code valid for the method. We also need cha_lock_ for checking all
+    // single-implementation flags and for registering dependencies.
     MutexLock cha_mu(self, *Locks::cha_lock_);
     bool single_impl_still_valid = true;
     for (ArtMethod* single_impl : cha_single_implementation_list) {
@@ -826,16 +846,6 @@
           single_impl, method, method_header);
     }
 
-    if (!method->IsNative()) {
-      // We need to do this before grabbing the lock_ because it needs to be able to see the string
-      // InternTable. Native methods do not have roots.
-      DCheckRootsAreValid(roots);
-    }
-
-    // The following needs to be guarded by cha_lock_ also. Otherwise it's
-    // possible that the compiled code is considered invalidated by some class linking,
-    // but below we still make the compiled code valid for the method.
-    MutexLock mu(self, lock_);
     if (UNLIKELY(method->IsNative())) {
       auto it = jni_stubs_map_.find(JniStubKey(method));
       DCHECK(it != jni_stubs_map_.end())
@@ -867,11 +877,6 @@
             method, method_header->GetEntryPoint());
       }
     }
-    if (collection_in_progress_) {
-      // We need to update the live bitmap if there is a GC to ensure it sees this new
-      // code.
-      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
-    }
     VLOG(jit)
         << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
         << ArtMethod::PrettyMethod(method) << "@" << method
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 29f9c9c..632b45b 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -314,6 +314,12 @@
       REQUIRES(lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
+  // The non-mutator lock version should be used if possible. This method will release then
+  // re-acquire the mutator lock.
+  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
+      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
   bool WaitForPotentialCollectionToComplete(Thread* self)
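
Taken together, the two files establish one lock order: lock_ is acquired
first, and Locks::cha_lock_ is only taken while lock_ is already held,
both in FreeAllMethodHeaders and in CommitCodeInternal. The sketch below
is an illustrative reduction of that discipline in plain C++; cache_lock,
cha_lock, and FreeHeaders are hypothetical names, not ART code.

  #include <mutex>
  #include <unordered_set>

  std::mutex cache_lock;  // stands in for JitCodeCache::lock_
  std::mutex cha_lock;    // stands in for Locks::cha_lock_
  std::unordered_set<const void*> cha_dependents;

  void FreeHeaders(const std::unordered_set<const void*>& headers) {
    // Take the cache lock first, then nest the CHA lock inside it.
    // Holding cache_lock across both the dependent removal and the later
    // free stops a concurrent commit from reusing a header's memory while
    // it is still registered as a CHA dependent.
    std::lock_guard<std::mutex> cache_guard(cache_lock);
    {
      std::lock_guard<std::mutex> cha_guard(cha_lock);
      for (const void* header : headers) {
        cha_dependents.erase(header);
      }
    }
    // ... header memory would be freed here, still under cache_lock ...
  }

CommitCodeInternal follows the same order: it takes lock_ up front, waits
out any in-progress collection with the new runnable-aware helper, and
only then nests cha_lock_ for the single-implementation checks.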