Revert "Add missing card mark verification to CC"

Fails in read-barrier-gcstress for 944-transform-classloaders


Bug: 12687968

This reverts commit 49ba69667ce70f8efbed7d68814ab184bee53486.

Change-Id: Ie91eaa034cea77918235766983661efa14fb1a14
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 57fc909..46f1644 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3798,10 +3798,14 @@
 }
 
 void ClassLinker::WriteBarrierForBootOatFileBssRoots(const OatFile* oat_file) {
-  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  DCHECK(!oat_file->GetBssGcRoots().empty()) << oat_file->GetLocation();
-  if (log_new_roots_ && !ContainsElement(new_bss_roots_boot_oat_files_, oat_file)) {
-    new_bss_roots_boot_oat_files_.push_back(oat_file);
+  if (!kUseReadBarrier) {
+    WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+    DCHECK(!oat_file->GetBssGcRoots().empty()) << oat_file->GetLocation();
+    if (log_new_roots_ && !ContainsElement(new_bss_roots_boot_oat_files_, oat_file)) {
+      new_bss_roots_boot_oat_files_.push_back(oat_file);
+    }
+  } else {
+    LOG(FATAL) << "UNREACHABLE";
   }
 }
 
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 355d7b3..47c6b51 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -32,9 +32,12 @@
 namespace art {
 
 static inline void BssWriteBarrier(ArtMethod* outer_method) REQUIRES_SHARED(Locks::mutator_lock_) {
-  // For AOT code, we need a write barrier for the class loader that holds the
-  // GC roots in the .bss.
-  const DexFile* dex_file = outer_method->GetDexFile();
+  // For non-CC AOT code, we need a write barrier for the class loader that holds the
+  // GC roots in the .bss. For CC, we do not need to do anything because the roots
+  // we're storing are all referencing to-space and do not need to be re-visited.
+  // However, we do the DCHECK() for the registration of oat files with .bss sections.
+  const DexFile* dex_file =
+      (kUseReadBarrier && !kIsDebugBuild) ? nullptr : outer_method->GetDexFile();
   if (dex_file != nullptr &&
       dex_file->GetOatDexFile() != nullptr &&
       !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
@@ -47,13 +50,15 @@
           << "Oat file with .bss GC roots was not registered in class table: "
           << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
     }
-    if (class_loader != nullptr) {
-      // Note that we emit the barrier before the compiled code stores the String or Class
-      // as a GC root. This is OK as there is no suspend point point in between.
-      Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
-    } else {
-      Runtime::Current()->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(
-          dex_file->GetOatDexFile()->GetOatFile());
+    if (!kUseReadBarrier) {
+      if (class_loader != nullptr) {
+        // Note that we emit the barrier before the compiled code stores the String or Class
+        // as a GC root. This is OK as there is no suspend point in between.
+        Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+      } else {
+        Runtime::Current()->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(
+            dex_file->GetOatDexFile()->GetOatFile());
+      }
     }
   }
 }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b939cbe..f18ffb4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -53,8 +53,6 @@
 // Slow path mark stack size, increase this if the stack is getting full and it is causing
 // performance problems.
 static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
-// Verify that there are no missing card marks.
-static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;
 
 ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                      const std::string& name_prefix,
@@ -320,9 +318,6 @@
     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
-    if (kVerifyNoMissingCardMarks) {
-      cc->VerifyNoMissingCardMarks();
-    }
     CHECK(thread == self);
     Locks::mutator_lock_->AssertExclusiveHeld(self);
     cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
@@ -433,72 +428,6 @@
   }
 }
 
-class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
- public:
-  VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
-    : cc_(cc),
-      holder_(holder) {}
-
-  void operator()(ObjPtr<mirror::Object> obj,
-                  MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
-    if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
-     CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
-         offset), offset.Uint32Value());
-    }
-  }
-  void operator()(ObjPtr<mirror::Class> klass,
-                  ObjPtr<mirror::Reference> ref) const
-      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
-    CHECK(klass->IsTypeOfReferenceClass());
-    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
-  }
-
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!root->IsNull()) {
-      VisitRoot(root);
-    }
-  }
-
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    CheckReference(root->AsMirrorPtr());
-  }
-
-  void CheckReference(mirror::Object* ref, uint32_t offset = 0) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
-        << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
-        << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
-  }
-
- private:
-  ConcurrentCopying* const cc_;
-  ObjPtr<mirror::Object> const holder_;
-};
-
-void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
-  auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
-  // Objects not on dirty cards should never have references to newly allocated regions.
-  if (!collector->heap_->GetCardTable()->IsDirty(obj)) {
-    VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
-    obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
-        visitor,
-        visitor);
-  }
-}
-
-void ConcurrentCopying::VerifyNoMissingCardMarks() {
-  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
-  region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
-  {
-    ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
-  }
-}
-
 // Switch threads that from from-space to to-space refs. Forward/mark the thread roots.
 void ConcurrentCopying::FlipThreadRoots() {
   TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
@@ -2401,9 +2330,7 @@
     MutexLock mu(self, mark_stack_lock_);
     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   }
-  // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
-  // positives.
-  if (!kVerifyNoMissingCardMarks) {
+  {
     TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
     // We do not currently use the region space cards at all, madvise them away to save ram.
     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 11060a8..844bb45 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -162,12 +162,6 @@
   void VerifyGrayImmuneObjects()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
-  static void VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg)
-      REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
-  void VerifyNoMissingCardMarks()
-      REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
   size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
@@ -335,7 +329,6 @@
   class VerifyNoFromSpaceRefsFieldVisitor;
   class VerifyNoFromSpaceRefsObjectVisitor;
   class VerifyNoFromSpaceRefsVisitor;
-  class VerifyNoMissingCardMarkVisitor;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
 };
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index feab9b0..f3b9595 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -176,14 +176,6 @@
     return false;
   }
 
-  bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
-    if (HasAddress(ref)) {
-      Region* r = RefToRegionUnlocked(ref);
-      return r->IsNewlyAllocated();
-    }
-    return false;
-  }
-
   bool IsInUnevacFromSpace(mirror::Object* ref) {
     if (HasAddress(ref)) {
       Region* r = RefToRegionUnlocked(ref);
@@ -359,10 +351,6 @@
       return idx_;
     }
 
-    bool IsNewlyAllocated() const {
-      return is_newly_allocated_;
-    }
-
     bool IsInFromSpace() const {
       return type_ == RegionType::kRegionTypeFromSpace;
     }