Merge "ART: Add primitive array reporting"
diff --git a/Android.mk b/Android.mk
index f3ab3c1..0e86188 100644
--- a/Android.mk
+++ b/Android.mk
@@ -42,7 +42,7 @@
 
 .PHONY: clean-oat-host
 clean-oat-host:
-	find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" | xargs rm -f
+	find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f
 ifneq ($(TMPDIR),)
 	rm -rf $(TMPDIR)/$(USER)/test-*/dalvik-cache/*
 	rm -rf $(TMPDIR)/android-data/dalvik-cache/*
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 52ffa55..7e91453 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1054,11 +1054,16 @@
 }
 
 bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
+  // Profile compilation info may be null if no profile is passed.
   if (!CompilerFilter::DependsOnProfile(compiler_options_->GetCompilerFilter())) {
     // Use the compiler filter instead of the presence of profile_compilation_info_ since
     // we may want to have full speed compilation along with profile based layout optimizations.
     return true;
   }
+  // If we are using a profile filter but do not have a profile compilation info, compile nothing.
+  if (profile_compilation_info_ == nullptr) {
+    return false;
+  }
   bool result = profile_compilation_info_->ContainsMethod(method_ref);
 
   if (kDebugProfileGuidedCompilation) {
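
Note on the guard above: with a profile-dependent filter such as speed-profile and no profile on
the command line, profile_compilation_info_ is null and dex2oat previously crashed dereferencing
it; now nothing is compiled. A minimal standalone sketch of the decision logic (Profile and
ShouldCompile are illustrative stand-ins, not the ART types):

    #include <cstdio>

    // Stand-in for ProfileCompilationInfo; only the containment query matters here.
    struct Profile {
      bool ContainsMethod(int method_idx) const { return method_idx == 42; }
    };

    // Mirrors CompilerDriver::ShouldCompileBasedOnProfile after the fix.
    static bool ShouldCompile(bool filter_depends_on_profile,
                              const Profile* profile,
                              int method_idx) {
      if (!filter_depends_on_profile) {
        return true;  // e.g. --compiler-filter=speed: compile regardless of any profile.
      }
      if (profile == nullptr) {
        return false;  // speed-profile with no profile: compile nothing instead of crashing.
      }
      return profile->ContainsMethod(method_idx);
    }

    int main() {
      printf("%d\n", ShouldCompile(/*filter_depends_on_profile=*/true, nullptr, 42));  // Prints 0.
      return 0;
    }
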
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index dded966..be75628 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -424,7 +424,16 @@
         shutting_down_(false) {
     const char* reason = "dex2oat watch dog thread startup";
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_init, (&mutex_, nullptr), reason);
-    CHECK_WATCH_DOG_PTHREAD_CALL(pthread_cond_init, (&cond_, nullptr), reason);
+#ifndef __APPLE__
+    pthread_condattr_t condattr;
+    CHECK_WATCH_DOG_PTHREAD_CALL(pthread_condattr_init, (&condattr), reason);
+    CHECK_WATCH_DOG_PTHREAD_CALL(pthread_condattr_setclock, (&condattr, CLOCK_MONOTONIC), reason);
+    CHECK_WATCH_DOG_PTHREAD_CALL(pthread_cond_init, (&cond_, &condattr), reason);
+    CHECK_WATCH_DOG_PTHREAD_CALL(pthread_condattr_destroy, (&condattr), reason);
+#else
+    // pthread_condattr_setclock is unavailable on Mac OS; fall back to the default clock,
+    // matching the CLOCK_REALTIME timeout used in Wait() below.
+    CHECK_WATCH_DOG_PTHREAD_CALL(pthread_cond_init, (&cond_, nullptr), reason);
+#endif
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_attr_init, (&attr_), reason);
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_create, (&pthread_, &attr_, &CallBack, this), reason);
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_attr_destroy, (&attr_), reason);
@@ -482,7 +491,11 @@
 
   void Wait() {
     timespec timeout_ts;
+#if defined(__APPLE__)
     InitTimeSpec(true, CLOCK_REALTIME, timeout_in_milliseconds_, 0, &timeout_ts);
+#else
+    InitTimeSpec(true, CLOCK_MONOTONIC, timeout_in_milliseconds_, 0, &timeout_ts);
+#endif
     const char* reason = "dex2oat watch dog thread waiting";
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_lock, (&mutex_), reason);
     while (!shutting_down_) {
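
The two hunks above exist because pthread_cond_timedwait interprets its absolute deadline against
the condition variable's clock, which defaults to CLOCK_REALTIME and therefore jumps when the wall
clock is adjusted, letting the watchdog fire early or wait too long. A minimal sketch of the
monotonic-clock pattern (Linux only, since pthread_condattr_setclock is missing on Mac OS):

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond;

    int main() {
      pthread_condattr_t condattr;
      pthread_condattr_init(&condattr);
      pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC);  // Deadline immune to clock_settime().
      pthread_cond_init(&cond, &condattr);
      pthread_condattr_destroy(&condattr);

      timespec timeout_ts;
      clock_gettime(CLOCK_MONOTONIC, &timeout_ts);  // Must use the same clock as the condvar.
      timeout_ts.tv_sec += 10;                      // 10 second timeout.

      pthread_mutex_lock(&mutex);
      int rc = pthread_cond_timedwait(&cond, &mutex, &timeout_ts);  // ETIMEDOUT after ~10s.
      pthread_mutex_unlock(&mutex);
      return rc == 0 ? 0 : 1;
    }
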
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 6881f75..2c0b125 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -554,6 +554,12 @@
   RunTest(CompilerFilter::kSpeed, true, { "--very-large-app-threshold=100" });
 }
 
+// Regression test for b/35665292.
+TEST_F(Dex2oatVeryLargeTest, SpeedProfileNoProfile) {
+  // Test that dex2oat doesn't crash with speed-profile but no input profile.
+  RunTest(CompilerFilter::kSpeedProfile, false);
+}
+
 class Dex2oatLayoutTest : public Dex2oatTest {
  protected:
   void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 2d9bbfd..609068f 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -616,6 +616,7 @@
       for (std::unique_ptr<const CatchHandler>& existing_handlers : *handler_list) {
         if (handler_off == existing_handlers->GetListOffset()) {
           handlers = existing_handlers.get();
+          break;
         }
       }
       if (handlers == nullptr) {
@@ -634,7 +635,51 @@
       TryItem* try_item = new TryItem(start_addr, insn_count, handlers);
       tries->push_back(std::unique_ptr<const TryItem>(try_item));
     }
+    // Manually walk the catch handler list and add any handlers not referenced by a try item.
+    const uint8_t* handlers_base = DexFile::GetCatchHandlerData(disk_code_item, 0);
+    const uint8_t* handlers_data = handlers_base;
+    uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_data);
+    while (handlers_size > handler_list->size()) {
+      bool already_added = false;
+      uint16_t handler_off = handlers_data - handlers_base;
+      for (std::unique_ptr<const CatchHandler>& existing_handlers : *handler_list) {
+        if (handler_off == existing_handlers->GetListOffset()) {
+          already_added = true;
+          break;
+        }
+      }
+      int32_t size = DecodeSignedLeb128(&handlers_data);
+      bool has_catch_all = size <= 0;  // Per the dex spec, non-positive size means a catch-all.
+      if (has_catch_all) {
+        size = -size;
+      }
+      if (already_added) {
+        for (int32_t i = 0; i < size; i++) {
+          DecodeUnsignedLeb128(&handlers_data);
+          DecodeUnsignedLeb128(&handlers_data);
+        }
+        if (has_catch_all) {
+          DecodeUnsignedLeb128(&handlers_data);
+        }
+        continue;
+      }
+      TypeAddrPairVector* addr_pairs = new TypeAddrPairVector();
+      for (int32_t i = 0; i < size; i++) {
+        const TypeId* type_id = GetTypeIdOrNullPtr(DecodeUnsignedLeb128(&handlers_data));
+        uint32_t addr = DecodeUnsignedLeb128(&handlers_data);
+        addr_pairs->push_back(
+            std::unique_ptr<const TypeAddrPair>(new TypeAddrPair(type_id, addr)));
+      }
+      if (has_catch_all) {
+        uint32_t addr = DecodeUnsignedLeb128(&handlers_data);
+        addr_pairs->push_back(
+            std::unique_ptr<const TypeAddrPair>(new TypeAddrPair(nullptr, addr)));
+      }
+      const CatchHandler* handler = new CatchHandler(has_catch_all, handler_off, addr_pairs);
+      handler_list->push_back(std::unique_ptr<const CatchHandler>(handler));
+    }
   }
+
   uint32_t size = GetCodeItemSize(dex_file, disk_code_item);
   CodeItem* code_item = new CodeItem(
       registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries, handler_list);
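
For context, the loop above follows the dex encoded_catch_handler_list layout: a ULEB128 handler
count, then per handler a SLEB128 pair count whose non-positive value signals a trailing catch-all,
followed by ULEB128 (type_idx, addr) pairs. A hedged standalone sketch of that walk over a raw
buffer (the Decode helpers are local re-implementations, not the ART ones):

    #include <cstdint>
    #include <cstdio>

    static uint32_t DecodeULeb128(const uint8_t** p) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*p)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }

    static int32_t DecodeSLeb128(const uint8_t** p) {
      int32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*p)++;
        result |= static_cast<int32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      if (shift < 32 && (byte & 0x40) != 0) {
        result |= -(1 << shift);  // Sign-extend.
      }
      return result;
    }

    int main() {
      // Two handlers: one typed pair (type 5 -> 0xa), then a catch-all-only handler (size 0).
      const uint8_t data[] = {2, /*handler@1*/ 1, 5, 10, /*handler@4*/ 0, 20};
      const uint8_t* p = data;
      uint32_t handlers_size = DecodeULeb128(&p);
      for (uint32_t i = 0; i < handlers_size; ++i) {
        printf("handler at list offset %td:", p - data);  // Same offset the dedup check compares.
        int32_t size = DecodeSLeb128(&p);
        bool has_catch_all = size <= 0;  // Non-positive: |size| pairs plus a catch-all.
        if (has_catch_all) size = -size;
        for (int32_t j = 0; j < size; ++j) {
          uint32_t type_idx = DecodeULeb128(&p);
          uint32_t addr = DecodeULeb128(&p);
          printf(" type=%u->0x%x", type_idx, addr);
        }
        if (has_catch_all) printf(" catch_all->0x%x", DecodeULeb128(&p));
        printf("\n");
      }
      return 0;
    }
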
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 562d948..9881e28 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -100,6 +100,26 @@
     "ASAAAAIAAACEAQAABiAAAAIAAACwAQAAARAAAAIAAADYAQAAAiAAABIAAADoAQAAAyAAAAIAAADw"
     "AgAABCAAAAIAAAD8AgAAACAAAAIAAAAIAwAAABAAAAEAAAAgAwAA";
 
+// Dex file with catch handler unreferenced by try blocks.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferencedCatchHandlerInputDex[] =
+    "ZGV4CjAzNQD+exd52Y0f9nY5x5GmInXq5nXrO6Kl2RV4AwAAcAAAAHhWNBIAAAAAAAAAANgCAAAS"
+    "AAAAcAAAAAgAAAC4AAAAAwAAANgAAAABAAAA/AAAAAQAAAAEAQAAAQAAACQBAAA0AgAARAEAANYB"
+    "AADeAQAA5gEAAO4BAAAAAgAADwIAACYCAAA9AgAAUQIAAGUCAAB5AgAAfwIAAIUCAACIAgAAjAIA"
+    "AKECAACnAgAArAIAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAwAAAAOAAAADAAAAAYAAAAAAAAA"
+    "DQAAAAYAAADIAQAADQAAAAYAAADQAQAABQABABAAAAAAAAAAAAAAAAAAAgAPAAAAAQABABEAAAAD"
+    "AAAAAAAAAAAAAAABAAAAAwAAAAAAAAADAAAAAAAAAMgCAAAAAAAAAQABAAEAAAC1AgAABAAAAHAQ"
+    "AwAAAA4AAwABAAIAAgC6AgAAIQAAAGIAAAAaAQoAbiACABAAYgAAABoBCwBuIAIAEAAOAA0AYgAA"
+    "ABoBAQBuIAIAEAAo8A0AYgAAABoBAgBuIAIAEAAo7gAAAAAAAAcAAQAHAAAABwABAAIBAg8BAhgA"
+    "AQAAAAQAAAABAAAABwAGPGluaXQ+AAZDYXRjaDEABkNhdGNoMgAQSGFuZGxlclRlc3QuamF2YQAN"
+    "TEhhbmRsZXJUZXN0OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABVMamF2YS9sYW5nL0V4Y2VwdGlv"
+    "bjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5"
+    "c3RlbTsABFRyeTEABFRyeTIAAVYAAlZMABNbTGphdmEvbGFuZy9TdHJpbmc7AARtYWluAANvdXQA"
+    "B3ByaW50bG4AAQAHDgAEAQAHDn17AncdHoseAAAAAgAAgYAExAIBCdwCAAANAAAAAAAAAAEAAAAA"
+    "AAAAAQAAABIAAABwAAAAAgAAAAgAAAC4AAAAAwAAAAMAAADYAAAABAAAAAEAAAD8AAAABQAAAAQA"
+    "AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA"
+    "AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA";
+
 static void WriteBase64ToFile(const char* base64, File* file) {
   // Decode base64.
   CHECK(base64 != nullptr);
@@ -219,7 +239,7 @@
     EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
 
     std::vector<std::string> dexlayout_exec_argv =
-    { dexlayout, "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
+        { dexlayout, "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
     if (!::art::Exec(dexlayout_exec_argv, error_msg)) {
       return false;
     }
@@ -236,6 +256,40 @@
     }
     return true;
   }
+
+  // Runs UnreferencedCatchHandlerTest.
+  bool UnreferencedCatchHandlerExec(std::string* error_msg) {
+    ScratchFile tmp_file;
+    std::string tmp_name = tmp_file.GetFilename();
+    size_t tmp_last_slash = tmp_name.rfind("/");
+    std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
+
+    // Write inputs and expected outputs.
+    std::string input_dex = tmp_dir + "classes.dex";
+    WriteFileBase64(kUnreferencedCatchHandlerInputDex, input_dex.c_str());
+    std::string output_dex = tmp_dir + "classes.dex.new";
+
+    std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+    EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+
+    std::vector<std::string> dexlayout_exec_argv =
+        { dexlayout, "-w", tmp_dir, "-o", "/dev/null", input_dex };
+    if (!::art::Exec(dexlayout_exec_argv, error_msg)) {
+      return false;
+    }
+
+    // Diff input and output. They should be the same.
+    std::vector<std::string> diff_exec_argv = { "/usr/bin/diff", input_dex, output_dex };
+    if (!::art::Exec(diff_exec_argv, error_msg)) {
+      return false;
+    }
+
+    std::vector<std::string> rm_exec_argv = { "/bin/rm", input_dex, output_dex };
+    if (!::art::Exec(rm_exec_argv, error_msg)) {
+      return false;
+    }
+    return true;
+  }
 };
 
 
@@ -297,4 +351,11 @@
   }
 }
 
+TEST_F(DexLayoutTest, UnreferencedCatchHandler) {
+  // Disable test on target.
+  TEST_DISABLED_FOR_TARGET();
+  std::string error_msg;
+  ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg)) << error_msg;
+}
+
 }  // namespace art
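
The new test uses the same exec-and-diff pattern as the layout tests above: materialize the dex
from base64, run dexlayout with -w into a scratch directory, and require the relaid-out file to be
byte-identical. A stripped-down sketch of that pattern outside the gtest harness (the bare tool
name and paths are placeholders, not the test's actual values):

    #include <cstdlib>
    #include <string>

    // Returns true if the shell command exits with status 0.
    static bool Run(const std::string& cmd) { return std::system(cmd.c_str()) == 0; }

    int main() {
      const std::string dir = "/tmp/dexlayout-test/";      // Placeholder scratch dir.
      const std::string input = dir + "classes.dex";       // Assumed already written from base64.
      const std::string output = dir + "classes.dex.new";  // Produced by dexlayout -w.
      if (!Run("dexlayout -w " + dir + " -o /dev/null " + input)) return 1;
      // dexlayout must be a byte-for-byte no-op on a dex with unreferenced catch handlers.
      return Run("/usr/bin/diff " + input + " " + output) ? 0 : 1;
    }
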
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b93b293..24846e5 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -46,6 +46,7 @@
 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
 Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
 Mutex* Locks::intern_table_lock_ = nullptr;
+Mutex* Locks::jdwp_event_list_lock_ = nullptr;
 Mutex* Locks::jni_function_table_lock_ = nullptr;
 Mutex* Locks::jni_libraries_lock_ = nullptr;
 Mutex* Locks::logging_lock_ = nullptr;
@@ -998,6 +999,7 @@
     DCHECK(verifier_deps_lock_ != nullptr);
     DCHECK(host_dlopen_handles_lock_ != nullptr);
     DCHECK(intern_table_lock_ != nullptr);
+    DCHECK(jdwp_event_list_lock_ != nullptr);
     DCHECK(jni_function_table_lock_ != nullptr);
     DCHECK(jni_libraries_lock_ != nullptr);
     DCHECK(logging_lock_ != nullptr);
@@ -1040,6 +1042,10 @@
     DCHECK(runtime_shutdown_lock_ == nullptr);
     runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kJdwpEventListLock);
+    DCHECK(jdwp_event_list_lock_ == nullptr);
+    jdwp_event_list_lock_ = new Mutex("JDWP event list lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
     DCHECK(profiler_lock_ == nullptr);
     profiler_lock_ = new Mutex("profiler lock", current_lock_level);
@@ -1167,6 +1173,8 @@
     expected_mutexes_on_weak_ref_access_.push_back(dex_lock_);
     classlinker_classes_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
     expected_mutexes_on_weak_ref_access_.push_back(classlinker_classes_lock_);
+    jdwp_event_list_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
+    expected_mutexes_on_weak_ref_access_.push_back(jdwp_event_list_lock_);
     jni_libraries_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
     expected_mutexes_on_weak_ref_access_.push_back(jni_libraries_lock_);
 
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 9b6938f..c59664b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -630,8 +630,13 @@
   // Guards shutdown of the runtime.
   static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
 
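+  // Guards the JDWP event list.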
+  static Mutex* jdwp_event_list_lock_
+      ACQUIRED_AFTER(runtime_shutdown_lock_)
+      ACQUIRED_BEFORE(breakpoint_lock_);
+
   // Guards background profiler global state.
-  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+  static Mutex* profiler_lock_ ACQUIRED_AFTER(jdwp_event_list_lock_);
 
   // Guards trace (ie traceview) requests.
   static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
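
The level ordering established above (runtime shutdown above the JDWP event list, which sits above
the profiler lock) is what the UPDATE_CURRENT_LOCK_LEVEL chain in mutex.cc encodes: locks may only
be acquired in strictly decreasing level order. A toy sketch of that discipline, not ART's Mutex
(levels and names are illustrative):

    #include <cassert>
    #include <vector>

    // Per-thread stack of held lock levels; levels must strictly decrease.
    thread_local std::vector<int> g_held_levels;

    struct LeveledMutex {
      explicit LeveledMutex(int level) : level_(level) {}
      void Lock() {
        assert((g_held_levels.empty() || g_held_levels.back() > level_) && "lock order violation");
        g_held_levels.push_back(level_);
      }
      void Unlock() { g_held_levels.pop_back(); }  // Assumes LIFO release, as with scoped locking.
      const int level_;
    };

    // Mirrors the new ordering: shutdown > jdwp event list > profiler.
    LeveledMutex runtime_shutdown(3), jdwp_event_list(2), profiler(1);

    int main() {
      runtime_shutdown.Lock();
      jdwp_event_list.Lock();  // OK: 2 < 3, matches ACQUIRED_AFTER(runtime_shutdown_lock_).
      profiler.Lock();         // OK: 1 < 2, matches ACQUIRED_AFTER(jdwp_event_list_lock_).
      profiler.Unlock();
      jdwp_event_list.Unlock();
      runtime_shutdown.Unlock();
      return 0;
    }
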
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 46f1644..9b0ffaf 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3471,6 +3471,11 @@
     return nullptr;
   }
   table->InsertStrongRoot(h_dex_cache.Get());
+  if (h_class_loader.Get() != nullptr) {
+    // Since we added a strong root to the class table, do the write barrier as required for
+    // remembered sets and generational GCs.
+    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(h_class_loader.Get());
+  }
   return h_dex_cache.Get();
 }
 
@@ -3798,14 +3803,10 @@
 }
 
 void ClassLinker::WriteBarrierForBootOatFileBssRoots(const OatFile* oat_file) {
-  if (!kUseReadBarrier) {
-    WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-    DCHECK(!oat_file->GetBssGcRoots().empty()) << oat_file->GetLocation();
-    if (log_new_roots_ && !ContainsElement(new_bss_roots_boot_oat_files_, oat_file)) {
-      new_bss_roots_boot_oat_files_.push_back(oat_file);
-    }
-  } else {
-    LOG(FATAL) << "UNREACHABLE";
+  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+  DCHECK(!oat_file->GetBssGcRoots().empty()) << oat_file->GetLocation();
+  if (log_new_roots_ && !ContainsElement(new_bss_roots_boot_oat_files_, oat_file)) {
+    new_bss_roots_boot_oat_files_.push_back(oat_file);
   }
 }
 
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 47c6b51..355d7b3 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -32,12 +32,9 @@
 namespace art {
 
 static inline void BssWriteBarrier(ArtMethod* outer_method) REQUIRES_SHARED(Locks::mutator_lock_) {
-  // For non-CC AOT code, we need a write barrier for the class loader that holds the
-  // GC roots in the .bss. For CC, we do not need to do anything because the roots
-  // we're storing are all referencing to-space and do not need to be re-visited.
-  // However, we do the DCHECK() for the registration of oat files with .bss sections.
-  const DexFile* dex_file =
-      (kUseReadBarrier && !kIsDebugBuild) ? nullptr : outer_method->GetDexFile();
+  // For AOT code, we need a write barrier for the class loader that holds the
+  // GC roots in the .bss.
+  const DexFile* dex_file = outer_method->GetDexFile();
   if (dex_file != nullptr &&
       dex_file->GetOatDexFile() != nullptr &&
       !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
@@ -50,15 +47,13 @@
           << "Oat file with .bss GC roots was not registered in class table: "
           << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
     }
-    if (!kUseReadBarrier) {
-      if (class_loader != nullptr) {
-        // Note that we emit the barrier before the compiled code stores the String or Class
-        // as a GC root. This is OK as there is no suspend point point in between.
-        Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
-      } else {
-        Runtime::Current()->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(
-            dex_file->GetOatDexFile()->GetOatFile());
-      }
+    if (class_loader != nullptr) {
+      // Note that we emit the barrier before the compiled code stores the String or Class
+      // as a GC root. This is OK as there is no suspend point in between.
+      Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+    } else {
+      Runtime::Current()->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(
+          dex_file->GetOatDexFile()->GetOatFile());
     }
   }
 }
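
Both this hunk and the class_linker.cc one above now emit the barrier unconditionally, because a
store that bypasses the card mark leaves a reference invisible to a generational collection, which
only rescans dirty cards. A toy model of what Heap::WriteBarrierEveryFieldOf does (kCardShift and
kCardDirty match ART's card table constants, but the flat array and zero-based heap are
illustrative):

    #include <cstdint>
    #include <cstdio>

    static constexpr size_t kCardShift = 7;     // One card byte per 128 bytes of heap.
    static constexpr uint8_t kCardDirty = 0x70;
    static uint8_t card_table[1 << 16];         // Covers an 8 MiB toy heap at address 0.

    // Dirty the holder's card so the GC revisits every field of the object,
    // including roots stored behind native structures like the class table.
    static void WriteBarrierEveryFieldOf(uintptr_t holder_addr) {
      card_table[holder_addr >> kCardShift] = kCardDirty;
    }

    int main() {
      WriteBarrierEveryFieldOf(0x1000);  // e.g. the class loader gaining a new strong root.
      printf("card dirty: %d\n", card_table[0x1000 >> kCardShift] == kCardDirty);
      return 0;
    }
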
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 3d2fd0b..8f9c187 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -53,6 +53,8 @@
 // Slow path mark stack size, increase this if the stack is getting full and it is causing
 // performance problems.
 static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
+// Verify that there are no missing card marks.
+static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;
 
 ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                      const std::string& name_prefix,
@@ -155,7 +157,7 @@
     MarkingPhase();
   }
   // Verify no from space refs. This causes a pause.
-  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
+  if (kEnableNoFromSpaceRefsVerification) {
     TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
     ScopedPause pause(this, false);
     CheckEmptyMarkStack();
@@ -335,6 +337,9 @@
     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
+    if (kVerifyNoMissingCardMarks) {
+      cc->VerifyNoMissingCardMarks();
+    }
     CHECK(thread == self);
     Locks::mutator_lock_->AssertExclusiveHeld(self);
     cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
@@ -445,6 +450,72 @@
   }
 }
 
+class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
+ public:
+  VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
+    : cc_(cc),
+      holder_(holder) {}
+
+  void operator()(ObjPtr<mirror::Object> obj,
+                  MemberOffset offset,
+                  bool is_static ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
+    if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
+      CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
+          offset), offset.Uint32Value());
+    }
+  }
+  void operator()(ObjPtr<mirror::Class> klass,
+                  ObjPtr<mirror::Reference> ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
+    CHECK(klass->IsTypeOfReferenceClass());
+    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+  }
+
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CheckReference(root->AsMirrorPtr());
+  }
+
+  void CheckReference(mirror::Object* ref, int32_t offset = -1) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
+        << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
+        << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
+  }
+
+ private:
+  ConcurrentCopying* const cc_;
+  ObjPtr<mirror::Object> const holder_;
+};
+
+void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
+  auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
+  // Objects not on dirty cards should never have references to newly allocated regions.
+  if (!collector->heap_->GetCardTable()->IsDirty(obj)) {
+    VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
+    obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+        visitor,
+        visitor);
+  }
+}
+
+void ConcurrentCopying::VerifyNoMissingCardMarks() {
+  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+  region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
+  {
+    ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
+  }
+}
+
 // Switch threads that from from-space to to-space refs. Forward/mark the thread roots.
 void ConcurrentCopying::FlipThreadRoots() {
   TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
@@ -1397,7 +1468,7 @@
     size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
     region_space_->AddLiveBytes(to_ref, alloc_size);
   }
-  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
+  if (ReadBarrier::kEnableToSpaceInvariantChecks) {
     AssertToSpaceInvariantObjectVisitor visitor(this);
     visitor(to_ref);
   }
@@ -2347,7 +2418,9 @@
     MutexLock mu(self, mark_stack_lock_);
     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   }
-  {
+  // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
+  // positives.
+  if (!kVerifyNoMissingCardMarks) {
     TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
     // We do not currently use the region space cards at all, madvise them away to save ram.
     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
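
A condensed model of the invariant the new debug pass enforces: any object whose card is clean
must not reference a newly allocated region, because only dirty cards are rescanned. Sketch with
plain structs standing in for the heap walk and region test:

    #include <cstdio>
    #include <vector>

    struct Object {
      bool on_dirty_card;
      bool newly_allocated;
      std::vector<Object*> fields;
    };

    // Analogue of VerifyNoMissingCardMarks: clean-card holders may not point
    // into newly allocated regions.
    static void VerifyNoMissingCardMarks(const std::vector<Object*>& heap) {
      for (const Object* holder : heap) {
        if (holder->on_dirty_card) continue;  // Dirty cards get rescanned anyway.
        for (const Object* ref : holder->fields) {
          if (ref != nullptr && ref->newly_allocated) {
            fprintf(stderr, "missing card mark: clean object -> newly allocated object\n");
          }
        }
      }
    }

    int main() {
      Object young{/*on_dirty_card=*/false, /*newly_allocated=*/true, {}};
      Object old_clean{false, false, {&young}};  // Simulates a store that skipped the barrier.
      VerifyNoMissingCardMarks({&old_clean, &young});
      return 0;
    }
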
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 073326d..a0da9fc 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -162,6 +162,12 @@
   void VerifyGrayImmuneObjects()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
+  static void VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg)
+      REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+  void VerifyNoMissingCardMarks()
+      REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
   size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
@@ -330,6 +336,7 @@
   class VerifyNoFromSpaceRefsFieldVisitor;
   class VerifyNoFromSpaceRefsObjectVisitor;
   class VerifyNoFromSpaceRefsVisitor;
+  class VerifyNoMissingCardMarkVisitor;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
 };
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index f3b9595..feab9b0 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -176,6 +176,14 @@
     return false;
   }
 
+  bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
+    if (HasAddress(ref)) {
+      Region* r = RefToRegionUnlocked(ref);
+      return r->IsNewlyAllocated();
+    }
+    return false;
+  }
+
   bool IsInUnevacFromSpace(mirror::Object* ref) {
     if (HasAddress(ref)) {
       Region* r = RefToRegionUnlocked(ref);
@@ -351,6 +359,10 @@
       return idx_;
     }
 
+    bool IsNewlyAllocated() const {
+      return is_newly_allocated_;
+    }
+
     bool IsInFromSpace() const {
       return type_ == RegionType::kRegionTypeFromSpace;
     }
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 86af6d4..af29468 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -203,7 +203,8 @@
    */
   void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
                          const JValue* returnValue)
-     REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * A field of interest has been accessed or modified. This is used for field access and field
@@ -214,7 +215,8 @@
    */
   void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
                       const JValue* fieldValue, bool is_modification)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * An exception has been thrown.
@@ -223,19 +225,22 @@
    */
   void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
                      const EventLocation* pCatchLoc, mirror::Object* thisPtr)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * A thread has started or stopped.
    */
   void PostThreadChange(Thread* thread, bool start)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * Class has been prepared.
    */
   void PostClassPrepare(mirror::Class* klass)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * The VM is about to stop.
@@ -259,7 +264,7 @@
   void SendRequest(ExpandBuf* pReq);
 
   void ResetState()
-      REQUIRES(!event_list_lock_)
+      REQUIRES(!Locks::jdwp_event_list_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   /* atomic ops to get next serial number */
@@ -268,7 +273,7 @@
 
   void Run()
       REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_,
-               !attach_lock_, !event_list_lock_);
+               !attach_lock_, !Locks::jdwp_event_list_lock_);
 
   /*
    * Register an event by adding it to the event list.
@@ -277,25 +282,25 @@
    * may discard its pointer after calling this.
    */
   JdwpError RegisterEvent(JdwpEvent* pEvent)
-      REQUIRES(!event_list_lock_)
+      REQUIRES(!Locks::jdwp_event_list_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * Unregister an event, given the requestId.
    */
   void UnregisterEventById(uint32_t requestId)
-      REQUIRES(!event_list_lock_)
+      REQUIRES(!Locks::jdwp_event_list_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass)
-      REQUIRES(!event_list_lock_)
+      REQUIRES(!Locks::jdwp_event_list_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * Unregister all events.
    */
   void UnregisterAll()
-      REQUIRES(!event_list_lock_)
+      REQUIRES(!Locks::jdwp_event_list_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -310,16 +315,16 @@
                                      ObjectId threadId)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
   void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
-      REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   void EventFinish(ExpandBuf* pReq);
   bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
                           std::vector<JdwpEvent*>* match_list)
-      REQUIRES(!event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
                                 std::vector<JdwpEvent*>* match_list)
-      REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   void UnregisterEvent(JdwpEvent* pEvent)
-      REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
 
   /*
@@ -387,9 +392,8 @@
   AtomicInteger event_serial_;
 
   // Linked list of events requested by the debugger (breakpoints, class prep, etc).
-  Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_);
-  JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
-  size_t event_list_size_ GUARDED_BY(event_list_lock_);  // Number of elements in event_list_.
+  JdwpEvent* event_list_ GUARDED_BY(Locks::jdwp_event_list_lock_);
+  size_t event_list_size_ GUARDED_BY(Locks::jdwp_event_list_lock_);  // Size of event_list_.
 
   // Used to synchronize JDWP command handler thread and event threads so only one
   // thread does JDWP stuff at a time. This prevent from interleaving command handling
@@ -410,7 +414,7 @@
   // When the runtime shuts down, it needs to stop JDWP command handler thread by closing the
   // JDWP connection. However, if the JDWP thread is processing a command, it needs to wait
   // for the command to finish so we can send its reply before closing the connection.
-  Mutex shutdown_lock_ ACQUIRED_AFTER(event_list_lock_);
+  Mutex shutdown_lock_ ACQUIRED_AFTER(Locks::jdwp_event_list_lock_);
   ConditionVariable shutdown_cond_ GUARDED_BY(shutdown_lock_);
   bool processing_request_ GUARDED_BY(shutdown_lock_);
 };
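
The REQUIRES/GUARDED_BY churn above is Clang thread-safety annotation syntax; hoisting the mutex
into Locks lets every declaration name a single static capability instead of an instance member.
A self-contained sketch of the same pattern (build with clang++ -Wthread-safety; the macro names
mirror ART's but are redefined locally):

    // Minimal Clang thread-safety demo: clang++ -Wthread-safety demo.cc
    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define REQUIRES(x) __attribute__((requires_capability(x)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {
      void Lock() ACQUIRE() {}    // Empty argument list means "this object".
      void Unlock() RELEASE() {}
    };

    Mutex event_list_lock;  // Named global, analogous to Locks::jdwp_event_list_lock_.
    int event_list_size GUARDED_BY(event_list_lock) = 0;

    // Callers must already hold the lock, like FindMatchingEventsLocked().
    void AddEventLocked() REQUIRES(event_list_lock) {
      ++event_list_size;
    }

    int main() {
      event_list_lock.Lock();  // Without this, -Wthread-safety flags the call below.
      AddEventLocked();
      event_list_lock.Unlock();
      return 0;
    }
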
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 96249f9..36d733e 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -237,7 +237,7 @@
     /*
      * Add to list.
      */
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     if (event_list_ != nullptr) {
       pEvent->next = event_list_;
       event_list_->prev = pEvent;
@@ -256,7 +256,7 @@
   StackHandleScope<1> hs(Thread::Current());
   Handle<mirror::Class> h_klass(hs.NewHandle(klass));
   std::vector<JdwpEvent*> to_remove;
-  MutexLock mu(Thread::Current(), event_list_lock_);
+  MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
   for (JdwpEvent* cur_event = event_list_; cur_event != nullptr; cur_event = cur_event->next) {
     // Fill in the to_remove list
     bool found_event = false;
@@ -356,7 +356,7 @@
 void JdwpState::UnregisterEventById(uint32_t requestId) {
   bool found = false;
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
 
     for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
       if (pEvent->requestId == requestId) {
@@ -383,7 +383,7 @@
  * Remove all entries from the event list.
  */
 void JdwpState::UnregisterAll() {
-  MutexLock mu(Thread::Current(), event_list_lock_);
+  MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
 
   JdwpEvent* pEvent = event_list_;
   while (pEvent != nullptr) {
@@ -593,7 +593,7 @@
  */
 bool JdwpState::FindMatchingEvents(JdwpEventKind event_kind, const ModBasket& basket,
                                    std::vector<JdwpEvent*>* match_list) {
-  MutexLock mu(Thread::Current(), event_list_lock_);
+  MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
   match_list->reserve(event_list_size_);
   FindMatchingEventsLocked(event_kind, basket, match_list);
   return !match_list->empty();
@@ -908,7 +908,7 @@
   std::vector<JdwpEvent*> match_list;
   {
     // We use the locked version because we have multiple possible match events.
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     match_list.reserve(event_list_size_);
     if ((eventFlags & Dbg::kBreakpoint) != 0) {
       FindMatchingEventsLocked(EK_BREAKPOINT, basket, &match_list);
@@ -955,7 +955,7 @@
   }
 
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     CleanupMatchList(match_list);
   }
 
@@ -1041,7 +1041,7 @@
   }
 
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     CleanupMatchList(match_list);
   }
 
@@ -1103,7 +1103,7 @@
   }
 
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     CleanupMatchList(match_list);
   }
 
@@ -1213,7 +1213,7 @@
   }
 
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     CleanupMatchList(match_list);
   }
 
@@ -1295,7 +1295,7 @@
   }
 
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     CleanupMatchList(match_list);
   }
 
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 7707ba4..64ed724 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -227,7 +227,6 @@
       last_activity_time_ms_(0),
       request_serial_(0x10000000),
       event_serial_(0x20000000),
-      event_list_lock_("JDWP event list lock", kJdwpEventListLock),
       event_list_(nullptr),
       event_list_size_(0),
       jdwp_token_lock_("JDWP token lock"),
@@ -331,7 +330,7 @@
 
   UnregisterAll();
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
+    MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
     CHECK(event_list_ == nullptr);
   }
 
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 37cf257..2b38b2e 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -198,7 +198,7 @@
 
 inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                 mirror::Object* ref) {
-  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
+  if (kEnableToSpaceInvariantChecks) {
     if (ref == nullptr || IsDuringStartup()) {
       return;
     }
@@ -209,7 +209,7 @@
 
 inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                 mirror::Object* ref) {
-  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
+  if (kEnableToSpaceInvariantChecks) {
     if (ref == nullptr || IsDuringStartup()) {
       return;
     }