Don't sanitize TrackingHeader::GetSize()

Also avoid tracking GC-root updates when ASAN is enabled: ASAN
promotes stack frames to the heap for stack-use-after-return
detection, which defeats the stack-address bounds check used to skip
stack roots.
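
A minimal standalone sketch (hypothetical, not part of this change) of
the problem: build with -fsanitize=address and run with
ASAN_OPTIONS=detect_stack_use_after_return=1, and a local's address can
fall outside the pthread stack bounds because it lives on a heap-backed
fake stack.

  #include <pthread.h>
  #include <cstdio>

  int main() {
    pthread_attr_t attr;
    void* stack_addr;
    size_t stack_size;
    pthread_getattr_np(pthread_self(), &attr);
    pthread_attr_getstack(&attr, &stack_addr, &stack_size);
    pthread_attr_destroy(&attr);
    char* lo = static_cast<char*>(stack_addr);
    char* hi = lo + stack_size;
    int local = 0;  // With fake stacks, &local may be heap memory.
    char* root = reinterpret_cast<char*>(&local);
    // Prints 1 normally; can print 0 under ASAN fake stacks, so such
    // roots would be recorded in updated_roots_ and could trip the
    // double-update DCHECK.
    printf("on_stack=%d\n", root >= lo && root < hi);
    return 0;
  }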

Test: DIST_DIR=/tmp/dist TARGET_PRODUCT=armv8 TARGET_BUILD_VARIANT=eng \
  ./prebuilts/build-tools/path/linux-x86/python3 \
  ./art/test/testrunner/run_build_test_target.py -j80 art-gtest-heap-poisoning
Bug: 240930225
Bug: 160737021
Change-Id: I6a6bb2790d26a95bdbe386080cddb0b739dd3002

diff --git a/runtime/gc/collector/mark_compact-inl.h b/runtime/gc/collector/mark_compact-inl.h
index 262569f..5760032 100644
--- a/runtime/gc/collector/mark_compact-inl.h
+++ b/runtime/gc/collector/mark_compact-inl.h
@@ -221,31 +221,31 @@
 inline bool MarkCompact::VerifyRootSingleUpdate(void* root,
                                                 mirror::Object* old_ref,
                                                 const RootInfo& info) {
-  void* stack_end = stack_end_;
-  void* stack_addr = stack_addr_;
-  if (!live_words_bitmap_->HasAddress(old_ref)) {
-    return false;
+  // ASAN promotes stack frames to the heap in order to detect
+  // stack-use-after-return issues, so skip this double-root-update
+  // detection when running under ASAN.
+  if (kIsDebugBuild && !kMemoryToolIsAvailable) {
+    void* stack_low_addr = stack_low_addr_;
+    void* stack_high_addr = stack_high_addr_;
+    if (!live_words_bitmap_->HasAddress(old_ref)) {
+      return false;
+    }
+    if (UNLIKELY(stack_low_addr == nullptr)) {
+      Thread* self = Thread::Current();
+      stack_low_addr = self->GetStackEnd();
+      stack_high_addr = reinterpret_cast<char*>(stack_low_addr) + self->GetStackSize();
+    }
+    if (root < stack_low_addr || root > stack_high_addr) {
+      auto ret = updated_roots_.insert(root);
+      DCHECK(ret.second) << "root=" << root << " old_ref=" << old_ref
+                         << " stack_low_addr=" << stack_low_addr
+                         << " stack_high_addr=" << stack_high_addr;
+    }
+    DCHECK(reinterpret_cast<uint8_t*>(old_ref) >= black_allocations_begin_ ||
+           live_words_bitmap_->Test(old_ref))
+        << "ref=" << old_ref << " <" << mirror::Object::PrettyTypeOf(old_ref) << "> RootInfo ["
+        << info << "]";
   }
-  if (UNLIKELY(stack_end == nullptr)) {
-    pthread_attr_t attr;
-    size_t stack_size;
-    pthread_getattr_np(pthread_self(), &attr);
-    pthread_attr_getstack(&attr, &stack_addr, &stack_size);
-    pthread_attr_destroy(&attr);
-    stack_end = reinterpret_cast<char*>(stack_addr) + stack_size;
-  }
-  if (root < stack_addr || root > stack_end) {
-    auto ret = updated_roots_.insert(root);
-    DCHECK(ret.second) << "root=" << root
-                       << " old_ref=" << old_ref
-                       << " stack_addr=" << stack_addr
-                       << " stack_end=" << stack_end;
-  }
-  DCHECK(reinterpret_cast<uint8_t*>(old_ref) >= black_allocations_begin_
-         || live_words_bitmap_->Test(old_ref))
-        << "ref=" << old_ref
-        << " <" << mirror::Object::PrettyTypeOf(old_ref)
-        << "> RootInfo [" << info << "]";
   return true;
 }
 
@@ -253,7 +253,7 @@
                                     const RootInfo& info) {
   DCHECK(!root->IsNull());
   mirror::Object* old_ref = root->AsMirrorPtr();
-  if (!kIsDebugBuild || VerifyRootSingleUpdate(root, old_ref, info)) {
+  if (VerifyRootSingleUpdate(root, old_ref, info)) {
     mirror::Object* new_ref = PostCompactAddress(old_ref);
     if (old_ref != new_ref) {
       root->Assign(new_ref);
@@ -263,7 +263,7 @@
 
 inline void MarkCompact::UpdateRoot(mirror::Object** root, const RootInfo& info) {
   mirror::Object* old_ref = *root;
-  if (!kIsDebugBuild || VerifyRootSingleUpdate(root, old_ref, info)) {
+  if (VerifyRootSingleUpdate(root, old_ref, info)) {
     mirror::Object* new_ref = PostCompactAddress(old_ref);
     if (old_ref != new_ref) {
       *root = new_ref;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index c86e687..865281b 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -2119,10 +2119,8 @@
     for (uint8_t* byte = first_obj; byte < page_end;) {
       TrackingHeader* header = reinterpret_cast<TrackingHeader*>(byte);
       obj_size = header->GetSize();
-      LinearAllocKind kind = header->GetKind();
       if (UNLIKELY(obj_size == 0)) {
         // No more objects in this page to visit.
-        DCHECK_EQ(kind, LinearAllocKind::kNoGCRoots);
         break;
       }
       uint8_t* obj = byte + sizeof(TrackingHeader);
@@ -2133,7 +2131,7 @@
       uint8_t* begin_boundary = std::max(obj, page_begin);
       uint8_t* end_boundary = std::min(obj_end, page_end);
       if (begin_boundary < end_boundary) {
-        VisitObject(kind, obj, begin_boundary, end_boundary);
+        VisitObject(header->GetKind(), obj, begin_boundary, end_boundary);
       }
       if (ArenaAllocator::IsRunningOnMemoryTool()) {
         obj_size += ArenaAllocator::kMemoryToolRedZoneBytes;
@@ -2229,14 +2227,10 @@
   Runtime* runtime = Runtime::Current();
   non_moving_space_bitmap_ = non_moving_space_->GetLiveBitmap();
   if (kIsDebugBuild) {
-    pthread_attr_t attr;
-    size_t stack_size;
-    void* stack_addr;
-    pthread_getattr_np(pthread_self(), &attr);
-    pthread_attr_getstack(&attr, &stack_addr, &stack_size);
-    pthread_attr_destroy(&attr);
-    stack_addr_ = stack_addr;
-    stack_end_ = reinterpret_cast<char*>(stack_addr) + stack_size;
+    DCHECK_EQ(thread_running_gc_, Thread::Current());
+    stack_low_addr_ = thread_running_gc_->GetStackEnd();
+    stack_high_addr_ =
+        reinterpret_cast<char*>(stack_low_addr_) + thread_running_gc_->GetStackSize();
   }
 
   compacting_ = true;
@@ -2370,7 +2364,7 @@
     // We must start worker threads before resuming mutators to avoid deadlocks.
     heap_->GetThreadPool()->StartWorkers(thread_running_gc_);
   }
-  stack_end_ = nullptr;
+  stack_low_addr_ = nullptr;
 }
 
 void MarkCompact::KernelPrepareRange(uint8_t* to_addr,
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 1330602..71c250d 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -626,8 +626,8 @@
 
 // TODO: Remove once an efficient mechanism to deal with double root updates
 // is incorporated.
-  void* stack_addr_;
-  void* stack_end_;
+  void* stack_high_addr_;
+  void* stack_low_addr_;
 
   uint8_t* conc_compaction_termination_page_;
   PointerSize pointer_size_;
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 12c772b..ad1e349 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -49,7 +49,10 @@
   }
 
   LinearAllocKind GetKind() const { return kind_; }
-  size_t GetSize() const { return size_ & ~kIs16Aligned; }
+  // Since we allocate linearly and hop from one object to the next during
+  // visits, reading 'size_ == 0' means there are no more objects to visit in
+  // the given page. But ASAN flags that terminating read as use-after-poison.
+  ATTRIBUTE_NO_SANITIZE_ADDRESS size_t GetSize() const { return size_ & ~kIs16Aligned; }
   bool Is16Aligned() const { return size_ & kIs16Aligned; }
 
  private:
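
Note: a hypothetical standalone illustration of the GetSize() change
(assuming ATTRIBUTE_NO_SANITIZE_ADDRESS expands to
__attribute__((no_sanitize("address")))): the walk over a linear-alloc
page terminates on a zero size_ read that may land in memory ASAN has
poisoned, so only an uninstrumented accessor can read it safely. Build
with -fsanitize=address.

  #include <sanitizer/asan_interface.h>
  #include <cstdint>
  #include <cstdio>

  struct Header {
    uint32_t size;
    // Uninstrumented accessor, mirroring TrackingHeader::GetSize().
    __attribute__((no_sanitize("address")))
    uint32_t GetSize() const { return size; }
  };

  int main() {
    alignas(8) static char page[64] = {};      // zero-filled tail
    ASAN_POISON_MEMORY_REGION(page + 16, 48);  // simulate a redzone
    auto* h = reinterpret_cast<Header*>(page + 16);
    // An instrumented load here would abort with use-after-poison;
    // the unsanitized read returns the 0 sentinel that ends the walk.
    printf("size=%u\n", h->GetSize());
    ASAN_UNPOISON_MEMORY_REGION(page + 16, 48);
    return 0;
  }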