ART: Remove old visit functions
Remove now-unused ObjectCallback-based VisitObjects functions.
Test: m
Change-Id: Iefccd6ff28654d86f5254a411e7c30263a85b9aa
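Callers now go through the template-based VisitObjects overload that remains in
heap.h, which accepts any callable instead of an ObjectCallback plus a void*
cookie. A minimal migration sketch, assuming a hypothetical object-counting
visitor (the counting logic, the CountObjectCallback name, and the `heap`
pointer, taken here as Runtime::Current()->GetHeap(), are illustrative and not
part of this change):

    // Sketch only; inside ART this would need gc/heap.h and mirror/object.h.

    // Old, removed style: a free function matching ObjectCallback, with its
    // state threaded through the void* argument.
    static void CountObjectCallback(mirror::Object* obj, void* arg) {
      size_t* count = reinterpret_cast<size_t*>(arg);
      if (obj != nullptr) {
        ++(*count);
      }
    }
    // heap->VisitObjects(CountObjectCallback, &count);

    // Remaining, template-based style: pass a lambda and capture the state
    // directly, with the usual mutator-lock annotation on the lambda.
    size_t count = 0;
    gc::Heap* heap = Runtime::Current()->GetHeap();
    heap->VisitObjects([&count](mirror::Object* obj)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (obj != nullptr) {
        ++count;
      }
    });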
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2515316..ba1161f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -906,134 +906,6 @@
}
}
-// Visit objects when threads aren't suspended. If concurrent moving
-// GC, disable moving GC and suspend threads and then visit objects.
-void Heap::VisitObjects(ObjectCallback callback, void* arg) {
- Thread* self = Thread::Current();
- Locks::mutator_lock_->AssertSharedHeld(self);
- DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
- if (IsGcConcurrentAndMoving()) {
- // Concurrent moving GC. Just suspending threads isn't sufficient
- // because a collection isn't one big pause and we could suspend
- // threads in the middle (between phases) of a concurrent moving
- // collection where it's not easily known which objects are alive
- // (both the region space and the non-moving space) or which
- // copies of objects to visit, and the to-space invariant could be
- easily broken. Instead, visit objects while the GC isn't running (via
- IncrementDisableMovingGC()) and while threads are suspended.
- IncrementDisableMovingGC(self);
- {
- ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
- ScopedSuspendAll ssa(__FUNCTION__);
- VisitObjectsInternalRegionSpace(callback, arg);
- VisitObjectsInternal(callback, arg);
- }
- DecrementDisableMovingGC(self);
- } else {
- // Since concurrent moving GC has thread suspension, also poison ObjPtr in the normal case to
- // catch bugs.
- self->PoisonObjectPointers();
- // GCs can move objects, so don't allow this.
- ScopedAssertNoThreadSuspension ants("Visiting objects");
- DCHECK(region_space_ == nullptr);
- VisitObjectsInternal(callback, arg);
- self->PoisonObjectPointers();
- }
-}
-
-// Visit objects when threads are already suspended.
-void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
- Thread* self = Thread::Current();
- Locks::mutator_lock_->AssertExclusiveHeld(self);
- VisitObjectsInternalRegionSpace(callback, arg);
- VisitObjectsInternal(callback, arg);
-}
-
-// Visit objects in the region spaces.
-void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
- Thread* self = Thread::Current();
- Locks::mutator_lock_->AssertExclusiveHeld(self);
- if (region_space_ != nullptr) {
- DCHECK(IsGcConcurrentAndMoving());
- if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
- // Exclude the pre-zygote-fork case, where the semi-space collector
- // calls VerifyHeapReferences() as part of the zygote compaction
- // and therefore reaches here without the moving GC disabled,
- // which is fine.
- bool is_thread_running_gc = false;
- if (kIsDebugBuild) {
- MutexLock mu(self, *gc_complete_lock_);
- is_thread_running_gc = self == thread_running_gc_;
- }
- // If we are not the thread running the GC in a GC-exclusive region, then moving GC
- // must be disabled.
- DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
- }
- region_space_->Walk(callback, arg);
- }
-}
-
-// Visit objects in the other spaces.
-void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
- if (bump_pointer_space_ != nullptr) {
- // Visit objects in bump pointer space.
- bump_pointer_space_->Walk(callback, arg);
- }
- // TODO: Switch to standard begin and end to use a range-based loop.
- for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
- mirror::Object* const obj = it->AsMirrorPtr();
-
- mirror::Class* kls = nullptr;
- if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
- // Below invariant is safe regardless of what space the Object is in.
- // For speed reasons, only perform it when Rosalloc could possibly be used.
- // (Disabled for read barriers because it never uses Rosalloc).
- // (See the DCHECK in RosAllocSpace constructor).
- if (!kUseReadBarrier) {
- // Rosalloc has a race in allocation. Objects can be written into the allocation
- // stack before their header writes are visible to this thread.
- // See b/28790624 for more details.
- //
- // obj.class will either be pointing to a valid Class*, or it will point
- // to a rosalloc free buffer.
- //
- // If it's pointing to a valid Class* then that Class's Class will be the
- // ClassClass (whose Class is itself).
- //
- // A rosalloc free buffer will point to another rosalloc free buffer
- // (or to null), and never to itself.
- //
- // Either way, dereferencing it while it's not null is safe because it will
- // always point to another valid pointer or to null.
- mirror::Class* klsClass = kls->GetClass();
-
- if (klsClass == nullptr) {
- continue;
- } else if (klsClass->GetClass() != klsClass) {
- continue;
- }
- } else {
- // Ensure the invariant is not broken for non-rosalloc cases.
- DCHECK(Heap::rosalloc_space_ == nullptr)
- << "unexpected rosalloc with read barriers";
- DCHECK(kls->GetClass() != nullptr)
- << "invalid object: class does not have a class";
- DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
- << "invalid object: class's class is not ClassClass";
- }
-
- // Avoid the race condition caused by the object not yet being written into the allocation
- // stack or the class not yet being written in the object. Or, if
- // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
- callback(obj, arg);
- }
- }
- {
- ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- GetLiveBitmap()->Walk(callback, arg);
- }
-}
-
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
space::ContinuousSpace* space2 = non_moving_space_;
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 78a21de..d1e8d4f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -251,12 +251,6 @@
}
// Visit all of the live objects in the heap.
- void VisitObjects(ObjectCallback callback, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
- void VisitObjectsPaused(ObjectCallback callback, void* arg)
- REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
-
template <typename Visitor>
ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1060,12 +1054,6 @@
// Trim 0 pages at the end of reference tables.
void TrimIndirectReferenceTables(Thread* self);
- void VisitObjectsInternal(ObjectCallback callback, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
- void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
- REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
-
template <typename Visitor>
ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)