Add root type and thread id to root visiting.

This enables us to pass the root type and thread id to hprof.
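
For reference, here is a sketch of the callback shapes this change
converges on. These typedefs presumably live in runtime/object_callbacks.h,
which the patch uses but does not show; the extra RootType enumerators and
the DumpRootCallback consumer are assumptions for illustration only.

  #include <stdint.h>

  namespace art {
  namespace mirror { class Object; }

  // Where a root was found. kRootVMInternal is the value the mod-union
  // table code below passes; the other enumerators are assumed examples.
  enum RootType {
    kRootVMInternal,
    kRootJNIGlobal,   // assumed
    kRootJavaFrame,   // assumed
  };

  // Returns the new address of the object, or |root| itself if it did
  // not move. The thread id and root type let consumers such as hprof
  // record where each root came from.
  typedef mirror::Object* (RootCallback)(mirror::Object* root, void* arg,
                                         uint32_t thread_id, RootType root_type);

  // Visits a single live object during a heap walk.
  typedef void (ObjectCallback)(mirror::Object* obj, void* arg);

  // Returns the (possibly forwarded) object if it is marked, null otherwise.
  typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg);

  // Marks an object and returns its (possibly forwarded) address.
  typedef mirror::Object* (MarkObjectCallback)(mirror::Object* object, void* arg);

  // Hypothetical consumer (not part of this patch): a heap dumper can
  // now attribute each root to a thread and a root kind.
  static mirror::Object* DumpRootCallback(mirror::Object* root, void* arg,
                                          uint32_t thread_id, RootType root_type) {
    // Record (root, thread_id, root_type) into the dump here.
    return root;  // Identity: this callback does not move objects.
  }
  }  // namespace art

MarkSweep::MarkRootCallback and SemiSpace::MarkRootCallback in the diff
below are conforming RootCallback implementations.
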
Bug: 12680863
Change-Id: I6a0f1f9e3aa8f9b4033d695818ae7ca3460d67cb
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index c520ee6..f94cf24 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -72,12 +72,10 @@
discontinuous_space_sets_.erase(it);
}
-void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
+void HeapBitmap::Walk(ObjectCallback* callback, void* arg) {
for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->Walk(callback, arg);
}
-
- DCHECK(!discontinuous_space_sets_.empty());
for (const auto& space_set : discontinuous_space_sets_) {
space_set->Walk(callback, arg);
}
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index bcf36a2..dde1425 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -20,6 +20,7 @@
#include "base/logging.h"
#include "gc_allocator.h"
#include "locks.h"
+#include "object_callbacks.h"
#include "space_bitmap.h"
namespace art {
@@ -83,7 +84,7 @@
return NULL;
}
- void Walk(SpaceBitmap::Callback* callback, void* arg)
+ void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
template <typename Visitor>
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0225f29..aad214a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -70,8 +70,8 @@
class ModUnionUpdateObjectReferencesVisitor {
public:
- ModUnionUpdateObjectReferencesVisitor(RootVisitor visitor, void* arg)
- : visitor_(visitor),
+ ModUnionUpdateObjectReferencesVisitor(RootCallback* callback, void* arg)
+ : callback_(callback),
arg_(arg) {
}
@@ -80,7 +80,7 @@
bool /* is_static */) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
if (ref != nullptr) {
- Object* new_ref = visitor_(ref, arg_);
+ Object* new_ref = callback_(ref, arg_, 0, kRootVMInternal);
if (new_ref != ref) {
// Use SetFieldObjectWithoutWriteBarrier to avoid card mark as an optimization which
// reduces dirtied pages and improves performance.
@@ -90,26 +90,26 @@
}
private:
- RootVisitor* visitor_;
+ RootCallback* const callback_;
void* arg_;
};
class ModUnionScanImageRootVisitor {
public:
- ModUnionScanImageRootVisitor(RootVisitor visitor, void* arg)
- : visitor_(visitor), arg_(arg) {}
+ ModUnionScanImageRootVisitor(RootCallback* callback, void* arg)
+ : callback_(callback), arg_(arg) {}
void operator()(Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != NULL);
- ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, arg_);
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_);
collector::MarkSweep::VisitObjectReferences(root, ref_visitor, true);
}
private:
- RootVisitor* visitor_;
- void* arg_;
+ RootCallback* const callback_;
+ void* const arg_;
};
void ModUnionTableReferenceCache::ClearCards() {
@@ -261,7 +261,7 @@
}
}
-void ModUnionTableReferenceCache::UpdateAndMarkReferences(RootVisitor visitor, void* arg) {
+void ModUnionTableReferenceCache::UpdateAndMarkReferences(RootCallback* callback, void* arg) {
Heap* heap = GetHeap();
CardTable* card_table = heap->GetCardTable();
@@ -296,7 +296,7 @@
for (mirror::HeapReference<Object>* obj_ptr : ref.second) {
Object* obj = obj_ptr->AsMirrorPtr();
if (obj != nullptr) {
- Object* new_obj = visitor(obj, arg);
+ Object* new_obj = callback(obj, arg, 0, kRootVMInternal);
// Avoid dirtying pages in the image unless necessary.
if (new_obj != obj) {
obj_ptr->Assign(new_obj);
@@ -318,9 +318,9 @@
}
// Mark all references to the alloc space(s).
-void ModUnionTableCardCache::UpdateAndMarkReferences(RootVisitor visitor, void* arg) {
+void ModUnionTableCardCache::UpdateAndMarkReferences(RootCallback* callback, void* arg) {
CardTable* card_table = heap_->GetCardTable();
- ModUnionScanImageRootVisitor scan_visitor(visitor, arg);
+ ModUnionScanImageRootVisitor scan_visitor(callback, arg);
SpaceBitmap* bitmap = space_->GetLiveBitmap();
for (const byte* card_addr : cleared_cards_) {
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index a89dbd1..7d5d8d2 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -19,7 +19,7 @@
#include "gc_allocator.h"
#include "globals.h"
-#include "root_visitor.h"
+#include "object_callbacks.h"
#include "safe_map.h"
#include <set>
@@ -69,7 +69,7 @@
// Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
// before a call to update, for example, back-to-back sticky GCs. Also mark references to other
// spaces which are stored in the mod-union table.
- virtual void UpdateAndMarkReferences(RootVisitor visitor, void* arg) = 0;
+ virtual void UpdateAndMarkReferences(RootCallback* callback, void* arg) = 0;
// Verification, sanity checks that we don't have clean cards which conflict with our cached data
// for said cards. Exclusive lock is required since verify sometimes uses
@@ -106,7 +106,7 @@
void ClearCards();
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(RootVisitor visitor, void* arg)
+ void UpdateAndMarkReferences(RootCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -142,7 +142,7 @@
void ClearCards();
// Mark all references to the alloc space(s).
- void UpdateAndMarkReferences(RootVisitor visitor, void* arg)
+ void UpdateAndMarkReferences(RootCallback* callback, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index a080bee..ad4ff1b 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -44,7 +44,7 @@
reinterpret_cast<void*>(HeapLimit()));
}
-void ObjectSet::Walk(SpaceBitmap::Callback* callback, void* arg) {
+void ObjectSet::Walk(ObjectCallback* callback, void* arg) {
for (const mirror::Object* obj : contained_) {
callback(const_cast<mirror::Object*>(obj), arg);
}
@@ -102,7 +102,7 @@
// Visits set bits in address order. The callback is not permitted to
// change the bitmap bits or max during the traversal.
-void SpaceBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
+void SpaceBitmap::Walk(ObjectCallback* callback, void* arg) {
CHECK(bitmap_begin_ != NULL);
CHECK(callback != NULL);
@@ -174,12 +174,12 @@
}
}
-static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
+static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
void* arg);
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
-static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
+static void WalkInstanceFields(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
@@ -204,7 +204,7 @@
}
// For an unvisited object, visit it then all its children found via fields.
-static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
+static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (visited->Test(obj)) {
@@ -246,7 +246,7 @@
// Visits set bits with an in order traversal. The callback is not permitted to change the bitmap
// bits or max during the traversal.
-void SpaceBitmap::InOrderWalk(SpaceBitmap::Callback* callback, void* arg) {
+void SpaceBitmap::InOrderWalk(ObjectCallback* callback, void* arg) {
UniquePtr<SpaceBitmap> visited(Create("bitmap for in-order walk",
reinterpret_cast<byte*>(heap_begin_),
IndexToOffset(bitmap_size_ / kWordSize)));
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index aa074eb..3c4b674 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -17,10 +17,11 @@
#ifndef ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_H_
#define ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_H_
-#include "locks.h"
#include "gc_allocator.h"
#include "globals.h"
+#include "locks.h"
#include "mem_map.h"
+#include "object_callbacks.h"
#include "UniquePtr.h"
#include <limits.h>
@@ -42,8 +43,6 @@
// Alignment of objects within spaces.
static const size_t kAlignment = 8;
- typedef void Callback(mirror::Object* obj, void* arg);
-
typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg);
typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg);
@@ -102,7 +101,7 @@
return index < bitmap_size_ / kWordSize;
}
- void VisitRange(uintptr_t base, uintptr_t max, Callback* visitor, void* arg) const;
+ void VisitRange(uintptr_t base, uintptr_t max, ObjectCallback* callback, void* arg) const;
class ClearVisitor {
public:
@@ -129,10 +128,10 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Walk(Callback* callback, void* arg)
+ void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void InOrderWalk(Callback* callback, void* arg)
+ void InOrderWalk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
static void SweepWalk(const SpaceBitmap& live, const SpaceBitmap& mark, uintptr_t base,
@@ -249,7 +248,7 @@
contained_ = space_set.contained_;
}
- void Walk(SpaceBitmap::Callback* callback, void* arg)
+ void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);
template <typename Visitor>
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 862d06f..de9f59e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -525,14 +525,16 @@
}
}
-Object* MarkSweep::MarkRootParallelCallback(Object* root, void* arg) {
+mirror::Object* MarkSweep::MarkRootParallelCallback(mirror::Object* root, void* arg,
+ uint32_t /*thread_id*/, RootType /*root_type*/) {
DCHECK(root != NULL);
DCHECK(arg != NULL);
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
return root;
}
-Object* MarkSweep::MarkRootCallback(Object* root, void* arg) {
+Object* MarkSweep::MarkRootCallback(Object* root, void* arg, uint32_t /*thread_id*/,
+ RootType /*root_type*/) {
DCHECK(root != nullptr);
DCHECK(arg != nullptr);
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(root);
@@ -930,7 +932,7 @@
ProcessMarkStack(false);
}
-mirror::Object* MarkSweep::IsMarkedCallback(Object* object, void* arg) {
+mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
return object;
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index bfedac7..8bc0bb5 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -22,8 +22,8 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
+#include "object_callbacks.h"
#include "offsets.h"
-#include "root_visitor.h"
#include "UniquePtr.h"
namespace art {
@@ -180,11 +180,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
+ static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg, uint32_t thread_id,
+ RootType root_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static mirror::Object* MarkRootParallelCallback(mirror::Object* root, void* arg);
+ static mirror::Object* MarkRootParallelCallback(mirror::Object* root, void* arg,
+ uint32_t thread_id, RootType root_type);
// Marks an object.
void MarkObject(const mirror::Object* obj)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 0c6a938..b37b9d2 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -513,7 +513,7 @@
return forward_address;
}
-Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
+mirror::Object* SemiSpace::RecursiveMarkObjectCallback(mirror::Object* root, void* arg) {
DCHECK(root != nullptr);
DCHECK(arg != nullptr);
SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
@@ -522,7 +522,8 @@
return ret;
}
-Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
+Object* SemiSpace::MarkRootCallback(Object* root, void* arg, uint32_t /*thread_id*/,
+ RootType /*root_type*/) {
DCHECK(root != nullptr);
DCHECK(arg != nullptr);
return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
@@ -536,7 +537,7 @@
timings_.EndSplit();
}
-mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
+mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 685b33c..f58402f 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -22,8 +22,8 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
+#include "object_callbacks.h"
#include "offsets.h"
-#include "root_visitor.h"
#include "UniquePtr.h"
namespace art {
@@ -142,7 +142,8 @@
static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
+ static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg, uint32_t thread_id,
+ RootType root_type)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* root, void* arg)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 62567d7..9c828b2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -369,7 +369,7 @@
}
}
-void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
+void Heap::VisitObjects(ObjectCallback callback, void* arg) {
Thread* self = Thread::Current();
// GCs can move objects, so don't allow this.
const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
@@ -604,8 +604,8 @@
}
struct SoftReferenceArgs {
- RootVisitor* is_marked_callback_;
- RootVisitor* recursive_mark_callback_;
+ IsMarkedCallback* is_marked_callback_;
+ MarkObjectCallback* recursive_mark_callback_;
void* arg_;
};
@@ -617,8 +617,8 @@
// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
- RootVisitor* is_marked_callback,
- RootVisitor* recursive_mark_object_callback, void* arg) {
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* recursive_mark_object_callback, void* arg) {
// Unless we are in the zygote or required to clear soft references with white references,
// preserve some white referents.
if (!clear_soft && !Runtime::Current()->IsZygote()) {
@@ -671,13 +671,13 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
- RootVisitor mark_visitor, void* arg) {
+ IsMarkedCallback is_marked_callback, void* arg) {
DCHECK(klass != nullptr);
DCHECK(klass->IsReferenceClass());
DCHECK(obj != nullptr);
mirror::Object* referent = GetReferenceReferent(obj);
if (referent != nullptr) {
- mirror::Object* forward_address = mark_visitor(referent, arg);
+ mirror::Object* forward_address = is_marked_callback(referent, arg);
// Null means that the object is not currently marked.
if (forward_address == nullptr) {
Thread* self = Thread::Current();
@@ -1169,7 +1169,7 @@
void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
std::vector<mirror::Object*>& referring_objects) {
- // Can't do any GC in this function since this may move classes.
+ // Can't do any GC in this function since this may move the object o.
Thread* self = Thread::Current();
auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
ReferringObjectsFinder finder(o, max_count, referring_objects);
@@ -1696,7 +1696,8 @@
gc_complete_cond_->Broadcast(self);
}
-static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
+static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg,
+ uint32_t /*thread_id*/, RootType /*root_type*/) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
if (root == obj) {
LOG(INFO) << "Object " << obj << " is a root";
@@ -1823,7 +1824,8 @@
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
- static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
+ static mirror::Object* VerifyRoots(mirror::Object* root, void* arg, uint32_t /*thread_id*/,
+ RootType /*root_type*/) {
VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
(*visitor)(nullptr, root, MemberOffset(0), true);
return root;
@@ -2041,7 +2043,7 @@
}
}
-static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
+static mirror::Object* IdentityRootCallback(mirror::Object* obj, void*, uint32_t, RootType) {
return obj;
}
@@ -2080,7 +2082,7 @@
ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
for (const auto& table_pair : mod_union_tables_) {
accounting::ModUnionTable* mod_union_table = table_pair.second;
- mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
+ mod_union_table->UpdateAndMarkReferences(IdentityRootCallback, nullptr);
mod_union_table->Verify();
}
thread_list->ResumeAll();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 476ceee..368a687 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -32,9 +32,9 @@
#include "gtest/gtest.h"
#include "jni.h"
#include "locks.h"
+#include "object_callbacks.h"
#include "offsets.h"
#include "reference_queue.h"
-#include "root_visitor.h"
#include "safe_map.h"
#include "thread_pool.h"
@@ -183,7 +183,7 @@
}
// Visit all of the live objects in the heap.
- void VisitObjects(ObjectVisitorCallback callback, void* arg)
+ void VisitObjects(ObjectCallback callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -328,8 +328,9 @@
return finalizer_reference_zombie_offset_;
}
static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
- void ProcessReferences(TimingLogger& timings, bool clear_soft, RootVisitor* is_marked_callback,
- RootVisitor* recursive_mark_object_callback, void* arg)
+ void ProcessReferences(TimingLogger& timings, bool clear_soft,
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* recursive_mark_object_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -605,8 +606,9 @@
// Returns true if the reference object has not yet been enqueued.
bool IsEnqueuable(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsEnqueued(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj, RootVisitor mark_visitor,
- void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+ IsMarkedCallback is_marked_callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Run the finalizers.
void RunFinalization(JNIEnv* env);
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 2d73a71..fae4cac 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -94,13 +94,14 @@
}
}
-void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references, RootVisitor visitor,
+void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references,
+ IsMarkedCallback* preserve_callback,
void* arg) {
while (!IsEmpty()) {
mirror::Object* ref = DequeuePendingReference();
mirror::Object* referent = heap_->GetReferenceReferent(ref);
if (referent != nullptr) {
- mirror::Object* forward_address = visitor(referent, arg);
+ mirror::Object* forward_address = preserve_callback(referent, arg);
if (forward_address == nullptr) {
// Referent is white, clear it.
heap_->ClearReferenceReferent(ref);
@@ -108,7 +109,7 @@
cleared_references.EnqueuePendingReference(ref);
}
} else if (referent != forward_address) {
- // Object moved, need to updated the referrent.
+ // Object moved, need to update the referent.
heap_->SetReferenceReferent(ref, forward_address);
}
}
@@ -116,8 +117,9 @@
}
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
- RootVisitor is_marked_callback,
- RootVisitor recursive_mark_callback, void* arg) {
+ IsMarkedCallback is_marked_callback,
+ MarkObjectCallback recursive_mark_callback,
+ void* arg) {
while (!IsEmpty()) {
mirror::Object* ref = DequeuePendingReference();
mirror::Object* referent = heap_->GetReferenceReferent(ref);
@@ -139,7 +141,7 @@
}
}
-void ReferenceQueue::PreserveSomeSoftReferences(RootVisitor preserve_callback, void* arg) {
+void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback preserve_callback, void* arg) {
ReferenceQueue cleared(heap_);
while (!IsEmpty()) {
mirror::Object* ref = DequeuePendingReference();
@@ -149,7 +151,7 @@
if (forward_address == nullptr) {
// Either the reference isn't marked or we don't wish to preserve it.
cleared.EnqueuePendingReference(ref);
- } else {
+ } else if (forward_address != referent) {
heap_->SetReferenceReferent(ref, forward_address);
}
}
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 3f3069e..e12a95f 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -27,8 +27,8 @@
#include "gtest/gtest.h"
#include "jni.h"
#include "locks.h"
+#include "object_callbacks.h"
#include "offsets.h"
-#include "root_visitor.h"
#include "thread_pool.h"
namespace art {
@@ -56,17 +56,18 @@
// Enqueues finalizer references with white referents. White referents are blackened, moved to the
// zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
- RootVisitor is_marked_callback,
- RootVisitor recursive_mark_callback, void* arg)
+ IsMarkedCallback is_marked_callback,
+ MarkObjectCallback recursive_mark_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
// biased toward saving are blackened and also removed from the list.
- void PreserveSomeSoftReferences(RootVisitor preserve_callback, void* arg)
+ void PreserveSomeSoftReferences(IsMarkedCallback* preserve_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Unlink the reference list, clearing reference objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
- void ClearWhiteReferences(ReferenceQueue& cleared_references, RootVisitor visitor, void* arg)
+ void ClearWhiteReferences(ReferenceQueue& cleared_references, IsMarkedCallback is_marked_callback,
+ void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index a314d74..2e07bd3 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -137,7 +137,7 @@
return storage;
}
-void BumpPointerSpace::Walk(ObjectVisitorCallback callback, void* arg) {
+void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
byte* pos = Begin();
{
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index d73fe3b..ddd17be 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
-#include "root_visitor.h"
+#include "object_callbacks.h"
#include "space.h"
namespace art {
@@ -121,7 +121,7 @@
}
// Go through all of the blocks and visit the continuous objects.
- void Walk(ObjectVisitorCallback callback, void* arg)
+ void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Object alignment within the space.