Clean up GC callbacks to be virtual methods

Replace the C-style callback/void* argument pairs (MarkObjectCallback,
MarkHeapReferenceCallback, IsMarkedCallback, ProcessMarkStackCallback,
DelayReferenceReferentCallback, ...) with virtual methods on
GarbageCollector, which now additionally inherits IsMarkedVisitor and
MarkObjectVisitor. Mod union tables, remembered sets, allocation record
sweeping, system weak sweeping, and the reference processor now take a
visitor or collector pointer instead of a callback plus an opaque
argument.

Change-Id: Ia08034a4e5931c4fcb329c3bd3c4b1f301135735
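
The shape of the change, in miniature: call sites that used to take a
function pointer plus a type-erased void* argument now take a visitor
interface and invoke a virtual method on it. A simplified, self-contained
sketch of the two styles (toy stand-in types, not ART code):

    #include <cstdio>

    struct Object { int data; };  // toy stand-in for mirror::Object

    // Before: a free-function callback plus an opaque argument that the
    // callback casts back to the collector.
    typedef Object* IsMarkedCallback(Object* obj, void* arg);

    struct OldCollector {
      Object* IsMarkedImpl(Object* obj) { return obj; }
      static Object* IsMarkedTrampoline(Object* obj, void* arg) {
        return reinterpret_cast<OldCollector*>(arg)->IsMarkedImpl(obj);
      }
    };

    void SweepWeaksOld(IsMarkedCallback* callback, void* arg, Object* weak) {
      if (callback(weak, arg) == nullptr) {
        // A real sweeper would clear the weak reference here.
      }
    }

    // After: the collector implements a visitor interface; the virtual call
    // replaces the function pointer and the unchecked cast disappears.
    struct IsMarkedVisitor {
      virtual ~IsMarkedVisitor() {}
      virtual Object* IsMarked(Object* obj) = 0;
    };

    struct NewCollector : public IsMarkedVisitor {
      Object* IsMarked(Object* obj) override { return obj; }
    };

    void SweepWeaksNew(IsMarkedVisitor* visitor, Object* weak) {
      if (visitor->IsMarked(weak) == nullptr) {
        // A real sweeper would clear the weak reference here.
      }
    }

    int main() {
      Object obj{1};
      OldCollector old_collector;
      SweepWeaksOld(&OldCollector::IsMarkedTrampoline, &old_collector, &obj);
      NewCollector new_collector;
      SweepWeaksNew(&new_collector, &obj);
      std::printf("swept one weak in both styles\n");
      return 0;
    }

Either style costs one indirect call; the visitor form drops the
reinterpret_cast and lets one object serve several roles, which the diff
uses by making GarbageCollector inherit IsMarkedVisitor and
MarkObjectVisitor.
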
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index cd3f910..009254b 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -21,16 +21,11 @@
#include "base/stl_util.h"
#include "bitmap-inl.h"
#include "card_table-inl.h"
-#include "heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
-#include "gc/collector/mark_sweep.h"
-#include "gc/collector/mark_sweep-inl.h"
#include "gc/heap.h"
-#include "gc/space/space.h"
#include "gc/space/image_space.h"
+#include "gc/space/space.h"
#include "mirror/object-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
#include "space_bitmap-inl.h"
#include "thread.h"
@@ -95,11 +90,11 @@
class ModUnionUpdateObjectReferencesVisitor {
public:
- ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg,
+ ModUnionUpdateObjectReferencesVisitor(MarkObjectVisitor* visitor,
space::ContinuousSpace* from_space,
space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
+ : visitor_(visitor), from_space_(from_space), immune_space_(immune_space),
contains_reference_to_other_space_(contains_reference_to_other_space) {
}
@@ -111,13 +106,12 @@
mirror::Object* ref = obj_ptr->AsMirrorPtr();
if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
*contains_reference_to_other_space_ = true;
- callback_(obj_ptr, arg_);
+ visitor_->MarkHeapReference(obj_ptr);
}
}
private:
- MarkHeapReferenceCallback* const callback_;
- void* const arg_;
+ MarkObjectVisitor* const visitor_;
// Space which we are scanning
space::ContinuousSpace* const from_space_;
space::ContinuousSpace* const immune_space_;
@@ -129,25 +123,24 @@
public:
// Immune space is any other space which we don't care about references to. Currently this is
// the image space in the case of the zygote mod union table.
- ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg,
+ ModUnionScanImageRootVisitor(MarkObjectVisitor* visitor,
space::ContinuousSpace* from_space,
space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
+ : visitor_(visitor), from_space_(from_space), immune_space_(immune_space),
contains_reference_to_other_space_(contains_reference_to_other_space) {}
void operator()(Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
- ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_, immune_space_,
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, from_space_, immune_space_,
contains_reference_to_other_space_);
root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
}
private:
- MarkHeapReferenceCallback* const callback_;
- void* const arg_;
+ MarkObjectVisitor* const visitor_;
// Space which we are scanning
space::ContinuousSpace* const from_space_;
space::ContinuousSpace* const immune_space_;
@@ -305,8 +298,7 @@
}
}
-void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
- void* arg) {
+void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
CardTable* card_table = heap_->GetCardTable();
std::vector<mirror::HeapReference<Object>*> cards_references;
@@ -338,7 +330,7 @@
size_t count = 0;
for (const auto& ref : references_) {
for (mirror::HeapReference<Object>* obj_ptr : ref.second) {
- callback(obj_ptr, arg);
+ visitor->MarkHeapReference(obj_ptr);
}
count += ref.second.size();
}
@@ -362,9 +354,9 @@
class CardBitVisitor {
public:
- CardBitVisitor(MarkHeapReferenceCallback* callback, void* arg, space::ContinuousSpace* space,
+ CardBitVisitor(MarkObjectVisitor* visitor, space::ContinuousSpace* space,
space::ContinuousSpace* immune_space, ModUnionTable::CardBitmap* card_bitmap)
- : callback_(callback), arg_(arg), space_(space), immune_space_(immune_space),
+ : visitor_(visitor), space_(space), immune_space_(immune_space),
bitmap_(space->GetLiveBitmap()), card_bitmap_(card_bitmap) {
DCHECK(immune_space_ != nullptr);
}
@@ -374,7 +366,7 @@
DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start)))
<< start << " " << *space_;
bool reference_to_other_space = false;
- ModUnionScanImageRootVisitor scan_visitor(callback_, arg_, space_, immune_space_,
+ ModUnionScanImageRootVisitor scan_visitor(visitor_, space_, immune_space_,
&reference_to_other_space);
bitmap_->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
if (!reference_to_other_space) {
@@ -384,8 +376,7 @@
}
private:
- MarkHeapReferenceCallback* const callback_;
- void* const arg_;
+ MarkObjectVisitor* const visitor_;
space::ContinuousSpace* const space_;
space::ContinuousSpace* const immune_space_;
ContinuousSpaceBitmap* const bitmap_;
@@ -400,15 +391,14 @@
}
// Mark all references to the alloc space(s).
-void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
- void* arg) {
+void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
auto* image_space = heap_->GetImageSpace();
// If we don't have an image space, just pass in space_ as the immune space. Pass in the same
// space_ instead of image_space to avoid a null check in ModUnionUpdateObjectReferencesVisitor.
- CardBitVisitor visitor(callback, arg, space_, image_space != nullptr ? image_space : space_,
+ CardBitVisitor bit_visitor(visitor, space_, image_space != nullptr ? image_space : space_,
card_bitmap_.get());
card_bitmap_->VisitSetBits(
- 0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, visitor);
+ 0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, bit_visitor);
}
void ModUnionTableCardCache::Dump(std::ostream& os) {
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 2e232ca..520cc1c 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -76,7 +76,7 @@
// Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
// before a call to update, for example, back-to-back sticky GCs. Also mark references to other
// spaces which are stored in the mod-union table.
- virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) = 0;
+ virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0;
// Verification, sanity checks that we don't have clean cards which conflict with our cached data
// for said cards. Exclusive lock is required since verify sometimes uses
@@ -117,7 +117,7 @@
void ClearCards() OVERRIDE;
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
+ void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -157,7 +157,7 @@
virtual void ClearCards() OVERRIDE;
// Mark all references to the alloc space(s).
- virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
+ virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
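
The MarkObjectVisitor interface these signatures code against is declared
in object_callbacks.h, which this diff does not touch. A reconstruction
inferred from the overrides elsewhere in the change (lock annotations and
ART includes omitted; not the verbatim header):

    namespace mirror {
    class Object;
    template <typename MirrorType> class HeapReference;
    }  // namespace mirror

    class MarkObjectVisitor {
     public:
      virtual ~MarkObjectVisitor() {}
      // Mark obj and return its address, which can differ from obj if the
      // collector moved the object.
      virtual mirror::Object* MarkObject(mirror::Object* obj) = 0;
      // Mark the object the heap reference points to, updating the
      // reference in place if the object moves.
      virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj) = 0;
    };
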
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 363b76a..aad8a25 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -93,12 +93,24 @@
};
// Collect visited objects into container.
-static void CollectVisitedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(ref != nullptr);
- DCHECK(arg != nullptr);
- reinterpret_cast<std::set<mirror::Object*>*>(arg)->insert(ref->AsMirrorPtr());
-}
+class CollectVisitedVisitor : public MarkObjectVisitor {
+ public:
+ explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(ref != nullptr);
+ MarkObject(ref->AsMirrorPtr());
+ }
+ virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ out_->insert(obj);
+ return obj;
+ }
+
+ private:
+ std::set<mirror::Object*>* const out_;
+};
// A mod union table that only holds references to a specified target space.
class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
@@ -199,7 +211,8 @@
obj2->Set(3, other_space_ref2);
table->ClearCards();
std::set<mirror::Object*> visited_before;
- table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_before);
+ CollectVisitedVisitor collector_before(&visited_before);
+ table->UpdateAndMarkReferences(&collector_before);
// Check that we visited all the references in other spaces only.
ASSERT_GE(visited_before.size(), 2u);
ASSERT_TRUE(visited_before.find(other_space_ref1) != visited_before.end());
@@ -230,7 +243,8 @@
}
// Visit again and make sure the cards got cleared back to their sane state.
std::set<mirror::Object*> visited_after;
- table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_after);
+ CollectVisitedVisitor collector_after(&visited_after);
+ table->UpdateAndMarkReferences(&collector_after);
// Check that we visited a superset after.
for (auto* obj : visited_before) {
ASSERT_TRUE(visited_after.find(obj) != visited_after.end()) << obj;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index eeb385e..23ab8df 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -61,11 +61,10 @@
class RememberedSetReferenceVisitor {
public:
- RememberedSetReferenceVisitor(MarkHeapReferenceCallback* callback,
- DelayReferenceReferentCallback* ref_callback,
- space::ContinuousSpace* target_space,
- bool* const contains_reference_to_target_space, void* arg)
- : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
+ RememberedSetReferenceVisitor(space::ContinuousSpace* target_space,
+ bool* const contains_reference_to_target_space,
+ collector::GarbageCollector* collector)
+ : collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
@@ -74,7 +73,7 @@
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
*contains_reference_to_target_space_ = true;
- callback_(ref_ptr, arg_);
+ collector_->MarkHeapReference(ref_ptr);
DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
}
}
@@ -84,49 +83,43 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
- ref_callback_(klass, ref, arg_);
+ collector_->DelayReferenceReferent(klass, ref);
}
}
private:
- MarkHeapReferenceCallback* const callback_;
- DelayReferenceReferentCallback* const ref_callback_;
+ collector::GarbageCollector* const collector_;
space::ContinuousSpace* const target_space_;
- void* const arg_;
bool* const contains_reference_to_target_space_;
};
class RememberedSetObjectVisitor {
public:
- RememberedSetObjectVisitor(MarkHeapReferenceCallback* callback,
- DelayReferenceReferentCallback* ref_callback,
- space::ContinuousSpace* target_space,
- bool* const contains_reference_to_target_space, void* arg)
- : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
+ RememberedSetObjectVisitor(space::ContinuousSpace* target_space,
+ bool* const contains_reference_to_target_space,
+ collector::GarbageCollector* collector)
+ : collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- RememberedSetReferenceVisitor visitor(callback_, ref_callback_, target_space_,
- contains_reference_to_target_space_, arg_);
+ RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
+ collector_);
obj->VisitReferences<kMovingClasses>(visitor, visitor);
}
private:
- MarkHeapReferenceCallback* const callback_;
- DelayReferenceReferentCallback* const ref_callback_;
+ collector::GarbageCollector* const collector_;
space::ContinuousSpace* const target_space_;
- void* const arg_;
bool* const contains_reference_to_target_space_;
};
-void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
- DelayReferenceReferentCallback* ref_callback,
- space::ContinuousSpace* target_space, void* arg) {
+void RememberedSet::UpdateAndMarkReferences(space::ContinuousSpace* target_space,
+ collector::GarbageCollector* collector) {
CardTable* card_table = heap_->GetCardTable();
bool contains_reference_to_target_space = false;
- RememberedSetObjectVisitor obj_visitor(callback, ref_callback, target_space,
- &contains_reference_to_target_space, arg);
+ RememberedSetObjectVisitor obj_visitor(target_space, &contains_reference_to_target_space,
+ collector);
ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
CardSet remove_card_set;
for (uint8_t* const card_addr : dirty_cards_) {
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index c51e26d..affe863 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -29,6 +29,7 @@
namespace gc {
namespace collector {
+ class GarbageCollector;
class MarkSweep;
} // namespace collector
namespace space {
@@ -53,9 +54,8 @@
void ClearCards();
// Mark through all references to the target space.
- void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
- DelayReferenceReferentCallback* ref_callback,
- space::ContinuousSpace* target_space, void* arg)
+ void UpdateAndMarkReferences(space::ContinuousSpace* target_space,
+ collector::GarbageCollector* collector)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
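
The remembered-set visitors above route two kinds of fields through the
collector: ordinary reference fields that point into the target space go
to MarkHeapReference, while java.lang.ref.Reference referents go to
DelayReferenceReferent for the reference processor. A self-contained toy
of that dispatch (stand-in types; the real visitors also record whether
any reference into the target space was seen):

    #include <cstdio>
    #include <vector>

    struct Object { bool in_target_space = false; };
    struct Reference { Object* referent = nullptr; };

    struct Collector {
      virtual ~Collector() {}
      virtual void MarkHeapReference(Object** field) = 0;
      virtual void DelayReferenceReferent(Reference* ref) = 0;
    };

    struct ToyCollector : public Collector {
      std::vector<Object**> marked;
      std::vector<Reference*> delayed;
      void MarkHeapReference(Object** field) override { marked.push_back(field); }
      void DelayReferenceReferent(Reference* ref) override { delayed.push_back(ref); }
    };

    // Shape of RememberedSetReferenceVisitor: mark ordinary fields now,
    // queue Reference referents for later processing.
    void VisitField(Object** field, Collector* collector) {
      if (*field != nullptr && (*field)->in_target_space) {
        collector->MarkHeapReference(field);
      }
    }

    void VisitReference(Reference* ref, Collector* collector) {
      if (ref->referent != nullptr && ref->referent->in_target_space) {
        collector->DelayReferenceReferent(ref);
      }
    }

    int main() {
      Object target{true};
      Object* field = &target;
      Reference ref{&target};

      ToyCollector collector;
      VisitField(&field, &collector);
      VisitReference(&ref, &collector);
      std::printf("marked=%zu delayed=%zu\n", collector.marked.size(),
                  collector.delayed.size());  // prints marked=1 delayed=1
      return 0;
    }
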
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 35faff3..e0661b6 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -30,6 +30,7 @@
namespace art {
namespace mirror {
+ class Class;
class Object;
} // namespace mirror
class MemMap;
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 88c475b..3108b7c 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -110,23 +110,24 @@
}
}
-static inline void SweepClassObject(AllocRecord* record, IsMarkedCallback* callback, void* arg)
+static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
// This does not need a read barrier because this is called by GC.
mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
- // The class object can become null if we implement class unloading.
- // In that case we might still want to keep the class name string (not implemented).
- mirror::Object* new_object = UNLIKELY(old_object == nullptr) ?
- nullptr : callback(old_object, arg);
- if (UNLIKELY(old_object != new_object)) {
- mirror::Class* new_klass = UNLIKELY(new_object == nullptr) ? nullptr : new_object->AsClass();
- klass = GcRoot<mirror::Class>(new_klass);
+ if (old_object != nullptr) {
+ // The class object can become null if we implement class unloading.
+ // In that case we might still want to keep the class name string (not implemented).
+ mirror::Object* new_object = visitor->IsMarked(old_object);
+ DCHECK(new_object != nullptr);
+ if (UNLIKELY(old_object != new_object)) {
+ klass = GcRoot<mirror::Class>(new_object->AsClass());
+ }
}
}
-void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedCallback* callback, void* arg) {
+void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
VLOG(heap) << "Start SweepAllocationRecords()";
size_t count_deleted = 0, count_moved = 0, count = 0;
// Only the first (size - recent_record_max_) records can be deleted.
@@ -141,11 +142,11 @@
// This does not need a read barrier because this is called by GC.
mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
AllocRecord* record = it->second;
- mirror::Object* new_object = old_object == nullptr ? nullptr : callback(old_object, arg);
+ mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
if (new_object == nullptr) {
if (count > delete_bound) {
it->first = GcRoot<mirror::Object>(nullptr);
- SweepClassObject(record, callback, arg);
+ SweepClassObject(record, visitor);
++it;
} else {
delete record;
@@ -157,7 +158,7 @@
it->first = GcRoot<mirror::Object>(new_object);
++count_moved;
}
- SweepClassObject(record, callback, arg);
+ SweepClassObject(record, visitor);
++it;
}
}
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 06721c8..933363b 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -261,7 +261,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
- void SweepAllocationRecords(IsMarkedCallback* callback, void* arg)
+ void SweepAllocationRecords(IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
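
SweepAllocationRecords relies on a three-way contract from IsMarked: null
for a dead object, a new address for an object the collector moved, and
the object itself otherwise. A self-contained toy of the sweeping loop's
core step (simplified types; the real code also handles class GC roots
and the recent-allocation window):

    #include <cassert>

    struct Object { bool alive; Object* forward; };  // toy stand-in

    struct IsMarkedVisitor {
      virtual ~IsMarkedVisitor() {}
      virtual Object* IsMarked(Object* obj) = 0;
    };

    struct ToyCollector : public IsMarkedVisitor {
      // null = dead, forward set = moved, otherwise marked in place.
      Object* IsMarked(Object* obj) override {
        if (!obj->alive) return nullptr;
        return obj->forward != nullptr ? obj->forward : obj;
      }
    };

    // Mirrors one iteration of the sweep: clear dead weak roots, patch
    // moved ones, leave live unmoved ones alone.
    void SweepWeakRoot(Object** root, IsMarkedVisitor* visitor) {
      Object* old_object = *root;
      *root = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    }

    int main() {
      Object moved_to{true, nullptr};
      Object dead{false, nullptr};
      Object moved{true, &moved_to};
      Object live{true, nullptr};

      ToyCollector collector;
      Object* roots[] = {&dead, &moved, &live};
      for (Object*& root : roots) SweepWeakRoot(&root, &collector);

      assert(roots[0] == nullptr);    // dead: root cleared
      assert(roots[1] == &moved_to);  // moved: root patched
      assert(roots[2] == &live);      // live: root unchanged
      return 0;
    }
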
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 9316b27..b5d5c34 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -73,6 +73,12 @@
}
}
+void ConcurrentCopying::MarkHeapReference(
+ mirror::HeapReference<mirror::Object>* from_ref ATTRIBUTE_UNUSED) {
+ // Unused; this entry point is normally called from mod union tables.
+ UNIMPLEMENTED(FATAL);
+}
+
ConcurrentCopying::~ConcurrentCopying() {
STLDeleteElements(&pooled_mark_stacks_);
}
@@ -308,7 +314,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class EmptyCheckpoint : public Closure {
@@ -429,7 +435,7 @@
LOG(INFO) << "ProcessReferences";
}
// Process weak references. This may produce new refs to process and have them processed via
- // ProcessMarkStackCallback (in the GC exclusive mark stack mode).
+ // ProcessMarkStack (in the GC exclusive mark stack mode).
ProcessReferences(self);
CheckEmptyMarkStack();
if (kVerboseMode) {
@@ -644,7 +650,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
@@ -732,16 +738,9 @@
}
collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
}
- static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
- DCHECK(root != nullptr);
- visitor(*root);
- }
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
@@ -762,7 +761,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
@@ -785,7 +784,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class RevokeThreadLocalMarkStackCheckpoint : public Closure {
@@ -1088,7 +1087,7 @@
void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
@@ -1293,7 +1292,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
// Compute how much live objects are left in regions.
@@ -2029,14 +2028,9 @@
heap_->ClearMarkedObjects();
}
-mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
- return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
-}
-
-bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
- mirror::HeapReference<mirror::Object>* field, void* arg) {
+bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
mirror::Object* from_ref = field->AsMirrorPtr();
- mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
+ mirror::Object* to_ref = IsMarked(from_ref);
if (to_ref == nullptr) {
return false;
}
@@ -2048,18 +2042,12 @@
return true;
}
-mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
- return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
-}
-
-void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
- ConcurrentCopying* concurrent_copying = reinterpret_cast<ConcurrentCopying*>(arg);
- concurrent_copying->ProcessMarkStack();
+mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
+ return Mark(from_ref);
}
void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
- heap_->GetReferenceProcessor()->DelayReferenceReferent(
- klass, reference, &IsHeapReferenceMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
void ConcurrentCopying::ProcessReferences(Thread* self) {
@@ -2067,8 +2055,7 @@
// We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
+ true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
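
IsMarkedHeapReference shows up in two flavors in this change: MarkSweep
and MarkCompact purely test the decoded pointer, while ConcurrentCopying
also writes the to-space address back into the field (the Assign falls
between the hunks shown above, so the write-back below is an assumption
based on the surrounding code). A toy contrast with stand-in types:

    #include <cassert>

    struct Object { Object* to_space = nullptr; };
    struct HeapReference {
      Object* ptr;
      Object* AsMirrorPtr() const { return ptr; }
      void Assign(Object* p) { ptr = p; }
    };

    // MarkSweep/MarkCompact style: a pure query, no effect on the field.
    struct QueryOnly {
      Object* IsMarked(Object* obj) { return obj; }  // toy: all marked
      bool IsMarkedHeapReference(HeapReference* ref) {
        return IsMarked(ref->AsMirrorPtr()) != nullptr;
      }
    };

    // ConcurrentCopying style: patch the field to the to-space copy before
    // reporting the reference marked.
    struct UpdatingQuery {
      Object* IsMarked(Object* obj) {
        return obj->to_space != nullptr ? obj->to_space : obj;
      }
      bool IsMarkedHeapReference(HeapReference* ref) {
        Object* from_ref = ref->AsMirrorPtr();
        Object* to_ref = IsMarked(from_ref);
        if (to_ref == nullptr) return false;
        if (to_ref != from_ref) ref->Assign(to_ref);
        return true;
      }
    };

    int main() {
      Object to_space_copy;
      Object obj;
      obj.to_space = &to_space_copy;
      HeapReference ref{&obj};

      UpdatingQuery cc;
      assert(cc.IsMarkedHeapReference(&ref));
      assert(ref.AsMirrorPtr() == &to_space_copy);  // field patched

      QueryOnly ms;
      assert(ms.IsMarkedHeapReference(&ref));       // no write-back
      return 0;
    }
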
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 1fb4703..4f92ea0 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -130,18 +130,16 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SwitchToSharedMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SwitchToGcExclusiveMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg)
+ virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg)
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool IsHeapReferenceMarkedCallback(
- mirror::HeapReference<mirror::Object>* field, void* arg)
+ virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void ProcessMarkStackCallback(void* arg)
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 9b76d1a..e10bef4 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -17,6 +17,9 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
#define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
+#include <stdint.h>
+#include <vector>
+
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
@@ -24,10 +27,16 @@
#include "gc/gc_cause.h"
#include "gc_root.h"
#include "gc_type.h"
-#include <stdint.h>
-#include <vector>
+#include "object_callbacks.h"
namespace art {
+
+namespace mirror {
+class Class;
+class Object;
+class Reference;
+} // namespace mirror
+
namespace gc {
class Heap;
@@ -113,7 +122,7 @@
DISALLOW_COPY_AND_ASSIGN(Iteration);
};
-class GarbageCollector : public RootVisitor {
+class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
public:
class SCOPED_LOCKABLE ScopedPause {
public:
@@ -172,6 +181,22 @@
void RecordFreeLOS(const ObjectBytePair& freed);
void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
+ // Helper functions for querying if objects are marked. These are used for
+ // reading system weaks and processing references.
+ virtual mirror::Object* IsMarked(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ // Used by reference processor.
+ virtual void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ // Force mark an object.
+ virtual mirror::Object* MarkObject(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
protected:
// Run all of the GC phases.
virtual void RunPhases() = 0;
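
With GarbageCollector inheriting RootVisitor, IsMarkedVisitor, and
MarkObjectVisitor, one `this` pointer can be handed to every subsystem
that previously took a distinct callback/arg pair. A self-contained toy
of that multi-role shape (the function names imitate the real call sites,
but everything here is a stand-in):

    #include <cstdio>

    struct Object {};

    struct IsMarkedVisitor {
      virtual ~IsMarkedVisitor() {}
      virtual Object* IsMarked(Object* obj) = 0;
    };

    struct MarkObjectVisitor {
      virtual ~MarkObjectVisitor() {}
      virtual Object* MarkObject(Object* obj) = 0;
    };

    // One object, several roles, as in
    // class GarbageCollector : public RootVisitor, public IsMarkedVisitor,
    //                          public MarkObjectVisitor.
    struct ToyCollector : public IsMarkedVisitor, public MarkObjectVisitor {
      Object* IsMarked(Object* obj) override { return obj; }
      Object* MarkObject(Object* obj) override { return obj; }
    };

    void SweepSystemWeaks(IsMarkedVisitor* visitor, Object* weak) {
      visitor->IsMarked(weak);
    }

    void UpdateAndMarkReferences(MarkObjectVisitor* visitor, Object* ref) {
      visitor->MarkObject(ref);
    }

    int main() {
      ToyCollector collector;
      Object obj;
      SweepSystemWeaks(&collector, &obj);         // collector as IsMarkedVisitor
      UpdateAndMarkReferences(&collector, &obj);  // collector as MarkObjectVisitor
      std::puts("one collector, two visitor roles");
      return 0;
    }
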
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 3c247cd..65e6b40 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -21,34 +21,19 @@
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
-#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
-#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
-#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "indirect_reference_table.h"
-#include "intern_table.h"
-#include "jni_internal.h"
-#include "mark_sweep-inl.h"
-#include "monitor.h"
#include "mirror/class-inl.h"
-#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
-#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
-#include "mirror/object_array.h"
-#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
-using ::art::mirror::Object;
-
namespace art {
namespace gc {
namespace collector {
@@ -67,7 +52,7 @@
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
- space_(nullptr), collector_name_(name_) {
+ space_(nullptr), collector_name_(name_), updating_references_(false) {
}
void MarkCompact::RunPhases() {
@@ -107,7 +92,7 @@
void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
Locks::heap_bitmap_lock_) {
DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
- DCHECK(collector_->IsMarked(obj));
+ DCHECK(collector_->IsMarked(obj) != nullptr);
collector_->ForwardObject(obj);
}
@@ -141,8 +126,7 @@
void MarkCompact::ProcessReferences(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap_->GetReferenceProcessor()->ProcessReferences(
- false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
class BitmapSetSlowPathVisitor {
@@ -156,29 +140,29 @@
}
};
-inline void MarkCompact::MarkObject(mirror::Object* obj) {
+inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
if (obj == nullptr) {
- return;
+ return obj;
}
if (kUseBakerOrBrooksReadBarrier) {
// Verify all the objects have the correct forward pointer installed.
obj->AssertReadBarrierPointer();
}
- if (immune_region_.ContainsObject(obj)) {
- return;
- }
- if (objects_before_forwarding_->HasAddress(obj)) {
- if (!objects_before_forwarding_->Set(obj)) {
- MarkStackPush(obj); // This object was not previously marked.
- }
- } else {
- DCHECK(!space_->HasAddress(obj));
- BitmapSetSlowPathVisitor visitor;
- if (!mark_bitmap_->Set(obj, visitor)) {
- // This object was not previously marked.
- MarkStackPush(obj);
+ if (!immune_region_.ContainsObject(obj)) {
+ if (objects_before_forwarding_->HasAddress(obj)) {
+ if (!objects_before_forwarding_->Set(obj)) {
+ MarkStackPush(obj); // This object was not previously marked.
+ }
+ } else {
+ DCHECK(!space_->HasAddress(obj));
+ BitmapSetSlowPathVisitor visitor;
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
+ }
}
}
+ return obj;
}
void MarkCompact::MarkingPhase() {
@@ -240,7 +224,7 @@
TimingLogger::ScopedTiming t2(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable", GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ table->UpdateAndMarkReferences(this);
}
}
}
@@ -272,7 +256,7 @@
}
void MarkCompact::ResizeMarkStack(size_t new_size) {
- std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
+ std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
CHECK_LE(mark_stack_->Size(), new_size);
mark_stack_->Resize(new_size);
for (auto& obj : temp) {
@@ -280,7 +264,7 @@
}
}
-inline void MarkCompact::MarkStackPush(Object* obj) {
+inline void MarkCompact::MarkStackPush(mirror::Object* obj) {
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
ResizeMarkStack(mark_stack_->Capacity() * 2);
}
@@ -288,23 +272,12 @@
mark_stack_->PushBack(obj);
}
-void MarkCompact::ProcessMarkStackCallback(void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->ProcessMarkStack();
-}
-
-mirror::Object* MarkCompact::MarkObjectCallback(mirror::Object* root, void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->MarkObject(root);
- return root;
-}
-
-void MarkCompact::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
- void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->MarkObject(obj_ptr->AsMirrorPtr());
-}
-
-void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref);
+void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
+ if (updating_references_) {
+ UpdateHeapReference(obj_ptr);
+ } else {
+ MarkObject(obj_ptr->AsMirrorPtr());
+ }
}
void MarkCompact::VisitRoots(
@@ -373,6 +346,7 @@
void MarkCompact::UpdateReferences() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ updating_references_ = true;
Runtime* runtime = Runtime::Current();
// Update roots.
UpdateRootVisitor update_root_visitor(this);
@@ -387,7 +361,7 @@
space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
"UpdateImageModUnionTableReferences",
GetTimings());
- table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
+ table->UpdateAndMarkReferences(this);
} else {
// No mod union table, so we need to scan the space using bitmap visit.
@@ -403,14 +377,15 @@
CHECK(!kMovingClasses)
<< "Didn't update large object classes since they are assumed to not move.";
// Update the system weaks, these should already have been swept.
- runtime->SweepSystemWeaks(&MarkedForwardingAddressCallback, this);
+ runtime->SweepSystemWeaks(this);
// Update the objects in the bump pointer space last, these objects don't have a bitmap.
UpdateObjectReferencesVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
visitor);
// Update the reference processor cleared list.
- heap_->GetReferenceProcessor()->UpdateRoots(&MarkedForwardingAddressCallback, this);
+ heap_->GetReferenceProcessor()->UpdateRoots(this);
+ updating_references_ = false;
}
void MarkCompact::Compact() {
@@ -436,10 +411,6 @@
Runtime::Current()->VisitRoots(this);
}
-mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) {
- return reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
-}
-
inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) {
mirror::Object* obj = reference->AsMirrorPtr();
if (obj != nullptr) {
@@ -451,17 +422,12 @@
}
}
-void MarkCompact::UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
- void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->UpdateHeapReference(reference);
-}
-
class UpdateReferenceVisitor {
public:
explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
}
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
@@ -481,7 +447,7 @@
obj->VisitReferences<kMovingClasses>(visitor, visitor);
}
-inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) const {
+inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) {
DCHECK(obj != nullptr);
if (objects_before_forwarding_->HasAddress(obj)) {
DCHECK(objects_before_forwarding_->Test(obj));
@@ -491,33 +457,30 @@
return ret;
}
DCHECK(!space_->HasAddress(obj));
- DCHECK(IsMarked(obj));
return obj;
}
-inline bool MarkCompact::IsMarked(const Object* object) const {
+mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
if (immune_region_.ContainsObject(object)) {
- return true;
+ return object;
+ }
+ if (updating_references_) {
+ return GetMarkedForwardAddress(object);
}
if (objects_before_forwarding_->HasAddress(object)) {
- return objects_before_forwarding_->Test(object);
+ return objects_before_forwarding_->Test(object) ? object : nullptr;
}
- return mark_bitmap_->Test(object);
+ return mark_bitmap_->Test(object) ? object : nullptr;
}
-mirror::Object* MarkCompact::IsMarkedCallback(mirror::Object* object, void* arg) {
- return reinterpret_cast<MarkCompact*>(arg)->IsMarked(object) ? object : nullptr;
-}
-
-bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
- void* arg) {
+bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
// Side effect free since we call this before ever moving objects.
- return reinterpret_cast<MarkCompact*>(arg)->IsMarked(ref_ptr->AsMirrorPtr());
+ return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
}
void MarkCompact::SweepSystemWeaks() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -592,8 +555,7 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
- heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
- &HeapReferenceMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
class MarkCompactMarkObjectVisitor {
@@ -601,7 +563,7 @@
explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
}
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
@@ -618,7 +580,7 @@
};
// Visit all of the references of an object and update.
-void MarkCompact::ScanObject(Object* obj) {
+void MarkCompact::ScanObject(mirror::Object* obj) {
MarkCompactMarkObjectVisitor visitor(this);
obj->VisitReferences<kMovingClasses>(visitor, visitor);
}
@@ -627,7 +589,7 @@
void MarkCompact::ProcessMarkStack() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
while (!mark_stack_->IsEmpty()) {
- Object* obj = mark_stack_->PopBack();
+ mirror::Object* obj = mark_stack_->PopBack();
DCHECK(obj != nullptr);
ScanObject(obj);
}
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index f59a2cd..89d66b5 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -121,23 +121,6 @@
const RootInfo& info)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
- void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void ProcessMarkStackCallback(void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
- static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -145,11 +128,7 @@
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+ mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -184,30 +163,27 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Update the references of objects by using the forwarding addresses.
void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static void UpdateRootCallback(mirror::Object** root, void* arg, const RootInfo& /*root_info*/)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Move objects and restore lock words.
void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Move a single object to its forward address.
void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Mark a single object.
- void MarkObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_);
- bool IsMarked(const mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
// Update a single heap reference.
void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Update all of the references of a single object.
void UpdateObjectReferences(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -242,6 +218,9 @@
// Which lock words we need to restore as we are moving objects.
std::deque<LockWord> lock_words_to_restore_;
+ // Whether or not we are updating references.
+ bool updating_references_;
+
private:
friend class BitmapSetSlowPathVisitor;
friend class CalculateObjectForwardingAddressVisitor;
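
MarkCompact reaches MarkHeapReference through the same mod-union-table
entry point in two different phases, which is why this diff adds the
updating_references_ flag: during marking the method marks, during
UpdateReferences it patches the reference to the forwarded copy
(previously two separate callbacks were passed). A self-contained toy of
the dual-mode dispatch (stand-in types):

    #include <cassert>

    struct Object { Object* forward = nullptr; bool marked = false; };
    struct HeapReference {
      Object* ptr;
      Object* AsMirrorPtr() const { return ptr; }
      void Assign(Object* p) { ptr = p; }
    };

    // One virtual entry point, two behaviors selected by the phase flag,
    // mirroring MarkCompact::MarkHeapReference.
    struct ToyMarkCompact {
      bool updating_references_ = false;

      void MarkHeapReference(HeapReference* ref) {
        if (updating_references_) {
          Object* obj = ref->AsMirrorPtr();
          if (obj->forward != nullptr) {
            ref->Assign(obj->forward);  // patch to the forwarded copy
          }
        } else {
          ref->AsMirrorPtr()->marked = true;  // marking phase
        }
      }
    };

    int main() {
      Object to_space_copy;
      Object obj;
      HeapReference ref{&obj};

      ToyMarkCompact collector;
      collector.MarkHeapReference(&ref);  // marking phase
      assert(obj.marked);

      obj.forward = &to_space_copy;
      collector.updating_references_ = true;
      collector.MarkHeapReference(&ref);  // update phase
      assert(ref.AsMirrorPtr() == &to_space_copy);
      return 0;
    }
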
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 1c9c412..e0d6d6b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -37,7 +37,6 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
-#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
@@ -47,8 +46,6 @@
#include "thread-inl.h"
#include "thread_list.h"
-using ::art::mirror::Object;
-
namespace art {
namespace gc {
namespace collector {
@@ -175,8 +172,7 @@
void MarkSweep::ProcessReferences(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void MarkSweep::PausePhase() {
@@ -273,7 +269,7 @@
TimingLogger::ScopedTiming t(name, GetTimings());
accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
CHECK(mod_union_table != nullptr);
- mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ mod_union_table->UpdateAndMarkReferences(this);
}
}
}
@@ -333,7 +329,7 @@
// Someone else acquired the lock and expanded the mark stack before us.
return;
}
- std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
+ std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
CHECK_LE(mark_stack_->Size(), new_size);
mark_stack_->Resize(new_size);
for (auto& obj : temp) {
@@ -341,7 +337,7 @@
}
}
-inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
+inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
DCHECK(obj != nullptr);
if (MarkObjectParallel(obj)) {
MutexLock mu(Thread::Current(), mark_stack_lock_);
@@ -353,28 +349,18 @@
}
}
-mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->MarkObject(obj);
- return obj;
-}
-
-void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
- reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
-}
-
-bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
- return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
+ return IsMarked(ref->AsMirrorPtr());
}
class MarkSweepMarkObjectSlowPath {
public:
- explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, Object* holder = nullptr,
+ explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
: mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -450,7 +436,8 @@
MemberOffset offset_;
};
-inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffset offset) {
+inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder,
+ MemberOffset offset) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
// Verify all the objects have the correct pointer installed.
@@ -481,7 +468,7 @@
}
}
-inline void MarkSweep::PushOnMarkStack(Object* obj) {
+inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
// Lock is not needed but is here anyways to please annotalysis.
MutexLock mu(Thread::Current(), mark_stack_lock_);
@@ -491,14 +478,14 @@
mark_stack_->PushBack(obj);
}
-inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
+inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
// Verify all the objects have the correct pointer installed.
obj->AssertReadBarrierPointer();
}
if (immune_region_.ContainsObject(obj)) {
- DCHECK(IsMarked(obj));
+ DCHECK(IsMarked(obj) != nullptr);
return false;
}
// Try to take advantage of locality of references within a space, failing this find the space
@@ -511,8 +498,18 @@
return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
+mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
+ MarkObject(obj, nullptr, MemberOffset(0));
+ return obj;
+}
+
+void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
+ MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
+}
+
// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
-inline void MarkSweep::MarkObject(Object* obj, Object* holder, MemberOffset offset) {
+inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder,
+ MemberOffset offset) {
if (obj != nullptr) {
MarkObjectNonNull(obj, holder, offset);
} else if (kCountMarkedObjects) {
@@ -526,7 +523,7 @@
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- CHECK(collector_->IsMarked(root)) << info.ToString();
+ CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
private:
@@ -599,7 +596,8 @@
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
- void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ void operator()(mirror::Object* obj) const ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -631,7 +629,7 @@
class MarkStackTask : public Task {
public:
MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
- StackReference<Object>* mark_stack)
+ StackReference<mirror::Object>* mark_stack)
: mark_sweep_(mark_sweep),
thread_pool_(thread_pool),
mark_stack_pos_(mark_stack_size) {
@@ -655,7 +653,7 @@
MarkSweep* mark_sweep) ALWAYS_INLINE
: chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
- void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
@@ -681,7 +679,7 @@
: chunk_task_(chunk_task) {}
// No thread safety analysis since multiple threads will use this visitor.
- void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
@@ -704,11 +702,12 @@
MarkSweep* const mark_sweep_;
ThreadPool* const thread_pool_;
// Thread local mark stack for this task.
- StackReference<Object> mark_stack_[kMaxSize];
+ StackReference<mirror::Object> mark_stack_[kMaxSize];
// Mark stack position.
size_t mark_stack_pos_;
- ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
@@ -732,12 +731,12 @@
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
- BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
+ BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = nullptr;
+ mirror::Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
- Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
+ mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
@@ -764,7 +763,7 @@
CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap,
uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
- StackReference<Object>* mark_stack_obj, bool clear_card)
+ StackReference<mirror::Object>* mark_stack_obj, bool clear_card)
: MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
bitmap_(bitmap),
begin_(begin),
@@ -815,8 +814,8 @@
TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
GetTimings());
// Try to take some of the mark stack since we can pass this off to the worker tasks.
- StackReference<Object>* mark_stack_begin = mark_stack_->Begin();
- StackReference<Object>* mark_stack_end = mark_stack_->End();
+ StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
+ StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
// Estimated number of work tasks we will create.
const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
@@ -988,13 +987,6 @@
ProcessMarkStack(false);
}
-mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
- if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
- return object;
- }
- return nullptr;
-}
-
void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
ScanGrayObjects(paused, minimum_age);
ProcessMarkStack(paused);
@@ -1015,16 +1007,23 @@
void MarkSweep::SweepSystemWeaks(Thread* self) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
-mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
- reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
- // We don't actually want to sweep the object, so lets return "marked"
- return obj;
-}
+class VerifySystemWeakVisitor : public IsMarkedVisitor {
+ public:
+ explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
-void MarkSweep::VerifyIsLive(const Object* obj) {
+ virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ mark_sweep_->VerifyIsLive(obj);
+ return obj;
+ }
+
+ MarkSweep* const mark_sweep_;
+};
+
+void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
// TODO: Consider live stack? Has this code bitrotted?
CHECK(!heap_->allocation_stack_->Contains(obj))
@@ -1035,7 +1034,8 @@
void MarkSweep::VerifySystemWeaks() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Verify system weaks, uses a special object visitor which returns the input object.
- Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
+ VerifySystemWeakVisitor visitor(this);
+ Runtime::Current()->SweepSystemWeaks(&visitor);
}
class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
@@ -1122,7 +1122,7 @@
ObjectBytePair freed;
ObjectBytePair freed_los;
// How many objects are left in the array, modified after each space is swept.
- StackReference<Object>* objects = allocations->Begin();
+ StackReference<mirror::Object>* objects = allocations->Begin();
size_t count = allocations->Size();
// Change the order to ensure that the non-moving space is swept last, as an optimization.
std::vector<space::ContinuousSpace*> sweep_spaces;
@@ -1150,9 +1150,9 @@
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
- StackReference<Object>* out = objects;
+ StackReference<mirror::Object>* out = objects;
for (size_t i = 0; i < count; ++i) {
- Object* const obj = objects[i].AsMirrorPtr();
+ mirror::Object* const obj = objects[i].AsMirrorPtr();
if (kUseThreadLocalAllocationStack && obj == nullptr) {
continue;
}
@@ -1191,7 +1191,7 @@
std::swap(large_live_objects, large_mark_objects);
}
for (size_t i = 0; i < count; ++i) {
- Object* const obj = objects[i].AsMirrorPtr();
+ mirror::Object* const obj = objects[i].AsMirrorPtr();
// Handle large objects.
if (kUseThreadLocalAllocationStack && obj == nullptr) {
continue;
@@ -1250,16 +1250,15 @@
if (kCountJavaLangRefs) {
++reference_count_;
}
- heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
- this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
-class MarkObjectVisitor {
+class MarkVisitor {
public:
- explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
+ explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
}
- void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
@@ -1275,16 +1274,12 @@
// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
-void MarkSweep::ScanObject(Object* obj) {
- MarkObjectVisitor mark_visitor(this);
+void MarkSweep::ScanObject(mirror::Object* obj) {
+ MarkVisitor mark_visitor(this);
DelayReferenceReferentVisitor ref_visitor(this);
ScanObjectVisit(obj, mark_visitor, ref_visitor);
}
-void MarkSweep::ProcessMarkStackCallback(void* arg) {
- reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
-}
-
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
Thread* self = Thread::Current();
ThreadPool* thread_pool = GetHeap()->GetThreadPool();
@@ -1317,12 +1312,12 @@
} else {
// TODO: Tune this.
static const size_t kFifoSize = 4;
- BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
+ BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = nullptr;
+ mirror::Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
- Object* mark_stack_obj = mark_stack_->PopBack();
+ mirror::Object* mark_stack_obj = mark_stack_->PopBack();
DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
@@ -1344,14 +1339,14 @@
}
}
-inline bool MarkSweep::IsMarked(const Object* object) const {
+inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
if (immune_region_.ContainsObject(object)) {
- return true;
+ return object;
}
if (current_space_bitmap_->HasAddress(object)) {
- return current_space_bitmap_->Test(object);
+ return current_space_bitmap_->Test(object) ? object : nullptr;
}
- return mark_bitmap_->Test(object);
+ return mark_bitmap_->Test(object) ? object : nullptr;
}
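
Note that this IsMarked change widens the contract rather than just renaming
it: the old bool only answered "is it marked", while the pointer return also
answers "where is it now", which moving collectors need. A sketch of the new
contract with simplified types (ToyCollector and its mark table are
illustrative, not ART code):

    #include <unordered_map>

    struct Object {};

    class ToyCollector {
     public:
      // Null when unmarked; otherwise the current address, which equals the
      // input unless the object moved.
      Object* IsMarked(Object* obj) {
        auto it = mark_table_.find(obj);
        return it == mark_table_.end() ? nullptr : it->second;
      }
      void Mark(Object* obj, Object* now_at) { mark_table_[obj] = now_at; }

     private:
      std::unordered_map<Object*, Object*> mark_table_;  // obj -> address
    };

Callers that only need the old boolean compare against nullptr, as the
ternaries in the hunk above do.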
void MarkSweep::FinishPhase() {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index d29d87a..c13755c 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -170,18 +170,9 @@
// Verify that an object is live, either in a live bitmap or in the allocation stack.
void VerifyIsLive(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -194,13 +185,14 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void ProcessMarkStackCallback(void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Marks an object.
- void MarkObject(mirror::Object* obj, mirror::Object* holder = nullptr,
- MemberOffset offset = MemberOffset(0))
+ virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -214,15 +206,9 @@
protected:
// Returns true if the object has its bit set in the mark bitmap.
- bool IsMarked(const mirror::Object* object) const
+ virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -233,7 +219,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if we need to add obj to a mark stack.
- bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+ bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
// Verify the roots of the heap and print out information related to any invalid roots.
// Called in MarkObject, so we may not hold the mutator lock.
@@ -258,6 +244,11 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void ProcessMarkStack() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ProcessMarkStack(false);
+ }
+
// Recursively blackens objects on the mark stack.
void ProcessMarkStack(bool paused)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 82d02e7..2a9f47a 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -157,8 +157,7 @@
void SemiSpace::ProcessReferences(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void SemiSpace::MarkingPhase() {
@@ -336,7 +335,7 @@
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable",
GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ table->UpdateAndMarkReferences(this);
DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
} else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
// If the space has no mod union table (the non-moving space and main spaces when the bump
@@ -351,8 +350,7 @@
CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
if (rem_set != nullptr) {
TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
- rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
- from_space_, this);
+ rem_set->UpdateAndMarkReferences(from_space_, this);
if (kIsDebugBuild) {
// Verify that there are no from-space references that
// remain in the space, that is, the remembered set (and the
@@ -583,24 +581,14 @@
return forward_address;
}
-void SemiSpace::ProcessMarkStackCallback(void* arg) {
- reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
-}
-
-mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
+mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
- reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
+ MarkObject(&ref);
return ref.AsMirrorPtr();
}
-void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
- void* arg) {
- reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
-}
-
-void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg) {
- reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
+void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
+ MarkObject(obj_ptr);
}
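
The MarkObject(Object*) override above reuses the reference-based marking path
by round-tripping through a stack-local reference. A sketch of the trick with
simplified types (StackReference here is a stand-in, not the ART template):

    struct Object {};

    struct StackReference {  // stand-in for StackReference<mirror::Object>
      static StackReference FromMirrorPtr(Object* obj) { return {obj}; }
      Object* AsMirrorPtr() const { return ptr_; }
      Object* ptr_;
    };

    class Collector {
     public:
      // Slot-based marking: a moving collector may overwrite the slot with a
      // forwarding address.
      void MarkObject(StackReference* ref) { (void)ref; }

      // Pointer-based marking, layered on the slot-based path.
      Object* MarkObject(Object* root) {
        StackReference ref = StackReference::FromMirrorPtr(root);
        MarkObject(&ref);          // shared path may forward the object
        return ref.AsMirrorPtr();  // read back the possibly new address
      }
    };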
void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
@@ -628,29 +616,9 @@
Runtime::Current()->VisitRoots(this);
}
-bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
- void* arg) {
- mirror::Object* obj = object->AsMirrorPtr();
- mirror::Object* new_obj =
- reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
- if (new_obj == nullptr) {
- return false;
- }
- if (new_obj != obj) {
- // Write barrier is not necessary since it still points to the same object, just at a different
- // address.
- object->Assign(new_obj);
- }
- return true;
-}
-
-mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
- return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
-}
-
void SemiSpace::SweepSystemWeaks() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -688,8 +656,7 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
- heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
- &HeapReferenceMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
class SemiSpaceMarkObjectVisitor {
@@ -746,8 +713,7 @@
}
}
-inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
// All immune objects are assumed marked.
if (from_space_->HasAddress(obj)) {
// Returns either the forwarding address or null.
@@ -759,6 +725,20 @@
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
+bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+ mirror::Object* obj = object->AsMirrorPtr();
+ mirror::Object* new_obj = IsMarked(obj);
+ if (new_obj == nullptr) {
+ return false;
+ }
+ if (new_obj != obj) {
+ // Write barrier is not necessary since it still points to the same object, just at a different
+ // address.
+ object->Assign(new_obj);
+ }
+ return true;
+}
+
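
IsMarkedHeapReference above combines the mark query with an in-place fix-up of
the reference slot. A standalone sketch of that pattern (simplified types;
HeapReference stands in for mirror::HeapReference<mirror::Object>):

    struct Object {};

    struct HeapReference {
      Object* AsMirrorPtr() const { return ptr_; }
      void Assign(Object* obj) { ptr_ = obj; }
      Object* ptr_;
    };

    class Collector {
     public:
      virtual ~Collector() {}
      virtual Object* IsMarked(Object* obj) = 0;  // null if unmarked

      bool IsMarkedHeapReference(HeapReference* ref) {
        Object* obj = ref->AsMirrorPtr();
        Object* new_obj = IsMarked(obj);
        if (new_obj == nullptr) {
          return false;          // white referent: the caller may clear it
        }
        if (new_obj != obj) {
          ref->Assign(new_obj);  // moved: store the forwarding address
        }
        return true;
      }
    };

Because the slot still names the same logical object, no write barrier is
needed, as the comment in the hunk notes.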
void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
DCHECK(to_space != nullptr);
to_space_ = to_space;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 3c25f53..6b7ea0d 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -103,6 +103,12 @@
void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
void ScanObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -140,19 +146,6 @@
const RootInfo& info) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void ProcessMarkStackCallback(void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
- static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -163,15 +156,11 @@
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
+ virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6317351..795d2a2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3055,8 +3055,13 @@
}
}
-static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
-}
+struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
+ virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+ return obj;
+ }
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
+ }
+};
void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
Thread* const self = Thread::Current();
@@ -3085,7 +3090,8 @@
ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
for (const auto& table_pair : mod_union_tables_) {
accounting::ModUnionTable* mod_union_table = table_pair.second;
- mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
+ IdentityMarkHeapReferenceVisitor visitor;
+ mod_union_table->UpdateAndMarkReferences(&visitor);
mod_union_table->Verify();
}
}
@@ -3714,11 +3720,11 @@
}
}
-void Heap::SweepAllocationRecords(IsMarkedCallback* visitor, void* arg) const {
+void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (IsAllocTrackingEnabled()) {
- GetAllocationRecords()->SweepAllocationRecords(visitor, arg);
+ GetAllocationRecords()->SweepAllocationRecords(visitor);
}
}
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 2df5a4e..ee3d510 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -705,7 +705,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
- void SweepAllocationRecords(IsMarkedCallback* visitor, void* arg) const
+ void SweepAllocationRecords(IsMarkedVisitor* visitor) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c08ed0e..256cdd2 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,6 +17,7 @@
#include "reference_processor.h"
#include "base/time_utils.h"
+#include "collector/garbage_collector.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
@@ -34,7 +35,7 @@
static constexpr bool kAsyncReferenceQueueAdd = false;
ReferenceProcessor::ReferenceProcessor()
- : process_references_args_(nullptr, nullptr, nullptr),
+ : collector_(nullptr),
preserving_references_(false),
condition_("reference processor condition", *Locks::reference_processor_lock_) ,
soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
@@ -83,16 +84,14 @@
}
// Try to see if the referent is already marked by using the is_marked_callback. We can return
// it to the mutator as long as the GC is not preserving references.
- IsHeapReferenceMarkedCallback* const is_marked_callback =
- process_references_args_.is_marked_callback_;
- if (LIKELY(is_marked_callback != nullptr)) {
+ if (LIKELY(collector_ != nullptr)) {
// If it's null it means not marked, but it could become marked if the referent is reachable
// by finalizer referents. So we can not return in this case and must block. Otherwise, we
// can return it to the mutator as long as the GC is not preserving references, in which
// case only black nodes can be safely returned. If the GC is preserving references, the
// mutator could take a white field from a grey or white node and move it somewhere else
// in the heap causing corruption since this field would get swept.
- if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
+ if (collector_->IsMarkedHeapReference(referent_addr)) {
if (!preserving_references_ ||
(LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
return referent_addr->AsMirrorPtr();
@@ -104,16 +103,6 @@
return reference->GetReferent();
}
-bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
- void* arg) {
- auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
- // TODO: Add smarter logic for preserving soft references.
- mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
- DCHECK(new_obj != nullptr);
- obj->Assign(new_obj);
- return true;
-}
-
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
MutexLock mu(self, *Locks::reference_processor_lock_);
preserving_references_ = true;
@@ -129,17 +118,12 @@
// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
bool clear_soft_references,
- IsHeapReferenceMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback,
- void* arg) {
+ collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
Thread* self = Thread::Current();
{
MutexLock mu(self, *Locks::reference_processor_lock_);
- process_references_args_.is_marked_callback_ = is_marked_callback;
- process_references_args_.mark_callback_ = mark_object_callback;
- process_references_args_.arg_ = arg;
+ collector_ = collector;
if (!kUseReadBarrier) {
CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
} else {
@@ -154,16 +138,16 @@
if (concurrent) {
StartPreservingReferences(self);
}
- soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
- &process_references_args_);
- process_mark_stack_callback(arg);
+ // TODO: Add smarter logic for preserving soft references.
+ soft_reference_queue_.ForwardSoftReferences(collector);
+ collector->ProcessMarkStack();
if (concurrent) {
StopPreservingReferences(self);
}
}
// Clear all remaining soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
{
TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
"(Paused)EnqueueFinalizerReferences", timings);
@@ -171,18 +155,17 @@
StartPreservingReferences(self);
}
// Preserve all white objects with finalize methods and schedule them for finalization.
- finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
- mark_object_callback, arg);
- process_mark_stack_callback(arg);
+ finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
+ collector->ProcessMarkStack();
if (concurrent) {
StopPreservingReferences(self);
}
}
// Clear all finalizer referent reachable soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
// Clear all phantom references with white referents.
- phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
// At this point all reference queues other than the cleared references should be empty.
DCHECK(soft_reference_queue_.IsEmpty());
DCHECK(weak_reference_queue_.IsEmpty());
@@ -194,12 +177,10 @@
// could result in a stale is_marked_callback_ being called before the reference processing
// starts since there is a small window of time where slow_path_enabled_ is enabled but the
// callback isn't yet set.
- process_references_args_.is_marked_callback_ = nullptr;
- if (!kUseReadBarrier) {
- if (concurrent) {
- // Done processing, disable the slow path and broadcast to the waiters.
- DisableSlowPath(self);
- }
+ collector_ = nullptr;
+ if (!kUseReadBarrier && concurrent) {
+ // Done processing, disable the slow path and broadcast to the waiters.
+ DisableSlowPath(self);
}
}
}
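
After this hunk, ProcessReferences receives one GarbageCollector* in place of
three function pointers plus a context pointer. A sketch of the resulting
wiring, assuming (as this patch arranges) that GarbageCollector implements the
visitor interfaces:

    struct Object {};
    struct HeapReference { Object* ptr_; };

    class GarbageCollector {  // simplified: the collector is the callback bundle
     public:
      virtual ~GarbageCollector() {}
      virtual bool IsMarkedHeapReference(HeapReference* ref) = 0;
      virtual Object* MarkObject(Object* obj) = 0;
      virtual void ProcessMarkStack() = 0;
    };

    class ReferenceProcessor {
     public:
      void ProcessReferences(GarbageCollector* collector) {
        collector_ = collector;    // stashed so GetReferent can consult it
        // ... forward soft references, then flush newly marked objects:
        collector->ProcessMarkStack();
        collector_ = nullptr;      // cleared once processing completes
      }

     private:
      GarbageCollector* collector_ = nullptr;
    };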
@@ -207,13 +188,12 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsHeapReferenceMarkedCallback* is_marked_callback,
- void* arg) {
+ collector::GarbageCollector* collector) {
// klass can be the class of the old object if the visitor already updated the class of ref.
DCHECK(klass != nullptr);
DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
- if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
+ if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
// We need to check that the references haven't already been enqueued since we can end up
@@ -233,8 +213,8 @@
}
}
-void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
- cleared_references_.UpdateRoots(callback, arg);
+void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
+ cleared_references_.UpdateRoots(visitor);
}
class ClearedReferenceTask : public HeapTask {
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 284d13c..95877d1 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -28,6 +28,7 @@
class TimingLogger;
namespace mirror {
+class Class;
class FinalizerReference;
class Object;
class Reference;
@@ -35,18 +36,18 @@
namespace gc {
+namespace collector {
+class GarbageCollector;
+} // namespace collector
+
class Heap;
// Used to process java.lang.References concurrently or paused.
class ReferenceProcessor {
public:
explicit ReferenceProcessor();
- static bool PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
- IsHeapReferenceMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
+ gc::collector::GarbageCollector* collector)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
LOCKS_EXCLUDED(Locks::reference_processor_lock_);
@@ -60,9 +61,9 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
+ collector::GarbageCollector* collector)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void UpdateRoots(IsMarkedCallback* callback, void* arg)
+ void UpdateRoots(IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
@@ -71,21 +72,6 @@
Locks::reference_queue_finalizer_references_lock_);
private:
- class ProcessReferencesArgs {
- public:
- ProcessReferencesArgs(IsHeapReferenceMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_callback, void* arg)
- : is_marked_callback_(is_marked_callback), mark_callback_(mark_callback), arg_(arg) {
- }
-
- // The is marked callback is null when the args aren't set up.
- IsHeapReferenceMarkedCallback* is_marked_callback_;
- MarkObjectCallback* mark_callback_;
- void* arg_;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ProcessReferencesArgs);
- };
bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called by ProcessReferences.
void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
@@ -95,8 +81,9 @@
// referents.
void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
- // Process args, used by the GetReferent to return referents which are already marked.
- ProcessReferencesArgs process_references_args_ GUARDED_BY(Locks::reference_processor_lock_);
+ // Collector which is clearing references, used by GetReferent to return referents which are
+ // already marked.
+ collector::GarbageCollector* collector_ GUARDED_BY(Locks::reference_processor_lock_);
// Boolean for whether or not we are preserving references (either soft references or finalizers).
// If this is true, then we cannot return a referent (see comment in GetReferent).
bool preserving_references_ GUARDED_BY(Locks::reference_processor_lock_);
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 4ba3983..f505428 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -137,12 +137,12 @@
}
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
- IsHeapReferenceMarkedCallback* preserve_callback,
- void* arg) {
+ collector::GarbageCollector* collector) {
while (!IsEmpty()) {
mirror::Reference* ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr && !preserve_callback(referent_addr, arg)) {
+ if (referent_addr->AsMirrorPtr() != nullptr &&
+ !collector->IsMarkedHeapReference(referent_addr)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -157,14 +157,13 @@
}
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
- IsHeapReferenceMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- void* arg) {
+ collector::GarbageCollector* collector) {
while (!IsEmpty()) {
mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr && !is_marked_callback(referent_addr, arg)) {
- mirror::Object* forward_address = mark_object_callback(referent_addr->AsMirrorPtr(), arg);
+ if (referent_addr->AsMirrorPtr() != nullptr &&
+ !collector->IsMarkedHeapReference(referent_addr)) {
+ mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// If the referent is non-null the reference must be queuable.
DCHECK(ref->IsEnqueuable());
// Move the updated referent to the zombie field.
@@ -180,8 +179,7 @@
}
}
-void ReferenceQueue::ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback,
- void* arg) {
+void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
if (UNLIKELY(IsEmpty())) {
return;
}
@@ -190,15 +188,15 @@
do {
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr) {
- UNUSED(preserve_callback(referent_addr, arg));
+ visitor->MarkHeapReference(referent_addr);
}
ref = ref->GetPendingNext();
} while (LIKELY(ref != head));
}
-void ReferenceQueue::UpdateRoots(IsMarkedCallback* callback, void* arg) {
+void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
if (list_ != nullptr) {
- list_ = down_cast<mirror::Reference*>(callback(list_, arg));
+ list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
}
}
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index c45be85..7d9ddf6 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -36,6 +36,10 @@
namespace gc {
+namespace collector {
+class GarbageCollector;
+} // namespace collector
+
class Heap;
// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
@@ -65,20 +69,19 @@
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
- IsHeapReferenceMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback, void* arg)
+ collector::GarbageCollector* collector)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
// biased toward saving are blackened and also removed from the list.
- void ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, void* arg)
+ void ForwardSoftReferences(MarkObjectVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Unlink the reference list, clearing reference objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
void ClearWhiteReferences(ReferenceQueue* cleared_references,
- IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
+ collector::GarbageCollector* collector)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -95,7 +98,7 @@
}
// Visits list_, currently only used for the mark compact GC.
- void UpdateRoots(IsMarkedCallback* callback, void* arg)
+ void UpdateRoots(IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2a06ab3..6ea047f 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -19,6 +19,7 @@
#include <memory>
#include "gc_root-inl.h"
+#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
@@ -296,9 +297,9 @@
return LookupWeak(s) == s;
}
-void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
+void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- weak_interns_.SweepWeaks(callback, arg);
+ weak_interns_.SweepWeaks(visitor);
}
void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
@@ -401,16 +402,16 @@
}
}
-void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) {
- SweepWeaks(&pre_zygote_table_, callback, arg);
- SweepWeaks(&post_zygote_table_, callback, arg);
+void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
+ SweepWeaks(&pre_zygote_table_, visitor);
+ SweepWeaks(&post_zygote_table_, visitor);
}
-void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
+void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
for (auto it = set->begin(), end = set->end(); it != end;) {
// This does not need a read barrier because this is called by GC.
mirror::Object* object = it->Read<kWithoutReadBarrier>();
- mirror::Object* new_object = callback(object, arg);
+ mirror::Object* new_object = visitor->IsMarked(object);
if (new_object == nullptr) {
it = set->Erase(it);
} else {
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 53f6f75..67a8b34 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -68,7 +68,7 @@
// Interns a potentially new string in the 'weak' table. (See above.)
mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg)
+ void SweepInternTableWeaks(IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -143,7 +143,7 @@
void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void SweepWeaks(IsMarkedCallback* callback, void* arg)
+ void SweepWeaks(IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
@@ -163,7 +163,7 @@
typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
- void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg)
+ void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 194d0af..c987180 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -60,9 +60,9 @@
EXPECT_EQ(2U, t.Size());
}
-class TestPredicate {
+class TestPredicate : public IsMarkedVisitor {
public:
- bool IsMarked(const mirror::Object* s) const {
+ mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
bool erased = false;
for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
@@ -72,7 +72,7 @@
}
}
EXPECT_TRUE(erased);
- return false;
+ return nullptr;
}
void Expect(const mirror::String* s) {
@@ -87,13 +87,6 @@
mutable std::vector<const mirror::String*> expected_;
};
-mirror::Object* IsMarkedSweepingCallback(mirror::Object* object, void* arg) {
- if (reinterpret_cast<TestPredicate*>(arg)->IsMarked(object)) {
- return object;
- }
- return nullptr;
-}
-
TEST_F(InternTableTest, SweepInternTableWeaks) {
ScopedObjectAccess soa(Thread::Current());
InternTable t;
@@ -115,7 +108,7 @@
p.Expect(s1.Get());
{
ReaderMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
- t.SweepInternTableWeaks(IsMarkedSweepingCallback, &p);
+ t.SweepInternTableWeaks(&p);
}
EXPECT_EQ(2U, t.Size());
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 36adbea..36e3aa3 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -766,7 +766,7 @@
return native_method;
}
-void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
+void JavaVMExt::SweepJniWeakGlobals(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), weak_globals_lock_);
Runtime* const runtime = Runtime::Current();
for (auto* entry : weak_globals_) {
@@ -774,7 +774,7 @@
if (!entry->IsNull()) {
// Since this is called by the GC, we don't need a read barrier.
mirror::Object* obj = entry->Read<kWithoutReadBarrier>();
- mirror::Object* new_obj = callback(obj, arg);
+ mirror::Object* new_obj = visitor->IsMarked(obj);
if (new_obj == nullptr) {
new_obj = runtime->GetClearedJniWeakGlobal();
}
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 694a545..97fbbc5 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -120,7 +120,7 @@
void DeleteWeakGlobalRef(Thread* self, jweak obj);
- void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg)
+ void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index bc89890..fd9c1b1 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1163,7 +1163,7 @@
list_.push_front(m);
}
-void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
+void MonitorList::SweepMonitorList(IsMarkedVisitor* visitor) {
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
for (auto it = list_.begin(); it != list_.end(); ) {
@@ -1171,7 +1171,7 @@
// Disable the read barrier in GetObject() as this is called by GC.
mirror::Object* obj = m->GetObject<kWithoutReadBarrier>();
// The object of a monitor can be null if we have deflated it.
- mirror::Object* new_obj = obj != nullptr ? callback(obj, arg) : nullptr;
+ mirror::Object* new_obj = obj != nullptr ? visitor->IsMarked(obj) : nullptr;
if (new_obj == nullptr) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
<< obj;
@@ -1184,29 +1184,30 @@
}
}
-struct MonitorDeflateArgs {
- MonitorDeflateArgs() : self(Thread::Current()), deflate_count(0) {}
- Thread* const self;
- size_t deflate_count;
+class MonitorDeflateVisitor : public IsMarkedVisitor {
+ public:
+ MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
+
+ virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (Monitor::Deflate(self_, object)) {
+ DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
+ ++deflate_count_;
+ // If we deflated, return null so that the monitor gets removed from the array.
+ return nullptr;
+ }
+ return object; // Monitor was not deflated.
+ }
+
+ Thread* const self_;
+ size_t deflate_count_;
};
-static mirror::Object* MonitorDeflateCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- MonitorDeflateArgs* args = reinterpret_cast<MonitorDeflateArgs*>(arg);
- if (Monitor::Deflate(args->self, object)) {
- DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
- ++args->deflate_count;
- // If we deflated, return null so that the monitor gets removed from the array.
- return nullptr;
- }
- return object; // Monitor was not deflated.
-}
-
size_t MonitorList::DeflateMonitors() {
- MonitorDeflateArgs args;
- Locks::mutator_lock_->AssertExclusiveHeld(args.self);
- SweepMonitorList(MonitorDeflateCallback, &args);
- return args.deflate_count;
+ MonitorDeflateVisitor visitor;
+ Locks::mutator_lock_->AssertExclusiveHeld(visitor.self_);
+ SweepMonitorList(&visitor);
+ return visitor.deflate_count_;
}
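
MonitorDeflateVisitor also shows the output side of the conversion: results
that used to ride in a one-off args struct become plain fields, written during
the sweep and read off the visitor afterwards. A reduced sketch (TryDeflate is
a placeholder for Monitor::Deflate):

    #include <cstddef>

    struct Object {};

    class IsMarkedVisitor {
     public:
      virtual ~IsMarkedVisitor() {}
      virtual Object* IsMarked(Object* obj) = 0;
    };

    class DeflateVisitor : public IsMarkedVisitor {
     public:
      Object* IsMarked(Object* obj) override {
        if (TryDeflate(obj)) {
          ++deflate_count_;  // result accumulates on the visitor itself
          return nullptr;    // null asks the sweeper to unlink this monitor
        }
        return obj;          // not deflated: keep it on the list
      }

      size_t deflate_count_ = 0;

     private:
      static bool TryDeflate(Object* /*obj*/) { return true; }  // placeholder
    };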
MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) {
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 8f6fb75..09a6cb6 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -287,7 +287,7 @@
void Add(Monitor* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepMonitorList(IsMarkedCallback* callback, void* arg)
+ void SweepMonitorList(IsMarkedVisitor* visitor)
LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index df34ce7..4d726ec 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -21,31 +21,30 @@
namespace art {
namespace mirror {
- class Class;
class Object;
template<class MirrorType> class HeapReference;
- class Reference;
} // namespace mirror
-class StackVisitor;
// A callback for visiting an object in the heap.
typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-// A callback used for marking an object, returns the new address of the object if the object moved.
-typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
-typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
-typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref,
- void* arg);
+class IsMarkedVisitor {
+ public:
+ virtual ~IsMarkedVisitor() {}
+ // Returns null if an object is not marked, otherwise returns the new address of that object.
+ // May return the same address as the input if the object did not move.
+ virtual mirror::Object* IsMarked(mirror::Object* obj) = 0;
+};
-// A callback for testing if an object is marked, returns null if not marked, otherwise the new
-// address the object (if the object didn't move, returns the object input parameter).
-typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
-
-// Returns true if the object in the heap reference is marked, if it is marked and has moved the
-// callback updates the heap reference contain the new value.
-typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
- void* arg) WARN_UNUSED;
-typedef void (ProcessMarkStackCallback)(void* arg);
+class MarkObjectVisitor {
+ public:
+ virtual ~MarkObjectVisitor() {}
+ // Mark an object and return the new address of the object.
+ // May return the same address as the input if the object did not move.
+ virtual mirror::Object* MarkObject(mirror::Object* obj) = 0;
+ // Mark an object and update the value stored in the heap reference if the object moved.
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj) = 0;
+};
} // namespace art
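
This header hunk is the heart of the change: four function-pointer typedefs
collapse into two small interfaces. A before/after sketch of a call site
(SweepWeaks is a hypothetical consumer, not an ART function):

    struct Object {};

    // Before: an untyped pair travels through every sweeping API.
    //   typedef Object* (IsMarkedCallback)(Object* object, void* arg);
    //   void SweepWeaks(IsMarkedCallback* callback, void* arg);
    //   SweepWeaks(&MarkSweep::IsMarkedCallback, this);

    // After: the contract is a virtual method on an interface the collector
    // already implements, so call sites pass a single typed pointer.
    class IsMarkedVisitor {
     public:
      virtual ~IsMarkedVisitor() {}
      virtual Object* IsMarked(Object* obj) = 0;
    };

    void SweepWeaks(IsMarkedVisitor* visitor) {
      (void)visitor;  // a real sweeper would walk its weak table here
    }
    //   SweepWeaks(this);  // any collector that is-a IsMarkedVisitor

The trade is an indirect call per visit; in exchange the reinterpret_casts of
void* disappear and lock annotations attach directly to the overriding methods.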
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1aab933..3b0ca9e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -403,11 +403,11 @@
}
}
-void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) {
- GetInternTable()->SweepInternTableWeaks(visitor, arg);
- GetMonitorList()->SweepMonitorList(visitor, arg);
- GetJavaVM()->SweepJniWeakGlobals(visitor, arg);
- GetHeap()->SweepAllocationRecords(visitor, arg);
+void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
+ GetInternTable()->SweepInternTableWeaks(visitor);
+ GetMonitorList()->SweepMonitorList(visitor);
+ GetJavaVM()->SweepJniWeakGlobals(visitor);
+ GetHeap()->SweepAllocationRecords(visitor);
}
bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index c1fa55a..9ee96a3 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -336,7 +336,7 @@
// Sweep system weaks, the system weak is deleted if the visitor returns null. Otherwise, the
// system weak is updated to be the visitor's returned value.
- void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
+ void SweepSystemWeaks(IsMarkedVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Constant roots are the roots which never change after the runtime is initialized, they only