Clean up GC callbacks to be virtual methods
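
Replace the C-style GC callbacks (IsHeapReferenceMarkedCallback,
MarkObjectCallback, ProcessMarkStackCallback and their void* arg) that
were threaded through ReferenceProcessor and ReferenceQueue with calls
to virtual methods on collector::GarbageCollector, passing the current
collector around instead of callback/arg pairs. UpdateRoots now takes
an IsMarkedVisitor instead of an IsMarkedCallback.

Rough sketch of the shape of the collector interface this change calls
into (not the exact ART declarations; only IsMarkedHeapReference and
ProcessMarkStack are visible in this diff, and the include and the
pure-virtual specifiers below are assumptions):

  // Hypothetical sketch of runtime/gc/collector/garbage_collector.h.
  #include "mirror/object_reference.h"  // assumed home of mirror::HeapReference

  namespace art {
  namespace gc {
  namespace collector {

  class GarbageCollector {
   public:
    virtual ~GarbageCollector() {}
    // Returns true if the referent is already marked; a moving collector may
    // also update the reference in place to point at the moved object.
    virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) = 0;
    // Drains the mark stack so that objects marked while preserving references
    // are fully traced before white references are cleared.
    virtual void ProcessMarkStack() = 0;
  };

  }  // namespace collector
  }  // namespace gc
  }  // namespace art
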
Change-Id: Ia08034a4e5931c4fcb329c3bd3c4b1f301135735
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c08ed0e..256cdd2 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,6 +17,7 @@
#include "reference_processor.h"
#include "base/time_utils.h"
+#include "collector/garbage_collector.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
@@ -34,7 +35,7 @@
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
- : process_references_args_(nullptr, nullptr, nullptr),
+ : collector_(nullptr),
preserving_references_(false),
condition_("reference processor condition", *Locks::reference_processor_lock_) ,
soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
@@ -83,16 +84,14 @@
}
-  // Try to see if the referent is already marked by using the is_marked_callback. We can return
-  // it to the mutator as long as the GC is not preserving references.
+  // Try to see if the referent is already marked by asking the collector. We can return it to
+  // the mutator as long as the GC is not preserving references.
- IsHeapReferenceMarkedCallback* const is_marked_callback =
- process_references_args_.is_marked_callback_;
- if (LIKELY(is_marked_callback != nullptr)) {
+ if (LIKELY(collector_ != nullptr)) {
// If it's null it means not marked, but it could become marked if the referent is reachable
// by finalizer referents. So we can not return in this case and must block. Otherwise, we
// can return it to the mutator as long as the GC is not preserving references, in which
// case only black nodes can be safely returned. If the GC is preserving references, the
// mutator could take a white field from a grey or white node and move it somewhere else
// in the heap causing corruption since this field would get swept.
- if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
+ if (collector_->IsMarkedHeapReference(referent_addr)) {
if (!preserving_references_ ||
(LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
return referent_addr->AsMirrorPtr();
@@ -104,16 +103,6 @@
return reference->GetReferent();
}

-bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
- void* arg) {
- auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
- // TODO: Add smarter logic for preserving soft references.
- mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
- DCHECK(new_obj != nullptr);
- obj->Assign(new_obj);
- return true;
-}
-
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
MutexLock mu(self, *Locks::reference_processor_lock_);
preserving_references_ = true;
@@ -129,17 +118,12 @@
// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
bool clear_soft_references,
- IsHeapReferenceMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback,
- void* arg) {
+ collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
Thread* self = Thread::Current();
{
MutexLock mu(self, *Locks::reference_processor_lock_);
- process_references_args_.is_marked_callback_ = is_marked_callback;
- process_references_args_.mark_callback_ = mark_object_callback;
- process_references_args_.arg_ = arg;
+ collector_ = collector;
if (!kUseReadBarrier) {
CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
} else {
@@ -154,16 +138,16 @@
if (concurrent) {
StartPreservingReferences(self);
}
- soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
- &process_references_args_);
- process_mark_stack_callback(arg);
+ // TODO: Add smarter logic for preserving soft references.
+ soft_reference_queue_.ForwardSoftReferences(collector);
+ collector->ProcessMarkStack();
if (concurrent) {
StopPreservingReferences(self);
}
}
// Clear all remaining soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
{
TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
"(Paused)EnqueueFinalizerReferences", timings);
@@ -171,18 +155,17 @@
StartPreservingReferences(self);
}
// Preserve all white objects with finalize methods and schedule them for finalization.
- finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
- mark_object_callback, arg);
- process_mark_stack_callback(arg);
+ finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
+ collector->ProcessMarkStack();
if (concurrent) {
StopPreservingReferences(self);
}
}
// Clear all finalizer referent reachable soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
// Clear all phantom references with white referents.
- phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
// At this point all reference queues other than the cleared references should be empty.
DCHECK(soft_reference_queue_.IsEmpty());
DCHECK(weak_reference_queue_.IsEmpty());
@@ -194,12 +177,10 @@
-  // could result in a stale is_marked_callback_ being called before the reference processing
-  // starts since there is a small window of time where slow_path_enabled_ is enabled but the
-  // callback isn't yet set.
+  // could result in a stale collector_ being used before the reference processing starts since
+  // there is a small window of time where slow_path_enabled_ is enabled but the collector
+  // isn't yet set.
- process_references_args_.is_marked_callback_ = nullptr;
- if (!kUseReadBarrier) {
- if (concurrent) {
- // Done processing, disable the slow path and broadcast to the waiters.
- DisableSlowPath(self);
- }
+ collector_ = nullptr;
+ if (!kUseReadBarrier && concurrent) {
+ // Done processing, disable the slow path and broadcast to the waiters.
+ DisableSlowPath(self);
}
}
}
@@ -207,13 +188,12 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsHeapReferenceMarkedCallback* is_marked_callback,
- void* arg) {
+ collector::GarbageCollector* collector) {
// klass can be the class of the old object if the visitor already updated the class of ref.
DCHECK(klass != nullptr);
DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
- if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
+ if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
// We need to check that the references haven't already been enqueued since we can end up
@@ -233,8 +213,8 @@
}
}

-void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
- cleared_references_.UpdateRoots(callback, arg);
+void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
+ cleared_references_.UpdateRoots(visitor);
}

class ClearedReferenceTask : public HeapTask {