Change reference processing to use heap references.
Removes several SetReferent calls that were used to update moved
referents, and cleans up other aspects of the code.
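
With the new contract, an IsHeapReferenceMarkedCallback both reports mark
status and fixes up a moved referent in place, so callers no longer need to
store a forward address with SetReferent. A minimal sketch of such a
callback for a hypothetical moving collector (the Collector type and its
GetForwardAddress helper are illustrative, not part of this change):

    // Sketch only: reports whether the referent is marked and, for a
    // moving collector, updates the heap reference to the new address.
    static bool IsHeapReferenceMarked(mirror::HeapReference<mirror::Object>* ref, void* arg) {
      Collector* collector = reinterpret_cast<Collector*>(arg);  // Hypothetical collector type.
      mirror::Object* obj = ref->AsMirrorPtr();
      mirror::Object* new_obj = collector->GetForwardAddress(obj);  // nullptr => not marked.
      if (new_obj == nullptr) {
        return false;
      }
      if (new_obj != obj) {
        ref->Assign(new_obj);  // Fix up the moved referent in place.
      }
      return true;
    }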
Change-Id: Ibcb4d713fadea617efee7e936352ddf77ff4c370
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 7988af7..3ff9889 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -44,36 +44,35 @@
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
mirror::Object* const referent = reference->GetReferent();
- if (LIKELY(!slow_path_enabled_)) {
+ // If the referent is null then it is already cleared; we can just return null since there is no
+ // scenario where it becomes non-null during the reference processing phase.
+ if (LIKELY(!slow_path_enabled_) || referent == nullptr) {
return referent;
}
- // Another fast path, the referent is cleared, we can just return null since there is no scenario
- // where it becomes non-null.
- if (referent == nullptr) {
- return nullptr;
- }
MutexLock mu(self, lock_);
while (slow_path_enabled_) {
- mirror::Object* const referent = reference->GetReferent();
- // If the referent became cleared, return it.
- if (referent == nullptr) {
+ mirror::HeapReference<mirror::Object>* const referent_addr =
+ reference->GetReferentReferenceAddr();
+ // If the referent was cleared, return null. No barrier is needed since thread roots can't be
+ // updated until after we leave the function, as we hold the mutator lock.
+ if (referent_addr->AsMirrorPtr() == nullptr) {
return nullptr;
}
// Try to see if the referent is already marked by using the is_marked_callback. We can return
- // it to the mutator as long as the GC is not preserving references. If the GC is
- IsMarkedCallback* const is_marked_callback = process_references_args_.is_marked_callback_;
+ // it to the mutator as long as the GC is not preserving references.
+ IsHeapReferenceMarkedCallback* const is_marked_callback =
+ process_references_args_.is_marked_callback_;
if (LIKELY(is_marked_callback != nullptr)) {
- mirror::Object* const obj = is_marked_callback(referent, process_references_args_.arg_);
// If it's null it means not marked, but it could become marked if the referent is reachable
// by finalizer referents. So we cannot return in this case and must block. Otherwise, we
// can return it to the mutator as long as the GC is not preserving references, in which
// case only black nodes can be safely returned. If the GC is preserving references, the
// mutator could take a white field from a grey or white node and move it somewhere else
// in the heap causing corruption since this field would get swept.
- if (obj != nullptr) {
+ if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
if (!preserving_references_ ||
(LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
- return obj;
+ return referent_addr->AsMirrorPtr();
}
}
}
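
(Aside: mirror::HeapReference<> used above is ART's in-heap reference slot, and
GetReferentReferenceAddr() exposes the slot itself rather than a copied pointer.
A simplified model, assuming a 32-bit compressed representation; the field
layout here is illustrative, not the real definition:)

    // Simplified model of an in-heap reference slot. Reading yields a raw
    // Object*, and Assign() stores a new target directly into the object,
    // which is what lets the GC fix up moved referents in place.
    template <typename T>
    class HeapReference {
     public:
      T* AsMirrorPtr() const {
        return reinterpret_cast<T*>(static_cast<uintptr_t>(reference_));
      }
      void Assign(T* other) {
        reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(other));
      }
     private:
      uint32_t reference_;  // Assumed compressed 32-bit form; real ART may differ.
    };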
@@ -82,10 +81,14 @@
return reference->GetReferent();
}
-mirror::Object* ReferenceProcessor::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
+bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
+ void* arg) {
auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
- // TODO: Not preserve all soft references.
- return args->mark_callback_(obj, args->arg_);
+ // TODO: Add smarter logic for preserving soft references.
+ mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
+ DCHECK(new_obj != nullptr);
+ obj->Assign(new_obj);
+ return true;
}
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
@@ -103,7 +106,7 @@
// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
bool clear_soft_references,
- IsMarkedCallback* is_marked_callback,
+ IsHeapReferenceMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_object_callback,
ProcessMarkStackCallback* process_mark_stack_callback,
void* arg) {
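
(For context, a collector's call site under this signature would plausibly look
like the sketch below; the accessor and callback names are assumptions, and only
the parameter order comes from the declaration above:)

    // Sketch only: invoking ProcessReferences with the new
    // IsHeapReferenceMarkedCallback-based signature.
    heap->GetReferenceProcessor()->ProcessReferences(
        /*concurrent=*/ true, GetTimings(), clear_soft_references,
        &HeapReferenceMarkedCallback, &MarkObjectCallback,
        &ProcessMarkStackCallback, this);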
@@ -132,8 +135,8 @@
}
}
// Clear all remaining soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
{
TimingLogger::ScopedSplit split(concurrent ? "EnqueueFinalizerReferences" :
"(Paused)EnqueueFinalizerReferences", timings);
@@ -141,7 +144,7 @@
StartPreservingReferences(self);
}
// Preserve all white objects with finalize methods and schedule them for finalization.
- finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
+ finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
mark_object_callback, arg);
process_mark_stack_callback(arg);
if (concurrent) {
@@ -149,10 +152,10 @@
}
}
// Clear all finalizer referent reachable soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
// Clear all phantom references with white referents.
- phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
// At this point all reference queues other than the cleared references should be empty.
DCHECK(soft_reference_queue_.IsEmpty());
DCHECK(weak_reference_queue_.IsEmpty());
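
(The queue side, ReferenceQueue::ClearWhiteReferences in reference_queue.cc, is
not shown in this diff; under the new signature it plausibly drains the queue
along these lines. A simplified sketch, assuming the surrounding ReferenceQueue
and mirror::Reference helpers are unchanged:)

    // Simplified sketch: clear referents that the callback reports as
    // unmarked (white) and move enqueuable references to the cleared list.
    void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                              IsHeapReferenceMarkedCallback* is_marked_callback,
                                              void* arg) {
      while (!IsEmpty()) {
        mirror::Reference* ref = DequeuePendingReference();
        mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
        if (referent_addr->AsMirrorPtr() != nullptr &&
            !is_marked_callback(referent_addr, arg)) {
          ref->ClearReferent<false>();  // White referent: clear it.
          if (ref->IsEnqueuable()) {
            cleared_references->EnqueuePendingReference(ref);
          }
        }
      }
    }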
@@ -176,39 +179,33 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsMarkedCallback is_marked_callback, void* arg) {
+ IsHeapReferenceMarkedCallback* is_marked_callback,
+ void* arg) {
// klass can be the class of the old object if the visitor already updated the class of ref.
+ DCHECK(klass != nullptr);
DCHECK(klass->IsReferenceClass());
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
- if (referent != nullptr) {
- mirror::Object* forward_address = is_marked_callback(referent, arg);
- // Null means that the object is not currently marked.
- if (forward_address == nullptr) {
- Thread* self = Thread::Current();
- // TODO: Remove these locks, and use atomic stacks for storing references?
- // We need to check that the references haven't already been enqueued since we can end up
- // scanning the same reference multiple times due to dirty cards.
- if (klass->IsSoftReferenceClass()) {
- soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsWeakReferenceClass()) {
- weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsFinalizerReferenceClass()) {
- finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsPhantomReferenceClass()) {
- phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else {
- LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
- << klass->GetAccessFlags();
- }
- } else if (referent != forward_address) {
- // Referent is already marked and we need to update it.
- ref->SetReferent<false>(forward_address);
+ mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
+ if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
+ Thread* self = Thread::Current();
+ // TODO: Remove these locks, and use atomic stacks for storing references?
+ // We need to check that the references haven't already been enqueued since we can end up
+ // scanning the same reference multiple times due to dirty cards.
+ if (klass->IsSoftReferenceClass()) {
+ soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsWeakReferenceClass()) {
+ weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsFinalizerReferenceClass()) {
+ finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsPhantomReferenceClass()) {
+ phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else {
+ LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+ << klass->GetAccessFlags();
}
}
}
-void ReferenceProcessor::EnqueueClearedReferences() {
- Thread* self = Thread::Current();
+void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
Locks::mutator_lock_->AssertNotHeld(self);
if (!cleared_references_.IsEmpty()) {
// When a runtime isn't started there are no reference queues to care about so ignore.