Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2013 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "reference_queue.h" |
| 18 | |
| 19 | #include "accounting/card_table-inl.h" |
| 20 | #include "heap.h" |
| 21 | #include "mirror/class-inl.h" |
| 22 | #include "mirror/object-inl.h" |
| 23 | |
| 24 | namespace art { |
| 25 | namespace gc { |
| 26 | |
// Constructs an empty queue. |heap| is kept to resolve Reference field offsets
// (pendingNext, zombie) and to answer enqueuability queries; list_ == nullptr
// is the "empty" state tested by IsEmpty().
ReferenceQueue::ReferenceQueue(Heap* heap)
    : lock_("reference queue lock"),
      heap_(heap),
      list_(nullptr) {
}
| 32 | |
| 33 | void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref) { |
| 34 | DCHECK(ref != NULL); |
| 35 | MutexLock mu(self, lock_); |
| 36 | if (!heap_->IsEnqueued(ref)) { |
| 37 | EnqueuePendingReference(ref); |
| 38 | } |
| 39 | } |
| 40 | |
// Enqueues |ref|, asserting (CHECK — enforced even in release builds) that the
// reference is in an enqueuable state. No locking: callers must guarantee
// single-threaded access, unlike AtomicEnqueueIfNotEnqueued.
void ReferenceQueue::EnqueueReference(mirror::Object* ref) {
  CHECK(heap_->IsEnqueuable(ref));
  EnqueuePendingReference(ref);
}
| 45 | |
| 46 | void ReferenceQueue::EnqueuePendingReference(mirror::Object* ref) { |
| 47 | DCHECK(ref != NULL); |
| 48 | MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset(); |
| 49 | DCHECK_NE(pending_next_offset.Uint32Value(), 0U); |
| 50 | if (IsEmpty()) { |
| 51 | // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref; |
Sebastien Hertz | d2fe10a | 2014-01-15 10:20:56 +0100 | [diff] [blame] | 52 | if (Runtime::Current()->IsActiveTransaction()) { |
| 53 | ref->SetFieldObject<true>(pending_next_offset, ref, false); |
| 54 | } else { |
| 55 | ref->SetFieldObject<false>(pending_next_offset, ref, false); |
| 56 | } |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 57 | list_ = ref; |
| 58 | } else { |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 59 | mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false); |
Sebastien Hertz | d2fe10a | 2014-01-15 10:20:56 +0100 | [diff] [blame] | 60 | if (Runtime::Current()->IsActiveTransaction()) { |
| 61 | ref->SetFieldObject<true>(pending_next_offset, head, false); |
| 62 | list_->SetFieldObject<true>(pending_next_offset, ref, false); |
| 63 | } else { |
| 64 | ref->SetFieldObject<false>(pending_next_offset, head, false); |
| 65 | list_->SetFieldObject<false>(pending_next_offset, ref, false); |
| 66 | } |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 67 | } |
| 68 | } |
| 69 | |
// Removes and returns the head of the cyclic pending-reference list, clearing
// the returned reference's pendingNext field so IsEnqueued() becomes false
// for it. Precondition: the queue is non-empty.
mirror::Object* ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
  // list_ is the tail of the cyclic list; tail->pendingNext is the head.
  mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false);
  DCHECK(head != nullptr);
  mirror::Object* ref;
  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (list_ == head) {
    // Single-element queue (tail == head): dequeuing empties it.
    ref = list_;
    list_ = nullptr;
  } else {
    // Unlink the head by pointing the tail at the element after the head.
    mirror::Object* next = head->GetFieldObject<mirror::Object>(pending_next_offset, false);
    if (Runtime::Current()->IsActiveTransaction()) {
      list_->SetFieldObject<true>(pending_next_offset, next, false);
    } else {
      list_->SetFieldObject<false>(pending_next_offset, next, false);
    }
    ref = head;
  }
  // Clear pendingNext so the dequeued reference no longer looks enqueued.
  // SetFieldObject<true> records the write for transaction rollback.
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->SetFieldObject<true>(pending_next_offset, nullptr, false);
  } else {
    ref->SetFieldObject<false>(pending_next_offset, nullptr, false);
  }
  return ref;
}
| 97 | |
| 98 | void ReferenceQueue::Dump(std::ostream& os) const { |
| 99 | mirror::Object* cur = list_; |
| 100 | os << "Reference starting at list_=" << list_ << "\n"; |
| 101 | while (cur != nullptr) { |
| 102 | mirror::Object* pending_next = |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 103 | cur->GetFieldObject<mirror::Object>(heap_->GetReferencePendingNextOffset(), false); |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 104 | os << "PendingNext=" << pending_next; |
| 105 | if (cur->GetClass()->IsFinalizerReferenceClass()) { |
| 106 | os << " Zombie=" << |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 107 | cur->GetFieldObject<mirror::Object>(heap_->GetFinalizerReferenceZombieOffset(), false); |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 108 | } |
| 109 | os << "\n"; |
| 110 | cur = pending_next; |
| 111 | } |
| 112 | } |
| 113 | |
Mathieu Chartier | 83c8ee0 | 2014-01-28 14:50:23 -0800 | [diff] [blame] | 114 | void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references, |
| 115 | IsMarkedCallback* preserve_callback, |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 116 | void* arg) { |
| 117 | while (!IsEmpty()) { |
| 118 | mirror::Object* ref = DequeuePendingReference(); |
| 119 | mirror::Object* referent = heap_->GetReferenceReferent(ref); |
| 120 | if (referent != nullptr) { |
Mathieu Chartier | 83c8ee0 | 2014-01-28 14:50:23 -0800 | [diff] [blame] | 121 | mirror::Object* forward_address = preserve_callback(referent, arg); |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 122 | if (forward_address == nullptr) { |
| 123 | // Referent is white, clear it. |
| 124 | heap_->ClearReferenceReferent(ref); |
| 125 | if (heap_->IsEnqueuable(ref)) { |
| 126 | cleared_references.EnqueuePendingReference(ref); |
| 127 | } |
| 128 | } else if (referent != forward_address) { |
Mathieu Chartier | 83c8ee0 | 2014-01-28 14:50:23 -0800 | [diff] [blame] | 129 | // Object moved, need to updated the referent. |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 130 | heap_->SetReferenceReferent(ref, forward_address); |
| 131 | } |
| 132 | } |
| 133 | } |
| 134 | } |
| 135 | |
// Drains this queue of finalizer references. A reference whose referent is
// unmarked gets the referent (recursively) marked so the finalizer can run on
// a live object, the forwarded referent stashed in the zombie field, the
// referent field cleared, and the reference enqueued on |cleared_references|
// for finalization. Marked-but-moved referents just get their pointer updated.
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
                                                IsMarkedCallback is_marked_callback,
                                                MarkObjectCallback recursive_mark_callback,
                                                void* arg) {
  while (!IsEmpty()) {
    mirror::Object* ref = DequeuePendingReference();
    mirror::Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != nullptr) {
      mirror::Object* forward_address = is_marked_callback(referent, arg);
      // If the referent isn't marked, mark it (and everything reachable from
      // it) so the finalizer observes a live object graph.
      if (forward_address == nullptr) {
        forward_address = recursive_mark_callback(referent, arg);
        // If the referent is non-null the reference must be enqueuable.
        DCHECK(heap_->IsEnqueuable(ref));
        // Move the updated referent to the zombie field.
        // <true> variant records the write for transaction rollback.
        if (Runtime::Current()->IsActiveTransaction()) {
          ref->SetFieldObject<true>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
        } else {
          ref->SetFieldObject<false>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
        }
        heap_->ClearReferenceReferent(ref);
        cleared_references.EnqueueReference(ref);
      } else if (referent != forward_address) {
        // Referent moved; update the pointer.
        heap_->SetReferenceReferent(ref, forward_address);
      }
    }
  }
}
| 164 | |
Mathieu Chartier | 83c8ee0 | 2014-01-28 14:50:23 -0800 | [diff] [blame] | 165 | void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback preserve_callback, void* arg) { |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 166 | ReferenceQueue cleared(heap_); |
| 167 | while (!IsEmpty()) { |
| 168 | mirror::Object* ref = DequeuePendingReference(); |
| 169 | mirror::Object* referent = heap_->GetReferenceReferent(ref); |
| 170 | if (referent != nullptr) { |
| 171 | mirror::Object* forward_address = preserve_callback(referent, arg); |
| 172 | if (forward_address == nullptr) { |
| 173 | // Either the reference isn't marked or we don't wish to preserve it. |
| 174 | cleared.EnqueuePendingReference(ref); |
Mathieu Chartier | 83c8ee0 | 2014-01-28 14:50:23 -0800 | [diff] [blame] | 175 | } else if (forward_address != referent) { |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 176 | heap_->SetReferenceReferent(ref, forward_address); |
| 177 | } |
| 178 | } |
| 179 | } |
| 180 | list_ = cleared.GetList(); |
| 181 | } |
| 182 | |
| 183 | } // namespace gc |
| 184 | } // namespace art |
| 185 | |