/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_queue.h"

#include "accounting/card_table-inl.h"
#include "base/mutex.h"
#include "collector/concurrent_copying.h"
#include "heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "object_callbacks.h"

namespace art {
namespace gc {

ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}

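// Lock-protected variant of EnqueueReference(): holds lock_ so that checking IsUnprocessed() and
// enqueuing happen atomically with respect to other threads using the same queue.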
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  MutexLock mu(self, *lock_);
  if (ref->IsUnprocessed()) {
    EnqueueReference(ref);
  }
}

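// Splice ref into the circular singly-linked list threaded through Reference.pendingNext.
// list_ designates one node of the cycle; the new reference is inserted right after it, so
// enqueueing is O(1) and an empty queue is represented by list_ == nullptr.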
void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, everything that has been inserted must already be at least
    // gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}

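// Unlink and return the reference that follows list_ in the circular list. Callers must pair
// this with DisableReadBarrierForReference() (see the note below).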
ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
  DCHECK(ref != nullptr);
  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (list_ == ref) {
    list_ = nullptr;
  } else {
    ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
    list_->SetPendingNext(next);
  }
  ref->SetPendingNext(nullptr);
  return ref;
}

// This must be called whenever DequeuePendingReference is called.
void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to non-gray.
    // We check IsActive() above because we don't want to do this when the zygote compaction
    // collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
    uint32_t rb_state = ref->GetReadBarrierState();
    if (rb_state == ReadBarrier::GrayState()) {
      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState());
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
    } else {
      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a non-gray reference in the queue
      // and find it here, which is OK.
      CHECK_EQ(rb_state, ReadBarrier::NonGrayState()) << "ref=" << ref << " rb_state=" << rb_state;
      ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
      // The referent could be null if it's cleared by a mutator (Reference.clear()).
      if (referent != nullptr) {
        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
            << " referent=" << referent;
      }
    }
  }
}

void ReferenceQueue::Dump(std::ostream& os) const {
  ObjPtr<mirror::Reference> cur = list_;
  os << "Reference starting at list_=" << list_ << "\n";
  if (cur == nullptr) {
    return;
  }
  do {
    ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
    os << "Reference= " << cur << " PendingNext=" << pending_next;
    if (cur->IsFinalizerReferenceInstance()) {
      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
    }
    os << "\n";
    cur = pending_next;
  } while (cur != list_);
}

size_t ReferenceQueue::GetLength() const {
  size_t count = 0;
  ObjPtr<mirror::Reference> cur = list_;
  if (cur != nullptr) {
    do {
      ++count;
      cur = cur->GetPendingNext();
    } while (cur != list_);
  }
  return count;
}

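// Drain this queue, clearing the referent of every reference whose referent the collector has
// not marked (i.e. is still white) and moving those references onto cleared_references. When
// report_cleared is set, the first such clearing logs a one-time warning.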
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector,
                                          bool report_cleared) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
      if (report_cleared) {
        static bool already_reported = false;
        if (!already_reported) {
          // TODO: Maybe do this only if the queue is non-null?
          LOG(WARNING)
              << "Cleared Reference was only reachable from finalizer (only reported once)";
          already_reported = true;
        }
      }
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}

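// For each finalizer reference whose referent is unmarked: mark the referent so it stays alive
// for finalization, store the forwarded referent in the zombie field, clear the referent, and
// move the reference onto cleared_references. Returns how many references were visited and how
// many were enqueued.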
FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                          collector::GarbageCollector* collector) {
  uint32_t num_refs(0), num_enqueued(0);
  while (!IsEmpty()) {
    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
    ++num_refs;
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
      ++num_enqueued;
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref->AsReference());
  }
  return FinalizerStats(num_refs, num_enqueued);
}

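// Mark the referents of all references in this queue so they survive the current collection.
// The queue is drained in batches of SR_BUF_SIZE while holding lock_ only briefly; the actual
// marking happens outside the lock. Returns the number of non-null referents marked.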
uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
  uint32_t num_refs(0);
  Thread* self = Thread::Current();
  static constexpr int SR_BUF_SIZE = 32;
  ObjPtr<mirror::Reference> buf[SR_BUF_SIZE];
  int n_entries;
  bool empty;
  do {
    {
      // Acquire lock only a few times and hold it as briefly as possible.
      MutexLock mu(self, *lock_);
      empty = IsEmpty();
      for (n_entries = 0; n_entries < SR_BUF_SIZE && !empty; ++n_entries) {
        // Dequeuing the Reference here means it could possibly be enqueued again during this GC.
        // That's unlikely and benign.
        buf[n_entries] = DequeuePendingReference();
        empty = IsEmpty();
      }
    }
    for (int i = 0; i < n_entries; ++i) {
      mirror::HeapReference<mirror::Object>* referent_addr = buf[i]->GetReferentReferenceAddr();
      if (referent_addr->AsMirrorPtr() != nullptr) {
        visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
        ++num_refs;
      }
      DisableReadBarrierForReference(buf[i]->AsReference());
    }
  } while (!empty);
  return num_refs;
}

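// Update the list_ root through the visitor, which returns the new address of the head node if
// it is marked (possibly relocated) or null otherwise.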
void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
  if (list_ != nullptr) {
    list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
  }
}

}  // namespace gc
}  // namespace art