/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_queue.h"

#include "accounting/card_table-inl.h"
#include "base/mutex.h"
#include "collector/concurrent_copying.h"
#include "heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "object_callbacks.h"

namespace art {
namespace gc {

ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}

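// Thread-safe enqueue: takes the queue lock and adds |ref| only if it is not already on a
// pending list.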
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  MutexLock mu(self, *lock_);
  if (ref->IsUnprocessed()) {
    EnqueueReference(ref);
  }
}

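// Enqueue onto the circular, singly-linked list threaded through Reference.pendingNext. The new
// reference is spliced in immediately after list_ so the cycle is preserved. EnqueueReference
// itself takes no lock; synchronization is the caller's responsibility (see
// AtomicEnqueueIfNotEnqueued).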
void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, everything that has been inserted must already be at least
    // gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}

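// Removes and returns the reference following list_ in the cycle, unlinking it and clearing its
// pendingNext field. If it was the last element, the queue becomes empty.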
ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
  DCHECK(ref != nullptr);
  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (list_ == ref) {
    list_ = nullptr;
  } else {
    ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
    list_->SetPendingNext(next);
  }
  ref->SetPendingNext(nullptr);
  return ref;
}

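// With the Baker read barrier and the concurrent copying collector active, references on the
// pending list were left gray by ConcurrentCopying::ProcessMarkStackRef(); this resets a dequeued
// reference back to the non-gray state.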
// This must be called whenever DequeuePendingReference is called.
void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to non-gray.
    // We check IsActive() above because we don't want to do this when the zygote compaction
    // collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
    uint32_t rb_state = ref->GetReadBarrierState();
    if (rb_state == ReadBarrier::GrayState()) {
      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState());
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
    } else {
      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a non-gray reference in the queue
      // and find it here, which is OK.
      CHECK_EQ(rb_state, ReadBarrier::NonGrayState()) << "ref=" << ref << " rb_state=" << rb_state;
      ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
      // The referent could be null if it's cleared by a mutator (Reference.clear()).
      if (referent != nullptr) {
        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
            << " referent=" << referent;
      }
    }
  }
}

void ReferenceQueue::Dump(std::ostream& os) const {
  ObjPtr<mirror::Reference> cur = list_;
  os << "Reference starting at list_=" << list_ << "\n";
  if (cur == nullptr) {
    return;
  }
  do {
    ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
    os << "Reference= " << cur << " PendingNext=" << pending_next;
    if (cur->IsFinalizerReferenceInstance()) {
      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
    }
    os << "\n";
    cur = pending_next;
  } while (cur != list_);
}

size_t ReferenceQueue::GetLength() const {
  size_t count = 0;
  ObjPtr<mirror::Reference> cur = list_;
  if (cur != nullptr) {
    do {
      ++count;
      cur = cur->GetPendingNext();
    } while (cur != list_);
  }
  return count;
}

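// Clears the referent of every reference in this queue whose referent is not marked (i.e. is
// white/unreachable) and moves those references onto |cleared_references| so they can later be
// enqueued for the application.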
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector,
                                          bool report_cleared) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
      if (report_cleared) {
        static bool already_reported = false;
        if (!already_reported) {
          // TODO: Maybe do this only if the queue is non-null?
          LOG(WARNING)
              << "Cleared Reference was only reachable from finalizer (only reported once)";
          already_reported = true;
        }
      }
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}

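// For each FinalizerReference whose referent is not marked, marks the referent to keep it alive
// for finalization, stores the (possibly forwarded) referent in the zombie field, clears the
// referent, and moves the reference onto |cleared_references|. Returns how many references were
// visited and how many were enqueued.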
FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                          collector::GarbageCollector* collector) {
  uint32_t num_refs(0), num_enqueued(0);
  while (!IsEmpty()) {
    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
    ++num_refs;
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
      ++num_enqueued;
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref->AsReference());
  }
  return FinalizerStats(num_refs, num_enqueued);
}

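// Marks (and thereby keeps alive / forwards) the referents of all soft references in this queue.
// References are dequeued in batches of up to SR_BUF_SIZE while holding lock_, then their
// referents are marked outside the lock. Returns the number of non-null referents marked.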
uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
  uint32_t num_refs(0);
  Thread* self = Thread::Current();
  static constexpr int SR_BUF_SIZE = 32;
  ObjPtr<mirror::Reference> buf[SR_BUF_SIZE];
  int n_entries;
  bool empty;
  do {
    {
      // Acquire lock only a few times and hold it as briefly as possible.
      MutexLock mu(self, *lock_);
      empty = IsEmpty();
      for (n_entries = 0; n_entries < SR_BUF_SIZE && !empty; ++n_entries) {
        // Dequeuing the Reference here means it could possibly be enqueued again during this GC.
        // That's unlikely and benign.
        buf[n_entries] = DequeuePendingReference();
        empty = IsEmpty();
      }
    }
    for (int i = 0; i < n_entries; ++i) {
      mirror::HeapReference<mirror::Object>* referent_addr = buf[i]->GetReferentReferenceAddr();
      if (referent_addr->AsMirrorPtr() != nullptr) {
        visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
        ++num_refs;
      }
      DisableReadBarrierForReference(buf[i]->AsReference());
    }
  } while (!empty);
  return num_refs;
}

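// Treats list_ as a GC root: updates it to the marked (possibly relocated) address of the head
// reference, as reported by the visitor.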
void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
  if (list_ != nullptr) {
    list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
  }
}

}  // namespace gc
}  // namespace art