/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/collector/mark_compact.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art HIDDEN {

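// Barrier() implements the read barrier for heap reference fields: given the holder object, the
// field offset, and the address of the field, it returns a reference the mutator can safely use
// under the configured collector, dispatching on the compile-time ReadBarrierOption and the
// runtime barrier kind.
//
// Minimal caller sketch (hypothetical; 'GetRawFieldAddr' stands in for whatever code computes
// the field address, e.g. the getters in mirror/object-inl.h):
//
//   mirror::HeapReference<T>* addr = GetRawFieldAddr<T>(obj, offset);
//   T* ref = ReadBarrier::Barrier<T, /*kIsVolatile=*/ false>(obj, offset, addr);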
template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (gUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      CHECK(self != nullptr);
      CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
    }
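    // Baker-style barrier: the object's read barrier state (the "gray" bit in the lock word)
    // tells us whether the concurrent copying collector may not yet have processed this object.
    // If the object is gray, the loaded reference may still point to a from-space copy, so we
    // take the slow path and let Mark() return the to-space reference.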
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates the field before us, but that's OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
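      // Table-lookup barrier: instead of per-object state, consult the heap's read barrier table
      // to decide whether the referenced object lives in a region the table flags as still
      // needing the barrier; only then is Mark() required.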
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates the field before us,
        // but that's OK.
        if (ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else if (kReadBarrierOption == kWithFromSpaceBarrier) {
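    // kWithFromSpaceBarrier is used with the userfaultfd-based mark-compact (CMC) collector:
    // while compaction is in progress, callers that need the original (pre-compaction) copy of
    // the object translate the reference back to its from-space address.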
    DCHECK(gUseUserfaultfd);
    MirrorType* old = ref_addr->template AsMirrorPtr<kIsVolatile>();
    mirror::Object* ref =
        Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(old);
    return reinterpret_cast<MirrorType*>(ref);
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}

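// BarrierForRoot() is the analogue of Barrier() for GC roots held as raw MirrorType** (e.g.
// roots visited through GcRoot). There is no holder object or field offset, so updates go
// straight through an Atomic<MirrorType*> CAS on the root slot.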
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (gUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      CHECK(self != nullptr);
      CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates it before us, but
        // that's OK.
        if (ref != old_ref) {
          Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else if (kReadBarrierOption == kWithFromSpaceBarrier) {
    DCHECK(gUseUserfaultfd);
    mirror::Object* from_ref =
        Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(ref);
    return reinterpret_cast<MirrorType*>(from_ref);
  } else {
    return ref;
  }
}

// TODO: Reduce copy-paste between the two BarrierForRoot() overloads.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (gUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      CHECK(self != nullptr);
      CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        // Update the root atomically. This may fail if the mutator updates it before us, but
        // that's OK.
        if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
          auto* atomic_root =
              reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else if (kReadBarrierOption == kWithFromSpaceBarrier) {
    DCHECK(gUseUserfaultfd);
    mirror::Object* from_ref =
        Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(ref);
    return reinterpret_cast<MirrorType*>(from_ref);
  } else {
    return ref;
  }
}

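// IsMarked() queries the concurrent copying collector for the marked (to-space) version of ref
// while the GC is marking; outside of marking, or when read barriers are disabled, ref is
// returned unchanged.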
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while the GC is marking.
  if (!gUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

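// Mark() forwards to the concurrent copying collector's read-barrier marking entry point, which
// returns the to-space reference for obj (marking/copying it first if necessary).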
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

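// The two IsGray() overloads differ only in how they order the rb_state load against the
// subsequent reference load: the first returns a fake address dependency (always zero) that the
// caller mixes into the field address, while the second relies on load-acquire semantics instead.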
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == kGrayState;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == kGrayState;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_