/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "lock_word.h"
#include "mirror/object-readbarrier-inl.h"

namespace art {
namespace gc {
namespace collector {

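// Marks a reference into an unevacuated from-space region. The object is not moved; it is marked
// in place via the region space bitmap (and, under the Baker barrier, its read barrier state),
// and pushed onto the mark stack if this call is the one that marked it.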
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
  // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
  // to gray even though the object has already been marked through. This happens if a mutator
  // thread gets preempted before the AtomicSetReadBarrierState below, the GC marks through the
  // object (changes it from white to gray and back to white), and the mutator thread then resumes
  // and incorrectly changes it from white to gray. If this happens, the object will get added to
  // the mark stack again and get changed back to white after it is processed.
  if (kUseBakerReadBarrier) {
    // Test the bitmap first: most of the time the object has already been marked through, and
    // this avoids graying it again.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is OK because the object may already be gray.
  bool success = false;
  if (kUseBakerReadBarrier) {
    // The GC will mark the bitmap when popping from the mark stack. If only the GC is touching
    // the bitmap, we can avoid an expensive CAS.
    // For the Baker case, an object is marked if either its mark bit is set or its bitmap bit is
    // set.
    success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
  } else {
    success = !bitmap->AtomicTestAndSet(ref);
  }
  if (success) {
    // Newly marked.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
    }
    PushOntoMarkStack(ref);
  }
  return ref;
}

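// Marks a reference into an immune space. Immune objects are never moved; under the Baker barrier
// they are grayed and recorded on immune_gray_stack_ for the GC to process later.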
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread
    // roots in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
    // true). Also, a mutator doesn't (need to) gray an immune object after the GC has updated all
    // immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (Thread::Current() == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.LoadRelaxed() ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
      return ref;
    }
    // This may or may not succeed, which is OK because the object may already be gray.
    bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
                                                  ReadBarrier::GrayState());
    if (success) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

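// Marks from_ref and returns the to-space reference for it (which equals from_ref when the object
// does not move). kGrayImmuneObject controls whether immune objects get grayed; kFromGCThread
// enables extra assertions for calls made by the GC thread itself. holder and offset identify the
// field the reference was loaded from and are used for diagnostics.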
template<bool kGrayImmuneObject, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
                                               mirror::Object* holder,
                                               MemberOffset offset) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kFromGCThread) {
    DCHECK(is_active_);
    DCHECK_EQ(Thread::Current(), thread_running_gc_);
  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits in the lock word are part
    // of the stored forwarding address and therefore invalid. This is usually OK because
    // from-space copies of objects aren't accessed by mutators due to the to-space invariant.
    // However, during the dex2oat image writing relocation and the zygote compaction, objects can
    // be in the forwarding address state (to store the forward/relocation addresses) while still
    // being accessed, so the invalid read barrier bits are consulted. If those bits make an
    // object look gray when it really isn't, the read barrier slow path can trigger when it
    // shouldn't. To guard against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  switch (rtype) {
    case space::RegionSpace::RegionType::kRegionTypeToSpace:
      // It's already marked.
      return from_ref;
    case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
      mirror::Object* to_ref = GetFwdPtr(from_ref);
      if (to_ref == nullptr) {
        // It isn't marked yet. Mark it by copying it to the to-space.
        to_ref = Copy(from_ref);
      }
      DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "from_ref=" << from_ref << " to_ref=" << to_ref;
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
      return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
    }
    case space::RegionSpace::RegionType::kRegionTypeNone:
      if (immune_spaces_.ContainsObject(from_ref)) {
        return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
      } else {
        return MarkNonMoving(from_ref, holder, offset);
      }
    default:
      UNREACHABLE();
  }
}

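// Entry point for the read barrier slow path. In addition to marking, this sets the object's mark
// bit and records the object on rb_mark_bit_stack_ so the GC can later revert the mark bits it
// set here.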
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  mirror::Object* ret;
  if (from_ref == nullptr) {
    return from_ref;
  }
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    ret = MarkFromReadBarrierWithMeasurements(from_ref);
  } else {
    ret = Mark(from_ref);
  }
  // Only set the mark bit for the Baker barrier.
  if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
    // If the mark stack is full, we may temporarily go to marked and back to unmarked. Seeing
    // either value is OK since the only race is doing an unnecessary Mark.
    if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
      // The mark stack is full; set the bit back to zero.
      CHECK(ret->AtomicSetMarkBit(1, 0));
      // Set rb_mark_bit_stack_full_; this is racy but OK since AtomicPushBack is thread-safe.
      rb_mark_bit_stack_full_ = true;
    }
  }
  return ret;
}

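// Returns the to-space address stored in the lock word of a from-space object, or null if the
// object has not been copied yet.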
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

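// Returns true if an object in an unevacuated from-space region has already been marked, either
// because it is gray (Baker) or because its bit is set in the region space bitmap.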
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
  // Use a load-acquire on the read barrier state to ensure that we never see a white read barrier
  // state with an unmarked bitmap bit due to reordering.
  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
    return true;
  }
  return region_space_bitmap_->Test(from_ref);
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_