/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class RootInfo;

namespace gc {

namespace accounting {
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

// Concurrent queue. Used as the mark stack. TODO: use a concurrent
// stack for locality.
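// Layout: a bounded, power-of-two-sized ring buffer. head_ and tail_ are
// free-running counters (head_ <= tail_, count == tail_ - head_) that get
// masked into slot indices, so they never need to wrap explicitly. Multiple
// producers reserve slots by a CAS on tail_; a single consumer advances head_.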
class MarkQueue {
 public:
  explicit MarkQueue(size_t size) : size_(size) {
    CHECK(IsPowerOfTwo(size_));
    buf_.reset(new Atomic<mirror::Object*>[size_]);
    CHECK(buf_.get() != nullptr);
    Clear();
  }

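  // Because size_ is a power of two, "index & (size_ - 1)" is equivalent to
  // "index % size_", mapping a free-running counter to a slot without a
  // division.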
  ALWAYS_INLINE Atomic<mirror::Object*>* GetSlotAddr(size_t index) {
    return &(buf_.get()[index & (size_ - 1)]);
  }

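  // Protocol: a producer first reserves a slot by advancing tail_ with a CAS,
  // then publishes the reference into the slot. Dequeue() below must therefore
  // tolerate seeing a reserved-but-not-yet-filled (null) slot.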
  // Multiple-producer enqueue.
  bool Enqueue(mirror::Object* to_ref) {
    size_t t;
    do {
      t = tail_.LoadRelaxed();
      size_t h = head_.LoadSequentiallyConsistent();
      if (t - h == size_) {
        // It's full.
        return false;
      }
    } while (!tail_.CompareExchangeWeakSequentiallyConsistent(t, t + 1));
    // We got a slot but its content has not been filled yet at this point.
    GetSlotAddr(t)->StoreSequentiallyConsistent(to_ref);
    return true;
  }

  // Thread-unsafe enqueue.
  bool EnqueueThreadUnsafe(mirror::Object* to_ref) {
    size_t t = tail_.LoadRelaxed();
    size_t h = head_.LoadRelaxed();
    if (t - h == size_) {
      // It's full.
      return false;
    }
    GetSlotAddr(t)->StoreRelaxed(to_ref);
    tail_.StoreRelaxed(t + 1);
    return true;
  }

  // Single-consumer dequeue.
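  // Returns null when the queue is empty; spins if the head slot has been
  // reserved by Enqueue() but not yet published. This relies on null never
  // being a valid queue element (Clear() zero-fills the buffer).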
  mirror::Object* Dequeue() {
    size_t h = head_.LoadRelaxed();
    size_t t = tail_.LoadSequentiallyConsistent();
    if (h == t) {
      // It's empty.
      return nullptr;
    }
    Atomic<mirror::Object*>* slot = GetSlotAddr(h);
    mirror::Object* ref = slot->LoadSequentiallyConsistent();
    while (ref == nullptr) {
      // Wait until the slot content becomes visible.
      ref = slot->LoadSequentiallyConsistent();
    }
    slot->StoreRelaxed(nullptr);
    head_.StoreSequentiallyConsistent(h + 1);
    return ref;
  }

  bool IsEmpty() {
    size_t h = head_.LoadSequentiallyConsistent();
    size_t t = tail_.LoadSequentiallyConsistent();
    return h == t;
  }

  void Clear() {
    head_.StoreRelaxed(0);
    tail_.StoreRelaxed(0);
    memset(buf_.get(), 0, size_ * sizeof(Atomic<mirror::Object*>));
  }

 private:
  Atomic<size_t> head_;
  Atomic<size_t> tail_;

  size_t size_;
  std::unique_ptr<Atomic<mirror::Object*>[]> buf_;
};
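
// A minimal usage sketch (illustrative only; in this collector the queue is
// driven via PushOntoMarkStack()/PopOffMarkStack() below):
//
//   MarkQueue queue(1024);  // Capacity must be a power of two.
//   CHECK(queue.Enqueue(obj)) << "Mark queue overflow";
//   while (mirror::Object* ref = queue.Dequeue()) {
//     ScanObject(ref);  // Hypothetical visitor, not part of this header.
//   }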

class ConcurrentCopying : public GarbageCollector {
 public:
  // TODO: disable these flags for production use.
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = true;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = true;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = true;

  explicit ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
  ~ConcurrentCopying();

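  // Entry point: runs the phases declared below in order (initialize, mark,
  // reclaim, finish); the thread-root flip pause is issued internally (see
  // FlipThreadRoots() in the private section).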
  virtual void RunPhases() OVERRIDE;
  void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FinishPhase();

  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

 private:
  mirror::Object* PopOffMarkStack();
  template<bool kThreadSafe>
  void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  bool ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self, bool concurrent)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool IsHeapReferenceMarkedCallback(
      mirror::HeapReference<mirror::Object>* field, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void ProcessMarkStackCallback(void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void ClearBlackPtrs()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CheckEmptyMarkQueue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  space::RegionSpace* region_space_;  // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  MarkQueue mark_queue_;
  bool is_marking_;  // True while marking is ongoing.
  bool is_active_;  // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneRegion immune_region_;
  std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
  std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_queue_push_disallowed_;

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;

  // The skipped blocks are memory blocks/chunks that held object copies that
  // went unused because of lost races (CAS failures) when installing the
  // object's forwarding pointer. They are reused.
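  // (See AllocateInSkippedBlock() and FillWithDummyObject() above.)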
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.

  friend class ConcurrentCopyingRefFieldsVisitor;
  friend class ConcurrentCopyingImmuneSpaceObjVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor;
  friend class ConcurrentCopyingClearBlackPtrsVisitor;
  friend class ConcurrentCopyingLostCopyVisitor;
  friend class ThreadFlipVisitor;
  friend class FlipCallback;
  friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_