-rw-r--r--  cmdline/cmdline_types.h                |   2
-rw-r--r--  runtime/dexopt_test.cc                 |   2
-rw-r--r--  runtime/gc/collector/semi_space-inl.h  |   2
-rw-r--r--  runtime/gc/collector/semi_space.cc     | 233
-rw-r--r--  runtime/gc/collector/semi_space.h      |  36
-rw-r--r--  runtime/gc/collector_type.h            |   4
-rw-r--r--  runtime/gc/heap-inl.h                  |  12
-rw-r--r--  runtime/gc/heap.cc                     |  62
-rw-r--r--  runtime/gc/heap.h                      |   1
-rw-r--r--  runtime/gc/system_weak_test.cc         |   1
-rw-r--r--  runtime/parsed_options.cc              |   9
-rwxr-xr-x  test/004-ThreadStress/run              |  24
-rw-r--r--  test/knownfailures.json                |   7
-rw-r--r--  test/testrunner/target_config.py       |  38
14 files changed, 46 insertions(+), 387 deletions(-)
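
Note on the first hunk below: the change deletes the GSS ("generational semi-space") collector, so "GSS" is no longer a value the collector-name parser accepts. A minimal standalone sketch of the parsing shape after the patch; the enum, function name, and the fallback value here are simplified stand-ins for illustration, not the exact ART declarations:

#include <string>

// Simplified stand-in for gc::CollectorType (the real enum lives in
// runtime/gc/collector_type.h and has more members).
enum class CollectorType { kCMS, kSS, kCC, kNone };

// After the patch, "GSS" no longer maps to a collector and falls through to
// the unknown-name path like any other unrecognized value.
CollectorType ParseCollectorName(const std::string& option) {
  if (option == "CMS") {
    return CollectorType::kCMS;
  } else if (option == "SS") {
    return CollectorType::kSS;
  } else if (option == "CC") {
    return CollectorType::kCC;
  } else {
    return CollectorType::kNone;  // Unknown names, now including "GSS", are rejected.
  }
}

With this shape, a command line such as -Xgc:GSS takes the same error path as any other unrecognized collector name.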
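The runtime/parsed_options.cc hunk further down removes the special case that forced a GSS background collector whenever the foreground collector was GSS; afterwards the default background collector depends only on low-memory mode. A small self-contained sketch of that selection logic, where the type and function names are assumptions for illustration:

// Simplified stand-in for gc::CollectorType.
enum class CollectorType { kSS, kHomogeneousSpaceCompact, kNone };

// When no background collector was requested explicitly, low-memory mode picks
// semi-space and everything else picks homogeneous space compaction.
CollectorType DefaultBackgroundCollector(CollectorType requested, bool low_memory_mode) {
  if (requested != CollectorType::kNone) {
    return requested;  // An explicit request is kept as-is.
  }
  return low_memory_mode ? CollectorType::kSS
                         : CollectorType::kHomogeneousSpaceCompact;
}
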
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index dd9221d6f8..a757c91089 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -412,8 +412,6 @@ static gc::CollectorType ParseCollectorType(const std::string& option) {
     return gc::kCollectorTypeCMS;
   } else if (option == "SS") {
     return gc::kCollectorTypeSS;
-  } else if (option == "GSS") {
-    return gc::kCollectorTypeGSS;
   } else if (option == "CC") {
     return gc::kCollectorTypeCC;
   } else {
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 33f8c7fa25..ec41b547e6 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -174,8 +174,6 @@ void DexoptTest::ReserveImageSpace() {
   MemMap::Init();

   // Ensure a chunk of memory is reserved for the image space.
-  // The reservation_end includes room for the main space that has to come
-  // right after the image in case of the GSS collector.
   uint64_t reservation_start = ART_BASE_ADDRESS;
   uint64_t reservation_end = ART_BASE_ADDRESS + 384 * MB;

diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 7db5d2ca20..065a12512b 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -58,7 +58,7 @@ inline void SemiSpace::MarkObject(CompressedReferenceType* obj_ptr) {
       MarkStackPush(forward_address);
     }
     obj_ptr->Assign(forward_address);
-  } else if (!collect_from_space_only_ && !immune_spaces_.IsInImmuneRegion(obj)) {
+  } else if (!immune_spaces_.IsInImmuneRegion(obj)) {
     DCHECK(!to_space_->HasAddress(obj)) << "Tried to mark " << obj << " in to-space";
     auto slow_path = [this](const mirror::Object* ref) {
       CHECK(!to_space_->HasAddress(ref)) << "Marking " << ref << " in to_space_";
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 15e0711948..c93410e4b5 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -58,8 +58,6 @@ namespace collector {

 static constexpr bool kProtectFromSpace = true;
 static constexpr bool kStoreStackTraces = false;
-static constexpr size_t kBytesPromotedThreshold = 4 * MB;
-static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

 void SemiSpace::BindBitmaps() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
@@ -71,41 +69,23 @@ void SemiSpace::BindBitmaps() {
       immune_spaces_.AddSpace(space);
     } else if (space->GetLiveBitmap() != nullptr) {
       // TODO: We can probably also add this space to the immune region.
-      if (space == to_space_ || collect_from_space_only_) {
-        if (collect_from_space_only_) {
-          // Bind the bitmaps of the main free list space and the non-moving space we are doing a
-          // bump pointer space only collection.
-          CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
-                space == GetHeap()->GetNonMovingSpace());
-        }
+      if (space == to_space_) {
         CHECK(space->IsContinuousMemMapAllocSpace());
         space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
       }
     }
   }
-  if (collect_from_space_only_) {
-    // We won't collect the large object space if a bump pointer space only collection.
-    is_large_object_space_immune_ = true;
-  }
 }

-SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
+SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
     : GarbageCollector(heap,
                        name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
       mark_stack_(nullptr),
-      is_large_object_space_immune_(false),
       to_space_(nullptr),
       to_space_live_bitmap_(nullptr),
       from_space_(nullptr),
       mark_bitmap_(nullptr),
       self_(nullptr),
-      generational_(generational),
-      last_gc_to_space_end_(nullptr),
-      bytes_promoted_(0),
-      bytes_promoted_since_last_whole_heap_collection_(0),
-      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
-      collect_from_space_only_(generational),
-      promo_dest_space_(nullptr),
       fallback_space_(nullptr),
       bytes_moved_(0U),
       objects_moved_(0U),
@@ -148,7 +128,6 @@ void SemiSpace::InitializePhase() {
   mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_spaces_.Reset();
-  is_large_object_space_immune_ = false;
   saved_bytes_ = 0;
   bytes_moved_ = 0;
   objects_moved_ = 0;
@@ -161,9 +140,6 @@ void SemiSpace::InitializePhase() {
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     mark_bitmap_ = heap_->GetMarkBitmap();
   }
-  if (generational_) {
-    promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
-  }
   fallback_space_ = GetHeap()->GetNonMovingSpace();
 }

@@ -191,44 +167,14 @@ void SemiSpace::MarkingPhase() {
   // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
   // to prevent fragmentation.
   RevokeAllThreadLocalBuffers();
-  if (generational_) {
-    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
-        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
-        GetCurrentIteration()->GetClearSoftReferences()) {
-      // If an explicit, native allocation-triggered, or last attempt
-      // collection, collect the whole heap.
-      collect_from_space_only_ = false;
-    }
-    if (!collect_from_space_only_) {
-      VLOG(heap) << "Whole heap collection";
-      name_ = collector_name_ + " whole";
-    } else {
-      VLOG(heap) << "Bump pointer space only collection";
-      name_ = collector_name_ + " bps";
-    }
-  }
-  if (!collect_from_space_only_) {
-    // If non-generational, always clear soft references.
-    // If generational, clear soft references if a whole heap collection.
-    GetCurrentIteration()->SetClearSoftReferences(true);
-  }
+  // Always clear soft references.
+  GetCurrentIteration()->SetClearSoftReferences(true);
   Locks::mutator_lock_->AssertExclusiveHeld(self_);
-  if (generational_) {
-    // If last_gc_to_space_end_ is out of the bounds of the from-space
-    // (the to-space from last GC), then point it to the beginning of
-    // the from-space. For example, the very first GC or the
-    // pre-zygote compaction.
-    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
-      last_gc_to_space_end_ = from_space_->Begin();
-    }
-    // Reset this before the marking starts below.
-    bytes_promoted_ = 0;
-  }
   // Assume the cleared space is already empty.
   BindBitmaps();
   // Process dirty cards and add dirty cards to mod-union tables.
-  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_, false, true);
+  heap_->ProcessCards(GetTimings(), /*use_rem_sets=*/false, false, true);
   // Clear the whole card table since we cannot get any additional dirty cards during the
   // paused GC. This saves memory but only works for pause the world collectors.
   t.NewTiming("ClearCardTable");
@@ -256,7 +202,7 @@ void SemiSpace::MarkingPhase() {
   // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
   // before they are properly counted.
   RevokeAllThreadLocalBuffers();
-  GetHeap()->RecordFreeRevoke();  // this is for the non-moving rosalloc space used by GSS.
+  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space.
   // Record freed memory.
   const int64_t from_bytes = from_space_->GetBytesAllocated();
   const int64_t to_bytes = bytes_moved_;
@@ -349,8 +295,7 @@ void SemiSpace::MarkReachableObjects() {
                                            GetTimings());
       table->UpdateAndMarkReferences(this);
       DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
-    } else if ((space->IsImageSpace() || collect_from_space_only_) &&
-               space->GetLiveBitmap() != nullptr) {
+    } else if (space->IsImageSpace() && space->GetLiveBitmap() != nullptr) {
       // If the space has no mod union table (the non-moving space, app image spaces, main spaces
       // when the bump pointer space only collection is enabled,) then we need to scan its live
       // bitmap or dirty cards as roots (including the objects on the live stack which have just
@@ -358,11 +303,8 @@ void SemiSpace::MarkReachableObjects() {
       accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
       if (!space->IsImageSpace()) {
         DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
-            << "Space " << space->GetName() << " "
-            << "generational_=" << generational_ << " "
-            << "collect_from_space_only_=" << collect_from_space_only_;
+            << "Space " << space->GetName();
         // App images currently do not have remembered sets.
-        DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
       } else {
         DCHECK(rem_set == nullptr);
       }
@@ -395,30 +337,6 @@ void SemiSpace::MarkReachableObjects() {
       }
     }
   }
-
-  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
-  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
-  if (is_large_object_space_immune_ && los != nullptr) {
-    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
-    DCHECK(collect_from_space_only_);
-    // Delay copying the live set to the marked set until here from
-    // BindBitmaps() as the large objects on the allocation stack may
-    // be newly added to the live set above in MarkAllocStackAsLive().
-    los->CopyLiveToMarked();
-
-    // When the large object space is immune, we need to scan the
-    // large object space as roots as they contain references to their
-    // classes (primitive array classes) that could move though they
-    // don't contain any other references.
-    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
-    std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
-    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
-                                        reinterpret_cast<uintptr_t>(range.second),
-                                        [this](mirror::Object* obj)
-        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-      ScanObject(obj);
-    });
-  }
   // Recursively process the mark stack.
   ProcessMarkStack();
 }
@@ -437,12 +355,6 @@ void SemiSpace::ReclaimPhase() {
   if (saved_bytes_ > 0) {
     VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
   }
-  if (generational_) {
-    // Record the end (top) of the to space so we can distinguish
-    // between objects that were allocated since the last GC and the
-    // older objects.
-    last_gc_to_space_end_ = to_space_->End();
-  }
 }

 void SemiSpace::ResizeMarkStack(size_t new_size) {
@@ -515,66 +427,15 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
 mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
   const size_t object_size = obj->SizeOf();
   size_t bytes_allocated, dummy;
-  mirror::Object* forward_address = nullptr;
-  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
-    // If it's allocated before the last GC (older), move
-    // (pseudo-promote) it to the main free list space (as sort
-    // of an old generation.)
-    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
-                                                           nullptr, &dummy);
-    if (UNLIKELY(forward_address == nullptr)) {
-      // If out of space, fall back to the to-space.
-      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
-                                                     &dummy);
-      // No logic for marking the bitmap, so it must be null.
-      DCHECK(to_space_live_bitmap_ == nullptr);
-    } else {
-      bytes_promoted_ += bytes_allocated;
-      // Dirty the card at the destionation as it may contain
-      // references (including the class pointer) to the bump pointer
-      // space.
-      WriteBarrier::ForEveryFieldWrite(forward_address);
-      // Handle the bitmaps marking.
-      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
-      DCHECK(live_bitmap != nullptr);
-      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
-      DCHECK(mark_bitmap != nullptr);
-      DCHECK(!live_bitmap->Test(forward_address));
-      if (collect_from_space_only_) {
-        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
-        DCHECK_EQ(live_bitmap, mark_bitmap);
-
-        // If a bump pointer space only collection, delay the live
-        // bitmap marking of the promoted object until it's popped off
-        // the mark stack (ProcessMarkStack()). The rationale: we may
-        // be in the middle of scanning the objects in the promo
-        // destination space for
-        // non-moving-space-to-bump-pointer-space references by
-        // iterating over the marked bits of the live bitmap
-        // (MarkReachableObjects()). If we don't delay it (and instead
-        // mark the promoted object here), the above promo destination
-        // space scan could encounter the just-promoted object and
-        // forward the references in the promoted object's fields even
-        // through it is pushed onto the mark stack. If this happens,
-        // the promoted object would be in an inconsistent state, that
-        // is, it's on the mark stack (gray) but its fields are
-        // already forwarded (black), which would cause a
-        // DCHECK(!to_space_->HasAddress(obj)) failure below.
-      } else {
-        // Mark forward_address on the live bit map.
-        live_bitmap->Set(forward_address);
-        // Mark forward_address on the mark bit map.
-        DCHECK(!mark_bitmap->Test(forward_address));
-        mark_bitmap->Set(forward_address);
-      }
-    }
-  } else {
-    // If it's allocated after the last GC (younger), copy it to the to-space.
-    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
-                                                   &dummy);
-    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
-      to_space_live_bitmap_->Set(forward_address);
-    }
+  // Copy it to the to-space.
+  mirror::Object* forward_address = to_space_->AllocThreadUnsafe(self_,
+                                                                 object_size,
+                                                                 &bytes_allocated,
+                                                                 nullptr,
+                                                                 &dummy);
+
+  if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
+    to_space_live_bitmap_->Set(forward_address);
   }
   // If it's still null, attempt to use the fallback space.
   if (UNLIKELY(forward_address == nullptr)) {
@@ -596,9 +457,7 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
     obj->AssertReadBarrierState();
     forward_address->AssertReadBarrierState();
   }
-  DCHECK(to_space_->HasAddress(forward_address) ||
-         fallback_space_->HasAddress(forward_address) ||
-         (generational_ && promo_dest_space_->HasAddress(forward_address)))
+  DCHECK(to_space_->HasAddress(forward_address) || fallback_space_->HasAddress(forward_address))
       << forward_address << "\n" << GetHeap()->DumpSpaces();
   return forward_address;
 }
@@ -664,13 +523,10 @@ void SemiSpace::Sweep(bool swap_bitmaps) {
       RecordFree(alloc_space->Sweep(swap_bitmaps));
     }
   }
-  if (!is_large_object_space_immune_) {
-    SweepLargeObjects(swap_bitmaps);
-  }
+  SweepLargeObjects(swap_bitmaps);
 }

 void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
-  DCHECK(!is_large_object_space_immune_);
   space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
   if (los != nullptr) {
     TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
@@ -735,26 +591,8 @@ void SemiSpace::ScanObject(Object* obj) {
 // Scan anything that's on the mark stack.
 void SemiSpace::ProcessMarkStack() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
-  const bool collect_from_space_only = collect_from_space_only_;
-  if (collect_from_space_only) {
-    // If a bump pointer space only collection (and the promotion is
-    // enabled,) we delay the live-bitmap marking of promoted objects
-    // from MarkObject() until this function.
-    live_bitmap = promo_dest_space_->GetLiveBitmap();
-    DCHECK(live_bitmap != nullptr);
-    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
-    DCHECK(mark_bitmap != nullptr);
-    DCHECK_EQ(live_bitmap, mark_bitmap);
-  }
   while (!mark_stack_->IsEmpty()) {
     Object* obj = mark_stack_->PopBack();
-    if (collect_from_space_only && promo_dest_space_->HasAddress(obj)) {
-      // obj has just been promoted. Mark the live bitmap for it,
-      // which is delayed from MarkObject().
-      DCHECK(!live_bitmap->Test(obj));
-      live_bitmap->Set(obj);
-    }
     ScanObject(obj);
   }
 }
@@ -764,9 +602,7 @@ mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
   if (from_space_->HasAddress(obj)) {
     // Returns either the forwarding address or null.
     return GetForwardingAddressInFromSpace(obj);
-  } else if (collect_from_space_only_ ||
-             immune_spaces_.IsInImmuneRegion(obj) ||
-             to_space_->HasAddress(obj)) {
+  } else if (immune_spaces_.IsInImmuneRegion(obj) || to_space_->HasAddress(obj)) {
     return obj;  // Already forwarded, must be marked.
   }
   return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
@@ -817,35 +653,6 @@ void SemiSpace::FinishPhase() {
   from_space_ = nullptr;
   CHECK(mark_stack_->IsEmpty());
   mark_stack_->Reset();
-  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
-  if (generational_) {
-    // Decide whether to do a whole heap collection or a bump pointer
-    // only space collection at the next collection by updating
-    // collect_from_space_only_.
-    if (collect_from_space_only_) {
-      // Disable collect_from_space_only_ if the bytes promoted since the
-      // last whole heap collection or the large object bytes
-      // allocated exceeds a threshold.
-      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
-      bool bytes_promoted_threshold_exceeded =
-          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
-      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
-      uint64_t last_los_bytes_allocated =
-          large_object_bytes_allocated_at_last_whole_heap_collection_;
-      bool large_object_bytes_threshold_exceeded =
-          current_los_bytes_allocated >=
-          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
-      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
-        collect_from_space_only_ = false;
-      }
-    } else {
-      // Reset the counters.
-      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
-      large_object_bytes_allocated_at_last_whole_heap_collection_ =
-          los != nullptr ? los->GetBytesAllocated() : 0U;
-      collect_from_space_only_ = true;
-    }
-  }
   // Clear all of the spaces' mark bitmaps.
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   heap_->ClearMarkedObjects();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index f23d4167dd..9f2939f543 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -59,7 +59,7 @@ class SemiSpace : public GarbageCollector {
   // If true, use remembered sets in the generational mode.
   static constexpr bool kUseRememberedSet = true;

-  explicit SemiSpace(Heap* heap, bool generational = false, const std::string& name_prefix = "");
+  explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");

   ~SemiSpace() {}

@@ -76,7 +76,7 @@ class SemiSpace : public GarbageCollector {
     return kGcTypePartial;
   }
   CollectorType GetCollectorType() const override {
-    return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
+    return kCollectorTypeSS;
   }

   // Sets which space we will be copying objects to.
@@ -208,9 +208,6 @@ class SemiSpace : public GarbageCollector {
   // Every object inside the immune spaces is assumed to be marked.
   ImmuneSpaces immune_spaces_;

-  // If true, the large object space is immune.
-  bool is_large_object_space_immune_;
-
   // Destination and source spaces (can be any type of ContinuousMemMapAllocSpace which either has
   // a live bitmap or doesn't).
   space::ContinuousMemMapAllocSpace* to_space_;
@@ -222,35 +219,6 @@ class SemiSpace : public GarbageCollector {

   Thread* self_;

-  // When true, the generational mode (promotion and the bump pointer
-  // space only collection) is enabled. TODO: move these to a new file
-  // as a new garbage collector?
-  const bool generational_;
-
-  // Used for the generational mode. the end/top of the bump
-  // pointer space at the end of the last collection.
-  uint8_t* last_gc_to_space_end_;
-
-  // Used for the generational mode. During a collection, keeps track
-  // of how many bytes of objects have been copied so far from the
-  // bump pointer space to the non-moving space.
-  uint64_t bytes_promoted_;
-
-  // Used for the generational mode. Keeps track of how many bytes of
-  // objects have been copied so far from the bump pointer space to
-  // the non-moving space, since the last whole heap collection.
-  uint64_t bytes_promoted_since_last_whole_heap_collection_;
-
-  // Used for the generational mode. Keeps track of how many bytes of
-  // large objects were allocated at the last whole heap collection.
-  uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
-
-  // Used for generational mode. When true, we only collect the from_space_.
-  bool collect_from_space_only_;
-
-  // The space which we are promoting into, only used for GSS.
-  space::ContinuousMemMapAllocSpace* promo_dest_space_;
-
   // The space which we copy to if the to_space_ is full.
   space::ContinuousMemMapAllocSpace* fallback_space_;
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 4759fca46c..62527e2863 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -32,8 +32,6 @@ enum CollectorType {
   kCollectorTypeCMS,
   // Semi-space / mark-sweep hybrid, enables compaction.
   kCollectorTypeSS,
-  // A generational variant of kCollectorTypeSS.
-  kCollectorTypeGSS,
   // Heap trimming collector, doesn't do any actual collecting.
   kCollectorTypeHeapTrim,
   // A (mostly) concurrent copying collector.
@@ -69,8 +67,6 @@ static constexpr CollectorType kCollectorTypeDefault =
       kCollectorTypeCMS
 #elif ART_DEFAULT_GC_TYPE_IS_SS
       kCollectorTypeSS
-#elif ART_DEFAULT_GC_TYPE_IS_GSS
-      kCollectorTypeGSS
 #else
       kCollectorTypeCMS
 #error "ART default GC type must be set"
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 1c09b5c9bf..3b66fbcaa9 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -143,15 +143,9 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
     obj->AssertReadBarrierState();
   }
   if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
-    // (Note this if statement will be constant folded away for the
-    // fast-path quick entry points.) Because SetClass() has no write
-    // barrier, if a non-moving space allocation, we need a write
-    // barrier as the class pointer may point to the bump pointer
-    // space (where the class pointer is an "old-to-young" reference,
-    // though rare) under the GSS collector with the remembered set
-    // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
-    // cases because we don't directly allocate into the main alloc
-    // space (besides promotions) under the SS/GSS collector.
+    // (Note this if statement will be constant folded away for the fast-path quick entry
+    // points.) Because SetClass() has no write barrier, the GC may need a write barrier in the
+    // case the object is non movable and points to a recently allocated movable class.
     WriteBarrier::ForFieldWrite(obj, mirror::Object::ClassOffset(), klass);
   }
   pre_fence_visitor(obj, usable_size);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 987b239ac6..bbcb93c7ea 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -123,7 +123,6 @@ static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosallo
 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
 static const char* kNonMovingSpaceName = "non moving space";
 static const char* kZygoteSpaceName = "zygote space";
-static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
 static constexpr bool kGCALotMode = false;
 // GC alot mode uses a small allocation stack to stress test a lot of GC.
 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
@@ -335,9 +334,8 @@ Heap::Heap(size_t initial_size,
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
-  // We don't have hspace compaction enabled with GSS or CC.
-  if (foreground_collector_type_ == kCollectorTypeGSS ||
-      foreground_collector_type_ == kCollectorTypeCC) {
+  // We don't have hspace compaction enabled with CC.
+  if (foreground_collector_type_ == kCollectorTypeCC) {
     use_homogeneous_space_compaction_for_oom_ = false;
   }
   bool support_homogeneous_space_compaction =
@@ -350,9 +348,6 @@ Heap::Heap(size_t initial_size,
   bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
-  if (foreground_collector_type_ == kCollectorTypeGSS) {
-    separate_non_moving_space = false;
-  }

   // Requested begin for the alloc space, to follow the mapped image and oat files
   uint8_t* request_begin = nullptr;
@@ -360,8 +355,7 @@ Heap::Heap(size_t initial_size,
   size_t heap_reservation_size = 0u;
   if (separate_non_moving_space) {
     heap_reservation_size = non_moving_space_capacity;
-  } else if ((foreground_collector_type_ != kCollectorTypeCC) &&
-             (is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
+  } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
     heap_reservation_size = capacity_;
   }
   heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
@@ -446,14 +440,13 @@ Heap::Heap(size_t initial_size,
   // Attempt to create 2 mem maps at or after the requested begin.
   if (foreground_collector_type_ != kCollectorTypeCC) {
     ScopedTrace trace2("Create main mem map");
-    if (separate_non_moving_space ||
-        !(is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
+    if (separate_non_moving_space || !is_zygote) {
       main_mem_map_1 = MapAnonymousPreferredAddress(
           kMemMapSpaceName[0], request_begin, capacity_, &error_str);
     } else {
-      // If no separate non-moving space and we are the zygote or the collector type is GSS,
-      // the main space must come right after the image space to avoid a gap.
-      // This is required since we want the zygote space to be adjacent to the image space.
+      // If no separate non-moving space and we are the zygote, the main space must come right after
+      // the image space to avoid a gap. This is required since we want the zygote space to be
+      // adjacent to the image space.
       DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
       main_mem_map_1 = MemMap::MapAnonymous(
          kMemMapSpaceName[0],
@@ -506,8 +499,7 @@ Heap::Heap(size_t initial_size,
     region_space_ = space::RegionSpace::Create(
         kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
     AddSpace(region_space_);
-  } else if (IsMovingGc(foreground_collector_type_) &&
-             foreground_collector_type_ != kCollectorTypeGSS) {
+  } else if (IsMovingGc(foreground_collector_type_)) {
     // Create bump pointer spaces.
     // We only to create the bump pointer if the foreground collector is a compacting GC.
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
@@ -528,19 +520,7 @@ Heap::Heap(size_t initial_size,
     non_moving_space_ = main_space_;
     CHECK(!non_moving_space_->CanMoveObjects());
   }
-  if (foreground_collector_type_ == kCollectorTypeGSS) {
-    CHECK_EQ(foreground_collector_type_, background_collector_type_);
-    // Create bump pointer spaces instead of a backup space.
-    main_mem_map_2.Reset();
-    bump_pointer_space_ = space::BumpPointerSpace::Create(
-        "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
-    CHECK(bump_pointer_space_ != nullptr);
-    AddSpace(bump_pointer_space_);
-    temp_space_ = space::BumpPointerSpace::Create(
-        "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
-    CHECK(temp_space_ != nullptr);
-    AddSpace(temp_space_);
-  } else if (main_mem_map_2.IsValid()) {
+  if (main_mem_map_2.IsValid()) {
     const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
     main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
                                                          initial_size,
@@ -650,13 +630,10 @@ Heap::Heap(size_t initial_size,
     }
   }
   if (kMovingCollector) {
-    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
+    if (MayUseCollector(kCollectorTypeSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
-      // TODO: Clean this up.
-      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
-      semi_space_collector_ = new collector::SemiSpace(this, generational,
-                                                       generational ? "generational" : "");
+      semi_space_collector_ = new collector::SemiSpace(this);
       garbage_collectors_.push_back(semi_space_collector_);
     }
     if (MayUseCollector(kCollectorTypeCC)) {
@@ -689,10 +666,10 @@ Heap::Heap(size_t initial_size,
     }
   }
   if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
-      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
+      (is_zygote || separate_non_moving_space)) {
     // Check that there's no gap between the image space and the non moving space so that the
     // immune region won't break (eg. due to a large object allocated in the gap). This is only
-    // required when we're the zygote or using GSS.
+    // required when we're the zygote.
     // Space with smallest Begin().
     space::ImageSpace* first_space = nullptr;
     for (space::ImageSpace* space : boot_image_spaces_) {
@@ -795,8 +772,7 @@ void Heap::CreateMainMallocSpace(MemMap&& mem_map,
   if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
     // After the zygote we want this to be false if we don't have background compaction enabled so
     // that getting primitive array elements is faster.
-    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
-    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
+    can_move_objects = !HasZygoteSpace();
   }
   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
     RemoveRememberedSet(main_space_);
@@ -2280,8 +2256,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
         }
         break;
       }
-      case kCollectorTypeSS:  // Fall-through.
-      case kCollectorTypeGSS: {
+      case kCollectorTypeSS: {
        gc_plan_.push_back(collector::kGcTypeFull);
        if (use_tlab_) {
          ChangeAllocator(kAllocatorTypeTLAB);
@@ -2323,7 +2298,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
 class ZygoteCompactingCollector final : public collector::SemiSpace {
  public:
   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
-      : SemiSpace(heap, false, "zygote collector"),
+      : SemiSpace(heap, "zygote collector"),
        bin_live_bitmap_(nullptr),
        bin_mark_bitmap_(nullptr),
        is_running_on_memory_tool_(is_running_on_memory_tool) {}
@@ -2738,8 +2713,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
                current_allocator_ == kAllocatorTypeRegionTLAB);
      switch (collector_type_) {
        case kCollectorTypeSS:
-          // Fall-through.
-        case kCollectorTypeGSS:
          semi_space_collector_->SetFromSpace(bump_pointer_space_);
          semi_space_collector_->SetToSpace(temp_space_);
          semi_space_collector_->SetSwapSemiSpaces(true);
@@ -3365,8 +3338,7 @@ void Heap::ProcessCards(TimingLogger* timings,
      TimingLogger::ScopedTiming t2(name, timings);
      table->ProcessCards();
    } else if (use_rem_sets && rem_set != nullptr) {
-      DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
-          << static_cast<int>(collector_type_);
+      DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_);
      TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
      rem_set->ClearCards();
    } else if (process_alloc_space_cards) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5cf197869d..07f6a19dff 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -956,7 +956,6 @@ class Heap {
    return collector_type == kCollectorTypeCC ||
        collector_type == kCollectorTypeSS ||
-        collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCCBackground ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 4fe8027c2d..ca112972c2 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -113,7 +113,6 @@ static bool CollectorDoesAllowOrBroadcast() {
    case CollectorType::kCollectorTypeCMS:
    case CollectorType::kCollectorTypeCC:
    case CollectorType::kCollectorTypeSS:
-    case CollectorType::kCollectorTypeGSS:
      return true;

    default:
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 8172e1d729..413355ccb7 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -569,7 +569,6 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
  {
    // If not set, background collector type defaults to homogeneous compaction.
-    // If foreground is GSS, use GSS as background collector.
    // If not low memory mode, semispace otherwise.

    gc::CollectorType background_collector_type_;
@@ -585,12 +584,8 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
    }

    if (background_collector_type_ == gc::kCollectorTypeNone) {
-      if (collector_type_ != gc::kCollectorTypeGSS) {
-        background_collector_type_ = low_memory_mode_ ?
-            gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
-      } else {
-        background_collector_type_ = collector_type_;
-      }
+      background_collector_type_ = low_memory_mode_ ?
+          gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
    }

    args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run
index 067e0d0407..8004036868 100755
--- a/test/004-ThreadStress/run
+++ b/test/004-ThreadStress/run
@@ -15,29 +15,7 @@
 # limitations under the License.

 # Enable lock contention logging.
-if [[ "x$ART_DEFAULT_GC_TYPE" = xGSS ]]; then
-  # NonMovingAlloc operations fail an assertion with the Generational
-  # Semi-Space (GSS) collector (see b/72738921); disable them for now
-  # by explicitly assigning frequencies to operations when the GSS
-  # collector is used.
-  #
-  # Note: The trick to use command substitution to have comments within
-  # a multi-line command is from https://stackoverflow.com/a/12797512.
-  ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}" Main \
-    -oom:0.005 `# 1/200` \
-    -sigquit:0.095 `# 19/200` \
-    -alloc:0.225 `# 45/200` \
-    -largealloc:0.05 `# 10/200` \
-    -nonmovingalloc:0.0 `# 0/200` \
-    -stacktrace:0.1 `# 20/200` \
-    -exit:0.225 `# 45/200` \
-    -sleep:0.125 `# 25/200` \
-    -timedwait:0.05 `# 10/200` \
-    -wait:0.075 `# 15/200` \
-    -queuedwait:0.05 `# 10/200`
-else
-  ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
-fi
+${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
 return_status1=$?

 # Run locks-only mode with stack-dump lock profiling. Reduce the number of total operations from
diff --git a/test/knownfailures.json b/test/knownfailures.json
index b76bd5c33a..29074dd7fb 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -19,13 +19,6 @@
    },
    {
        "tests": "080-oom-fragmentation",
-        "description": ["Disable 080-oom-fragmentation for GSS GC due to lack of",
-                        "support for allocations larger than 32MB."],
-        "env_vars": {"ART_DEFAULT_GC_TYPE": "GSS"},
-        "bug": "http://b/33795328"
-    },
-    {
-        "tests": "080-oom-fragmentation",
        "description": ["Disable 080-oom-fragmentation for CC collector in debug mode",
                        "because of potential fragmentation caused by the region space's",
                        "cyclic region allocation (which is enabled in debug mode)."],
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 6e299bd7b6..fa5dfed5f5 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -124,16 +124,6 @@ target_config = {
            'ART_USE_READ_BARRIER' : 'false'
        }
    },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gss-gc' : {
-        'run-test' : ['--interpreter',
-                      '--optimizing',
-                      '--jit'],
-        'env' : {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
    # TODO: Consider removing this configuration when it is no longer used by
    # any continuous testing target (b/62611253), as the SS collector overlaps
    # with the CC collector, since both move objects.
@@ -147,17 +137,6 @@ target_config = {
            'ART_USE_READ_BARRIER' : 'false'
        }
    },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gss-gc-tlab' : {
-        'run-test' : ['--interpreter',
-                      '--optimizing',
-                      '--jit'],
-        'env' : {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_TLAB' : 'true',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
    'art-tracing' : {
        'run-test' : ['--trace']
    },
@@ -229,14 +208,6 @@ target_config = {
            'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
        }
    },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gtest-gss-gc': {
-        'make' : 'test-art-host-gtest',
-        'env' : {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
    # TODO: Consider removing this configuration when it is no longer used by
    # any continuous testing target (b/62611253), as the SS collector overlaps
    # with the CC collector, since both move objects.
@@ -248,15 +219,6 @@ target_config = {
            'ART_USE_READ_BARRIER' : 'false',
        }
    },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gtest-gss-gc-tlab': {
-        'make' : 'test-art-host-gtest',
-        'env': {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_TLAB' : 'true',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
    'art-gtest-debug-gc' : {
        'make' : 'test-art-host-gtest',
        'env' : {