Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h  |  2
-rw-r--r--  runtime/gc/allocator/rosalloc.h             |  2
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  | 46
-rw-r--r--  runtime/gc/collector/mark_compact.cc        |  4
-rw-r--r--  runtime/gc/collector_type.h                 |  2
-rw-r--r--  runtime/gc/gc_cause.cc                      |  1
-rw-r--r--  runtime/gc/gc_cause.h                       |  4
-rw-r--r--  runtime/gc/heap.cc                          | 60
-rw-r--r--  runtime/gc/scoped_gc_critical_section.cc    |  8
-rw-r--r--  runtime/gc/space/image_space.cc             | 18
-rw-r--r--  runtime/gc/space/large_object_space.cc      |  3
-rw-r--r--  runtime/gc/space/region_space-inl.h         | 22
-rw-r--r--  runtime/gc/space/region_space.cc            | 57
-rw-r--r--  runtime/gc/space/region_space.h             | 45
14 files changed, 172 insertions, 102 deletions
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 86266e2500..e77a5b8e39 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -80,7 +80,7 @@ class ReadBarrierTable {
}
// This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
private:
static constexpr uint64_t kHeapCapacity = 4ULL * GB; // low 4gb.
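Both kRegionSize constants shrink from 1 MB to 256 KB in this change, and the static_assert in concurrent_copying.cc keeps them in lock-step. Below is a minimal sketch, with placeholder names rather than the ART classes, of why the two values must agree: the read barrier table derives its per-region entry index from the same region size the region space uses to lay out objects.

// Minimal sketch (not the ART implementation) of mapping a heap address to a
// per-region read barrier table entry. If the two kRegionSize values diverged,
// the entry computed here would not match the region the object lives in.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

constexpr size_t KB = 1024;
constexpr size_t GB = 1024 * 1024 * 1024;
constexpr size_t kRegionSize = 256 * KB;       // must equal RegionSpace::kRegionSize
constexpr uint64_t kHeapCapacity = 4ULL * GB;  // low 4gb, as in read_barrier_table.h

struct ReadBarrierTableSketch {
  std::vector<uint8_t> entries = std::vector<uint8_t>(kHeapCapacity / kRegionSize, 0);

  // Entry for the region containing heap_offset (offset from the heap base).
  uint8_t* EntryFor(uint64_t heap_offset) {
    assert(heap_offset < kHeapCapacity);
    return &entries[heap_offset / kRegionSize];
  }
};

}  // namespace sketch

int main() {
  sketch::ReadBarrierTableSketch table;
  // Two addresses in the same 256 KB region share one entry.
  assert(table.EntryFor(0) == table.EntryFor(sketch::kRegionSize - 1));
  // The next region gets the next entry.
  assert(table.EntryFor(sketch::kRegionSize) == table.EntryFor(0) + 1);
  return 0;
}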
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 1fa2d1ac8a..562fc750ed 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -141,7 +141,7 @@ class RosAlloc {
template<bool kUseTail = true>
class SlotFreeList {
public:
- SlotFreeList() : head_(0U), tail_(0), size_(0) {}
+ SlotFreeList() : head_(0U), tail_(0), size_(0), padding_(0) {}
Slot* Head() const {
return reinterpret_cast<Slot*>(head_);
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8f9c187e1d..24ba52f0c5 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -72,12 +72,19 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
rb_mark_bit_stack_full_(false),
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
- is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
+ is_marking_(false),
+ is_active_(false),
+ is_asserting_to_space_invariant_(false),
region_space_bitmap_(nullptr),
- heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
+ heap_mark_bitmap_(nullptr),
+ live_stack_freeze_size_(0),
+ from_space_num_objects_at_first_pause_(0),
+ from_space_num_bytes_at_first_pause_(0),
+ mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
+ mark_from_read_barrier_measurements_(false),
rb_slow_path_ns_(0),
rb_slow_path_count_(0),
rb_slow_path_count_gc_(0),
@@ -87,6 +94,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
rb_slow_path_count_gc_total_(0),
rb_table_(heap_->GetReadBarrierTable()),
force_evacuate_all_(false),
+ gc_grays_immune_objects_(false),
immune_gray_stack_lock_("concurrent copying immune gray stack lock",
kMarkSweepMarkStackLock) {
static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
@@ -1644,10 +1652,10 @@ void ConcurrentCopying::ReclaimPhase() {
// Record freed objects.
TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
// Don't include thread-locals that are in the to-space.
- uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
- uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
- uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
- uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
+ const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
+ const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
+ const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
+ const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
@@ -1658,8 +1666,18 @@ void ConcurrentCopying::ReclaimPhase() {
}
CHECK_LE(to_objects, from_objects);
CHECK_LE(to_bytes, from_bytes);
- int64_t freed_bytes = from_bytes - to_bytes;
- int64_t freed_objects = from_objects - to_objects;
+ // cleared_bytes and cleared_objects may be greater than the from space equivalents since
+ // ClearFromSpace may clear empty unevac regions.
+ uint64_t cleared_bytes;
+ uint64_t cleared_objects;
+ {
+ TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ CHECK_GE(cleared_bytes, from_bytes);
+ CHECK_GE(cleared_objects, from_objects);
+ }
+ int64_t freed_bytes = cleared_bytes - to_bytes;
+ int64_t freed_objects = cleared_objects - to_objects;
if (kVerboseMode) {
LOG(INFO) << "RecordFree:"
<< " from_bytes=" << from_bytes << " from_objects=" << from_objects
@@ -1678,11 +1696,6 @@ void ConcurrentCopying::ReclaimPhase() {
}
{
- TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace();
- }
-
- {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Sweep(false);
SwapBitmaps();
@@ -2166,7 +2179,12 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
fall_back_to_non_moving = true;
to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
&non_moving_space_bytes_allocated, nullptr, &dummy);
- CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
+ if (UNLIKELY(to_ref == nullptr)) {
+ LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
+ << obj_size << " byte object in region type "
+ << region_space_->GetRegionType(from_ref);
+ LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
+ }
bytes_allocated = non_moving_space_bytes_allocated;
// Mark it in the mark bitmap.
accounting::ContinuousSpaceBitmap* mark_bitmap =
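The ReclaimPhase hunks above move ClearFromSpace before RecordFree and switch the freed totals from the raw from-space counts to what ClearFromSpace actually released, which can be larger because fully dead unevacuated regions are now cleared too. A minimal sketch of that accounting, using placeholder types rather than the ART code:

// Sketch of the reclaim-phase arithmetic after this change: freed counts come
// from what ClearFromSpace() released, which is at least the from-space total.
#include <cassert>
#include <cstdint>
#include <iostream>

struct ReclaimCounts {
  uint64_t from_bytes;     // bytes allocated in from-space regions
  uint64_t cleared_bytes;  // bytes released by ClearFromSpace (>= from_bytes)
  uint64_t to_bytes;       // bytes copied into to-space (<= from_bytes)
};

int64_t FreedBytes(const ReclaimCounts& c) {
  assert(c.to_bytes <= c.from_bytes);
  assert(c.cleared_bytes >= c.from_bytes);
  return static_cast<int64_t>(c.cleared_bytes - c.to_bytes);
}

int main() {
  // From-space held 10 MB, 4 MB survived the copy, and ClearFromSpace also
  // dropped 2 MB of completely dead unevac regions.
  ReclaimCounts c{10u << 20, 12u << 20, 4u << 20};
  std::cout << "freed bytes: " << FreedBytes(c) << "\n";  // 8 MB
  return 0;
}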
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 00393881e9..c61f69dad3 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -52,8 +52,12 @@ void MarkCompact::BindBitmaps() {
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
+ mark_stack_(nullptr),
space_(nullptr),
+ mark_bitmap_(nullptr),
collector_name_(name_),
+ bump_pointer_(nullptr),
+ live_objects_in_space_(0),
updating_references_(false) {}
void MarkCompact::RunPhases() {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index eef4fba20d..f0e1029f85 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -59,6 +59,8 @@ enum CollectorType {
kCollectorTypeHprof,
// Fake collector for installing/removing a system-weak holder.
kCollectorTypeAddRemoveSystemWeakHolder,
+ // Fake collector type for GetObjectsAllocated
+ kCollectorTypeGetObjectsAllocated,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 9e34346686..c1c1cad861 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -40,6 +40,7 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseJitCodeCache: return "JitCodeCache";
case kGcCauseAddRemoveSystemWeakHolder: return "SystemWeakHolder";
case kGcCauseHprof: return "Hprof";
+ case kGcCauseGetObjectsAllocated: return "ObjectsAllocated";
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 9b285b12a4..eb27547768 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -53,8 +53,10 @@ enum GcCause {
kGcCauseJitCodeCache,
// Not a real GC cause, used to add or remove system-weak holders.
kGcCauseAddRemoveSystemWeakHolder,
- // Not a real GC cause, used to hprof running in the middle of GC.
+ // Not a real GC cause, used to prevent hprof running in the middle of GC.
kGcCauseHprof,
+ // Not a real GC cause, used to prevent GetObjectsAllocated running in the middle of GC.
+ kGcCauseGetObjectsAllocated,
};
const char* PrettyCause(GcCause cause);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 12b9701845..f04bc896f1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -18,13 +18,13 @@
#include <limits>
#include <memory>
-#include <unwind.h> // For GC verification.
#include <vector>
#include "android-base/stringprintf.h"
#include "allocation_listener.h"
#include "art_field-inl.h"
+#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
@@ -1835,6 +1835,11 @@ void Heap::SetTargetHeapUtilization(float target) {
size_t Heap::GetObjectsAllocated() const {
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
+ // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
+ // us to suspend while we are doing SuspendAll. b/35232978
+ gc::ScopedGCCriticalSection gcs(Thread::Current(),
+ gc::kGcCauseGetObjectsAllocated,
+ gc::kCollectorTypeGetObjectsAllocated);
// Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
ScopedSuspendAll ssa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
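This hunk wraps GetObjectsAllocated in a GC critical section (with the new fake cause and collector type) so a collection cannot start while this thread is about to run SuspendAll, per b/35232978. The sketch below uses hypothetical names and a plain mutex, not the ART API, just to illustrate the RAII shape of the guard:

// Hypothetical sketch: a collector that also takes gc_mutex before starting a
// cycle cannot begin one while the guard below is alive.
#include <cstdint>
#include <mutex>

class GcCriticalSectionSketch {
 public:
  explicit GcCriticalSectionSketch(std::mutex& gc_mutex) : lock_(gc_mutex) {}
 private:
  std::lock_guard<std::mutex> lock_;
};

uint64_t CountObjectsSketch(std::mutex& gc_mutex, const uint64_t* per_space_counts, int n) {
  GcCriticalSectionSketch gcs(gc_mutex);  // analogous to ScopedGCCriticalSection
  // ... suspend-all and per-space inspection would happen here ...
  uint64_t total = 0;
  for (int i = 0; i < n; ++i) {
    total += per_space_counts[i];
  }
  return total;
}

int main() {
  std::mutex gc_mutex;
  const uint64_t counts[] = {120, 4, 7};
  return CountObjectsSketch(gc_mutex, counts, 3) == 131 ? 0 : 1;
}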
@@ -3559,11 +3564,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
collector::GcType gc_type = collector_ran->GetGcType();
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
- const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
- const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
+ const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
+ const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
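The hunk above removes the temporary 2.5 MB floor on the adjusted free bounds, leaving them purely multiplier-scaled. A sketch of the surrounding growth computation follows; the clamping of the new target to the adjusted bounds is assumed from context rather than shown in this diff:

// Sketch of heap growth after a non-sticky GC. The clamp to the adjusted
// min/max free bounds is an assumption about the surrounding code.
#include <algorithm>
#include <cstdint>
#include <iostream>

uint64_t GrowTargetSketch(uint64_t bytes_allocated,
                          double target_utilization,  // e.g. 0.75
                          double multiplier,          // foreground growth multiplier
                          uint64_t min_free, uint64_t max_free) {
  const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free * multiplier);
  const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free * multiplier);
  // Grow so that bytes_allocated / target_size approaches target_utilization...
  const uint64_t delta =
      static_cast<uint64_t>(bytes_allocated / target_utilization) - bytes_allocated;
  uint64_t target = bytes_allocated + static_cast<uint64_t>(delta * multiplier);
  // ...but keep the free headroom within the adjusted bounds (assumed).
  target = std::min(target, bytes_allocated + adjusted_max_free);
  target = std::max(target, bytes_allocated + adjusted_min_free);
  return target;
}

int main() {
  // 64 MB live at 75% target utilization with a 2x foreground multiplier.
  std::cout << GrowTargetSketch(64ull << 20, 0.75, 2.0, 512 << 10, 8 << 20) << "\n";
  return 0;
}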
@@ -4063,42 +4065,6 @@ void Heap::BroadcastForNewAllocationRecords() const {
}
}
-// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
-class StackCrawlState {
- public:
- StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
- : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
- }
- size_t GetFrameCount() const {
- return frame_count_;
- }
- static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
- auto* const state = reinterpret_cast<StackCrawlState*>(arg);
- const uintptr_t ip = _Unwind_GetIP(context);
- // The first stack frame is get_backtrace itself. Skip it.
- if (ip != 0 && state->skip_count_ > 0) {
- --state->skip_count_;
- return _URC_NO_REASON;
- }
- // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
- state->frames_[state->frame_count_] = ip;
- state->frame_count_++;
- return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
- }
-
- private:
- uintptr_t* const frames_;
- size_t frame_count_;
- const size_t max_depth_;
- size_t skip_count_;
-};
-
-static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
- StackCrawlState state(frames, max_depth, 0u);
- _Unwind_Backtrace(&StackCrawlState::Callback, &state);
- return state.GetFrameCount();
-}
-
void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
auto* const runtime = Runtime::Current();
if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
@@ -4107,13 +4073,9 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
bool new_backtrace = false;
{
static constexpr size_t kMaxFrames = 16u;
- uintptr_t backtrace[kMaxFrames];
- const size_t frames = get_backtrace(backtrace, kMaxFrames);
- uint64_t hash = 0;
- for (size_t i = 0; i < frames; ++i) {
- hash = hash * 2654435761 + backtrace[i];
- hash += (hash >> 13) ^ (hash << 6);
- }
+ FixedSizeBacktrace<kMaxFrames> backtrace;
+ backtrace.Collect(/* skip_frames */ 2);
+ uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
if (new_backtrace) {
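The open-coded _Unwind_Backtrace walk is replaced by the FixedSizeBacktrace helper from backtrace_helper.h, collected with two skipped frames and hashed via Hash(). The removed code's hash step can be reproduced as below; the frame values are placeholders, not a real unwind:

// Sketch of the frame hash the removed code computed over the collected
// program counters; fake frame values stand in for a real backtrace.
#include <cstddef>
#include <cstdint>
#include <iostream>

uint64_t HashFrames(const uintptr_t* frames, size_t count) {
  uint64_t hash = 0;
  for (size_t i = 0; i < count; ++i) {
    hash = hash * 2654435761 + frames[i];  // multiplicative step from the removed code
    hash += (hash >> 13) ^ (hash << 6);    // extra mixing, also from the removed code
  }
  return hash;
}

int main() {
  const uintptr_t fake_frames[] = {0x1000, 0x2040, 0x30c0, 0x4f00};
  std::cout << std::hex << HashFrames(fake_frames, 4) << "\n";
  return 0;
}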
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index b5eb9795de..f937d2c778 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -29,10 +29,14 @@ ScopedGCCriticalSection::ScopedGCCriticalSection(Thread* self,
CollectorType collector_type)
: self_(self) {
Runtime::Current()->GetHeap()->StartGC(self, cause, collector_type);
- old_cause_ = self->StartAssertNoThreadSuspension("ScopedGCCriticalSection");
+ if (self != nullptr) {
+ old_cause_ = self->StartAssertNoThreadSuspension("ScopedGCCriticalSection");
+ }
}
ScopedGCCriticalSection::~ScopedGCCriticalSection() {
- self_->EndAssertNoThreadSuspension(old_cause_);
+ if (self_ != nullptr) {
+ self_->EndAssertNoThreadSuspension(old_cause_);
+ }
Runtime::Current()->GetHeap()->FinishGC(self_, collector::kGcTypeNone);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 010ef1156a..662efe2c8d 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -886,7 +886,7 @@ class ImageSpaceLoader {
explicit FixupObjectAdapter(Args... args) : FixupVisitor(args...) {}
template <typename T>
- T* operator()(T* obj) const {
+ T* operator()(T* obj, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
return ForwardObject(obj);
}
};
@@ -976,7 +976,8 @@ class ImageSpaceLoader {
ForwardObject(obj));
}
- void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(mirror::Object* obj) const
+ NO_THREAD_SAFETY_ANALYSIS {
if (visited_->Test(obj)) {
// Already visited.
return;
@@ -1259,17 +1260,18 @@ class ImageSpaceLoader {
}
}
}
- ArtField** fields = dex_cache->GetResolvedFields();
+ mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields();
if (fields != nullptr) {
- ArtField** new_fields = fixup_adapter.ForwardObject(fields);
+ mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields);
if (fields != new_fields) {
dex_cache->SetResolvedFields(new_fields);
}
for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
- ArtField* orig = mirror::DexCache::GetElementPtrSize(new_fields, j, pointer_size);
- ArtField* copy = fixup_adapter.ForwardObject(orig);
- if (orig != copy) {
- mirror::DexCache::SetElementPtrSize(new_fields, j, copy, pointer_size);
+ mirror::FieldDexCachePair orig =
+ mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size);
+ mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index);
+ if (orig.object != copy.object) {
+ mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size);
}
}
}
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 4c6b5bfadd..3988073de8 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,13 +16,12 @@
#include "large_object_space.h"
-#include <valgrind.h>
#include <memory>
-#include <memcheck/memcheck.h>
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
+#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "image.h"
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5d282f1ae9..5809027235 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -78,7 +78,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
r->SetNewlyAllocated();
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
@@ -91,7 +91,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
@@ -233,14 +233,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
continue;
}
if (r->IsLarge()) {
- if (r->LiveBytes() > 0) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
- DCHECK(obj->GetClass() != nullptr);
- callback(obj, arg);
- }
+ // Avoid visiting dead large objects since they may contain dangling pointers to the
+ // from-space.
+ DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
+ DCHECK(obj->GetClass() != nullptr);
+ callback(obj, arg);
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
@@ -314,13 +312,13 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK_EQ(left + num_regs, right);
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
- first_reg->UnfreeLarge(time_);
+ first_reg->UnfreeLarge(this, time_);
++num_non_free_regions_;
first_reg->SetTop(first_reg->Begin() + num_bytes);
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
- regions_[p].UnfreeLargeTail(time_);
+ regions_[p].UnfreeLargeTail(this, time_);
++num_non_free_regions_;
}
*bytes_allocated = num_bytes;
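AllocLarge claims a contiguous run of regions, marking the head region large and the rest large tails; with this change every Unfree* variant also passes the space so it can raise non_free_region_index_limit_. A simplified sketch of the run-claiming arithmetic (not the ART Region class):

// Sketch of how a large allocation claims a contiguous run of regions: the
// head becomes "large" and the remaining regions become "large tail".
#include <cstddef>
#include <iostream>
#include <vector>

constexpr size_t KB = 1024;
constexpr size_t kRegionSize = 256 * KB;

enum class RegionState { kFree, kLarge, kLargeTail };

size_t ClaimLargeRun(std::vector<RegionState>& regions, size_t left, size_t num_bytes) {
  const size_t num_regs = (num_bytes + kRegionSize - 1) / kRegionSize;  // RoundUp / kRegionSize
  regions.at(left) = RegionState::kLarge;                  // like UnfreeLarge
  for (size_t p = left + 1; p < left + num_regs; ++p) {
    regions.at(p) = RegionState::kLargeTail;               // like UnfreeLargeTail
  }
  return num_regs;
}

int main() {
  std::vector<RegionState> regions(16, RegionState::kFree);
  // A 600 KB object needs ceil(600 KB / 256 KB) = 3 regions.
  std::cout << ClaimLargeRun(regions, 4, 600 * KB) << " regions claimed\n";
  return 0;
}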
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 321524cbbd..1ad48438ba 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -86,6 +86,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
num_regions_ = mem_map_size / kRegionSize;
num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
+ non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -192,7 +193,11 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
MutexLock mu(Thread::Current(), region_lock_);
size_t num_expected_large_tails = 0;
bool prev_large_evacuated = false;
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
Region* r = &regions_[i];
RegionState state = r->State();
RegionType type = r->Type();
@@ -236,18 +241,50 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
}
}
}
+ DCHECK_EQ(num_expected_large_tails, 0U);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
-void RegionSpace::ClearFromSpace() {
+void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
+ DCHECK(cleared_bytes != nullptr);
+ DCHECK(cleared_objects != nullptr);
+ *cleared_bytes = 0;
+ *cleared_objects = 0;
MutexLock mu(Thread::Current(), region_lock_);
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ size_t new_non_free_region_index_limit = 0;
+ for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = &regions_[i];
if (r->IsInFromSpace()) {
- r->Clear();
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
--num_non_free_regions_;
+ r->Clear();
} else if (r->IsInUnevacFromSpace()) {
+ if (r->LiveBytes() == 0) {
+ // Special case for 0 live bytes, this means all of the objects in the region are dead and
+ // we can clear it. This is important for large objects since we must not visit dead ones in
+ // RegionSpace::Walk because they may contain dangling references to invalid objects.
+ // It is also better to clear these regions now instead of at the end of the next GC to
+ // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
+ // live percent evacuation logic.
+ size_t free_regions = 1;
+ // Also release RAM for large tails.
+ while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
+ DCHECK(r->IsLarge());
+ regions_[i + free_regions].Clear();
+ ++free_regions;
+ }
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ num_non_free_regions_ -= free_regions;
+ r->Clear();
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ continue;
+ }
size_t full_count = 0;
while (r->IsInUnevacFromSpace()) {
Region* const cur = &regions_[i + full_count];
@@ -255,6 +292,7 @@ void RegionSpace::ClearFromSpace() {
cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
break;
}
+ DCHECK(cur->IsInUnevacFromSpace());
if (full_count != 0) {
cur->SetUnevacFromSpaceAsToSpace();
}
@@ -271,7 +309,15 @@ void RegionSpace::ClearFromSpace() {
i += full_count - 1;
}
}
+ // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
+ Region* last_checked_region = &regions_[i];
+ if (!last_checked_region->IsFree()) {
+ new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
+ last_checked_region->Idx() + 1);
+ }
}
+ // Update non_free_region_index_limit_.
+ SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
}
@@ -324,6 +370,7 @@ void RegionSpace::Clear() {
}
r->Clear();
}
+ SetNonFreeRegionLimit(0);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
@@ -390,7 +437,7 @@ bool RegionSpace::AllocNewTlab(Thread* self) {
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
r->SetNewlyAllocated();
r->SetTop(r->End());
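The new zero-live-bytes branch in ClearFromSpace releases fully dead unevacuated regions, and the large tails that follow them, as soon as the GC finishes, instead of waiting for the next cycle's live-percent evacuation. That is also what lets RegionSpace::WalkInternal assert rather than skip dead large objects. A simplified sketch of the bookkeeping (placeholder types, single pass over the regions):

// Sketch of the zero-live-bytes path: a fully dead unevac region is cleared
// immediately, together with any large tails that belong to it.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct RegionSketch {
  size_t bytes_allocated;
  size_t live_bytes;
  bool unevac_from_space;
  bool large_tail;
  void Clear() { *this = RegionSketch{}; }
};

uint64_t ClearDeadUnevacRegions(std::vector<RegionSketch>& regions) {
  uint64_t cleared_bytes = 0;
  for (size_t i = 0; i < regions.size(); ++i) {
    RegionSketch& r = regions[i];
    if (!r.unevac_from_space || r.live_bytes != 0) {
      continue;  // only the "all objects dead" case is handled here
    }
    size_t free_regions = 1;
    while (i + free_regions < regions.size() && regions[i + free_regions].large_tail) {
      regions[i + free_regions].Clear();  // release the tails of a dead large object too
      ++free_regions;
    }
    cleared_bytes += r.bytes_allocated;
    r.Clear();
    i += free_regions - 1;
  }
  return cleared_bytes;
}

int main() {
  std::vector<RegionSketch> regions(4);
  regions[0] = {256 * 1024, 0, true, false};     // dead large head
  regions[1] = {0, 0, false, true};              // its tail
  regions[2] = {256 * 1024, 4096, true, false};  // still has live data, untouched
  std::cout << ClearDeadUnevacRegions(regions) << " bytes cleared\n";
  return 0;
}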
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index da36f5c55d..253792993b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -167,7 +167,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
// The region size.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
bool IsInFromSpace(mirror::Object* ref) {
if (HasAddress(ref)) {
@@ -215,7 +215,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace() REQUIRES(!region_lock_);
+ void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
Region* reg = RefToRegionUnlocked(ref);
@@ -308,25 +308,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
// Given a free region, declare it non-free (allocated).
- void Unfree(uint32_t alloc_time) {
+ void Unfree(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateAllocated;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLarge(uint32_t alloc_time) {
+ void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLarge;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLargeTail(uint32_t alloc_time) {
+ void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLargeTail;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
void SetNewlyAllocated() {
@@ -342,7 +348,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
bool IsLarge() const {
bool is_large = state_ == RegionState::kRegionStateLarge;
if (is_large) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
}
return is_large;
}
@@ -429,7 +435,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t ObjectsAllocated() const {
if (IsLarge()) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
return 1;
} else if (IsLargeTail()) {
@@ -520,6 +526,27 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
mirror::Object* GetNextObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
+ DCHECK_LT(new_non_free_region_index, num_regions_);
+ non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
+ new_non_free_region_index + 1);
+ VerifyNonFreeRegionLimit();
+ }
+
+ void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
+ DCHECK_LE(new_non_free_region_index_limit, num_regions_);
+ non_free_region_index_limit_ = new_non_free_region_index_limit;
+ VerifyNonFreeRegionLimit();
+ }
+
+ void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
+ if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
+ for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
+ CHECK(regions_[i].IsFree());
+ }
+ }
+ }
+
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
@@ -527,6 +554,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t num_non_free_regions_; // The number of non-free regions in this space.
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
+ // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
+ // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
+ // true.
+ size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
Region* current_region_; // The region that's being allocated currently.
Region* evac_region_; // The region that's being evacuated to currently.
Region full_region_; // The dummy/sentinel region that looks full.
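non_free_region_index_limit_ exists so SetFromSpace and ClearFromSpace can stop scanning at the first index past which every region is known to be free: each Unfree* raises the limit, ClearFromSpace recomputes it, and Clear resets it to zero. A single-threaded sketch of the invariant (the real field is guarded by region_lock_):

// Sketch of the invariant: every region at or past the limit is free, so
// scans only need to visit indices [0, limit).
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

class RegionIndexLimitSketch {
 public:
  explicit RegionIndexLimitSketch(size_t num_regions) : free_(num_regions, true) {}

  void Unfree(size_t idx) {                    // like Region::Unfree*
    free_.at(idx) = false;
    limit_ = std::max(limit_, idx + 1);        // AdjustNonFreeRegionLimit
  }

  void ClearAll() {                            // like RegionSpace::Clear
    std::fill(free_.begin(), free_.end(), true);
    limit_ = 0;                                // SetNonFreeRegionLimit(0)
  }

  size_t ScanLimit() const { return limit_; }  // iterate regions [0, limit_)

  void Verify() const {                        // like VerifyNonFreeRegionLimit
    for (size_t i = limit_; i < free_.size(); ++i) {
      assert(free_[i]);
    }
  }

 private:
  std::vector<bool> free_;
  size_t limit_ = 0;
};

int main() {
  RegionIndexLimitSketch space(1024);
  space.Unfree(3);
  space.Unfree(17);
  space.Verify();
  assert(space.ScanLimit() == 18);  // only the first 18 regions need to be scanned
  space.ClearAll();
  space.Verify();
  return 0;
}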