author Mathieu Chartier <mathieuc@google.com> 2013-12-10 01:26:49 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2013-12-10 01:26:49 +0000
commit fa365d2fa097810f31d6cb1b1ef415636bc63af8 (patch)
tree 048b4ae81fb0a5bd31bfe1ad6747dc12f753ca9e
parent 5e99ba779afccac163801c2fcd82406194887a9e (diff)
parent 7bf82af01ec250a4ed2cee03a0e51d179fa820f9 (diff)
Merge "Fix memory usage regression and clean up collector changing code."
-rw-r--r--  runtime/gc/collector_type.h |  2
-rw-r--r--  runtime/gc/heap-inl.h       |  8
-rw-r--r--  runtime/gc/heap.cc          | 82
-rw-r--r--  runtime/gc/heap.h           |  8
4 files changed, 65 insertions, 35 deletions
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index ba3cad6972..06395cf687 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -24,6 +24,8 @@ namespace gc {
// Which types of collections are able to be performed.
enum CollectorType {
+ // No collector selected.
+ kCollectorTypeNone,
// Non concurrent mark-sweep.
kCollectorTypeMS,
// Concurrent mark-sweep.
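For orientation, here is a sketch of how the CollectorType enum reads after this hunk. Only kCollectorTypeNone and kCollectorTypeMS are visible above; the kCollectorTypeCMS and kCollectorTypeSS entries are assumed from their use in heap.cc below, so their position and comments are illustrative, not verbatim.

    // Sketch of runtime/gc/collector_type.h after the change (reconstruction, not verbatim).
    namespace art {
    namespace gc {

    enum CollectorType {
      // No collector selected.
      kCollectorTypeNone,
      // Non concurrent mark-sweep.
      kCollectorTypeMS,
      // Concurrent mark-sweep.
      kCollectorTypeCMS,  // assumed from its use in heap.cc
      // Semi-space collector with bump pointer allocation.
      kCollectorTypeSS,   // assumed from its use in heap.cc
    };

    }  // namespace gc
    }  // namespace art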
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 6e9b04ada1..08ab6b8c6a 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -93,7 +93,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
} else {
DCHECK(!Dbg::IsAllocTrackingEnabled());
}
- if (AllocatorHasConcurrentGC(allocator)) {
+ if (concurrent_gc_) {
CheckConcurrentGC(self, new_num_bytes_allocated, obj);
}
if (kIsDebugBuild) {
@@ -199,9 +199,11 @@ inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
if (!concurrent_gc_) {
if (!grow) {
return true;
- } else {
- max_allowed_footprint_ = new_footprint;
}
+ // TODO: Grow for allocation is racy, fix it.
+ VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
+ << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
+ max_allowed_footprint_ = new_footprint;
}
}
return false;
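Read in isolation, the non-concurrent branch above now grows the footprint unconditionally once growth is allowed, instead of doing so only in an else branch. The following is a compilable sketch of that logic; the footprint and growth-limit checks surrounding the visible hunk, the field values, and the HeapSketch wrapper are assumptions for illustration, and the VLOG(heap) call is elided.

    #include <cstddef>

    // Simplified stand-in for the tail of Heap::IsOutOfMemoryOnAllocation after this hunk.
    struct HeapSketch {
      std::size_t max_allowed_footprint_ = 64u * 1024 * 1024;  // illustrative value
      std::size_t growth_limit_ = 256u * 1024 * 1024;          // illustrative value
      std::size_t num_bytes_allocated_ = 0;
      bool concurrent_gc_ = false;

      // Returns true when the allocation should be treated as out-of-memory.
      bool IsOutOfMemoryOnAllocation(std::size_t alloc_size, bool grow) {
        std::size_t new_footprint = num_bytes_allocated_ + alloc_size;
        if (new_footprint <= max_allowed_footprint_) {
          return false;  // Fits within the current footprint.
        }
        if (new_footprint > growth_limit_) {
          return true;   // Over the hard limit.
        }
        if (!concurrent_gc_) {
          if (!grow) {
            return true;  // Growth not permitted on this call.
          }
          // After the change there is no else branch: once growth is allowed the
          // footprint is always raised (the real code logs this via VLOG(heap)).
          max_allowed_footprint_ = new_footprint;
        }
        return false;
      }
    };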
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1e3689bbda..f92a8212f0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -75,12 +75,13 @@ static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, size_t capacity, const std::string& image_file_name,
- CollectorType collector_type, size_t parallel_gc_threads, size_t conc_gc_threads,
- bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
- bool ignore_max_footprint)
+ CollectorType post_zygote_collector_type, size_t parallel_gc_threads,
+ size_t conc_gc_threads, bool low_memory_mode, size_t long_pause_log_threshold,
+ size_t long_gc_log_threshold, bool ignore_max_footprint)
: non_moving_space_(nullptr),
- concurrent_gc_(collector_type == gc::kCollectorTypeCMS),
- collector_type_(collector_type),
+ concurrent_gc_(false),
+ collector_type_(kCollectorTypeNone),
+ post_zygote_collector_type_(post_zygote_collector_type),
parallel_gc_threads_(parallel_gc_threads),
conc_gc_threads_(conc_gc_threads),
low_memory_mode_(low_memory_mode),
@@ -109,8 +110,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
last_process_state_id_(NULL),
// Initially assume we perceive jank in case the process state is never updated.
process_state_(kProcessStateJankPerceptible),
- concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes
- : std::numeric_limits<size_t>::max()),
+ concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
@@ -155,8 +155,12 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
if (!Runtime::Current()->IsZygote()) {
- ChangeCollector(collector_type_);
+ ChangeCollector(post_zygote_collector_type_);
+ } else {
+ // We are the zygote, use bump pointer allocation + semi space collector.
+ ChangeCollector(kCollectorTypeSS);
}
+
live_bitmap_.reset(new accounting::HeapBitmap(this));
mark_bitmap_.reset(new accounting::HeapBitmap(this));
// Requested begin for the alloc space, to follow the mapped image and oat files
@@ -262,9 +266,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
}
- gc_plan_.push_back(collector::kGcTypeSticky);
- gc_plan_.push_back(collector::kGcTypePartial);
- gc_plan_.push_back(collector::kGcTypeFull);
if (kMovingCollector) {
// TODO: Clean this up.
semi_space_collector_ = new collector::SemiSpace(this);
@@ -1085,22 +1086,46 @@ void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
void Heap::CollectGarbage(bool clear_soft_references) {
// Even if we waited for a GC we still need to do another GC since weaks allocated during the
// last GC will not have necessarily been cleared.
- CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
+ CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
}
void Heap::ChangeCollector(CollectorType collector_type) {
- switch (collector_type) {
- case kCollectorTypeSS: {
- ChangeAllocator(kAllocatorTypeBumpPointer);
- break;
+ // TODO: Only do this with all mutators suspended to avoid races.
+ if (collector_type != collector_type_) {
+ collector_type_ = collector_type;
+ gc_plan_.clear();
+ switch (collector_type_) {
+ case kCollectorTypeSS: {
+ concurrent_gc_ = false;
+ gc_plan_.push_back(collector::kGcTypeFull);
+ ChangeAllocator(kAllocatorTypeBumpPointer);
+ break;
+ }
+ case kCollectorTypeMS: {
+ concurrent_gc_ = false;
+ gc_plan_.push_back(collector::kGcTypeSticky);
+ gc_plan_.push_back(collector::kGcTypePartial);
+ gc_plan_.push_back(collector::kGcTypeFull);
+ ChangeAllocator(kAllocatorTypeFreeList);
+ break;
+ }
+ case kCollectorTypeCMS: {
+ concurrent_gc_ = true;
+ gc_plan_.push_back(collector::kGcTypeSticky);
+ gc_plan_.push_back(collector::kGcTypePartial);
+ gc_plan_.push_back(collector::kGcTypeFull);
+ ChangeAllocator(kAllocatorTypeFreeList);
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unimplemented";
+ }
}
- case kCollectorTypeMS:
- // Fall-through.
- case kCollectorTypeCMS: {
- ChangeAllocator(kAllocatorTypeFreeList);
- break;
- default:
- LOG(FATAL) << "Unimplemented";
+ if (concurrent_gc_) {
+ concurrent_start_bytes_ =
+ std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
+ } else {
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
}
}
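The rewritten ChangeCollector boils down to a fixed policy per collector type: which allocator is used, what gc_plan_ contains, and whether a concurrent-start threshold is armed. Below is a self-contained sketch of that policy. The enum values and the 128 KB kMinConcurrentRemainingBytes come from the diff; the Policy struct, PolicyFor helper, and main are illustrative scaffolding, not ART API.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <limits>
    #include <vector>

    enum CollectorType { kCollectorTypeNone, kCollectorTypeMS, kCollectorTypeCMS, kCollectorTypeSS };
    enum GcType { kGcTypeSticky, kGcTypePartial, kGcTypeFull };

    static constexpr std::size_t kMinConcurrentRemainingBytes = 128 * 1024;

    struct Policy {
      bool concurrent_gc;
      std::vector<GcType> gc_plan;
    };

    // Mirrors the switch in ChangeCollector: SS is non-concurrent with full GCs only
    // (bump pointer allocator); MS and CMS keep the sticky/partial/full plan
    // (free-list allocator), and only CMS is concurrent.
    Policy PolicyFor(CollectorType type) {
      switch (type) {
        case kCollectorTypeSS:
          return {false, {kGcTypeFull}};
        case kCollectorTypeMS:
          return {false, {kGcTypeSticky, kGcTypePartial, kGcTypeFull}};
        case kCollectorTypeCMS:
          return {true, {kGcTypeSticky, kGcTypePartial, kGcTypeFull}};
        default:
          // Includes kCollectorTypeNone; the real code hits LOG(FATAL) << "Unimplemented" here.
          return {false, {}};
      }
    }

    // Concurrent GC is requested kMinConcurrentRemainingBytes before the footprint is hit;
    // non-concurrent collectors effectively disable the trigger.
    std::size_t ConcurrentStartBytes(bool concurrent_gc, std::size_t max_allowed_footprint) {
      if (!concurrent_gc) {
        return std::numeric_limits<std::size_t>::max();
      }
      return std::max(max_allowed_footprint, kMinConcurrentRemainingBytes) -
             kMinConcurrentRemainingBytes;
    }

    int main() {
      // Example: with a 16 MiB footprint and CMS, a concurrent GC is requested once
      // allocations reach 16 MiB - 128 KiB.
      const std::size_t footprint = 16 * 1024 * 1024;
      Policy p = PolicyFor(kCollectorTypeCMS);
      std::printf("concurrent=%d start=%zu\n", p.concurrent_gc,
                  ConcurrentStartBytes(p.concurrent_gc, footprint));
      return 0;
    }

The allocator side of the table (bump pointer for SS, free list for MS/CMS) corresponds to the ChangeAllocator calls in the hunk above.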
@@ -1119,8 +1144,8 @@ void Heap::PreZygoteFork() {
// Trim the pages at the end of the non moving space.
non_moving_space_->Trim();
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
- // Change the allocator to the post zygote one.
- ChangeCollector(collector_type_);
+ // Change the collector to the post zygote one.
+ ChangeCollector(post_zygote_collector_type_);
// TODO: Delete bump_pointer_space_ and temp_pointer_space_?
if (semi_space_collector_ != nullptr) {
// Create a new bump pointer space which we will compact into.
@@ -1295,7 +1320,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
} else {
LOG(FATAL) << "Invalid current allocator " << current_allocator_;
}
- CHECK(collector != NULL)
+ CHECK(collector != nullptr)
<< "Could not find garbage collector with concurrent=" << concurrent_gc_
<< " and type=" << gc_type;
@@ -1876,7 +1901,7 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
}
if (!ignore_max_footprint_) {
SetIdealFootprint(target_size);
- if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
+ if (concurrent_gc_) {
// Calculate when to perform the next ConcurrentGC.
// Calculate the estimated GC duration.
double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
@@ -1962,7 +1987,6 @@ void Heap::EnqueueClearedReferences() {
void Heap::RequestConcurrentGC(Thread* self) {
// Make sure that we can do a concurrent GC.
Runtime* runtime = Runtime::Current();
- DCHECK(concurrent_gc_);
if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
self->IsHandlingStackOverflow()) {
return;
@@ -2096,7 +2120,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
// finalizers released native managed allocations.
UpdateMaxNativeFootprint();
} else if (!IsGCRequestPending()) {
- if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
+ if (concurrent_gc_) {
RequestConcurrentGC(self);
} else {
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
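Taken together with the constructor hunk above, the zygote now always starts on the semi-space collector and only switches to the requested collector at fork time. The following is a minimal, runnable simulation of that lifecycle; HeapSketch and its output are stand-ins, not the real art::gc::Heap.

    #include <cstdio>

    enum CollectorType { kCollectorTypeNone, kCollectorTypeMS, kCollectorTypeCMS, kCollectorTypeSS };

    class HeapSketch {
     public:
      HeapSketch(CollectorType post_zygote_collector_type, bool is_zygote)
          : collector_type_(kCollectorTypeNone),
            post_zygote_collector_type_(post_zygote_collector_type) {
        // The zygote starts on bump-pointer allocation + semi-space collection;
        // a regular runtime goes straight to the requested collector.
        ChangeCollector(is_zygote ? kCollectorTypeSS : post_zygote_collector_type_);
      }

      void PreZygoteFork() {
        // At fork time the app process switches to the post-zygote collector.
        ChangeCollector(post_zygote_collector_type_);
      }

      CollectorType collector_type() const { return collector_type_; }

     private:
      void ChangeCollector(CollectorType type) {
        if (type != collector_type_) {
          collector_type_ = type;
          // The real code also rebuilds gc_plan_, switches the allocator and
          // recomputes concurrent_start_bytes_ here (see the ChangeCollector hunk above).
        }
      }

      CollectorType collector_type_;
      const CollectorType post_zygote_collector_type_;
    };

    int main() {
      HeapSketch zygote_heap(kCollectorTypeCMS, /*is_zygote=*/true);
      std::printf("zygote collector: %d\n", zygote_heap.collector_type());  // kCollectorTypeSS
      zygote_heap.PreZygoteFork();
      std::printf("app collector:    %d\n", zygote_heap.collector_type());  // kCollectorTypeCMS
      return 0;
    }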
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 046fbac319..3bff3f9704 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -644,12 +644,14 @@ class Heap {
// A mod-union table remembers all of the references from the it's space to other spaces.
SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;
- // What kind of concurrency behavior is the runtime after? True for concurrent mark sweep GC,
- // false for stop-the-world mark sweep.
- const bool concurrent_gc_;
+ // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
+ // sweep GC, false for other GC types.
+ bool concurrent_gc_;
// The current collector type.
CollectorType collector_type_;
+ // Which collector we will switch to after zygote fork.
+ CollectorType post_zygote_collector_type_;
// How many GC threads we may use for paused parts of garbage collection.
const size_t parallel_gc_threads_;
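One detail worth noting in this hunk: concurrent_gc_ loses its const qualifier because ChangeCollector now reassigns it at runtime, and a const member can only be set in the constructor initializer list. A tiny sketch with a hypothetical Example class, not ART code:

    // Why the const had to go: reassignment outside the initializer list.
    class Example {
     public:
      Example() : concurrent_gc_(false) {}
      void ChangeCollector(bool concurrent) {
        concurrent_gc_ = concurrent;  // would not compile if concurrent_gc_ were const
      }
     private:
      bool concurrent_gc_;  // previously: const bool concurrent_gc_;
    };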