Only allocate collectors if we may use them
Most of the savings likely come from no longer allocating the concurrent
copying collector's 8 MB MarkQueue when that collector is not in use.
Before AOSP hh native PSS:
72653 kB: Native
72998 kB: Native
72882 kB: Native
After AOSP hh native PSS:
64823 kB: Native
65986 kB: Native
64219 kB: Native
Bug: 17643507
Change-Id: Ic5a8e753beca36142c1bf36be6311051f7c78e47
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9343622..7534515 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -438,20 +438,31 @@
// Create our garbage collectors.
for (size_t i = 0; i < 2; ++i) {
const bool concurrent = i != 0;
- garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
- garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
- garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
+ (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
+ garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ }
}
if (kMovingCollector) {
- // TODO: Clean this up.
- const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
- semi_space_collector_ = new collector::SemiSpace(this, generational,
- generational ? "generational" : "");
- garbage_collectors_.push_back(semi_space_collector_);
- concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
- garbage_collectors_.push_back(concurrent_copying_collector_);
- mark_compact_collector_ = new collector::MarkCompact(this);
- garbage_collectors_.push_back(mark_compact_collector_);
+ if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
+ MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
+ use_homogeneous_space_compaction_for_oom_) {
+ // TODO: Clean this up.
+ const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
+ semi_space_collector_ = new collector::SemiSpace(this, generational,
+ generational ? "generational" : "");
+ garbage_collectors_.push_back(semi_space_collector_);
+ }
+ if (MayUseCollector(kCollectorTypeCC)) {
+ concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
+ garbage_collectors_.push_back(concurrent_copying_collector_);
+ }
+ if (MayUseCollector(kCollectorTypeMC)) {
+ mark_compact_collector_ = new collector::MarkCompact(this);
+ garbage_collectors_.push_back(mark_compact_collector_);
+ }
}
if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
(is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
@@ -487,6 +498,10 @@
return nullptr;
}
+bool Heap::MayUseCollector(CollectorType type) const {
+ return foreground_collector_type_ == type || background_collector_type_ == type;
+}
+
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
size_t growth_limit, size_t capacity,
const char* name, bool can_move_objects) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b2478e6..d41e17f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -661,6 +661,9 @@
// Request asynchronous GC.
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+ // Whether or not we may use a garbage collector, used so that we only create collectors we need.
+ bool MayUseCollector(CollectorType type) const;
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;