Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/allocation_record.cc    |  6
-rw-r--r--  runtime/gc/allocator/rosalloc.cc   | 96
-rw-r--r--  runtime/gc/allocator/rosalloc.h    |  3
-rw-r--r--  runtime/gc/collector/mark_sweep.cc |  4
-rw-r--r--  runtime/gc/heap.cc                 | 59
-rw-r--r--  runtime/gc/heap.h                  | 19
-rw-r--r--  runtime/gc/space/image_space.cc    | 11
-rw-r--r--  runtime/gc/space/image_space.h     |  2
-rw-r--r--  runtime/gc/space/image_space_fs.h  |  6
-rw-r--r--  runtime/gc/space/rosalloc_space.cc |  5
-rw-r--r--  runtime/gc/space/rosalloc_space.h  |  2
11 files changed, 166 insertions(+), 47 deletions(-)
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 73190455cf..e3714bbde6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -288,6 +288,12 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, m
     records->new_record_condition_.WaitHoldingLocks(self);
   }
 
+  if (!heap->IsAllocTrackingEnabled()) {
+    // Return if the allocation tracking has been disabled while waiting for system weak access
+    // above.
+    return;
+  }
+
   DCHECK_LE(records->Size(), records->alloc_record_max_);
 
   // Get stack trace.
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 2c487fe8df..bd84d0d8e5 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1653,14 +1653,14 @@ void RosAlloc::Initialize() {
     if (i < kNumThreadLocalSizeBrackets) {
       numOfPages[i] = 1;
     } else if (i < (kNumThreadLocalSizeBrackets + kNumRegularSizeBrackets) / 2) {
-      numOfPages[i] = 4;
+      numOfPages[i] = 1;
     } else if (i < kNumRegularSizeBrackets) {
-      numOfPages[i] = 8;
+      numOfPages[i] = 1;
     } else if (i == kNumOfSizeBrackets - 2) {
-      numOfPages[i] = 16;
+      numOfPages[i] = 2;
     } else {
       DCHECK_EQ(i, kNumOfSizeBrackets - 1);
-      numOfPages[i] = 32;
+      numOfPages[i] = 4;
     }
     if (kTraceRosAlloc) {
       LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
@@ -2102,6 +2102,94 @@ void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_allo
   }
 }
 
+void RosAlloc::DumpStats(std::ostream& os) {
+  Thread* self = Thread::Current();
+  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
+      << "The mutator locks isn't exclusively locked at " << __PRETTY_FUNCTION__;
+  size_t num_large_objects = 0;
+  size_t num_pages_large_objects = 0;
+  // These arrays are zero initialized.
+  std::unique_ptr<size_t[]> num_runs(new size_t[kNumOfSizeBrackets]());
+  std::unique_ptr<size_t[]> num_pages_runs(new size_t[kNumOfSizeBrackets]());
+  std::unique_ptr<size_t[]> num_slots(new size_t[kNumOfSizeBrackets]());
+  std::unique_ptr<size_t[]> num_used_slots(new size_t[kNumOfSizeBrackets]());
+  std::unique_ptr<size_t[]> num_metadata_bytes(new size_t[kNumOfSizeBrackets]());
+  ReaderMutexLock rmu(self, bulk_free_lock_);
+  MutexLock lock_mu(self, lock_);
+  for (size_t i = 0; i < page_map_size_; ) {
+    uint8_t pm = page_map_[i];
+    switch (pm) {
+      case kPageMapReleased:
+      case kPageMapEmpty:
+        ++i;
+        break;
+      case kPageMapLargeObject: {
+        size_t num_pages = 1;
+        size_t idx = i + 1;
+        while (idx < page_map_size_ && page_map_[idx] == kPageMapLargeObjectPart) {
+          num_pages++;
+          idx++;
+        }
+        num_large_objects++;
+        num_pages_large_objects += num_pages;
+        i += num_pages;
+        break;
+      }
+      case kPageMapLargeObjectPart:
+        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
+                   << DumpPageMap();
+        break;
+      case kPageMapRun: {
+        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
+        size_t idx = run->size_bracket_idx_;
+        size_t num_pages = numOfPages[idx];
+        num_runs[idx]++;
+        num_pages_runs[idx] += num_pages;
+        num_slots[idx] += numOfSlots[idx];
+        size_t num_free_slots = run->NumberOfFreeSlots();
+        num_used_slots[idx] += numOfSlots[idx] - num_free_slots;
+        num_metadata_bytes[idx] += headerSizes[idx];
+        i += num_pages;
+        break;
+      }
+      case kPageMapRunPart:
+        // Fall-through.
+      default:
+        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
+                   << DumpPageMap();
+        break;
+    }
+  }
+  os << "RosAlloc stats:\n";
+  for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
+    os << "Bracket " << i << " (" << bracketSizes[i] << "):"
+       << " #runs=" << num_runs[i]
+       << " #pages=" << num_pages_runs[i]
+       << " (" << PrettySize(num_pages_runs[i] * kPageSize) << ")"
+       << " #metadata_bytes=" << PrettySize(num_metadata_bytes[i])
+       << " #slots=" << num_slots[i] << " (" << PrettySize(num_slots[i] * bracketSizes[i]) << ")"
+       << " #used_slots=" << num_used_slots[i]
+       << " (" << PrettySize(num_used_slots[i] * bracketSizes[i]) << ")\n";
+  }
+  os << "Large #allocations=" << num_large_objects
+     << " #pages=" << num_pages_large_objects
+     << " (" << PrettySize(num_pages_large_objects * kPageSize) << ")\n";
+  size_t total_num_pages = 0;
+  size_t total_metadata_bytes = 0;
+  size_t total_allocated_bytes = 0;
+  for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
+    total_num_pages += num_pages_runs[i];
+    total_metadata_bytes += num_metadata_bytes[i];
+    total_allocated_bytes += num_used_slots[i] * bracketSizes[i];
+  }
+  total_num_pages += num_pages_large_objects;
+  total_allocated_bytes += num_pages_large_objects * kPageSize;
+  os << "Total #total_bytes=" << PrettySize(total_num_pages * kPageSize)
+     << " #metadata_bytes=" << PrettySize(total_metadata_bytes)
+     << " #used_bytes=" << PrettySize(total_allocated_bytes) << "\n";
+  os << "\n";
+}
+
 }  // namespace allocator
 }  // namespace gc
 }  // namespace art
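The new RosAlloc::DumpStats() above aggregates statistics by walking the page map, a byte-per-page encoding where a header entry (kPageMapLargeObject, kPageMapRun) is followed by "part" entries covering the rest of the allocation. The standalone sketch below illustrates that walk with simplified stand-in types; the enum values, the scan-based run length (ART instead reads numOfPages[size_bracket_idx_]), and main() are illustrative assumptions, not ART's real API.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

enum PageState : uint8_t { kEmpty, kLargeObject, kLargeObjectPart, kRun, kRunPart };

int main() {
  // Toy page map: one 3-page large object, then a 2-page run, then an empty page.
  std::vector<uint8_t> page_map = {kLargeObject, kLargeObjectPart, kLargeObjectPart,
                                   kRun, kRunPart, kEmpty};
  size_t num_large_objects = 0, num_pages_large = 0, num_runs = 0, num_pages_runs = 0;
  for (size_t i = 0; i < page_map.size();) {
    switch (page_map[i]) {
      case kEmpty:
        ++i;  // A single free page; nothing to count.
        break;
      case kLargeObject: {
        // Count the header page plus all trailing "part" pages so the loop
        // advances one whole allocation at a time.
        size_t num_pages = 1;
        while (i + num_pages < page_map.size() &&
               page_map[i + num_pages] == kLargeObjectPart) {
          ++num_pages;
        }
        ++num_large_objects;
        num_pages_large += num_pages;
        i += num_pages;
        break;
      }
      case kRun: {
        size_t num_pages = 1;
        while (i + num_pages < page_map.size() && page_map[i + num_pages] == kRunPart) {
          ++num_pages;
        }
        ++num_runs;
        num_pages_runs += num_pages;
        i += num_pages;
        break;
      }
      default:
        // A bare "part" entry is unreachable if headers consumed them above;
        // ART LOG(FATAL)s here, the sketch just skips.
        ++i;
        break;
    }
  }
  std::printf("large objects: %zu (%zu pages), runs: %zu (%zu pages)\n",
              num_large_objects, num_pages_large, num_runs, num_pages_runs);
}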
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index b12cb5b0dd..1fa2d1ac8a 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -928,6 +928,9 @@ class RosAlloc {
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
       REQUIRES(!bulk_free_lock_, !lock_);
 
+  void DumpStats(std::ostream& os)
+      REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) REQUIRES(!bulk_free_lock_);
+
  private:
   friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 6073fc8a78..894ceba216 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -845,7 +845,9 @@ class CardScanTask : public MarkStackTask<false> {
 };
 
 size_t MarkSweep::GetThreadCount(bool paused) const {
-  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
+  // Use less threads if we are in a background state (non jank perceptible) since we want to leave
+  // more CPU time for the foreground apps.
+  if (heap_->GetThreadPool() == nullptr || !Runtime::Current()->InJankPerceptibleProcessState()) {
     return 1;
   }
   return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
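The mark_sweep.cc hunk above keys the GC thread count off the runtime's process state rather than the heap's removed CareAboutPauseTimes() flag: background (jank-imperceptible) processes get a single GC thread, foreground processes get the configured pool plus the driving thread. A minimal sketch of that policy, using made-up stand-ins for ART's runtime accessors:

#include <cstddef>
#include <cstdio>

struct RuntimeState {
  bool has_thread_pool;
  bool jank_perceptible;       // Mirrors Runtime::InJankPerceptibleProcessState().
  size_t parallel_gc_threads;  // Pool size for paused phases.
  size_t conc_gc_threads;      // Pool size for concurrent phases.
};

size_t GetGcThreadCount(const RuntimeState& rt, bool paused) {
  // Leave CPU for foreground apps when this process is in the background.
  if (!rt.has_thread_pool || !rt.jank_perceptible) {
    return 1;
  }
  // +1 accounts for the thread driving the collection itself.
  return (paused ? rt.parallel_gc_threads : rt.conc_gc_threads) + 1;
}

int main() {
  RuntimeState fg{true, true, 4, 2};
  RuntimeState bg{true, false, 4, 2};
  std::printf("foreground paused: %zu, background paused: %zu\n",
              GetGcThreadCount(fg, true), GetGcThreadCount(bg, true));
}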
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f4fccee034..01db90a401 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -116,6 +116,12 @@ static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
 // For deterministic compilation, we need the heap to be at a well-known address.
 static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
 
+// Dump the rosalloc stats on SIGQUIT.
+static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
+
+static inline bool CareAboutPauseTimes() {
+  return Runtime::Current()->InJankPerceptibleProcessState();
+}
 
 Heap::Heap(size_t initial_size,
            size_t growth_limit,
@@ -175,8 +181,6 @@ Heap::Heap(size_t initial_size,
       max_allowed_footprint_(initial_size),
       native_footprint_gc_watermark_(initial_size),
       native_need_to_run_finalization_(false),
-      // Initially assume we perceive jank in case the process state is never updated.
-      process_state_(kProcessStateJankPerceptible),
       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
@@ -924,17 +928,18 @@ void Heap::ThreadFlipEnd(Thread* self) {
   thread_flip_cond_->Broadcast(self);
 }
 
-void Heap::UpdateProcessState(ProcessState process_state) {
-  if (process_state_ != process_state) {
-    process_state_ = process_state;
+void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
+  if (old_process_state != new_process_state) {
+    const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
     for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
       // Start at index 1 to avoid "is always false" warning.
       // Have iteration 1 always transition the collector.
-      TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
-          ? foreground_collector_type_ : background_collector_type_);
+      TransitionCollector((((i & 1) == 1) == jank_perceptible)
+          ? foreground_collector_type_
+          : background_collector_type_);
       usleep(kCollectorTransitionStressWait);
     }
-    if (process_state_ == kProcessStateJankPerceptible) {
+    if (jank_perceptible) {
       // Transition back to foreground right away to prevent jank.
       RequestCollectorTransition(foreground_collector_type_, 0);
     } else {
@@ -1176,6 +1181,10 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
     }
   }
 
+  if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
+    rosalloc_space_->DumpStats(os);
+  }
+
   BaseMutex::DumpAll(os);
 }
 
@@ -2204,8 +2213,8 @@ void Heap::TransitionCollector(CollectorType collector_type) {
   } else {
     saved_str = " expanded " + PrettySize(-delta_allocated);
   }
-  VLOG(heap) << "Heap transition to " << process_state_ << " took "
-             << PrettyDuration(duration) << saved_str;
+  VLOG(heap) << "Collector transition to " << collector_type << " took "
+             << PrettyDuration(duration) << saved_str;
 }
 
 void Heap::ChangeCollector(CollectorType collector_type) {
@@ -3960,31 +3969,31 @@ void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
 
 void Heap::AllowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->AllowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->AllowNewAllocationRecords();
   }
 }
 
 void Heap::DisallowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->DisallowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->DisallowNewAllocationRecords();
   }
 }
 
 void Heap::BroadcastForNewAllocationRecords() const {
   CHECK(kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->BroadcastForNewAllocationRecords();
-    }
+  // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
+  // be set to false while some threads are waiting for system weak access in
+  // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->BroadcastForNewAllocationRecords();
   }
 }
 
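The three heap.cc hunks above replace a double-checked IsAllocTrackingEnabled() fast path with an unconditional lock acquisition plus a null check of the record map, because the enabled flag can flip to false while a recording thread is already waiting, leaving it unwoken (b/27467554). A condensed sketch of the resulting pattern, with standard-library stand-ins for ART's lock, condition, and map types:

#include <condition_variable>
#include <memory>
#include <mutex>

struct AllocRecordObjectMap {
  void BroadcastForNewAllocationRecords() { cond.notify_all(); }
  std::condition_variable cond;
};

std::mutex alloc_tracker_lock;
std::unique_ptr<AllocRecordObjectMap> allocation_records;  // Null when tracking is off.

void BroadcastForNewAllocationRecords() {
  // Always take the lock; do not consult a racy "enabled" flag first, or a
  // waiter that observed the flag as true may never be woken.
  std::lock_guard<std::mutex> mu(alloc_tracker_lock);
  if (allocation_records != nullptr) {
    allocation_records->BroadcastForNewAllocationRecords();
  }
}

int main() {
  allocation_records.reset(new AllocRecordObjectMap());  // Tracking enabled.
  BroadcastForNewAllocationRecords();                    // Safe: map exists.
  allocation_records.reset();                            // Tracking disabled.
  BroadcastForNewAllocationRecords();                    // Safe: null check.
}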
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9eda422902..2925591333 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -36,6 +36,7 @@
 #include "globals.h"
 #include "object_callbacks.h"
 #include "offsets.h"
+#include "process_state.h"
 #include "safe_map.h"
 #include "verify_object.h"
 
@@ -116,14 +117,6 @@ static constexpr bool kUseRosAlloc = true;
 // If true, use thread-local allocation stack.
 static constexpr bool kUseThreadLocalAllocationStack = true;
 
-// The process state passed in from the activity manager, used to determine when to do trimming
-// and compaction.
-enum ProcessState {
-  kProcessStateJankPerceptible = 0,
-  kProcessStateJankImperceptible = 1,
-};
-std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);
-
 class Heap {
  public:
   // If true, measure the total allocation time.
@@ -382,7 +375,7 @@ class Heap {
   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);
 
   // Update the heap's process state to a new value, may cause compaction to occur.
-  void UpdateProcessState(ProcessState process_state)
+  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
 
   bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
@@ -664,11 +657,6 @@ class Heap {
   void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_);
   void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
 
-  // Returns true if we currently care about pause times.
-  bool CareAboutPauseTimes() const {
-    return process_state_ == kProcessStateJankPerceptible;
-  }
-
   // Thread pool.
   void CreateThreadPool();
   void DeleteThreadPool();
@@ -1152,9 +1140,6 @@ class Heap {
   // Whether or not we need to run finalizers in the next native allocation.
   bool native_need_to_run_finalization_;
 
-  // Whether or not we currently care about pause times.
-  ProcessState process_state_;
-
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
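heap.h now pulls in "process_state.h" instead of defining ProcessState itself; the new header lives outside runtime/gc and is therefore not shown in this diff. A hypothetical reconstruction from the deleted block and the include (a sketch, not the verbatim AOSP header):

// Hypothetical reconstruction of runtime/process_state.h; inferred from the
// enum deleted from heap.h above, not taken from this diff.
#ifndef ART_RUNTIME_PROCESS_STATE_H_
#define ART_RUNTIME_PROCESS_STATE_H_

#include <iosfwd>

namespace art {

// The process state passed in from the activity manager, used to determine
// when to do trimming and compaction.
enum ProcessState {
  kProcessStateJankPerceptible = 0,
  kProcessStateJankImperceptible = 1,
};

std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);

}  // namespace art

#endif  // ART_RUNTIME_PROCESS_STATE_H_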
   size_t concurrent_start_bytes_;
 
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 5ff1cb7a2e..22bf5f9358 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1520,6 +1520,17 @@ ImageSpace* ImageSpace::CreateFromAppImage(const char* image,
                                            /*out*/error_msg);
 }
 
+void ImageSpace::DumpSections(std::ostream& os) const {
+  const uint8_t* base = Begin();
+  const ImageHeader& header = GetImageHeader();
+  for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
+    auto section_type = static_cast<ImageHeader::ImageSections>(i);
+    const ImageSection& section = header.GetImageSection(section_type);
+    os << section_type << " " << reinterpret_cast<const void*>(base + section.Offset())
+       << "-" << reinterpret_cast<const void*>(base + section.End()) << "\n";
+  }
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index f2f416377e..c9741d0648 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -149,6 +149,8 @@ class ImageSpace : public MemMapSpace {
     return GetImageHeader().GetOatFileEnd();
   }
 
+  void DumpSections(std::ostream& os) const;
+
  protected:
   // Tries to initialize an ImageSpace from the given image path, returning null on error.
   //
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
index ec4bf92a2a..5237466c00 100644
--- a/runtime/gc/space/image_space_fs.h
+++ b/runtime/gc/space/image_space_fs.h
@@ -26,6 +26,7 @@
 #include "base/unix_file/fd_file.h"
 #include "globals.h"
 #include "os.h"
+#include "runtime.h"
 #include "utils.h"
 
 namespace art {
@@ -200,6 +201,11 @@ static void PruneDalvikCache(InstructionSet isa) {
   impl::DeleteDirectoryContents(GetDalvikCacheOrDie(".", false), false);
   // Prune /data/dalvik-cache/<isa>.
   impl::DeleteDirectoryContents(GetDalvikCacheOrDie(GetInstructionSetString(isa), false), false);
+
+  // Be defensive. There should be a runtime created here, but this may be called in a test.
+  if (Runtime::Current() != nullptr) {
+    Runtime::Current()->SetPrunedDalvikCache(true);
+  }
 }
 
 // We write out an empty file to the zygote's ISA specific cache dir at the start of
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 203d3bcfe9..b01609509c 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -368,6 +368,11 @@ void RosAllocSpace::Clear() {
   SetFootprintLimit(footprint_limit);
 }
 
+void RosAllocSpace::DumpStats(std::ostream& os) {
+  ScopedSuspendAll ssa(__FUNCTION__);
+  rosalloc_->DumpStats(os);
+}
+
 }  // namespace space
 
 namespace allocator {
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index bc1473850c..b175fbfea3 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -144,6 +144,8 @@ class RosAllocSpace : public MallocSpace {
     rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
   }
 
+  void DumpStats(std::ostream& os);
+
  protected:
   RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                 allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, uint8_t* limit,
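RosAllocSpace::DumpStats() above wraps the call in ScopedSuspendAll, which is what satisfies RosAlloc::DumpStats()'s REQUIRES(Locks::mutator_lock_) contract: with every mutator thread suspended, the page map can be read without per-page races. A simplified RAII analogue of that pattern, with std::shared_mutex standing in for ART's mutator lock (illustrative types only):

#include <cstdio>
#include <shared_mutex>

std::shared_mutex mutator_lock;  // Stand-in for Locks::mutator_lock_.

class ScopedSuspendAll {
 public:
  explicit ScopedSuspendAll(const char* cause) : cause_(cause) {
    // Exclusive acquisition: no mutator can allocate or free while this is
    // held, so allocator metadata can be walked safely.
    mutator_lock.lock();
    std::printf("suspending all threads for %s\n", cause_);
  }
  ~ScopedSuspendAll() {
    std::printf("resuming all threads after %s\n", cause_);
    mutator_lock.unlock();
  }

 private:
  const char* cause_;
};

void DumpStats() {
  ScopedSuspendAll ssa(__FUNCTION__);
  // ... walk allocator metadata safely here ...
}

int main() {
  DumpStats();
}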