Diffstat (limited to 'src')
-rw-r--r--  src/gc/space.cc                        10
-rw-r--r--  src/gc/space.h                          2
-rw-r--r--  src/heap.cc                            26
-rw-r--r--  src/heap.h                              2
-rw-r--r--  src/native/dalvik_system_VMRuntime.cc  25
5 files changed, 48 insertions, 17 deletions
diff --git a/src/gc/space.cc b/src/gc/space.cc
index 37565db7fc..7e6f7ed28a 100644
--- a/src/gc/space.cc
+++ b/src/gc/space.cc
@@ -406,7 +406,7 @@ size_t DlMallocSpace::AllocationSize(const Object* obj) {
   return InternalAllocationSize(obj);
 }
 
-void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /* arg */) {
+void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
   // Is this chunk in use?
   if (used_bytes != 0) {
     return;
@@ -417,15 +417,19 @@ void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /* a
   if (end > start) {
     size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
     CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
+    size_t* reclaimed = reinterpret_cast<size_t*>(arg);
+    *reclaimed += length;
   }
 }
 
-void DlMallocSpace::Trim() {
+size_t DlMallocSpace::Trim() {
   MutexLock mu(Thread::Current(), lock_);
   // Trim to release memory at the end of the space.
   mspace_trim(mspace_, 0);
   // Visit space looking for page-sized holes to advise the kernel we don't need.
-  mspace_inspect_all(mspace_, MspaceMadviseCallback, NULL);
+  size_t reclaimed = 0;
+  mspace_inspect_all(mspace_, MspaceMadviseCallback, &reclaimed);
+  return reclaimed;
 }
 
 void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
diff --git a/src/gc/space.h b/src/gc/space.h
index f51ae5d785..2ed4988f75 100644
--- a/src/gc/space.h
+++ b/src/gc/space.h
@@ -284,7 +284,7 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace {
   }
 
   // Hands unused pages back to the system.
-  void Trim();
+  size_t Trim();
 
   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
   // in use, indicated by num_bytes equaling zero.
diff --git a/src/heap.cc b/src/heap.cc
index 656aae7330..40037e34af 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -23,6 +23,7 @@
 #include <vector>
 
 #include "base/stl_util.h"
+#include "cutils/sched_policy.h"
 #include "debugger.h"
 #include "gc/atomic_stack.h"
 #include "gc/card_table.h"
@@ -1789,18 +1790,20 @@ void Heap::ConcurrentGC(Thread* self) {
   }
 }
 
-void Heap::Trim() {
-  alloc_space_->Trim();
-}
-
 void Heap::RequestHeapTrim() {
+  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
+  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
+  // a space it will hold its lock and can become a cause of jank.
+  // Note, the large object space self trims and the Zygote space was trimmed and unchanging since
+  // forking.
+
   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
   // because that only marks object heads, so a large array looks like lots of empty space. We
   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
   // to utilization (which is probably inversely proportional to how much benefit we can expect).
   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
   // not how much use we're making of those pages.
-  uint64_t ms_time = NsToMs(NanoTime());
+  uint64_t ms_time = MilliTime();
   float utilization =
       static_cast<float>(alloc_space_->GetNumBytesAllocated()) / alloc_space_->Size();
   if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
@@ -1820,6 +1823,14 @@ void Heap::RequestHeapTrim() {
       return;
     }
   }
+
+  SchedPolicy policy;
+  get_sched_policy(self->GetTid(), &policy);
+  if (policy == SP_FOREGROUND || policy == SP_AUDIO_APP) {
+    // Don't trim the heap if we are a foreground or audio app.
+    return;
+  }
+
   last_trim_time_ = ms_time;
   JNIEnv* env = self->GetJniEnv();
   DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
@@ -1829,4 +1840,9 @@ void Heap::RequestHeapTrim() {
   CHECK(!env->ExceptionCheck());
 }
 
+size_t Heap::Trim() {
+  // Handle a requested heap trim on a thread outside of the main GC thread.
+  return alloc_space_->Trim();
+}
+
 }  // namespace art
diff --git a/src/heap.h b/src/heap.h
index 78cbe99fc6..b7fc34d8ff 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -297,7 +297,7 @@ class Heap {
 
   void DumpForSigQuit(std::ostream& os);
 
-  void Trim();
+  size_t Trim();
 
   HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     return live_bitmap_.get();
diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc
index 9c400418db..bf518dcefb 100644
--- a/src/native/dalvik_system_VMRuntime.cc
+++ b/src/native/dalvik_system_VMRuntime.cc
@@ -157,19 +157,30 @@ static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVe
 }
 
 static void VMRuntime_trimHeap(JNIEnv*, jobject) {
+  uint64_t start_ns = NanoTime();
+  // Trim the managed heap.
   Heap* heap = Runtime::Current()->GetHeap();
-  uint64_t start_ns = NanoTime();
   DlMallocSpace* alloc_space = heap->GetAllocSpace();
   size_t alloc_space_size = alloc_space->Size();
-  float utilization = static_cast<float>(alloc_space->GetNumBytesAllocated()) / alloc_space_size;
-  heap->Trim();
+  float managed_utilization =
+      static_cast<float>(alloc_space->GetNumBytesAllocated()) / alloc_space_size;
+  size_t managed_reclaimed = heap->Trim();
+
+  uint64_t gc_heap_end_ns = NanoTime();
+
+  // Trim the native heap.
   dlmalloc_trim(0);
-  dlmalloc_inspect_all(MspaceMadviseCallback, NULL);
-  LOG(INFO) << "Parallel heap trimming took " << PrettyDuration(NanoTime() - start_ns)
-            << " on a " << PrettySize(alloc_space_size)
-            << " alloc space with " << static_cast<int>(100 * utilization) << "% utilization";
+  size_t native_reclaimed = 0;
+  dlmalloc_inspect_all(MspaceMadviseCallback, &native_reclaimed);
+
+  uint64_t end_ns = NanoTime();
+
+  LOG(INFO) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
+      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
+      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
+      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
+      << "%.";
 }
 
 static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
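Note on the space.cc change: it threads a byte counter through the mspace_inspect_all()/dlmalloc_inspect_all() callback so Trim() can report how much memory it advised back to the kernel. The sketch below is not the ART sources verbatim (the page-alignment step sits outside the hunk context shown above); it only illustrates the general shape of such a callback: skip in-use chunks, madvise the whole pages that fall strictly inside a free chunk so the allocator's chunk bookkeeping at the edges stays resident, and accumulate the advised length through the void* argument.

#include <sys/mman.h>
#include <unistd.h>

#include <cstddef>
#include <cstdint>

// Callback with the signature dlmalloc's mspace_inspect_all()/dlmalloc_inspect_all()
// expect: invoked once per chunk, with used_bytes == 0 for free chunks.
static void MadviseFreeChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes != 0) {
    return;  // Chunk is in use; nothing to hand back.
  }
  const uintptr_t page_size = static_cast<uintptr_t>(sysconf(_SC_PAGESIZE));
  // Only whole pages strictly inside the free chunk can be released; the chunk
  // boundary metadata at its edges must stay resident.
  const uintptr_t page_start =
      (reinterpret_cast<uintptr_t>(start) + page_size - 1) & ~(page_size - 1);
  const uintptr_t page_end = reinterpret_cast<uintptr_t>(end) & ~(page_size - 1);
  if (page_end > page_start) {
    const size_t length = page_end - page_start;
    madvise(reinterpret_cast<void*>(page_start), length, MADV_DONTNEED);
    // The accounting added by this change: report advised bytes through the caller's arg.
    *reinterpret_cast<size_t*>(arg) += length;
  }
}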
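Note on the heap.cc change: trim requests are now also gated on the scheduler group of the process, on top of the existing utilization and two-second back-off checks. A hypothetical condensation of that decision is sketched below; ShouldRequestHeapTrim() and its parameters are illustrative only and fold away the delayed-retry handling around the existing checks.

#include <cutils/sched_policy.h>

#include <cstdint>

// Illustrative helper (not part of the patch): decide whether to ask the daemon
// thread for a heap trim.
bool ShouldRequestHeapTrim(float utilization, uint64_t now_ms, uint64_t last_trim_ms, int tid) {
  if (utilization > 0.75f) {
    return false;  // An attempted dlmalloc trim costs roughly in proportion to utilization.
  }
  if (now_ms - last_trim_ms < 2 * 1000) {
    return false;  // Trimmed within the last two seconds; avoid back-to-back pauses.
  }
  SchedPolicy policy;
  get_sched_policy(tid, &policy);
  if (policy == SP_FOREGROUND || policy == SP_AUDIO_APP) {
    return false;  // Foreground and audio apps would notice the jank, so skip trimming.
  }
  return true;
}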