-rw-r--r--  runtime/gc/gc_cause.cc                     |  1
-rw-r--r--  runtime/gc/gc_cause.h                      |  2
-rw-r--r--  runtime/gc/heap-inl.h                      | 42
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  |  6
4 files changed, 30 insertions, 21 deletions
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index ec213e50e8..00ed57ba7d 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -32,6 +32,7 @@ const char* PrettyCause(GcCause cause) {
     case kGcCauseForAlloc: return "Alloc";
     case kGcCauseBackground: return "Background";
     case kGcCauseExplicit: return "Explicit";
+    case kGcCauseExplicitBackground: return "ExplicitBackground";
     case kGcCauseForNativeAlloc: return "NativeAlloc";
     case kGcCauseCollectorTransition: return "CollectorTransition";
     case kGcCauseDisableMovingGc: return "DisableMovingGc";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index e035510969..4977f0c25b 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -35,6 +35,8 @@ enum GcCause {
   kGcCauseBackground,
   // An explicit System.gc() call.
   kGcCauseExplicit,
+  // An explicit VMRuntime.requestConcurrentGC() call.
+  kGcCauseExplicitBackground,
   // GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
   // (This may be a blocking GC depending on whether we run a non-concurrent collector).
   kGcCauseForNativeAlloc,
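
The two hunks above add the new cause and its log string. As a minimal standalone sketch (an illustration only, not the ART headers), this is how kGcCauseExplicitBackground sits next to the existing causes and what PrettyCause() reports for it in GC log output:

#include <iostream>

// Trimmed-down stand-ins for ART's GcCause and PrettyCause().
enum GcCause {
  kGcCauseBackground,          // concurrent GC triggered by heap heuristics
  kGcCauseExplicit,            // an explicit System.gc() call
  kGcCauseExplicitBackground,  // an explicit VMRuntime.requestConcurrentGC() call
};

const char* PrettyCause(GcCause cause) {
  switch (cause) {
    case kGcCauseBackground:         return "Background";
    case kGcCauseExplicit:           return "Explicit";
    case kGcCauseExplicitBackground: return "ExplicitBackground";
  }
  return "Unknown";
}

int main() {
  // GC log lines built from PrettyCause() can now distinguish framework-requested
  // concurrent GCs from heuristically triggered background GCs.
  std::cout << PrettyCause(kGcCauseExplicitBackground) << '\n';  // prints "ExplicitBackground"
  return 0;
}
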
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 5d6e149b98..106b7e6358 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -93,23 +93,33 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
   // Need to check that we aren't the large object allocator since the large object allocation
   // code path includes this function. If we didn't check we would have an infinite loop.
-  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
-    // AllocLargeObject can suspend and will recall PreObjectAllocated if needed.
-    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
-                                                           pre_fence_visitor);
-    if (obj != nullptr) {
-      return obj.Ptr();
+  if (kCheckLargeObject && UNLIKELY(byte_count >= large_object_threshold_)) {
+    // Any not non-movable large object allocation should get here exactly once.
+    static constexpr size_t WARN_THRESHOLD = 50'000'000;
+    if (byte_count >= WARN_THRESHOLD) {
+      // The zeroing cost here is substantial, so we claim there is no way to do this
+      // frequently enough to cause disastrous log spam.
+      LOG(WARNING) << "Allocating huge object, klass = " << klass->PrettyClass()
+                   << ", byte count = " << byte_count;
     }
-    // There should be an OOM exception, since we are retrying, clear it.
-    self->ClearException();
+    if (ShouldAllocLargeObject(klass, byte_count)) {
+      // AllocLargeObject can suspend and will recall PreObjectAllocated if needed.
+      obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(
+          self, &klass, byte_count, pre_fence_visitor);
+      if (obj != nullptr) {
+        return obj.Ptr();
+      }
+      // There should be an OOM exception, since we are retrying, clear it.
+      self->ClearException();
 
-    // If the large object allocation failed, try to use the normal spaces (main space,
-    // non moving space). This can happen if there is significant virtual address space
-    // fragmentation.
-    // kInstrumented may be out of date, so recurse without large object checking, rather than
-    // continue.
-    return AllocObjectWithAllocator</*kInstrumented=*/ true, /*kCheckLargeObject=*/ false>
-        (self, klass, byte_count, GetUpdatedAllocator(allocator), pre_fence_visitor);
+      // If the large object allocation failed, try to use the normal spaces (main space,
+      // non moving space). This can happen if there is significant virtual address space
+      // fragmentation.
+      // kInstrumented may be out of date, so recurse without large object checking, rather
+      // than continue.
+      return AllocObjectWithAllocator</*kInstrumented=*/true, /*kCheckLargeObject=*/false>(
+          self, klass, byte_count, GetUpdatedAllocator(allocator), pre_fence_visitor);
+    }
   }
   ScopedAssertNoThreadSuspension ants("Called PreObjectAllocated, no suspend until alloc");
   if (IsTLABAllocator(allocator)) {
@@ -435,8 +445,6 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self,
 }
 
 inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
-  // We need to have a zygote space or else our newly allocated large object can end up in the
-  // Zygote resulting in it being prematurely freed.
   // We can only do this for primitive objects since large objects will not be within the card table
   // range. This also means that we rely on SetClass not dirtying the object's card.
   return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
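
To make the reworked allocation path above easier to follow, here is a hedged standalone sketch of the decision order it implements; the threshold value and helper names are assumptions for illustration, not ART's actual constants or API. The size threshold is checked once, huge requests get a warning, and only primitive arrays and strings take the large object space path, with everything else falling through to the normal spaces (the retry-in-normal-spaces path after a failed large object allocation is handled by the recursive AllocObjectWithAllocator call in the diff and is not modeled here):

#include <cstddef>
#include <cstdio>

// Assumed values for illustration only; ART's real large object threshold is configurable.
constexpr std::size_t kLargeObjectThreshold = 12 * 1024;
constexpr std::size_t kWarnThreshold = 50'000'000;  // mirrors WARN_THRESHOLD in the hunk above

enum class AllocPath { kNormalSpace, kLargeObjectSpace };

// Hypothetical helper mirroring the new control flow: one threshold check,
// a warning for huge requests, and the large object space only for the types
// that ShouldAllocLargeObject() accepts (primitive arrays and strings).
AllocPath ChooseAllocPath(std::size_t byte_count, bool is_primitive_array_or_string) {
  if (byte_count >= kLargeObjectThreshold) {
    if (byte_count >= kWarnThreshold) {
      // Rare enough (the zeroing cost dominates) that a log line per call is cheap.
      std::fprintf(stderr, "Allocating huge object, byte count = %zu\n", byte_count);
    }
    if (is_primitive_array_or_string) {
      return AllocPath::kLargeObjectSpace;
    }
  }
  // Small objects, and large objects of other types, use the main/non-moving spaces.
  return AllocPath::kNormalSpace;
}

int main() {
  std::printf("%d\n", static_cast<int>(ChooseAllocPath(64, false)));         // 0: normal space
  std::printf("%d\n", static_cast<int>(ChooseAllocPath(1 << 20, true)));     // 1: large object space
  std::printf("%d\n", static_cast<int>(ChooseAllocPath(60'000'000, true)));  // warns, then 1
  return 0;
}
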
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0e9660aaac..3658c9c31c 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -340,10 +340,8 @@ static void VMRuntime_requestHeapTrim(JNIEnv* env, jobject) {
 static void VMRuntime_requestConcurrentGC(JNIEnv* env, jobject) {
   gc::Heap *heap = Runtime::Current()->GetHeap();
-  heap->RequestConcurrentGC(Thread::ForEnv(env),
-                            gc::kGcCauseBackground,
-                            true,
-                            heap->GetCurrentGcNum());
+  heap->RequestConcurrentGC(
+      Thread::ForEnv(env), gc::kGcCauseExplicitBackground, true, heap->GetCurrentGcNum());
 }
 
 static void VMRuntime_startHeapTaskProcessor(JNIEnv* env, jobject) {
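
With this change, VMRuntime.requestConcurrentGC() reports ExplicitBackground instead of Background, so framework-requested concurrent GCs are distinguishable from heuristically triggered ones. A small compile-only sketch (the predicates below are hypothetical, not ART API) of how a consumer of GC causes could group the new value:

// Hypothetical helper predicates, named here only for illustration; they are
// not part of ART. They show how the new cause can be grouped: it is both an
// explicit request and background (concurrent) work.
enum GcCause {
  kGcCauseBackground,
  kGcCauseExplicit,
  kGcCauseExplicitBackground,
};

constexpr bool IsExplicitRequest(GcCause cause) {
  return cause == kGcCauseExplicit || cause == kGcCauseExplicitBackground;
}

constexpr bool IsBackgroundConcurrent(GcCause cause) {
  return cause == kGcCauseBackground || cause == kGcCauseExplicitBackground;
}

static_assert(IsExplicitRequest(kGcCauseExplicitBackground),
              "requested explicitly via VMRuntime.requestConcurrentGC()");
static_assert(IsBackgroundConcurrent(kGcCauseExplicitBackground),
              "but still scheduled as a background concurrent GC");

int main() { return 0; }
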