Merge "Differentiate between native alloc and normal background GC"
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index c1c1cad..c35ec7c 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -29,6 +29,7 @@
case kGcCauseBackground: return "Background";
case kGcCauseExplicit: return "Explicit";
case kGcCauseForNativeAlloc: return "NativeAlloc";
+ case kGcCauseForNativeAllocBackground: return "NativeAllocBackground";
case kGcCauseCollectorTransition: return "CollectorTransition";
case kGcCauseDisableMovingGc: return "DisableMovingGc";
case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index eb27547..41c8943 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -33,6 +33,8 @@
kGcCauseExplicit,
// GC triggered for a native allocation.
kGcCauseForNativeAlloc,
+ // Background GC triggered for a native allocation.
+ kGcCauseForNativeAllocBackground,
// GC triggered for a collector transition.
kGcCauseCollectorTransition,
// Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a853b98..4a25610 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3686,20 +3686,21 @@
ObjPtr<mirror::Object>* obj) {
StackHandleScope<1> hs(self);
HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
- RequestConcurrentGC(self, force_full);
+ RequestConcurrentGC(self, kGcCauseBackground, force_full);
}

class Heap::ConcurrentGCTask : public HeapTask {
public:
- ConcurrentGCTask(uint64_t target_time, bool force_full)
- : HeapTask(target_time), force_full_(force_full) { }
+ ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
+ : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
virtual void Run(Thread* self) OVERRIDE {
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->ConcurrentGC(self, force_full_);
+ heap->ConcurrentGC(self, cause_, force_full_);
heap->ClearConcurrentGCRequest();
}
private:
+ const GcCause cause_;
const bool force_full_; // If true, force full (or partial) collection.
};
@@ -3713,18 +3714,19 @@
concurrent_gc_pending_.StoreRelaxed(false);
}

-void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
+void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
if (CanAddHeapTask(self) &&
concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
+ cause,
force_full));
}
}

-void Heap::ConcurrentGC(Thread* self, bool force_full) {
+void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
if (!Runtime::Current()->IsShuttingDown(self)) {
// Wait for any GCs currently running to finish.
- if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
+ if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
// If we can't run the GC type we wanted to run, find the next appropriate one and try that
// instead. E.g. can't do partial, so do full instead.
collector::GcType next_gc_type = next_gc_type_;
@@ -3732,13 +3734,11 @@
if (force_full && next_gc_type == collector::kGcTypeSticky) {
next_gc_type = NonStickyGcType();
}
- if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
- collector::kGcTypeNone) {
+ if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
for (collector::GcType gc_type : gc_plan_) {
// Attempt to run the collector, if we succeed, we are done.
if (gc_type > next_gc_type &&
- CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
- collector::kGcTypeNone) {
+ CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
break;
}
}
@@ -3940,7 +3940,7 @@
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), /*force_full*/true);
+ RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAllocBackground, /*force_full*/true);
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1a782b4..241d84c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -330,7 +330,7 @@
// Does a concurrent GC, should only be called by the GC daemon thread
// through runtime.
- void ConcurrentGC(Thread* self, bool force_full)
+ void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);
// Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
@@ -743,7 +743,8 @@
void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
// Request asynchronous GC.
- void RequestConcurrentGC(Thread* self, bool force_full) REQUIRES(!*pending_task_lock_);
+ void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
+ REQUIRES(!*pending_task_lock_);
// Whether or not we may use a garbage collector, used so that we only create collectors we need.
bool MayUseCollector(CollectorType type) const;
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 11f8505..34bbf32 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -252,7 +252,7 @@
}

static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
- Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env), true);
+ Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env), gc::kGcCauseBackground, true);
}

static void VMRuntime_requestHeapTrim(JNIEnv* env, jobject) {
@@ -260,7 +260,9 @@
}

static void VMRuntime_requestConcurrentGC(JNIEnv* env, jobject) {
- Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env), true);
+ Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env),
+ gc::kGcCauseBackground,
+ true);
}

static void VMRuntime_startHeapTaskProcessor(JNIEnv* env, jobject) {
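
Taken together, the patch makes the asynchronous GC machinery record why it was asked to run: the VMRuntime entry points still request plain Background collections, while native-allocation pressure now requests NativeAllocBackground ones, and the cause travels with the queued task. A self-contained sketch of that end-to-end flow, again using made-up names (SketchHeap, RunPendingTasks) rather than ART APIs:

    #include <functional>
    #include <iostream>
    #include <vector>

    // Illustrative stand-ins only; these are not ART types or APIs.
    enum GcCause {
      kGcCauseBackground,
      kGcCauseForNativeAlloc,
      kGcCauseForNativeAllocBackground,
    };

    const char* PrettyCause(GcCause cause) {
      switch (cause) {
        case kGcCauseBackground: return "Background";
        case kGcCauseForNativeAlloc: return "NativeAlloc";
        case kGcCauseForNativeAllocBackground: return "NativeAllocBackground";
      }
      return "Unknown";
    }

    class SketchHeap {
     public:
      // The queued task captures the cause, like ConcurrentGCTask::cause_, so the
      // collection that eventually runs is attributed to the original trigger.
      void RequestConcurrentGC(GcCause cause, bool force_full) {
        pending_.push_back([this, cause, force_full] { ConcurrentGC(cause, force_full); });
      }

      // Stand-in for Heap::RegisterNativeAllocation crossing its GC threshold
      // with a concurrent collector.
      void RegisterNativeAllocation() {
        RequestConcurrentGC(kGcCauseForNativeAllocBackground, /*force_full=*/true);
      }

      void RunPendingTasks() {
        for (auto& task : pending_) task();
        pending_.clear();
      }

     private:
      void ConcurrentGC(GcCause cause, bool force_full) {
        std::cout << PrettyCause(cause) << " concurrent GC, force_full=" << force_full << "\n";
      }

      std::vector<std::function<void()>> pending_;
    };

    int main() {
      SketchHeap heap;
      heap.RequestConcurrentGC(kGcCauseBackground, /*force_full=*/true);  // VMRuntime.requestConcurrentGC path
      heap.RegisterNativeAllocation();                                    // native allocation pressure path
      heap.RunPendingTasks();  // prints "Background ..." then "NativeAllocBackground ..."
    }

Capturing the cause on the task at request time, rather than recomputing it when the task runs, keeps the attribution stable even if heap state changes before the GC actually starts.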