| Field     | Value |
|-----------|-------|
| author    | 2017-05-17 08:12:47 +0000 |
| committer | 2017-05-17 08:12:49 +0000 |
| commit    | 8e0a5dee37e848ae9bc7ff11c8a0eb66048a5e9e (patch) |
| tree      | b92aae8793ba9fbb4fcceb8c318c1832b20c4f39 |
| parent    | 536e54a6af85127d7f17037360deb08e0a7dc4eb (diff) |
| parent    | da1da8a78c9df45890f48b4f0197ed18fa1de3c5 (diff) |
Merge "Revert "Revert "RegisterNativeAllocation: Avoid case of double blocking gc."""
| Mode       | File | Lines changed |
|------------|------|---------------|
| -rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 2  |
| -rw-r--r-- | runtime/gc/collector/semi_space.cc         | 1  |
| -rw-r--r-- | runtime/gc/gc_cause.cc                     | 2  |
| -rw-r--r-- | runtime/gc/gc_cause.h                      | 8  |
| -rw-r--r-- | runtime/gc/heap.cc                         | 23 |
| -rw-r--r-- | runtime/gc/heap.h                          | 14 |

6 files changed, 36 insertions, 14 deletions
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8b80f54880..b0218b51fa 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -299,7 +299,7 @@ void ConcurrentCopying::InitializePhase() {
   objects_moved_.StoreRelaxed(0);
   GcCause gc_cause = GetCurrentIteration()->GetGcCause();
   if (gc_cause == kGcCauseExplicit ||
-      gc_cause == kGcCauseForNativeAlloc ||
+      gc_cause == kGcCauseForNativeAllocBlocking ||
       gc_cause == kGcCauseCollectorTransition ||
       GetCurrentIteration()->GetClearSoftReferences()) {
     force_evacuate_all_ = true;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 41e605104c..d3798924ee 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -193,6 +193,7 @@ void SemiSpace::MarkingPhase() {
   if (generational_) {
     if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
         GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
         GetCurrentIteration()->GetClearSoftReferences()) {
       // If an explicit, native allocation-triggered, or last attempt
       // collection, collect the whole heap.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index c35ec7c341..2bbc86e3e9 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -29,7 +29,7 @@ const char* PrettyCause(GcCause cause) {
     case kGcCauseBackground: return "Background";
     case kGcCauseExplicit: return "Explicit";
     case kGcCauseForNativeAlloc: return "NativeAlloc";
-    case kGcCauseForNativeAllocBackground: return "NativeAllocBackground";
+    case kGcCauseForNativeAllocBlocking: return "NativeAllocBlocking";
     case kGcCauseCollectorTransition: return "CollectorTransition";
     case kGcCauseDisableMovingGc: return "DisableMovingGc";
     case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 41c894340c..b8cf3c4295 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -31,10 +31,12 @@ enum GcCause {
   kGcCauseBackground,
   // An explicit System.gc() call.
   kGcCauseExplicit,
-  // GC triggered for a native allocation.
+  // GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
+  // (This may be a blocking GC depending on whether we run a non-concurrent collector).
   kGcCauseForNativeAlloc,
-  // Background GC triggered for a native allocation.
-  kGcCauseForNativeAllocBackground,
+  // GC triggered for a native allocation when NativeAllocationBlockingGcWatermark is exceeded.
+  // (This is always a blocking GC).
+  kGcCauseForNativeAllocBlocking,
   // GC triggered for a collector transition.
   kGcCauseCollectorTransition,
   // Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
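The renamed causes map onto the two native-allocation watermarks that `Heap` tracks: `NativeAllocationGcWatermark` triggers an ordinary native-allocation GC (blocking only when the collector is not concurrent), while `NativeAllocationBlockingGcWatermark` always stalls the allocating thread. A rough, hedged sketch of that decision, with hypothetical constants and stub functions standing in for the real `Heap` members and GC entry points, could look like this (simplified: the real code also coordinates which thread runs the blocking GC, as shown after the heap.h hunk below):

```cpp
#include <cstddef>

// Hypothetical thresholds standing in for Heap::NativeAllocationGcWatermark()
// and Heap::NativeAllocationBlockingGcWatermark(); the real values depend on
// the heap configuration.
constexpr size_t kNativeAllocationGcWatermark = 32u * 1024 * 1024;
constexpr size_t kNativeAllocationBlockingGcWatermark = 128u * 1024 * 1024;

enum GcCause { kGcCauseForNativeAlloc, kGcCauseForNativeAllocBlocking };

// Stand-ins for the real collector entry points.
void RequestConcurrentGC(GcCause) {}    // non-blocking: asks the GC daemon to run
void CollectGarbageInternal(GcCause) {} // blocking: runs a GC on the calling thread
bool IsGcConcurrent() { return true; }

// Sketch of the choice RegisterNativeAllocation makes once the running total
// of native bytes allocated since the last GC is known.
void OnNativeAllocation(size_t native_bytes_since_last_gc) {
  if (native_bytes_since_last_gc > kNativeAllocationBlockingGcWatermark) {
    // Past the blocking watermark: the allocating thread blocks until a
    // blocking GC has completed.
    CollectGarbageInternal(kGcCauseForNativeAllocBlocking);
  } else if (native_bytes_since_last_gc > kNativeAllocationGcWatermark) {
    // Past the lower watermark: blocking only if the collector is not concurrent.
    if (IsGcConcurrent()) {
      RequestConcurrentGC(kGcCauseForNativeAlloc);
    } else {
      CollectGarbageInternal(kGcCauseForNativeAlloc);
    }
  }
}
```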
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 298336ae4d..668fb4b7e4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -563,6 +563,7 @@ Heap::Heap(size_t initial_size,
   native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
   native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
                                                        *native_blocking_gc_lock_));
+  native_blocking_gc_is_assigned_ = false;
   native_blocking_gc_in_progress_ = false;
   native_blocking_gcs_finished_ = 0;
@@ -2695,6 +2696,10 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
     // old_native_bytes_allocated_ now that GC has been triggered, resetting
     // new_native_bytes_allocated_ to zero in the process.
     old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+    if (gc_cause == kGcCauseForNativeAllocBlocking) {
+      MutexLock mu(self, *native_blocking_gc_lock_);
+      native_blocking_gc_in_progress_ = true;
+    }
   }

   DCHECK_LT(gc_type, collector::kGcTypeMax);
@@ -3526,6 +3531,7 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
   // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
   if (cause == kGcCauseForAlloc ||
       cause == kGcCauseForNativeAlloc ||
+      cause == kGcCauseForNativeAllocBlocking ||
       cause == kGcCauseDisableMovingGc) {
     VLOG(gc) << "Starting a blocking GC " << cause;
   }
@@ -3927,33 +3933,36 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
       // finish before addressing the fact that we exceeded the blocking
       // watermark again.
       do {
+        ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
         native_blocking_gc_cond_->Wait(self);
       } while (native_blocking_gcs_finished_ == initial_gcs_finished);
       initial_gcs_finished++;
     }

     // It's possible multiple threads have seen that we exceeded the
-    // blocking watermark. Ensure that only one of those threads runs the
-    // blocking GC. The rest of the threads should instead wait for the
-    // blocking GC to complete.
+    // blocking watermark. Ensure that only one of those threads is assigned
+    // to run the blocking GC. The rest of the threads should instead wait
+    // for the blocking GC to complete.
     if (native_blocking_gcs_finished_ == initial_gcs_finished) {
-      if (native_blocking_gc_in_progress_) {
+      if (native_blocking_gc_is_assigned_) {
         do {
+          ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
           native_blocking_gc_cond_->Wait(self);
         } while (native_blocking_gcs_finished_ == initial_gcs_finished);
       } else {
-        native_blocking_gc_in_progress_ = true;
+        native_blocking_gc_is_assigned_ = true;
         run_gc = true;
       }
     }
   }

   if (run_gc) {
-    CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+    CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
     RunFinalization(env, kNativeAllocationFinalizeTimeout);
     CHECK(!env->ExceptionCheck());

     MutexLock mu(self, *native_blocking_gc_lock_);
+    native_blocking_gc_is_assigned_ = false;
     native_blocking_gc_in_progress_ = false;
     native_blocking_gcs_finished_++;
     native_blocking_gc_cond_->Broadcast(self);
@@ -3962,7 +3971,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
     // Trigger another GC because there have been enough native bytes
     // allocated since the last GC.
     if (IsGcConcurrent()) {
-      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAllocBackground, /*force_full*/true);
+      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
     } else {
       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
     }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index aa123d8736..72871785e5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1237,10 +1237,20 @@ class Heap {
   // old_native_bytes_allocated_ and new_native_bytes_allocated_.
   Atomic<size_t> old_native_bytes_allocated_;

-  // Used for synchronization of blocking GCs triggered by
-  // RegisterNativeAllocation.
+  // Used for synchronization when multiple threads call into
+  // RegisterNativeAllocation and require blocking GC.
+  // * If a previous blocking GC is in progress, all threads will wait for
+  // that GC to complete, then wait for one of the threads to complete another
+  // blocking GC.
+  // * If a blocking GC is assigned but not in progress, a thread has been
+  // assigned to run a blocking GC but has not started yet. Threads will wait
+  // for the assigned blocking GC to complete.
+  // * If a blocking GC is not assigned nor in progress, the first thread will
+  // run a blocking GC and signal to other threads that blocking GC has been
+  // assigned.
   Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
+  bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
   bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
   uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
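The heap.h comment above describes a small hand-off protocol: one waiter is assigned to run the blocking GC, and every other thread waits on the condition variable until the finished-GC counter advances. A minimal, self-contained sketch of that protocol using standard-library primitives instead of ART's `Mutex`/`ConditionVariable` follows; names such as `RunBlockingCollection()` are hypothetical stand-ins, not ART APIs.

```cpp
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

std::mutex blocking_gc_lock;
std::condition_variable blocking_gc_cond;
bool blocking_gc_is_assigned = false;  // a thread has claimed the next blocking GC
bool blocking_gc_in_progress = false;  // that GC has actually started
uint32_t blocking_gcs_finished = 0;    // generation counter, bumped once per finished GC

// Hypothetical stand-in for CollectGarbageInternal(..., kGcCauseForNativeAllocBlocking, ...):
// marks the GC as in progress once it really starts, then "collects".
void RunBlockingCollection() {
  {
    std::lock_guard<std::mutex> lock(blocking_gc_lock);
    blocking_gc_in_progress = true;
  }
  std::this_thread::sleep_for(std::chrono::milliseconds(10));  // pretend to collect
}

// Called by each thread that observes the blocking watermark was exceeded.
void OnBlockingWatermarkExceeded() {
  bool run_gc = false;
  {
    std::unique_lock<std::mutex> lock(blocking_gc_lock);
    uint32_t initial_gcs_finished = blocking_gcs_finished;
    if (blocking_gc_in_progress) {
      // A blocking GC from a previous overflow is still running; wait for it
      // before dealing with the fact that the watermark was exceeded again.
      blocking_gc_cond.wait(lock, [&] {
        return blocking_gcs_finished != initial_gcs_finished;
      });
      initial_gcs_finished++;
    }
    // Only one of the threads that saw the overflow is assigned to run the
    // next blocking GC; the rest wait for the counter to advance.
    if (blocking_gcs_finished == initial_gcs_finished) {
      if (blocking_gc_is_assigned) {
        blocking_gc_cond.wait(lock, [&] {
          return blocking_gcs_finished != initial_gcs_finished;
        });
      } else {
        blocking_gc_is_assigned = true;
        run_gc = true;
      }
    }
  }
  if (run_gc) {
    RunBlockingCollection();
    std::lock_guard<std::mutex> lock(blocking_gc_lock);
    blocking_gc_is_assigned = false;
    blocking_gc_in_progress = false;
    blocking_gcs_finished++;
    blocking_gc_cond.notify_all();
  }
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back(OnBlockingWatermarkExceeded);
  }
  for (auto& t : threads) {
    t.join();
  }
  std::cout << "blocking GCs finished: " << blocking_gcs_finished << "\n";
}
```

Waiting on a generation counter rather than a simple boolean is what lets a waiter distinguish "the GC I was waiting on actually finished" from a spurious or stale wakeup, which is why `native_blocking_gcs_finished_` appears in every wait loop of the real code.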