Remove blocking case from RegisterNativeAllocation.
The blocking path could stall an allocating thread waiting for a full GC and for finalizers to run, causing jank for questionable benefit.
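With the blocking watermark gone, registration reduces to bumping the count of native bytes allocated since the last GC and, once the remaining watermark is exceeded, requesting another collection. A rough sketch of the resulting fast path, pieced together from the heap.cc hunk below (the GC-request tail of the function is not shown in that hunk and is only indicated by a comment here):

  void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
    // Bump the number of native bytes allocated since the last GC.
    size_t old_value = new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
    if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
        !IsGCRequestPending()) {
      // Enough native bytes have been allocated since the last GC;
      // request another collection for kGcCauseForNativeAlloc.
      // (Existing tail of the function, unchanged by this patch.)
    }
  }

Callers no longer wait on a prior GC or on finalizers; native memory pressure is handled solely through the existing NativeAllocationGcWatermark path.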
Test: art/test/testrunner/testrunner.py -b -t 004-NativeAllocations --host
Test: vogar --mode host libcore/luni/src/test/java/libcore/libcore/util/NativeAllocationRegistryTest.java
Bug: 70831911
Change-Id: I310e3daf5bd3b65097b6011d6a96fb42ac50132b
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index cf83716..3173db0 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -301,7 +301,6 @@
objects_moved_.StoreRelaxed(0);
GcCause gc_cause = GetCurrentIteration()->GetGcCause();
if (gc_cause == kGcCauseExplicit ||
- gc_cause == kGcCauseForNativeAllocBlocking ||
gc_cause == kGcCauseCollectorTransition ||
GetCurrentIteration()->GetClearSoftReferences()) {
force_evacuate_all_ = true;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 3150781..1e136bc 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -193,7 +193,6 @@
if (generational_) {
if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
- GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index d88fcdc..508d765 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -33,7 +33,6 @@
case kGcCauseBackground: return "Background";
case kGcCauseExplicit: return "Explicit";
case kGcCauseForNativeAlloc: return "NativeAlloc";
- case kGcCauseForNativeAllocBlocking: return "NativeAllocBlocking";
case kGcCauseCollectorTransition: return "CollectorTransition";
case kGcCauseDisableMovingGc: return "DisableMovingGc";
case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 78496f3..81781ce 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -36,9 +36,6 @@
// GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
// (This may be a blocking GC depending on whether we run a non-concurrent collector).
kGcCauseForNativeAlloc,
- // GC triggered for a native allocation when NativeAllocationBlockingGcWatermark is exceeded.
- // (This is always a blocking GC).
- kGcCauseForNativeAllocBlocking,
// GC triggered for a collector transition.
kGcCauseCollectorTransition,
// Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9edba96..6da092c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -128,9 +128,6 @@
sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
sizeof(mirror::HeapReference<mirror::Object>);
-// System.runFinalization can deadlock with native allocations, to deal with this, we have a
-// timeout on how long we wait for finalizers to run. b/21544853
-static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
@@ -561,12 +558,6 @@
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
- native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
- native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
- *native_blocking_gc_lock_));
- native_blocking_gc_is_assigned_ = false;
- native_blocking_gc_in_progress_ = false;
- native_blocking_gcs_finished_ = 0;
thread_flip_lock_ = new Mutex("GC thread flip lock");
thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
@@ -1143,7 +1134,6 @@
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
- delete native_blocking_gc_lock_;
delete thread_flip_lock_;
delete pending_task_lock_;
delete backtrace_lock_;
@@ -2556,10 +2546,6 @@
// old_native_bytes_allocated_ now that GC has been triggered, resetting
// new_native_bytes_allocated_ to zero in the process.
old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
- if (gc_cause == kGcCauseForNativeAllocBlocking) {
- MutexLock mu(self, *native_blocking_gc_lock_);
- native_blocking_gc_in_progress_ = true;
- }
}
DCHECK_LT(gc_type, collector::kGcTypeMax);
@@ -3386,7 +3372,6 @@
// it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
if (cause == kGcCauseForAlloc ||
cause == kGcCauseForNativeAlloc ||
- cause == kGcCauseForNativeAllocBlocking ||
cause == kGcCauseDisableMovingGc) {
VLOG(gc) << "Starting a blocking GC " << cause;
}
@@ -3772,59 +3757,9 @@
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- // See the REDESIGN section of go/understanding-register-native-allocation
- // for an explanation of how RegisterNativeAllocation works.
- size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
- if (new_value > NativeAllocationBlockingGcWatermark()) {
- // Wait for a new GC to finish and finalizers to run, because the
- // allocation rate is too high.
- Thread* self = ThreadForEnv(env);
- bool run_gc = false;
- {
- MutexLock mu(self, *native_blocking_gc_lock_);
- uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
- if (native_blocking_gc_in_progress_) {
- // A native blocking GC is in progress from the last time the native
- // allocation blocking GC watermark was exceeded. Wait for that GC to
- // finish before addressing the fact that we exceeded the blocking
- // watermark again.
- do {
- ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
- native_blocking_gc_cond_->Wait(self);
- } while (native_blocking_gcs_finished_ == initial_gcs_finished);
- initial_gcs_finished++;
- }
-
- // It's possible multiple threads have seen that we exceeded the
- // blocking watermark. Ensure that only one of those threads is assigned
- // to run the blocking GC. The rest of the threads should instead wait
- // for the blocking GC to complete.
- if (native_blocking_gcs_finished_ == initial_gcs_finished) {
- if (native_blocking_gc_is_assigned_) {
- do {
- ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
- native_blocking_gc_cond_->Wait(self);
- } while (native_blocking_gcs_finished_ == initial_gcs_finished);
- } else {
- native_blocking_gc_is_assigned_ = true;
- run_gc = true;
- }
- }
- }
-
- if (run_gc) {
- CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- CHECK(!env->ExceptionCheck());
-
- MutexLock mu(self, *native_blocking_gc_lock_);
- native_blocking_gc_is_assigned_ = false;
- native_blocking_gc_in_progress_ = false;
- native_blocking_gcs_finished_++;
- native_blocking_gc_cond_->Broadcast(self);
- }
- } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
+ size_t old_value = new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
+ if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
!IsGCRequestPending()) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7dcf82f..57d3d50 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -268,7 +268,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes);
// Change the allocator, updates entrypoints.
@@ -1087,16 +1087,6 @@
return max_free_;
}
- // How large new_native_bytes_allocated_ can grow while GC is in progress
- // before we block the allocating thread to allow GC to catch up.
- ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
- // Historically the native allocations were bounded by growth_limit_. This
- // uses that same value, dividing growth_limit_ by 2 to account for
- // the fact that now the bound is relative to the number of retained
- // registered native allocations rather than absolute.
- return growth_limit_ / 2;
- }
-
void TraceHeapSize(size_t heap_size);
// Remove a vlog code from heap-inl.h which is transitively included in half the world.
@@ -1252,23 +1242,6 @@
// old_native_bytes_allocated_ and new_native_bytes_allocated_.
Atomic<size_t> old_native_bytes_allocated_;
- // Used for synchronization when multiple threads call into
- // RegisterNativeAllocation and require blocking GC.
- // * If a previous blocking GC is in progress, all threads will wait for
- // that GC to complete, then wait for one of the threads to complete another
- // blocking GC.
- // * If a blocking GC is assigned but not in progress, a thread has been
- // assigned to run a blocking GC but has not started yet. Threads will wait
- // for the assigned blocking GC to complete.
- // * If a blocking GC is not assigned nor in progress, the first thread will
- // run a blocking GC and signal to other threads that blocking GC has been
- // assigned.
- Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
- bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
- bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
- uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
-
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated