-rw-r--r--  runtime/debugger.cc                          1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc   2
-rw-r--r--  runtime/gc/heap.cc                          70
-rw-r--r--  runtime/gc/heap.h                           12
-rw-r--r--  runtime/jni_internal.cc                     28
-rw-r--r--  runtime/native/java_lang_Thread.cc           1
-rw-r--r--  runtime/thread_state.h                       1
7 files changed, 111 insertions, 4 deletions
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 0cbbb79767..67099d715f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2094,6 +2094,7 @@ JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
case kWaitingInMainSignalCatcherLoop:
case kWaitingPerformingGc:
case kWaitingWeakGcRootRead:
+ case kWaitingForGcThreadFlip:
case kWaiting:
return JDWP::TS_WAIT;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 65e946fd79..a5bc60a912 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -261,8 +261,10 @@ void ConcurrentCopying::FlipThreadRoots() {
gc_barrier_->Init(self, 0);
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
+ heap_->ThreadFlipBegin(self); // Sync with JNI critical calls.
size_t barrier_count = Runtime::Current()->FlipThreadRoots(
&thread_flip_visitor, &flip_callback, this);
+ heap_->ThreadFlipEnd(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
gc_barrier_->Increment(self, barrier_count);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d7f918b4ff..aed7c62d76 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -161,6 +161,8 @@ Heap::Heap(size_t initial_size,
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
zygote_space_(nullptr),
large_object_threshold_(large_object_threshold),
+ disable_thread_flip_count_(0),
+ thread_flip_running_(false),
collector_type_running_(kCollectorTypeNone),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -480,6 +482,9 @@ Heap::Heap(size_t initial_size,
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
+ thread_flip_lock_ = new Mutex("GC thread flip lock");
+ thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
+ *thread_flip_lock_));
task_processor_.reset(new TaskProcessor());
reference_processor_.reset(new ReferenceProcessor());
pending_task_lock_ = new Mutex("Pending task lock");
@@ -770,6 +775,71 @@ void Heap::DecrementDisableMovingGC(Thread* self) {
--disable_moving_gc_count_;
}
+void Heap::IncrementDisableThreadFlip(Thread* self) {
+ // Called by mutators. If thread_flip_running_ is true, block until the flip finishes; otherwise proceed.
+ CHECK(kUseReadBarrier);
+ ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
+ MutexLock mu(self, *thread_flip_lock_);
+ bool has_waited = false;
+ uint64_t wait_start = NanoTime();
+ while (thread_flip_running_) {
+ has_waited = true;
+ thread_flip_cond_->Wait(self);
+ }
+ ++disable_thread_flip_count_;
+ if (has_waited) {
+ uint64_t wait_time = NanoTime() - wait_start;
+ total_wait_time_ += wait_time;
+ if (wait_time > long_pause_log_threshold_) {
+ LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
+ }
+ }
+}
+
+void Heap::DecrementDisableThreadFlip(Thread* self) {
+ // Called by mutators. Decrement disable_thread_flip_count_ and potentially wake up the
+ // GC thread waiting to do a thread flip.
+ CHECK(kUseReadBarrier);
+ MutexLock mu(self, *thread_flip_lock_);
+ CHECK_GT(disable_thread_flip_count_, 0U);
+ --disable_thread_flip_count_;
+ thread_flip_cond_->Broadcast(self);
+}
+
+void Heap::ThreadFlipBegin(Thread* self) {
+ // Called by the GC. Set thread_flip_running_ to true. If disable_thread_flip_count_ > 0,
+ // block until it drops to zero; otherwise proceed.
+ CHECK(kUseReadBarrier);
+ ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
+ MutexLock mu(self, *thread_flip_lock_);
+ bool has_waited = false;
+ uint64_t wait_start = NanoTime();
+ CHECK(!thread_flip_running_);
+ // Set this to true before waiting so that a new mutator entering a JNI critical section won't starve the GC.
+ thread_flip_running_ = true;
+ while (disable_thread_flip_count_ > 0) {
+ has_waited = true;
+ thread_flip_cond_->Wait(self);
+ }
+ if (has_waited) {
+ uint64_t wait_time = NanoTime() - wait_start;
+ total_wait_time_ += wait_time;
+ if (wait_time > long_pause_log_threshold_) {
+ LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
+ }
+ }
+}
+
+void Heap::ThreadFlipEnd(Thread* self) {
+ // Called by the GC. Set thread_flip_running_ to false and potentially wake up mutators
+ // waiting to enter a JNI critical section.
+ CHECK(kUseReadBarrier);
+ MutexLock mu(self, *thread_flip_lock_);
+ CHECK(thread_flip_running_);
+ thread_flip_running_ = false;
+ thread_flip_cond_->Broadcast(self);
+}
+
void Heap::UpdateProcessState(ProcessState process_state) {
if (process_state_ != process_state) {
process_state_ = process_state;
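
The four functions added to heap.cc above form a two-sided handshake between mutators in JNI critical sections and the GC's thread flip. A minimal standalone sketch of the same pattern, using std::mutex and std::condition_variable in place of ART's Mutex/ConditionVariable (names here are illustrative, not ART code):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    class ThreadFlipGate {
     public:
      // Mutator side: call before entering a JNI critical section. Blocks while a
      // thread flip is running, then registers the critical section.
      void EnterJniCritical() {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return !flip_running_; });
        ++disable_count_;
      }

      // Mutator side: call when leaving the JNI critical section. May wake a GC
      // thread waiting to start the flip.
      void ExitJniCritical() {
        std::lock_guard<std::mutex> lock(mu_);
        --disable_count_;
        cv_.notify_all();
      }

      // GC side: announce the flip first so new critical sections block, then
      // wait for the in-flight critical sections to drain.
      void FlipBegin() {
        std::unique_lock<std::mutex> lock(mu_);
        flip_running_ = true;  // Set before waiting so mutators cannot starve the GC.
        cv_.wait(lock, [this] { return disable_count_ == 0; });
      }

      // GC side: the flip is done; let blocked mutators proceed.
      void FlipEnd() {
        std::lock_guard<std::mutex> lock(mu_);
        flip_running_ = false;
        cv_.notify_all();
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      std::size_t disable_count_ = 0;
      bool flip_running_ = false;
    };

Setting the running flag before waiting mirrors ThreadFlipBegin above: once the GC has announced the flip, newly arriving critical sections block, so the GC only has to wait for the criticals already in flight.
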
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d94f1091e0..85688ae3ee 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -300,6 +300,12 @@ class Heap {
void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
+ // Temporarily disable thread flip for JNI critical calls.
+ void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
+ void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
+ void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
+ void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
+
// Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
void ClearMarkedObjects() REQUIRES(Locks::heap_bitmap_lock_);
@@ -1065,6 +1071,12 @@ class Heap {
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
+ // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
+ Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
+ size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
+ bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
+
// Reference processor;
std::unique_ptr<ReferenceProcessor> reference_processor_;
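
The REQUIRES(!*thread_flip_lock_) annotations above declare a negative capability for Clang's thread-safety analysis: callers must not already hold the lock, because each of these functions acquires it internally. A small illustration using the macro spellings from the Clang thread-safety documentation (ART defines its own equivalents; the names below are not ART code):

    #define CAPABILITY(x)   __attribute__((capability(x)))
    #define ACQUIRE(...)    __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)    __attribute__((release_capability(__VA_ARGS__)))
    #define REQUIRES(...)   __attribute__((requires_capability(__VA_ARGS__)))
    #define GUARDED_BY(x)   __attribute__((guarded_by(x)))

    class CAPABILITY("mutex") FlipLock {
     public:
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    FlipLock flip_lock;
    bool flip_running GUARDED_BY(flip_lock) = false;

    // REQUIRES(!flip_lock): callers must not already hold flip_lock, since the
    // function acquires it itself. With -Wthread-safety-negative, Clang reports
    // a violation at the call site instead of a self-deadlock at runtime.
    void ThreadFlipBegin() REQUIRES(!flip_lock) {
      flip_lock.Lock();
      flip_running = true;
      flip_lock.Unlock();
    }
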
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6a716b5e0d..6bc18291cb 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1729,7 +1729,13 @@ class JNI {
if (heap->IsMovableObject(s)) {
StackHandleScope<1> hs(soa.Self());
HandleWrapper<mirror::String> h(hs.NewHandleWrapper(&s));
- heap->IncrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->IncrementDisableMovingGC(soa.Self());
+ } else {
+ // For the CC collector, we only need to wait for the thread flip rather than the whole GC
+ // to occur thanks to the to-space invariant.
+ heap->IncrementDisableThreadFlip(soa.Self());
+ }
}
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
@@ -1744,7 +1750,11 @@ class JNI {
gc::Heap* heap = Runtime::Current()->GetHeap();
mirror::String* s = soa.Decode<mirror::String*>(java_string);
if (heap->IsMovableObject(s)) {
- heap->DecrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->DecrementDisableMovingGC(soa.Self());
+ } else {
+ heap->DecrementDisableThreadFlip(soa.Self());
+ }
}
}
@@ -1891,7 +1901,13 @@ class JNI {
}
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(array)) {
- heap->IncrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->IncrementDisableMovingGC(soa.Self());
+ } else {
+ // For the CC collector, we only need to wait for the thread flip rather than the whole GC
+ // to occur thanks to the to-space invariant.
+ heap->IncrementDisableThreadFlip(soa.Self());
+ }
// Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
array = soa.Decode<mirror::Array*>(java_array);
}
@@ -2437,7 +2453,11 @@ class JNI {
delete[] reinterpret_cast<uint64_t*>(elements);
} else if (heap->IsMovableObject(array)) {
// Non copy to a movable object must mean that we had disabled the moving GC.
- heap->DecrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->DecrementDisableMovingGC(soa.Self());
+ } else {
+ heap->DecrementDisableThreadFlip(soa.Self());
+ }
}
}
}
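
For reference, these paths are reached from native code through the JNI critical-region APIs. A hypothetical caller (class and method names made up) showing the pattern that, with the CC collector, now only blocks on the thread flip rather than on a whole moving GC:

    #include <jni.h>

    // Hypothetical native method; with kUseReadBarrier the critical region below
    // now only delays the thread flip, not a full collection.
    extern "C" JNIEXPORT jlong JNICALL
    Java_com_example_Hashing_hashChars(JNIEnv* env, jclass, jstring str) {
      const jsize len = env->GetStringLength(str);
      const jchar* chars = env->GetStringCritical(str, /*isCopy=*/nullptr);
      if (chars == nullptr) {
        return 0;  // OutOfMemoryError is already pending.
      }
      jlong hash = 0;
      for (jsize i = 0; i < len; ++i) {
        hash = 31 * hash + chars[i];  // No other JNI calls inside the critical region.
      }
      env->ReleaseStringCritical(str, chars);
      return hash;
    }
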
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index c76f6eec73..c75ff78821 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -90,6 +90,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
case kWaitingWeakGcRootRead: return kJavaWaiting;
+ case kWaitingForGcThreadFlip: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index a11d213ea3..8f2f70f46e 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -44,6 +44,7 @@ enum ThreadState {
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects
kWaitingWeakGcRootRead, // WAITING TS_WAIT waiting on the GC to read a weak root
+ kWaitingForGcThreadFlip, // WAITING TS_WAIT waiting on the GC thread flip (CC collector) to finish
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger