summary refs log tree commit diff
diff options
context:
space:
mode:
author Hiroshi Yamauchi <yamauchi@google.com> 2015-08-19 20:19:15 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2015-08-19 20:19:15 +0000
commit e1e05e0876f303a9f4aca1d8713b7c44f21952b8 (patch)
tree 79fd9e7d08ad2055542cb1b64ebf26b00cb3af17
parent 83f8b5bb9e9c52791a02a84d7b0889cab898efe4 (diff)
parent 00370827646cc21cb370c3e7e93f9c0cff4c30c2 (diff)
Merge "Use thread-local is_gc_marking flags for the CC collector."
-rw-r--r--  runtime/art_method-inl.h                    |  1
-rw-r--r--  runtime/art_method.cc                       |  1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  | 82
-rw-r--r--  runtime/gc/collector/concurrent_copying.h   |  2
-rw-r--r--  runtime/read_barrier-inl.h                  | 16
-rw-r--r--  runtime/thread.h                            | 18
-rw-r--r--  runtime/thread_list.cc                      |  5
7 files changed, 101 insertions, 24 deletions
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 40bb9e1d9b..d38cc56067 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -21,6 +21,7 @@
#include "art_field.h"
#include "base/logging.h"
+#include "class_linker-inl.h"
#include "dex_file.h"
#include "dex_file-inl.h"
#include "gc_root-inl.h"
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index b9e13a4e81..f4c6473521 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -20,6 +20,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/stringpiece.h"
+#include "class_linker-inl.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 263e67879b..65e946fd79 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -190,6 +190,7 @@ class ThreadFlipVisitor : public Closure {
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
+ thread->SetIsGcMarking(true);
if (use_tlab_ && thread->HasTlab()) {
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
// This must come before the revoke.
@@ -454,17 +455,8 @@ void ConcurrentCopying::MarkingPhase() {
CheckEmptyMarkStack();
// Re-enable weak ref accesses.
ReenableWeakRefAccess(self);
- // Issue an empty checkpoint to ensure no threads are still in the middle of a read barrier
- // which may have a from-space ref cached in a local variable.
- IssueEmptyCheckpoint();
// Marking is done. Disable marking.
- if (kUseTableLookupReadBarrier) {
- heap_->rb_table_->ClearAll();
- DCHECK(heap_->rb_table_->IsAllCleared());
- }
- is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
- is_marking_ = false; // This disables the read barrier/marking of weak roots.
- mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
+ DisableMarking();
CheckEmptyMarkStack();
}
@@ -493,6 +485,68 @@ void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
Runtime::Current()->BroadcastForNewSystemWeaks();
}
+class DisableMarkingCheckpoint : public Closure {
+ public:
+ explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
+ : concurrent_copying_(concurrent_copying) {
+ }
+
+ void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ // Note: self is not necessarily equal to thread since thread may be suspended.
+ Thread* self = Thread::Current();
+ DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+ << thread->GetState() << " thread " << thread << " self " << self;
+ // Disable the thread-local is_gc_marking flag.
+ DCHECK(thread->GetIsGcMarking());
+ thread->SetIsGcMarking(false);
+ // If thread is a running mutator, then act on behalf of the garbage collector.
+ // See the code in ThreadList::RunCheckpoint.
+ if (thread->GetState() == kRunnable) {
+ concurrent_copying_->GetBarrier().Pass(self);
+ }
+ }
+
+ private:
+ ConcurrentCopying* const concurrent_copying_;
+};
+
+void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
+ Thread* self = Thread::Current();
+ DisableMarkingCheckpoint check_point(this);
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ gc_barrier_->Init(self, 0);
+ size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+ // If there are no threads to wait which implies that all the checkpoint functions are finished,
+ // then no need to release the mutator lock.
+ if (barrier_count == 0) {
+ return;
+ }
+ // Release locks then wait for all mutator threads to pass the barrier.
+ Locks::mutator_lock_->SharedUnlock(self);
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ gc_barrier_->Increment(self, barrier_count);
+ }
+ Locks::mutator_lock_->SharedLock(self);
+}
+
+void ConcurrentCopying::DisableMarking() {
+ // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
+ // thread-local flags so that a new thread starting up will get the correct is_marking flag.
+ is_marking_ = false;
+ QuasiAtomic::ThreadFenceForConstructor();
+ // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
+ // still in the middle of a read barrier which may have a from-space ref cached in a local
+ // variable.
+ IssueDisableMarkingCheckpoint();
+ if (kUseTableLookupReadBarrier) {
+ heap_->rb_table_->ClearAll();
+ DCHECK(heap_->rb_table_->IsAllCleared());
+ }
+ is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
+ mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
+}
+
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
EmptyCheckpoint check_point(this);
@@ -708,6 +762,14 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
Thread* self = Thread::Current();
DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
+ // Verify all threads have is_gc_marking to be false
+ {
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
+ for (Thread* thread : thread_list) {
+ CHECK(!thread->GetIsGcMarking());
+ }
+ }
ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
// Roots.
{
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index f382448615..8efad731b8 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -180,6 +180,8 @@ class ConcurrentCopying : public GarbageCollector {
void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
SHARED_REQUIRES(Locks::mutator_lock_);
void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
+ void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 701481392f..daae401b46 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -79,13 +79,9 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
MirrorType* ref = *root;
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
- if (kMaybeDuringStartup && IsDuringStartup()) {
- // During startup, the heap may not be initialized yet. Just
- // return the given ref.
- return ref;
- }
// TODO: separate the read barrier code from the collector code more.
- if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
+ Thread* self = Thread::Current();
+ if (self != nullptr && self->GetIsGcMarking()) {
ref = reinterpret_cast<MirrorType*>(Mark(ref));
}
AssertToSpaceInvariant(gc_root_source, ref);
@@ -120,13 +116,9 @@ inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<Mirro
MirrorType* ref = root->AsMirrorPtr();
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
- if (kMaybeDuringStartup && IsDuringStartup()) {
- // During startup, the heap may not be initialized yet. Just
- // return the given ref.
- return ref;
- }
// TODO: separate the read barrier code from the collector code more.
- if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
+ Thread* self = Thread::Current();
+ if (self != nullptr && self->GetIsGcMarking()) {
ref = reinterpret_cast<MirrorType*>(Mark(ref));
}
AssertToSpaceInvariant(gc_root_source, ref);
diff --git a/runtime/thread.h b/runtime/thread.h
index e4ad7f36db..959af95526 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -782,6 +782,16 @@ class Thread {
tls32_.debug_method_entry_ = false;
}
+ bool GetIsGcMarking() const {
+ CHECK(kUseReadBarrier);
+ return tls32_.is_gc_marking;
+ }
+
+ void SetIsGcMarking(bool is_marking) {
+ CHECK(kUseReadBarrier);
+ tls32_.is_gc_marking = is_marking;
+ }
+
bool GetWeakRefAccessEnabled() const {
CHECK(kUseReadBarrier);
return tls32_.weak_ref_access_enabled;
@@ -1093,7 +1103,8 @@ class Thread {
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
thread_exit_check_count(0), handling_signal_(false),
deoptimization_return_value_is_reference(false), suspended_at_suspend_check(false),
- ready_for_debug_invoke(false), debug_method_entry_(false), weak_ref_access_enabled(true) {
+ ready_for_debug_invoke(false), debug_method_entry_(false), is_gc_marking(false),
+ weak_ref_access_enabled(true) {
}
union StateAndFlags state_and_flags;
@@ -1151,6 +1162,11 @@ class Thread {
// event for the debugger.
bool32_t debug_method_entry_;
+ // True if the GC is in the marking phase. This is used for the CC collector only. This is
+ // thread local so that we can simplify the logic to check for the fast path of read barriers of
+ // GC roots.
+ bool32_t is_gc_marking;
+
// True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
// weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
// processing of the CC collector only. This is thread local so that we can enable/disable weak
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d449f42701..d63781bdba 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1175,7 +1175,10 @@ void ThreadList::Register(Thread* self) {
CHECK(!Contains(self));
list_.push_back(self);
if (kUseReadBarrier) {
- // Initialize this according to the state of the CC collector.
+ // Initialize according to the state of the CC collector.
+ bool is_gc_marking =
+ Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking();
+ self->SetIsGcMarking(is_gc_marking);
bool weak_ref_access_enabled =
Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled();
self->SetWeakRefAccessEnabled(weak_ref_access_enabled);