summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  22
-rw-r--r--  runtime/gc/collector/mark_sweep.h    7
-rw-r--r--  runtime/locks.h                      1
-rw-r--r--  runtime/thread.cc                    8
4 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index cedea61bda..2f68f8e1c7 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -144,7 +144,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
cleared_reference_list_(NULL),
gc_barrier_(new Barrier(0)),
large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
- mark_stack_expand_lock_("mark sweep mark stack expand lock"),
+ mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
is_concurrent_(is_concurrent),
clear_soft_references_(false) {
}
@@ -343,14 +343,18 @@ void MarkSweep::FindDefaultMarkBitmap() {
}
void MarkSweep::ExpandMarkStack() {
+ ResizeMarkStack(mark_stack_->Capacity() * 2);
+}
+
+void MarkSweep::ResizeMarkStack(size_t new_size) {
// Rare case, no need to have Thread::Current be a parameter.
- MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
// Someone else acquired the lock and expanded the mark stack before us.
return;
}
std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
- mark_stack_->Resize(mark_stack_->Capacity() * 2);
+ CHECK_LE(mark_stack_->Size(), new_size);
+ mark_stack_->Resize(new_size);
for (const auto& obj : temp) {
mark_stack_->PushBack(obj);
}
@@ -359,10 +363,12 @@ void MarkSweep::ExpandMarkStack() {
inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
DCHECK(obj != NULL);
if (MarkObjectParallel(obj)) {
- while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
- // Only reason a push can fail is that the mark stack is full.
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
ExpandMarkStack();
}
+ // The object must be pushed on to the mark stack.
+ mark_stack_->PushBack(const_cast<Object*>(obj));
}
}
@@ -409,7 +415,8 @@ inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
// This object was not previously marked.
if (!object_bitmap->Test(obj)) {
object_bitmap->Set(obj);
- // Do we need to expand the mark stack?
+ // Lock is not needed but is here anyways to please annotalysis.
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
ExpandMarkStack();
}
@@ -493,8 +500,7 @@ void MarkSweep::MarkRoot(const Object* obj) {
void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
DCHECK(root != NULL);
DCHECK(arg != NULL);
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->MarkObjectNonNullParallel(root);
+ reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
}
void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index dbec3e9064..fdd0c86724 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -305,8 +305,9 @@ class MarkSweep : public GarbageCollector {
void VerifyRoots()
NO_THREAD_SAFETY_ANALYSIS;
- // Expand mark stack to 2x its current size. Thread safe.
- void ExpandMarkStack();
+ // Expand mark stack to 2x its current size.
+ void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
+ void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
// Returns how many threads we should use for the current GC phase based on if we are paused,
// whether or not we care about pauses.
@@ -445,7 +446,7 @@ class MarkSweep : public GarbageCollector {
UniquePtr<Barrier> gc_barrier_;
Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- Mutex mark_stack_expand_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);
const bool is_concurrent_;
diff --git a/runtime/locks.h b/runtime/locks.h
index 88d05db0cb..f63e2b1720 100644
--- a/runtime/locks.h
+++ b/runtime/locks.h
@@ -38,6 +38,7 @@ enum LockLevel {
kAbortLock,
kJdwpSocketLock,
kAllocSpaceLock,
+ kMarkSweepMarkStackLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
kPinTableLock,
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3178bf16b8..a454195316 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2209,14 +2209,12 @@ void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
mapper.WalkStack();
ReleaseLongJumpContext(context);
- std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
- typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
- for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
- mirror::Object* this_object = (*it).this_object_;
+ for (const instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
+ mirror::Object* this_object = frame.this_object_;
if (this_object != NULL) {
visitor(this_object, arg);
}
- mirror::ArtMethod* method = (*it).method_;
+ mirror::ArtMethod* method = frame.method_;
visitor(method, arg);
}
}