author Mathieu Chartier <mathieuc@google.com> 2012-10-15 17:38:16 -0700
committer Android (Google) Code Review <android-gerrit@google.com> 2012-10-16 10:05:04 -0700
commit 9ebae1f30b84dfd8dab4144f80eebec4f8fc8851 (patch)
tree 379f7606cf7c1b5bbeb41caccc4cb681fc1ef2b4
parent 30de32dec8dba21f7fec117b779c2b4e6bb4e4e6 (diff)
Concurrent class linker and intern table root marking
We now mark the class linker and intern table roots concurrently (with
mutators unpaused), only re-marking these roots in the second pause if
they get dirtied.

Reduces root marking time by ~1ms for each pause.

Change-Id: I833fc557bac9a2930868db715587318293fa4655
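The dirty-flag scheme this change introduces can be sketched in isolation. Below is a minimal C++ illustration, not the ART code: RootSet stands in for the intern table or class linker, and Object, RootVisitor, and the std::mutex are simplified stand-ins for ART's types and locks.

#include <mutex>
#include <vector>

// Simplified stand-ins for ART's Object and Heap::RootVisitor.
struct Object {};
typedef void RootVisitor(Object* root, void* arg);

class RootSet {
 public:
  // Mutator side: any insertion dirties the set, so the GC knows it
  // must re-scan this set during the final pause.
  void Insert(Object* obj) {
    std::lock_guard<std::mutex> mu(lock_);
    roots_.push_back(obj);
    is_dirty_ = true;
  }

  // GC side: visiting cleans the set. If no mutator dirties it again
  // before the final pause, the re-mark of this set can be skipped.
  void VisitRoots(RootVisitor* visitor, void* arg) {
    std::lock_guard<std::mutex> mu(lock_);
    for (size_t i = 0; i < roots_.size(); ++i) {
      visitor(roots_[i], arg);
    }
    is_dirty_ = false;
  }

  bool IsDirty() const { return is_dirty_; }
  void Dirty() { is_dirty_ = true; }  // forced at GC start, cf. Runtime::DirtyRoots()

 private:
  std::mutex lock_;
  std::vector<Object*> roots_;
  bool is_dirty_ = false;  // unlocked read in IsDirty(), as in the patch itself
};

A collector using this dirties every set at the start of a GC (so each is scanned at least once), visits the sets while mutators run, and in the final pause re-visits only the sets whose IsDirty() has become true again; that is the split Runtime::VisitConcurrentRoots() performs in the diff below.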
-rw-r--r--  src/class_linker.cc    6
-rw-r--r--  src/class_linker.h    11
-rw-r--r--  src/gc/mark_sweep.cc   8
-rw-r--r--  src/gc/mark_sweep.h    3
-rw-r--r--  src/heap.cc            5
-rw-r--r--  src/intern_table.cc    8
-rw-r--r--  src/intern_table.h     8
-rw-r--r--  src/runtime.cc        23
-rw-r--r--  src/runtime.h         13
9 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 83661cbafc..090378173f 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -222,6 +222,7 @@ ClassLinker::ClassLinker(InternTable* intern_table)
class_roots_(NULL),
array_iftable_(NULL),
init_done_(false),
+ is_dirty_(false),
intern_table_(intern_table) {
CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
}
@@ -1043,7 +1044,7 @@ void ClassLinker::InitFromImageCallback(Object* obj, void* arg) {
// Keep in sync with InitCallback. Anything we visit, we need to
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
-void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
+void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
visitor(class_roots_, arg);
Thread* self = Thread::Current();
{
@@ -1065,6 +1066,7 @@ void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
}
visitor(array_iftable_, arg);
+ is_dirty_ = false;
}
void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const {
@@ -1746,6 +1748,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, SirtRef<DexCach
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()));
dex_caches_.push_back(dex_cache.get());
dex_cache->SetDexFile(&dex_file);
+ Dirty();
}
void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
@@ -1990,6 +1993,7 @@ Class* ClassLinker::InsertClass(const StringPiece& descriptor, Class* klass, boo
return existing;
}
classes.insert(std::make_pair(hash, klass));
+ Dirty();
return NULL;
}
diff --git a/src/class_linker.h b/src/class_linker.h
index 096d602558..460fcd2bb2 100644
--- a/src/class_linker.h
+++ b/src/class_linker.h
@@ -262,7 +262,7 @@ class ClassLinker {
void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
- void VisitRoots(Heap::RootVisitor* visitor, void* arg) const
+ void VisitRoots(Heap::RootVisitor* visitor, void* arg)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_);
DexCache* FindDexCache(const DexFile& dex_file) const
@@ -378,6 +378,14 @@ class ClassLinker {
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
+ bool IsDirty() const {
+ return is_dirty_;
+ }
+
+ void Dirty() {
+ is_dirty_ = true;
+ }
+
private:
explicit ClassLinker(InternTable*);
@@ -636,6 +644,7 @@ class ClassLinker {
IfTable* array_iftable_;
bool init_done_;
+ bool is_dirty_;
InternTable* intern_table_;
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index 03bbb6abcb..e4cb4d6948 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -79,6 +79,8 @@ void MarkSweep::Init() {
FindDefaultMarkBitmap();
// TODO: if concurrent, enable card marking in compiler
// TODO: check that the mark bitmap is entirely clear.
+ // Mark any concurrent roots as dirty since we need to scan them at least once during this GC.
+ Runtime::Current()->DirtyRoots();
}
void MarkSweep::FindDefaultMarkBitmap() {
@@ -195,7 +197,11 @@ void MarkSweep::VerifyRoots() {
// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
- Runtime::Current()->VisitRoots(MarkObjectVisitor, this);
+ Runtime::Current()->VisitNonConcurrentRoots(MarkObjectVisitor, this);
+}
+
+void MarkSweep::MarkConcurrentRoots() {
+ Runtime::Current()->VisitConcurrentRoots(MarkObjectVisitor, this);
}
class CheckObjectVisitor {
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 76c5428299..ed74f993fc 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -52,6 +52,9 @@ class MarkSweep {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MarkConcurrentRoots()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
// Verify that image roots point to only marked objects within the alloc space.
void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/src/heap.cc b/src/heap.cc
index 98845d8b92..4ca80a6095 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1045,6 +1045,7 @@ void Heap::CollectGarbageMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_
mark_sweep.FindDefaultMarkBitmap();
mark_sweep.MarkRoots();
+ mark_sweep.MarkConcurrentRoots();
timings.AddSplit("MarkRoots");
// Roots are marked on the bitmap and the mark_stack is empty.
@@ -1615,6 +1616,10 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, G
root_end = NanoTime();
timings.AddSplit("RootEnd");
+ // Mark the roots that we can mark concurrently.
+ mark_sweep.MarkConcurrentRoots();
+ timings.AddSplit("MarkConcurrentRoots");
+
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
UpdateAndMarkModUnion(&mark_sweep, timings, gc_type);
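The call ordering that the heap.cc hunks rely on can be summarized in a short sketch. The helpers below (SuspendAll, ResumeAll, ReMarkRoots and the stub MarkSweep) are hypothetical stand-ins; only the sequencing mirrors the actual change.

// Hypothetical stubs; only the ordering in ConcurrentCollectSketch()
// reflects the real concurrent mark-sweep plan.
struct MarkSweep {
  void Init() {}                 // calls Runtime::DirtyRoots() after this change
  void MarkRoots() {}            // now visits the non-concurrent roots only
  void MarkConcurrentRoots() {}  // new: dirty-checked class linker + intern table
  void ReMarkRoots() {}          // Runtime::VisitRoots(); skips clean root sets
};
static void SuspendAll() {}      // stand-in for thread suspension
static void ResumeAll() {}

void ConcurrentCollectSketch(MarkSweep& ms) {
  SuspendAll();              // first pause
  ms.Init();                 // dirty all concurrent root sets up front
  ms.MarkRoots();            // roots that require mutators to be suspended
  ResumeAll();               // mutators run again

  ms.MarkConcurrentRoots();  // the ~1ms of work moved out of the pauses

  SuspendAll();              // second pause
  ms.ReMarkRoots();          // re-visits only the root sets dirtied meanwhile
  // ... recursive marking, sweeping ...
  ResumeAll();
}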
diff --git a/src/intern_table.cc b/src/intern_table.cc
index 5ad39582b8..817ce1e5e1 100644
--- a/src/intern_table.cc
+++ b/src/intern_table.cc
@@ -21,7 +21,7 @@
namespace art {
-InternTable::InternTable() : intern_table_lock_("InternTable lock") {
+InternTable::InternTable() : intern_table_lock_("InternTable lock"), is_dirty_(false) {
}
size_t InternTable::Size() const {
@@ -36,12 +36,13 @@ void InternTable::DumpForSigQuit(std::ostream& os) const {
<< image_strong_interns_.size() << " image strong\n";
}
-void InternTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
+void InternTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
MutexLock mu(Thread::Current(), intern_table_lock_);
typedef Table::const_iterator It; // TODO: C++0x auto
for (It it = strong_interns_.begin(), end = strong_interns_.end(); it != end; ++it) {
visitor(it->second, arg);
}
+ is_dirty_ = false;
// Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
@@ -97,6 +98,9 @@ String* InternTable::Insert(String* s, bool is_strong) {
return image;
}
+ // Mark as dirty so that we rescan the roots.
+ Dirty();
+
// There is no match in the strong table, check the weak table.
String* weak = Lookup(weak_interns_, s, hash_code);
if (weak != NULL) {
diff --git a/src/intern_table.h b/src/intern_table.h
index 6f5677385f..93d20b2e2b 100644
--- a/src/intern_table.h
+++ b/src/intern_table.h
@@ -65,10 +65,15 @@ class InternTable {
size_t Size() const;
- void VisitRoots(Heap::RootVisitor* visitor, void* arg) const;
+ void VisitRoots(Heap::RootVisitor* visitor, void* arg);
void DumpForSigQuit(std::ostream& os) const;
+ bool IsDirty() const { return is_dirty_; }
+ void Dirty() {
+ is_dirty_ = true;
+ }
+
private:
typedef std::multimap<int32_t, String*> Table;
@@ -81,6 +86,7 @@ class InternTable {
void Remove(Table& table, const String* s, uint32_t hash_code);
mutable Mutex intern_table_lock_;
+ bool is_dirty_;
Table image_strong_interns_ GUARDED_BY(intern_table_lock_);
Table strong_interns_ GUARDED_BY(intern_table_lock_);
Table weak_interns_ GUARDED_BY(intern_table_lock_);
diff --git a/src/runtime.cc b/src/runtime.cc
index f93d687399..4b7338b9f3 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -993,10 +993,17 @@ void Runtime::DetachCurrentThread() {
thread_list_->Unregister(self);
}
-void Runtime::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
+void Runtime::VisitConcurrentRoots(Heap::RootVisitor* visitor, void* arg) {
+ if (intern_table_->IsDirty()) {
+ intern_table_->VisitRoots(visitor, arg);
+ }
+ if (class_linker_->IsDirty()) {
+ class_linker_->VisitRoots(visitor, arg);
+ }
+}
+
+void Runtime::VisitNonConcurrentRoots(Heap::RootVisitor* visitor, void* arg) {
Dbg::VisitRoots(visitor, arg);
- class_linker_->VisitRoots(visitor, arg);
- intern_table_->VisitRoots(visitor, arg);
java_vm_->VisitRoots(visitor, arg);
thread_list_->VisitRoots(visitor, arg);
if (pre_allocated_OutOfMemoryError_ != NULL) {
@@ -1013,6 +1020,16 @@ void Runtime::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
}
}
+void Runtime::DirtyRoots() {
+ intern_table_->Dirty();
+ class_linker_->Dirty();
+}
+
+void Runtime::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+ VisitConcurrentRoots(visitor, arg);
+ VisitNonConcurrentRoots(visitor, arg);
+}
+
void Runtime::SetJniDlsymLookupStub(ByteArray* jni_stub_array) {
CHECK(jni_stub_array != NULL) << " jni_stub_array=" << jni_stub_array;
CHECK(jni_stub_array_ == NULL || jni_stub_array_ == jni_stub_array)
diff --git a/src/runtime.h b/src/runtime.h
index a6c662cb45..44823a0d1b 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -224,9 +224,20 @@ class Runtime {
return "2.0.0";
}
- void VisitRoots(Heap::RootVisitor* visitor, void* arg) const
+ // Force all the roots which can be marked concurrently to be dirty.
+ void DirtyRoots();
+
+ // Visit all the roots.
+ void VisitRoots(Heap::RootVisitor* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Visit all of the roots that we can safely visit concurrently.
+ void VisitConcurrentRoots(Heap::RootVisitor* visitor, void* arg);
+
+ // Visit all other roots, which must be visited with mutators suspended.
+ void VisitNonConcurrentRoots(Heap::RootVisitor* visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool HasJniDlsymLookupStub() const {
return jni_stub_array_ != NULL;
}