author Lokesh Gidra <lokeshgidra@google.com> 2022-01-28 12:30:31 -0800
committer Lokesh Gidra <lokeshgidra@google.com> 2022-08-10 18:06:05 +0000
commit b7607c2fd67e12e998aebd71db38414ffc65621b (patch)
tree 0b816edc36dc3a696c366e1e5922018accbde5b7
parent 5d73d6b3e4de8e7a1cb1aa6c8683a6afac7725be (diff)
Update native gc-roots separately in compaction pause
The concurrent compaction algorithm requires all GC roots to be updated to
post-compact addresses before resuming mutators for concurrent compaction.
Therefore, unlike CC, we cannot update native roots in
classes/dex-caches/class-loaders while visiting references (VisitReferences)
on heap objects. This CL separates the two and updates all the gc-roots in
the compaction pause.

Bug: 160737021
Test: art/test/testrunner/testrunner.py
Change-Id: I8a57472ba49b9dc30bc0f41a7db3f5efa7eafd9a
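In the rewritten MarkCompact::PreCompactionPhase() further down, the pause now updates thread roots, then the native roots held in class tables, dex caches, and class loaders, then sweeps system weaks, remaps the moving space, and finally visits the remaining runtime roots, all before mutators resume. A minimal, self-contained sketch of that ordering (stand-in names only, not the actual ART code) looks like this:

```cpp
// Illustrative sketch of the pause ordering introduced by this CL.
// All function names are stand-ins; the real work is done by the visitors
// added in mark_compact.cc below.
#include <iostream>

namespace sketch {

void UpdateThreadRoots()  { std::cout << "1. thread roots + TLAB adjustment\n"; }
void UpdateNativeRoots()  { std::cout << "2. class-table/dex-cache/class-loader native roots\n"; }
void SweepSystemWeaks()   { std::cout << "3. system weaks (intern table, monitors, JNI weaks, JIT)\n"; }
void MremapMovingSpace()  { std::cout << "4. mremap moving space to the from-space mapping\n"; }
void UpdateRuntimeRoots() { std::cout << "5. concurrent + non-thread runtime roots\n"; }

// Everything here runs inside the compaction pause; once it returns, no GC
// root may still point at a pre-compact address.
void PreCompactionPauseSketch() {
  UpdateThreadRoots();    // before mremap/native roots: needs pre-update contents
  UpdateNativeRoots();    // before system weaks: the class linker holds class
                          // loaders and dex caches only as weak roots
  SweepSystemWeaks();
  MremapMovingSpace();    // after native roots: updating them reads the
                          // class/dex-cache/class-loader contents in place
  UpdateRuntimeRoots();
}

}  // namespace sketch

int main() {
  sketch::PreCompactionPauseSketch();
  return 0;
}
```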
-rw-r--r--  runtime/art_method-inl.h  18
-rw-r--r--  runtime/art_method.h  4
-rw-r--r--  runtime/class_linker-inl.h  6
-rw-r--r--  runtime/class_linker.cc  12
-rw-r--r--  runtime/class_linker.h  16
-rw-r--r--  runtime/class_table-inl.h  37
-rw-r--r--  runtime/class_table.h  9
-rw-r--r--  runtime/gc/allocation_record.cc  12
-rw-r--r--  runtime/gc/collector/mark_compact-inl.h  17
-rw-r--r--  runtime/gc/collector/mark_compact.cc  285
-rw-r--r--  runtime/gc/collector/mark_compact.h  40
-rw-r--r--  runtime/intern_table.cc  5
-rw-r--r--  runtime/mirror/class-refvisitor-inl.h  35
-rw-r--r--  runtime/mirror/class.h  12
-rw-r--r--  runtime/mirror/class_ext-inl.h  20
-rw-r--r--  runtime/mirror/class_ext.h  16
-rw-r--r--  runtime/mirror/dex_cache-inl.h  29
-rw-r--r--  runtime/mirror/dex_cache.h  6
-rw-r--r--  runtime/mirror/object.h  2
-rw-r--r--  runtime/runtime.cc  4
-rw-r--r--  runtime/runtime.h  2
-rw-r--r--  runtime/thread.cc  2
22 files changed, 427 insertions(+), 162 deletions(-)
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 844a0ffa9b..b071714382 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -388,17 +388,19 @@ inline bool ArtMethod::HasSingleImplementation() {
return (GetAccessFlags() & kAccSingleImplementation) != 0;
}
-template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
+template<ReadBarrierOption kReadBarrierOption, bool kVisitProxyMethod, typename RootVisitorType>
void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
if (LIKELY(!declaring_class_.IsNull())) {
visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
- ObjPtr<mirror::Class> klass = declaring_class_.Read<kReadBarrierOption>();
- if (UNLIKELY(klass->IsProxyClass())) {
- // For normal methods, dex cache shortcuts will be visited through the declaring class.
- // However, for proxies we need to keep the interface method alive, so we visit its roots.
- ArtMethod* interface_method = GetInterfaceMethodForProxyUnchecked(pointer_size);
- DCHECK(interface_method != nullptr);
- interface_method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+ if (kVisitProxyMethod) {
+ ObjPtr<mirror::Class> klass = declaring_class_.Read<kReadBarrierOption>();
+ if (UNLIKELY(klass->IsProxyClass())) {
+ // For normal methods, dex cache shortcuts will be visited through the declaring class.
+ // However, for proxies we need to keep the interface method alive, so we visit its roots.
+ ArtMethod* interface_method = GetInterfaceMethodForProxyUnchecked(pointer_size);
+ DCHECK(interface_method != nullptr);
+ interface_method->VisitRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
+ }
}
}
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 072dea2c4d..a07d696f25 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -635,7 +635,9 @@ class ArtMethod final {
REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ bool kVisitProxyMethod = true,
+ typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 02b2778f4f..b79f3f5685 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -24,6 +24,7 @@
#include "art_method-inl.h"
#include "base/mutex.h"
#include "class_linker.h"
+#include "class_table-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_structs.h"
#include "gc_root-inl.h"
@@ -592,6 +593,11 @@ inline ArtField* ClassLinker::ResolveField(uint32_t field_idx,
return resolved;
}
+template <typename Visitor>
+inline void ClassLinker::VisitBootClasses(Visitor* visitor) {
+ boot_class_table_->Visit(*visitor);
+}
+
template <class Visitor>
inline void ClassLinker::VisitClassTables(const Visitor& visitor) {
Thread* const self = Thread::Current();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 51870a755f..8921577a99 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -10177,6 +10177,18 @@ void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
}
}
+void ClassLinker::VisitDexCaches(DexCacheVisitor* visitor) const {
+ Thread* const self = Thread::Current();
+ for (const auto& it : dex_caches_) {
+ // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
+ ObjPtr<mirror::DexCache> dex_cache = ObjPtr<mirror::DexCache>::DownCast(
+ self->DecodeJObject(it.second.weak_root));
+ if (dex_cache != nullptr) {
+ visitor->Visit(dex_cache);
+ }
+ }
+}
+
void ClassLinker::VisitAllocators(AllocatorVisitor* visitor) const {
for (const ClassLoaderData& data : class_loaders_) {
LinearAlloc* alloc = data.allocator;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 895d820c7b..1ac47562b2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -128,6 +128,13 @@ class ClassLoaderVisitor {
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
};
+class DexCacheVisitor {
+ public:
+ virtual ~DexCacheVisitor() {}
+ virtual void Visit(ObjPtr<mirror::DexCache> dex_cache)
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_) = 0;
+};
+
template <typename Func>
class ClassLoaderFuncVisitor final : public ClassLoaderVisitor {
public:
@@ -479,6 +486,11 @@ class ClassLinker {
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Visits only the classes in the boot class path.
+ template <typename Visitor>
+ inline void VisitBootClasses(Visitor* visitor)
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
// so that it can visit individual classes without holding
// Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code
@@ -780,6 +792,10 @@ class ClassLinker {
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ // Visit all of the dex caches in the class linker.
+ void VisitDexCaches(DexCacheVisitor* visitor) const
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_);
+
// Checks that a class and its superclass from another class loader have the same virtual methods.
bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 071376cd77..67eeb553a4 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -104,6 +104,43 @@ void ClassTable::VisitRoots(const Visitor& visitor) {
}
}
+template <typename Visitor>
+class ClassTable::TableSlot::ClassAndRootVisitor {
+ public:
+ explicit ClassAndRootVisitor(Visitor& visitor) : visitor_(visitor) {}
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* klass) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(!klass->IsNull());
+ // Visit roots in the klass object
+ visitor_(klass->AsMirrorPtr());
+ // Visit the GC-root holding klass' reference
+ visitor_.VisitRoot(klass);
+ }
+
+ private:
+ Visitor& visitor_;
+};
+
+template <typename Visitor>
+void ClassTable::VisitClassesAndRoots(Visitor& visitor) {
+ TableSlot::ClassAndRootVisitor class_visitor(visitor);
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ for (ClassSet& class_set : classes_) {
+ for (TableSlot& table_slot : class_set) {
+ table_slot.VisitRoot(class_visitor);
+ }
+ }
+ for (GcRoot<mirror::Object>& root : strong_roots_) {
+ visitor.VisitRoot(root.AddressWithoutBarrier());
+ }
+ for (const OatFile* oat_file : oat_files_) {
+ for (GcRoot<mirror::Object>& root : oat_file->GetBssGcRoots()) {
+ visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
+ }
+ }
+}
+
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
bool ClassTable::Visit(Visitor& visitor) {
ReaderMutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 212a7d6631..123c069f0e 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -85,6 +85,9 @@ class ClassTable {
template<typename Visitor>
void VisitRoot(const Visitor& visitor) const NO_THREAD_SAFETY_ANALYSIS;
+ template<typename Visitor>
+ class ClassAndRootVisitor;
+
private:
// Extract a raw pointer from an address.
static ObjPtr<mirror::Class> ExtractPtr(uint32_t data)
@@ -185,6 +188,12 @@ class ClassTable {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<class Visitor>
+ void VisitClassesAndRoots(Visitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Stops visit if the visitor returns false.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
bool Visit(Visitor& visitor)
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 7bcf375b16..561eae7bb8 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -59,6 +59,13 @@ AllocRecordObjectMap::~AllocRecordObjectMap() {
}
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ // When we are compacting in userfaultfd GC, the class GC-roots are already
+ // updated in SweepAllocationRecords()->SweepClassObject().
+ if (heap->CurrentCollectorType() == gc::CollectorType::kCollectorTypeCMC
+ && heap->MarkCompactCollector()->IsCompacting(Thread::Current())) {
+ return;
+ }
CHECK_LE(recent_record_max_, alloc_record_max_);
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
size_t count = recent_record_max_;
@@ -92,7 +99,10 @@ static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visito
mirror::Object* new_object = visitor->IsMarked(old_object);
DCHECK(new_object != nullptr);
if (UNLIKELY(old_object != new_object)) {
- klass = GcRoot<mirror::Class>(new_object->AsClass());
+ // We can't use AsClass() as it uses IsClass in a DCHECK, which expects
+ // the class' contents to be there. This is not the case in userfaultfd
+ // GC.
+ klass = GcRoot<mirror::Class>(ObjPtr<mirror::Class>::DownCast(new_object));
}
}
}
diff --git a/runtime/gc/collector/mark_compact-inl.h b/runtime/gc/collector/mark_compact-inl.h
index 6f636c225e..3db51bf732 100644
--- a/runtime/gc/collector/mark_compact-inl.h
+++ b/runtime/gc/collector/mark_compact-inl.h
@@ -183,7 +183,7 @@ inline void MarkCompact::UpdateRef(mirror::Object* obj, MemberOffset offset) {
if (kIsDebugBuild) {
if (live_words_bitmap_->HasAddress(old_ref)
&& reinterpret_cast<uint8_t*>(old_ref) < black_allocations_begin_
- && !current_space_bitmap_->Test(old_ref)) {
+ && !moving_space_bitmap_->Test(old_ref)) {
mirror::Object* from_ref = GetFromSpaceAddr(old_ref);
std::ostringstream oss;
heap_->DumpSpaces(oss);
@@ -194,7 +194,7 @@ inline void MarkCompact::UpdateRef(mirror::Object* obj, MemberOffset offset) {
<< " obj=" << obj
<< " obj-validity=" << IsValidObject(obj)
<< " from-space=" << static_cast<void*>(from_space_begin_)
- << " bitmap= " << current_space_bitmap_->DumpMemAround(old_ref)
+ << " bitmap= " << moving_space_bitmap_->DumpMemAround(old_ref)
<< " from_ref "
<< heap_->GetVerification()->DumpRAMAroundAddress(
reinterpret_cast<uintptr_t>(from_ref), 128)
@@ -223,7 +223,7 @@ inline bool MarkCompact::VerifyRootSingleUpdate(void* root,
if (!live_words_bitmap_->HasAddress(old_ref)) {
return false;
}
- if (stack_end == nullptr) {
+ if (UNLIKELY(stack_end == nullptr)) {
pthread_attr_t attr;
size_t stack_size;
pthread_getattr_np(pthread_self(), &attr);
@@ -241,7 +241,8 @@ inline bool MarkCompact::VerifyRootSingleUpdate(void* root,
DCHECK(reinterpret_cast<uint8_t*>(old_ref) >= black_allocations_begin_
|| live_words_bitmap_->Test(old_ref))
<< "ref=" << old_ref
- << " RootInfo=" << info;
+ << " <" << mirror::Object::PrettyTypeOf(old_ref)
+ << "> RootInfo [" << info << "]";
return true;
}
@@ -249,7 +250,7 @@ inline void MarkCompact::UpdateRoot(mirror::CompressedReference<mirror::Object>*
const RootInfo& info) {
DCHECK(!root->IsNull());
mirror::Object* old_ref = root->AsMirrorPtr();
- if (VerifyRootSingleUpdate(root, old_ref, info)) {
+ if (!kIsDebugBuild || VerifyRootSingleUpdate(root, old_ref, info)) {
mirror::Object* new_ref = PostCompactAddress(old_ref);
if (old_ref != new_ref) {
root->Assign(new_ref);
@@ -259,7 +260,7 @@ inline void MarkCompact::UpdateRoot(mirror::CompressedReference<mirror::Object>*
inline void MarkCompact::UpdateRoot(mirror::Object** root, const RootInfo& info) {
mirror::Object* old_ref = *root;
- if (VerifyRootSingleUpdate(root, old_ref, info)) {
+ if (!kIsDebugBuild || VerifyRootSingleUpdate(root, old_ref, info)) {
mirror::Object* new_ref = PostCompactAddress(old_ref);
if (old_ref != new_ref) {
*root = new_ref;
@@ -317,14 +318,14 @@ inline mirror::Object* MarkCompact::PostCompactAddressUnchecked(mirror::Object*
mirror::Object* from_ref = GetFromSpaceAddr(old_ref);
DCHECK(live_words_bitmap_->Test(old_ref))
<< "ref=" << old_ref;
- if (!current_space_bitmap_->Test(old_ref)) {
+ if (!moving_space_bitmap_->Test(old_ref)) {
std::ostringstream oss;
Runtime::Current()->GetHeap()->DumpSpaces(oss);
MemMap::DumpMaps(oss, /* terse= */ true);
LOG(FATAL) << "ref=" << old_ref
<< " from_ref=" << from_ref
<< " from-space=" << static_cast<void*>(from_space_begin_)
- << " bitmap= " << current_space_bitmap_->DumpMemAround(old_ref)
+ << " bitmap= " << moving_space_bitmap_->DumpMemAround(old_ref)
<< heap_->GetVerification()->DumpRAMAroundAddress(
reinterpret_cast<uintptr_t>(from_ref), 128)
<< " maps\n" << oss.str();
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 8d98a3cf30..2da7848ba6 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -149,8 +149,8 @@ void MarkCompact::BindAndResetBitmaps() {
if (space == bump_pointer_space_) {
// It is OK to clear the bitmap with mutators running since the only
// place it is read is VisitObjects which has exclusion with this GC.
- current_space_bitmap_ = bump_pointer_space_->GetMarkBitmap();
- current_space_bitmap_->Clear();
+ moving_space_bitmap_ = bump_pointer_space_->GetMarkBitmap();
+ moving_space_bitmap_->Clear();
} else {
CHECK(space == heap_->GetNonMovingSpace());
non_moving_space_ = space;
@@ -169,6 +169,7 @@ void MarkCompact::InitializePhase() {
non_moving_first_objs_count_ = 0;
black_page_count_ = 0;
from_space_slide_diff_ = from_space_begin_ - bump_pointer_space_->Begin();
+ black_allocations_begin_ = bump_pointer_space_->Limit();
}
void MarkCompact::RunPhases() {
@@ -187,6 +188,10 @@ void MarkCompact::RunPhases() {
bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
}
}
+ // To increase likelihood of black allocations. For testing purposes only.
+ if (kIsDebugBuild && heap_->GetTaskProcessor()->GetRunningThread() == thread_running_gc_) {
+ sleep(3);
+ }
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
ReclaimPhase();
@@ -194,7 +199,15 @@ void MarkCompact::RunPhases() {
}
compacting_ = true;
- PreCompactionPhase();
+ {
+ heap_->ThreadFlipBegin(self);
+ {
+ ScopedPause pause(this);
+ PreCompactionPhase();
+ }
+ heap_->ThreadFlipEnd(self);
+ }
+
if (kConcurrentCompaction) {
ReaderMutexLock mu(self, *Locks::mutator_lock_);
CompactionPhase();
@@ -212,7 +225,7 @@ void MarkCompact::InitMovingSpaceFirstObjects(const size_t vec_len) {
uint32_t offset_in_chunk_word;
uint32_t offset;
mirror::Object* obj;
- const uintptr_t heap_begin = current_space_bitmap_->HeapBegin();
+ const uintptr_t heap_begin = moving_space_bitmap_->HeapBegin();
size_t chunk_idx;
// Find the first live word in the space
@@ -272,7 +285,7 @@ void MarkCompact::InitMovingSpaceFirstObjects(const size_t vec_len) {
//
// Find the object which encapsulates offset in it, which could be
// starting at offset itself.
- obj = current_space_bitmap_->FindPrecedingObject(heap_begin + offset * kAlignment);
+ obj = moving_space_bitmap_->FindPrecedingObject(heap_begin + offset * kAlignment);
// TODO: add a check to validate the object.
pre_compact_offset_moving_space_[to_space_page_idx] = offset;
first_objs_moving_space_[to_space_page_idx].Assign(obj);
@@ -423,7 +436,6 @@ void MarkCompact::PrepareForCompaction() {
// appropriately updated in the pre-compaction pause.
// The chunk-info vector entries for the post marking-pause allocations will be
// also updated in the pre-compaction pause.
- updated_roots_.reserve(1000000);
}
class MarkCompact::VerifyRootMarkedVisitor : public SingleRootVisitor {
@@ -508,7 +520,7 @@ void MarkCompact::MarkingPause() {
heap_->GetReferenceProcessor()->EnableSlowPath();
// Capture 'end' of moving-space at this point. Every allocation beyond this
- // point will be considered as in to-space.
+ // point will be considered as black.
// Align-up to page boundary so that black allocations happen from next page
// onwards.
black_allocations_begin_ = bump_pointer_space_->AlignEnd(thread_running_gc_, kPageSize);
@@ -520,8 +532,7 @@ void MarkCompact::SweepSystemWeaks(Thread* self, Runtime* runtime, const bool pa
TimingLogger::ScopedTiming t(paused ? "(Paused)SweepSystemWeaks" : "SweepSystemWeaks",
GetTimings());
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- // Don't sweep JIT weaks with other. They are separately done.
- runtime->SweepSystemWeaks(this, !paused);
+ runtime->SweepSystemWeaks(this);
}
void MarkCompact::ProcessReferences(Thread* self) {
@@ -676,7 +687,7 @@ void MarkCompact::VerifyObject(mirror::Object* ref, Callback& callback) const {
mirror::Class* klass_klass_klass = klass_klass->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
if (bump_pointer_space_->HasAddress(pre_compact_klass) &&
reinterpret_cast<uint8_t*>(pre_compact_klass) < black_allocations_begin_) {
- CHECK(current_space_bitmap_->Test(pre_compact_klass))
+ CHECK(moving_space_bitmap_->Test(pre_compact_klass))
<< "ref=" << ref
<< " post_compact_end=" << static_cast<void*>(post_compact_end_)
<< " pre_compact_klass=" << pre_compact_klass
@@ -712,7 +723,7 @@ void MarkCompact::VerifyObject(mirror::Object* ref, Callback& callback) const {
void MarkCompact::CompactPage(mirror::Object* obj, uint32_t offset, uint8_t* addr) {
DCHECK(IsAligned<kPageSize>(addr));
- DCHECK(current_space_bitmap_->Test(obj)
+ DCHECK(moving_space_bitmap_->Test(obj)
&& live_words_bitmap_->Test(obj));
DCHECK(live_words_bitmap_->Test(offset)) << "obj=" << obj
<< " offset=" << offset
@@ -766,10 +777,10 @@ void MarkCompact::CompactPage(mirror::Object* obj, uint32_t offset, uint8_t* add
+ stride_begin
* kAlignment);
CHECK(live_words_bitmap_->Test(o)) << "ref=" << o;
- CHECK(current_space_bitmap_->Test(o))
+ CHECK(moving_space_bitmap_->Test(o))
<< "ref=" << o
<< " bitmap: "
- << current_space_bitmap_->DumpMemAround(o);
+ << moving_space_bitmap_->DumpMemAround(o);
VerifyObject(reinterpret_cast<mirror::Object*>(addr),
verify_obj_callback);
}
@@ -984,7 +995,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
uintptr_t start_visit = reinterpret_cast<uintptr_t>(pre_compact_addr);
uintptr_t page_end = reinterpret_cast<uintptr_t>(pre_compact_page_end);
mirror::Object* found_obj = nullptr;
- current_space_bitmap_->VisitMarkedRange</*kVisitOnce*/true>(start_visit,
+ moving_space_bitmap_->VisitMarkedRange</*kVisitOnce*/true>(start_visit,
page_end,
[&found_obj](mirror::Object* obj) {
found_obj = obj;
@@ -999,7 +1010,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
// in-between.
std::memcpy(dest, src_addr, remaining_bytes);
DCHECK_LT(reinterpret_cast<uintptr_t>(found_obj), page_end);
- current_space_bitmap_->VisitMarkedRange(
+ moving_space_bitmap_->VisitMarkedRange(
reinterpret_cast<uintptr_t>(found_obj) + mirror::kObjectHeaderSize,
page_end,
[&found_obj, pre_compact_addr, dest, this, verify_obj_callback] (mirror::Object* obj)
@@ -1175,13 +1186,14 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
// BumpPointerSpace::Walk() also works similarly.
while (black_allocs < block_end
&& obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ RememberDexCaches(obj);
if (first_obj == nullptr) {
first_obj = obj;
}
// We only need the mark-bitmap in the pages wherein a new TLAB starts in
// the middle of the page.
if (set_mark_bit) {
- current_space_bitmap_->Set(obj);
+ moving_space_bitmap_->Set(obj);
}
size_t obj_size = RoundUp(obj->SizeOf(), kAlignment);
// Handle objects which cross page boundary, including objects larger
@@ -1306,60 +1318,112 @@ class MarkCompact::ImmuneSpaceUpdateObjVisitor {
MarkCompact* const collector_;
};
-class MarkCompact::StackRefsUpdateVisitor : public Closure {
+// TODO: JVMTI redefinition leads to situations wherein new class object(s) and the
+// corresponding native roots are setup but are not linked to class tables and
+// therefore are not accessible, leading to memory corruption.
+class MarkCompact::NativeRootsUpdateVisitor : public ClassLoaderVisitor, public DexCacheVisitor {
public:
- explicit StackRefsUpdateVisitor(MarkCompact* collector, size_t bytes)
- : collector_(collector), adjust_bytes_(bytes) {}
+ explicit NativeRootsUpdateVisitor(MarkCompact* collector, PointerSize pointer_size)
+ : collector_(collector), pointer_size_(pointer_size) {}
- void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
- // Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
- CHECK(thread == self
- || thread->IsSuspended()
- || thread->GetState() == ThreadState::kWaitingPerformingGc)
- << thread->GetState() << " thread " << thread << " self " << self;
- thread->VisitRoots(collector_, kVisitRootFlagAllRoots);
- // Subtract adjust_bytes_ from TLAB pointers (top, end etc.) to align it
- // with the black-page slide that is performed during compaction.
- thread->AdjustTlab(adjust_bytes_);
- // TODO: update the TLAB pointers.
- collector_->GetBarrier().Pass(self);
+ ~NativeRootsUpdateVisitor() {
+ LOG(INFO) << "num_classes: " << classes_visited_.size()
+ << " num_dex_caches: " << dex_caches_visited_.size();
}
- private:
- MarkCompact* const collector_;
- const size_t adjust_bytes_;
-};
+ void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) {
+ ClassTable* const class_table = class_loader->GetClassTable();
+ if (class_table != nullptr) {
+ class_table->VisitClassesAndRoots(*this);
+ }
+ }
-class MarkCompact::CompactionPauseCallback : public Closure {
- public:
- explicit CompactionPauseCallback(MarkCompact* collector) : collector_(collector) {}
+ void Visit(ObjPtr<mirror::DexCache> dex_cache) override
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ if (!dex_cache.IsNull()) {
+ uint32_t cache = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(dex_cache.Ptr()));
+ if (dex_caches_visited_.insert(cache).second) {
+ dex_cache->VisitNativeRoots<kDefaultVerifyFlags, kWithoutReadBarrier>(*this);
+ collector_->dex_caches_.erase(cache);
+ }
+ }
+ }
- void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
- DCHECK_EQ(thread, collector_->thread_running_gc_);
- {
- pthread_attr_t attr;
- size_t stack_size;
- void* stack_addr;
- pthread_getattr_np(pthread_self(), &attr);
- pthread_attr_getstack(&attr, &stack_addr, &stack_size);
- pthread_attr_destroy(&attr);
- collector_->stack_addr_ = stack_addr;
- collector_->stack_end_ = reinterpret_cast<char*>(stack_addr) + stack_size;
+ void VisitDexCache(mirror::DexCache* dex_cache)
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ dex_cache->VisitNativeRoots<kDefaultVerifyFlags, kWithoutReadBarrier>(*this);
+ }
+
+ void operator()(mirror::Object* obj)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj->IsClass<kDefaultVerifyFlags>());
+ ObjPtr<mirror::Class> klass = obj->AsClass<kDefaultVerifyFlags>();
+ VisitClassRoots(klass);
+ }
+
+ // For ClassTable::Visit()
+ bool operator()(ObjPtr<mirror::Class> klass)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!klass.IsNull()) {
+ VisitClassRoots(klass);
}
- collector_->CompactionPause();
+ return true;
+ }
- collector_->stack_end_ = nullptr;
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ collector_->UpdateRoot(root);
}
private:
+ void VisitClassRoots(ObjPtr<mirror::Class> klass)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Class* klass_ptr = klass.Ptr();
+ uint32_t k = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(klass_ptr));
+ // No reason to visit native roots of class in immune spaces.
+ if ((collector_->bump_pointer_space_->HasAddress(klass_ptr)
+ || collector_->non_moving_space_->HasAddress(klass_ptr))
+ && classes_visited_.insert(k).second) {
+ klass->VisitNativeRoots<kWithoutReadBarrier, /*kVisitProxyMethod*/false>(*this,
+ pointer_size_);
+ klass->VisitObsoleteDexCaches<kWithoutReadBarrier>(*this);
+ klass->VisitObsoleteClass<kWithoutReadBarrier>(*this);
+ }
+ }
+
+ std::unordered_set<uint32_t> dex_caches_visited_;
+ std::unordered_set<uint32_t> classes_visited_;
MarkCompact* const collector_;
+ PointerSize pointer_size_;
};
-void MarkCompact::CompactionPause() {
+void MarkCompact::PreCompactionPhase() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime* runtime = Runtime::Current();
non_moving_space_bitmap_ = non_moving_space_->GetLiveBitmap();
+ if (kIsDebugBuild) {
+ pthread_attr_t attr;
+ size_t stack_size;
+ void* stack_addr;
+ pthread_getattr_np(pthread_self(), &attr);
+ pthread_attr_getstack(&attr, &stack_addr, &stack_size);
+ pthread_attr_destroy(&attr);
+ stack_addr_ = stack_addr;
+ stack_end_ = reinterpret_cast<char*>(stack_addr) + stack_size;
+ }
+
{
TimingLogger::ScopedTiming t2("(Paused)UpdateCompactionDataStructures", GetTimings());
ReaderMutexLock rmu(thread_running_gc_, *Locks::heap_bitmap_lock_);
@@ -1387,17 +1451,46 @@ void MarkCompact::CompactionPause() {
heap_->GetReferenceProcessor()->UpdateRoots(this);
}
- if (runtime->GetJit() != nullptr) {
- runtime->GetJit()->GetCodeCache()->SweepRootTables(this);
+
+ {
+ // Thread roots must be updated first (before space mremap and native root
+ // updation) to ensure that pre-update content is accessible.
+ TimingLogger::ScopedTiming t2("(Paused)UpdateThreadRoots", GetTimings());
+ MutexLock mu1(thread_running_gc_, *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(thread_running_gc_, *Locks::thread_list_lock_);
+ std::list<Thread*> thread_list = runtime->GetThreadList()->GetList();
+ for (Thread* thread : thread_list) {
+ thread->VisitRoots(this, kVisitRootFlagAllRoots);
+ thread->AdjustTlab(black_objs_slide_diff_);
+ }
}
{
- // TODO: Calculate freed objects and update that as well.
- int32_t freed_bytes = black_objs_slide_diff_;
- bump_pointer_space_->RecordFree(0, freed_bytes);
- RecordFree(ObjectBytePair(0, freed_bytes));
+ // Native roots must be updated before updating system weaks as class linker
+ // holds roots to class loaders and dex-caches as weak roots. Also, space
+ // mremap must be done after this step as we require reading
+ // class/dex-cache/class-loader content for updating native roots.
+ TimingLogger::ScopedTiming t2("(Paused)UpdateNativeRoots", GetTimings());
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ NativeRootsUpdateVisitor visitor(this, class_linker->GetImagePointerSize());
+ {
+ ReaderMutexLock rmu(thread_running_gc_, *Locks::classlinker_classes_lock_);
+ class_linker->VisitBootClasses(&visitor);
+ class_linker->VisitClassLoaders(&visitor);
+ }
+ {
+ WriterMutexLock wmu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock rmu(thread_running_gc_, *Locks::dex_lock_);
+ class_linker->VisitDexCaches(&visitor);
+ for (uint32_t cache : dex_caches_) {
+ visitor.VisitDexCache(reinterpret_cast<mirror::DexCache*>(cache));
+ }
+ }
+ dex_caches_.clear();
}
+ SweepSystemWeaks(thread_running_gc_, runtime, /*paused*/true);
+
{
TimingLogger::ScopedTiming t2("(Paused)Mremap", GetTimings());
// TODO: Create mappings at 2MB aligned addresses to benefit from optimized
@@ -1415,12 +1508,14 @@ void MarkCompact::CompactionPause() {
<< errno;
}
- if (!kConcurrentCompaction) {
- // We need to perform the heap compaction *before* root updation (below) so
- // that assumptions that objects have already been compacted and laid down
- // are not broken.
- UpdateNonMovingSpace();
- CompactMovingSpace();
+ {
+ TimingLogger::ScopedTiming t2("(Paused)UpdateConcurrentRoots", GetTimings());
+ runtime->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
+ }
+ {
+ // TODO: don't visit the transaction roots if it's not active.
+ TimingLogger::ScopedTiming t2("(Paused)UpdateNonThreadRoots", GetTimings());
+ runtime->VisitNonThreadRoots(this);
}
{
@@ -1449,39 +1544,18 @@ void MarkCompact::CompactionPause() {
}
}
}
- {
- TimingLogger::ScopedTiming t2("(Paused)UpdateConcurrentRoots", GetTimings());
- runtime->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
- }
- {
- // TODO: don't visit the transaction roots if it's not active.
- TimingLogger::ScopedTiming t2("(Paused)UpdateNonThreadRoots", GetTimings());
- runtime->VisitNonThreadRoots(this);
- }
- {
- TimingLogger::ScopedTiming t2("(Paused)UpdateSystemWeaks", GetTimings());
- SweepSystemWeaks(thread_running_gc_, runtime, /*paused*/true);
- }
-}
-void MarkCompact::PreCompactionPhase() {
- TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
- DCHECK_EQ(Thread::Current(), thread_running_gc_);
- Locks::mutator_lock_->AssertNotHeld(thread_running_gc_);
- gc_barrier_.Init(thread_running_gc_, 0);
- StackRefsUpdateVisitor thread_visitor(this, black_objs_slide_diff_);
- CompactionPauseCallback callback(this);
- // To increase likelihood of black allocations. For testing purposes only.
- if (kIsDebugBuild && heap_->GetTaskProcessor()->GetRunningThread() == thread_running_gc_) {
- sleep(10);
+ if (!kConcurrentCompaction) {
+ UpdateNonMovingSpace();
+ CompactMovingSpace();
}
- size_t barrier_count = Runtime::Current()->GetThreadList()->FlipThreadRoots(
- &thread_visitor, &callback, this, GetHeap()->GetGcPauseListener());
-
+ stack_end_ = nullptr;
{
- ScopedThreadStateChange tsc(thread_running_gc_, ThreadState::kWaitingForCheckPointsToRun);
- gc_barrier_.Increment(thread_running_gc_, barrier_count);
+ // TODO: Calculate freed objects and update that as well.
+ int32_t freed_bytes = black_objs_slide_diff_;
+ bump_pointer_space_->RecordFree(0, freed_bytes);
+ RecordFree(ObjectBytePair(0, freed_bytes));
}
}
@@ -1895,10 +1969,18 @@ template <bool kUpdateLiveWords>
void MarkCompact::ScanObject(mirror::Object* obj) {
RefFieldsVisitor visitor(this);
DCHECK(IsMarked(obj)) << "Scanning marked object " << obj << "\n" << heap_->DumpSpaces();
- if (kUpdateLiveWords && current_space_bitmap_->HasAddress(obj)) {
+ if (kUpdateLiveWords && moving_space_bitmap_->HasAddress(obj)) {
UpdateLivenessInfo(obj);
}
obj->VisitReferences(visitor, visitor);
+ RememberDexCaches(obj);
+}
+
+void MarkCompact::RememberDexCaches(mirror::Object* obj) {
+ if (obj->IsDexCache()) {
+ dex_caches_.insert(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
+ }
}
// Scan anything that's on the mark stack.
@@ -1945,9 +2027,9 @@ inline bool MarkCompact::MarkObjectNonNullNoPush(mirror::Object* obj,
MemberOffset offset) {
// We expect most of the references to be in bump-pointer space, so try that
// first to keep the cost of this function minimal.
- if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
- return kParallel ? !current_space_bitmap_->AtomicTestAndSet(obj)
- : !current_space_bitmap_->Set(obj);
+ if (LIKELY(moving_space_bitmap_->HasAddress(obj))) {
+ return kParallel ? !moving_space_bitmap_->AtomicTestAndSet(obj)
+ : !moving_space_bitmap_->Set(obj);
} else if (non_moving_space_bitmap_->HasAddress(obj)) {
return kParallel ? !non_moving_space_bitmap_->AtomicTestAndSet(obj)
: !non_moving_space_bitmap_->Set(obj);
@@ -2022,9 +2104,10 @@ void MarkCompact::VisitRoots(mirror::CompressedReference<mirror::Object>** roots
mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
CHECK(obj != nullptr);
- if (current_space_bitmap_->HasAddress(obj)) {
+ if (moving_space_bitmap_->HasAddress(obj)) {
+ const bool is_black = reinterpret_cast<uint8_t*>(obj) >= black_allocations_begin_;
if (compacting_) {
- if (reinterpret_cast<uint8_t*>(obj) > black_allocations_begin_) {
+ if (is_black) {
return PostCompactBlackObjAddr(obj);
} else if (live_words_bitmap_->Test(obj)) {
return PostCompactOldObjAddr(obj);
@@ -2032,7 +2115,7 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
return nullptr;
}
}
- return current_space_bitmap_->Test(obj) ? obj : nullptr;
+ return (is_black || moving_space_bitmap_->Test(obj)) ? obj : nullptr;
} else if (non_moving_space_bitmap_->HasAddress(obj)) {
return non_moving_space_bitmap_->Test(obj) ? obj : nullptr;
} else if (immune_spaces_.ContainsObject(obj)) {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 92f0830cdf..d8c500c6f3 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -33,6 +33,11 @@
#include "offsets.h"
namespace art {
+
+namespace mirror {
+class DexCache;
+}
+
namespace gc {
class Heap;
@@ -56,8 +61,8 @@ class MarkCompact : public GarbageCollector {
// pause or during concurrent compaction. The flag is reset after compaction
// is completed and never accessed by mutators. Therefore, safe to update
// without any memory ordering.
- bool IsCompacting() const {
- return compacting_;
+ bool IsCompacting(Thread* self) const {
+ return compacting_ && self == thread_running_gc_;
}
GcType GetGcType() const override {
@@ -112,11 +117,11 @@ class MarkCompact : public GarbageCollector {
void CompactionPause() REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_);
mirror::Object* GetFromSpaceAddrFromBarrier(mirror::Object* old_ref) {
- CHECK(compacting_);
- if (live_words_bitmap_->HasAddress(old_ref)) {
- return GetFromSpaceAddr(old_ref);
- }
- return old_ref;
+ CHECK(compacting_);
+ if (live_words_bitmap_->HasAddress(old_ref)) {
+ return GetFromSpaceAddr(old_ref);
+ }
+ return old_ref;
}
private:
@@ -252,7 +257,10 @@ class MarkCompact : public GarbageCollector {
// during a stop-the-world (STW) pause.
void MarkingPause() REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_);
// Perform stop-the-world pause prior to concurrent compaction.
- void PreCompactionPhase() REQUIRES(!Locks::mutator_lock_);
+ // Updates GC-roots and protects heap so that during the concurrent
+ // compaction phase we can receive faults and compact the corresponding pages
+ // on the fly.
+ void PreCompactionPhase() REQUIRES(Locks::mutator_lock_);
// Compute offsets (in chunk_info_vec_) and other data structures required
// during concurrent compaction.
void PrepareForCompaction() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -386,6 +394,14 @@ class MarkCompact : public GarbageCollector {
void SweepLargeObjects(bool swap_bitmaps) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
+ // Store all the dex-cache objects visited during marking phase.
+ // This is required during compaction phase to ensure that we don't miss any
+ // of them from visiting (to update references). Somehow, iterating over
+ // class-tables to fetch these misses some of them, leading to memory
+ // corruption.
+ // TODO: once we implement concurrent compaction of classes and dex-caches,
+ // which will visit all of them, we should remove this.
+ void RememberDexCaches(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// For checkpoints
Barrier gc_barrier_;
// Every object inside the immune spaces is assumed to be marked.
@@ -406,12 +422,15 @@ class MarkCompact : public GarbageCollector {
// TODO: Must be replaced with an efficient mechanism eventually. Or ensure
// that double updation doesn't happen in the first place.
std::unordered_set<void*> updated_roots_;
+ // Set of dex-caches visited during marking. See comment above
+ // RememberDexCaches() for the explanation.
+ std::unordered_set<uint32_t> dex_caches_;
MemMap from_space_map_;
// Any array of live-bytes in logical chunks of kOffsetChunkSize size
// in the 'to-be-compacted' space.
MemMap info_map_;
// The main space bitmap
- accounting::ContinuousSpaceBitmap* current_space_bitmap_;
+ accounting::ContinuousSpaceBitmap* moving_space_bitmap_;
accounting::ContinuousSpaceBitmap* non_moving_space_bitmap_;
space::ContinuousSpace* non_moving_space_;
space::BumpPointerSpace* const bump_pointer_space_;
@@ -472,8 +491,7 @@ class MarkCompact : public GarbageCollector {
class CardModifiedVisitor;
class RefFieldsVisitor;
template <bool kCheckBegin, bool kCheckEnd> class RefsUpdateVisitor;
- class StackRefsUpdateVisitor;
- class CompactionPauseCallback;
+ class NativeRootsUpdateVisitor;
class ImmuneSpaceUpdateObjVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index f587d0170f..49733550fe 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -405,7 +405,10 @@ void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
if (new_object == nullptr) {
it = set->erase(it);
} else {
- *it = GcRoot<mirror::String>(new_object->AsString());
+ // Don't use AsString as it does IsString check in debug builds which, in
+ // case of userfaultfd GC, is called when the object's content isn't
+ // there yet.
+ *it = GcRoot<mirror::String>(ObjPtr<mirror::String>::DownCast(new_object));
++it;
}
}
diff --git a/runtime/mirror/class-refvisitor-inl.h b/runtime/mirror/class-refvisitor-inl.h
index 1dd7c107d4..9bcfd03f3b 100644
--- a/runtime/mirror/class-refvisitor-inl.h
+++ b/runtime/mirror/class-refvisitor-inl.h
@@ -30,11 +30,6 @@ template <bool kVisitNativeRoots,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
inline void Class::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
- if (kVisitNativeRoots) {
- // Since this class is reachable, we must also visit the associated roots when we scan it.
- VisitNativeRoots<kReadBarrierOption>(
- visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
- }
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass.Ptr(), visitor);
// Right after a class is allocated, but not yet loaded
// (ClassStatus::kNotReady, see ClassLinker::LoadClass()), GC may find it
@@ -49,14 +44,17 @@ inline void Class::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
// linked yet.
VisitStaticFieldsReferences<kVerifyFlags, kReadBarrierOption>(this, visitor);
}
+ if (kVisitNativeRoots) {
+ // Since this class is reachable, we must also visit the associated roots when we scan it.
+ VisitNativeRoots<kReadBarrierOption>(
+ visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ }
}
-template<ReadBarrierOption kReadBarrierOption, class Visitor>
+template<ReadBarrierOption kReadBarrierOption, bool kVisitProxyMethod, class Visitor>
void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
VisitFields<kReadBarrierOption>([&](ArtField* field) REQUIRES_SHARED(art::Locks::mutator_lock_) {
field->VisitRoots(visitor);
- // TODO: Once concurrent mark-compact GC is made concurrent and stops using
- // kVisitNativeRoots, remove the following condition
if (kIsDebugBuild && !kUseUserfaultfd && IsResolved()) {
CHECK_EQ(field->GetDeclaringClass<kReadBarrierOption>(), this)
<< GetStatus() << field->GetDeclaringClass()->PrettyClass() << " != " << PrettyClass();
@@ -64,11 +62,28 @@ void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
});
// Don't use VisitMethods because we don't want to hit the class-ext methods twice.
for (ArtMethod& method : GetMethods(pointer_size)) {
- method.VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+ method.VisitRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
}
ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
if (!ext.IsNull()) {
- ext->VisitNativeRoots<kReadBarrierOption, Visitor>(visitor, pointer_size);
+ ext->VisitNativeRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
+ }
+}
+
+template<ReadBarrierOption kReadBarrierOption>
+void Class::VisitObsoleteDexCaches(DexCacheVisitor& visitor) {
+ ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
+ if (!ext.IsNull()) {
+ ext->VisitDexCaches<kDefaultVerifyFlags, kReadBarrierOption>(visitor);
+ }
+}
+
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void Class::VisitObsoleteClass(Visitor& visitor) {
+ ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
+ if (!ext.IsNull()) {
+ ObjPtr<Class> klass = ext->GetObsoleteClass<kDefaultVerifyFlags, kReadBarrierOption>();
+ visitor(klass);
}
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ecf7856642..fa6b711343 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -64,6 +64,7 @@ class Signature;
template<typename T> class StrideIterator;
template<size_t kNumReferences> class PACKED(4) StackHandleScope;
class Thread;
+class DexCacheVisitor;
namespace mirror {
@@ -1176,10 +1177,19 @@ class MANAGED Class final : public Object {
// Visit native roots visits roots which are keyed off the native pointers such as ArtFields and
// ArtMethods.
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ bool kVisitProxyMethod = true,
+ class Visitor>
void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Visit obsolete dex caches possibly stored in ext_data_
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ void VisitObsoleteDexCaches(DexCacheVisitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+ void VisitObsoleteClass(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Visit ArtMethods directly owned by this class.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
void VisitMethods(Visitor visitor, PointerSize pointer_size)
diff --git a/runtime/mirror/class_ext-inl.h b/runtime/mirror/class_ext-inl.h
index ddd46b9bcb..9d6ac433e8 100644
--- a/runtime/mirror/class_ext-inl.h
+++ b/runtime/mirror/class_ext-inl.h
@@ -23,6 +23,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/globals.h"
+#include "class_linker.h"
#include "handle_scope.h"
#include "jni/jni_internal.h"
#include "jni_id_type.h"
@@ -148,8 +149,9 @@ inline ObjPtr<Throwable> ClassExt::GetErroneousStateError() {
return GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(ClassExt, erroneous_state_error_));
}
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ObjPtr<ObjectArray<DexCache>> ClassExt::GetObsoleteDexCaches() {
- return GetFieldObject<ObjectArray<DexCache>>(
+ return GetFieldObject<ObjectArray<DexCache>, kVerifyFlags, kReadBarrierOption>(
OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_dex_caches_));
}
@@ -164,13 +166,25 @@ inline ObjPtr<Object> ClassExt::GetOriginalDexFile() {
return GetFieldObject<Object>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_));
}
-template<ReadBarrierOption kReadBarrierOption, class Visitor>
+template<ReadBarrierOption kReadBarrierOption, bool kVisitProxyMethod, class Visitor>
void ClassExt::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
VisitMethods<kReadBarrierOption>([&](ArtMethod* method) {
- method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+ method->VisitRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
}, pointer_size);
}
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+void ClassExt::VisitDexCaches(DexCacheVisitor& visitor) {
+ ObjPtr<ObjectArray<DexCache>> arr(GetObsoleteDexCaches<kVerifyFlags, kReadBarrierOption>());
+ if (!arr.IsNull()) {
+ int32_t len = arr->GetLength();
+ for (int32_t i = 0; i < len; i++) {
+ ObjPtr<mirror::DexCache> dex_cache = arr->Get<kVerifyFlags, kReadBarrierOption>(i);
+ visitor.Visit(dex_cache);
+ }
+ }
+}
+
template<ReadBarrierOption kReadBarrierOption, class Visitor>
void ClassExt::VisitMethods(Visitor visitor, PointerSize pointer_size) {
ObjPtr<PointerArray> arr(GetObsoleteMethods<kDefaultVerifyFlags, kReadBarrierOption>());
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index b805ea0582..b025eb21af 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -27,6 +27,7 @@
namespace art {
struct ClassExtOffsets;
+class DexCacheVisitor;
namespace mirror {
@@ -46,6 +47,8 @@ class MANAGED ClassExt : public Object {
ObjPtr<Throwable> GetErroneousStateError() REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<ObjectArray<DexCache>> GetObsoleteDexCaches() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -126,10 +129,21 @@ class MANAGED ClassExt : public Object {
static bool ExtendObsoleteArrays(Handle<ClassExt> h_this, Thread* self, uint32_t increase)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ bool kVisitProxyMethod = true,
+ class Visitor>
inline void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS for dex_lock and heap_bitmap_lock_ as both are at
+ // higher lock-level than class-table's lock, which is already acquired and
+ // is at lower (kClassLoaderClassesLock) level.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ inline void VisitDexCaches(DexCacheVisitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
inline void VisitMethods(Visitor visitor, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 2791fe33a5..74f9ccbbda 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -405,20 +405,27 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
if (kVisitNativeRoots) {
- VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
- GetStrings<kVerifyFlags>(), NumStrings<kVerifyFlags>(), visitor);
+ VisitNativeRoots<kVerifyFlags, kReadBarrierOption>(visitor);
+ }
+}
+
+template <VerifyObjectFlags kVerifyFlags,
+ ReadBarrierOption kReadBarrierOption,
+ typename Visitor>
+inline void DexCache::VisitNativeRoots(const Visitor& visitor) {
+ VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
+ GetStrings<kVerifyFlags>(), NumStrings<kVerifyFlags>(), visitor);
- VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
- GetResolvedTypes<kVerifyFlags>(), NumResolvedTypes<kVerifyFlags>(), visitor);
+ VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
+ GetResolvedTypes<kVerifyFlags>(), NumResolvedTypes<kVerifyFlags>(), visitor);
- VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
- GetResolvedMethodTypes<kVerifyFlags>(), NumResolvedMethodTypes<kVerifyFlags>(), visitor);
+ VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
+ GetResolvedMethodTypes<kVerifyFlags>(), NumResolvedMethodTypes<kVerifyFlags>(), visitor);
- GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
- size_t num_call_sites = NumResolvedCallSites<kVerifyFlags>();
- for (size_t i = 0; resolved_call_sites != nullptr && i != num_call_sites; ++i) {
- visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
- }
+ GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
+ size_t num_call_sites = NumResolvedCallSites<kVerifyFlags>();
+ for (size_t i = 0; resolved_call_sites != nullptr && i != num_call_sites; ++i) {
+ visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 6701405ab3..78c6bb566d 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -444,6 +444,12 @@ class MANAGED DexCache final : public Object {
ObjPtr<ClassLoader> GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ typename Visitor>
+ void VisitNativeRoots(const Visitor& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+
private:
// Allocate new array in linear alloc and save it in the given fields.
template<typename T, size_t kMaxCacheSize>
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 97fc9385d4..0ba545becc 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -651,7 +651,7 @@ class MANAGED LOCKABLE Object {
// object so that portions of the object, like klass and length (for arrays),
// can be accessed without causing cascading faults.
template <bool kFetchObjSize = true,
- bool kVisitNativeRoots = true,
+ bool kVisitNativeRoots = false,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithFromSpaceBarrier,
typename Visitor>
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e5ce8e5869..6194b816d6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -790,12 +790,12 @@ void Runtime::CallExitHook(jint status) {
}
}
-void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor, const bool sweep_jit) {
+void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
GetInternTable()->SweepInternTableWeaks(visitor);
GetMonitorList()->SweepMonitorList(visitor);
GetJavaVM()->SweepJniWeakGlobals(visitor);
GetHeap()->SweepAllocationRecords(visitor);
- if (sweep_jit && GetJit() != nullptr) {
+ if (GetJit() != nullptr) {
// Visit JIT literal tables. Objects in these tables are classes and strings
// and only classes can be affected by class unloading. The strings always
// stay alive as they are strongly interned.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 7f00d77922..133db0893c 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -450,7 +450,7 @@ class Runtime {
// Sweep system weaks: the system weak is deleted if the visitor returns null. Otherwise, the
// system weak is updated to be the visitor's returned value.
- void SweepSystemWeaks(IsMarkedVisitor* visitor, bool sweep_jit = true)
+ void SweepSystemWeaks(IsMarkedVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
// Walk all reflective objects and visit their targets as well as any method/fields held by the
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2de60e4527..0561c5b615 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4019,7 +4019,7 @@ class ReferenceMapVisitor : public StackVisitor {
visitor_(visitor) {
gc::Heap* const heap = Runtime::Current()->GetHeap();
visit_declaring_class_ = heap->CurrentCollectorType() != gc::CollectorType::kCollectorTypeCMC
- || !heap->MarkCompactCollector()->IsCompacting();
+ || !heap->MarkCompactCollector()->IsCompacting(Thread::Current());
}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {