Diffstat (limited to 'runtime')
-rw-r--r--  runtime/class_linker.cc                      110
-rw-r--r--  runtime/class_linker.h                        33
-rw-r--r--  runtime/class_linker_test.cc                   1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc     2
-rw-r--r--  runtime/gc/collector/mark_compact.cc           1
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h          3
-rw-r--r--  runtime/gc/collector/mark_sweep.cc           173
-rw-r--r--  runtime/gc/collector/mark_sweep.h            155
-rw-r--r--  runtime/gc/collector/semi_space.cc             1
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc      3
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h      16
-rw-r--r--  runtime/jit/jit_code_cache_test.cc             7
-rw-r--r--  runtime/mirror/class_loader.h                 14
-rw-r--r--  runtime/runtime.cc                            21
-rw-r--r--  runtime/runtime.h                              3
-rw-r--r--  runtime/stack.cc                              35
-rw-r--r--  runtime/thread_pool.cc                        10
17 files changed, 389 insertions, 199 deletions
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index bc8a9f4936..6b9c8aa353 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1318,9 +1318,8 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
boot_class_table_.VisitRoots(buffered_visitor);
// TODO: Avoid marking these to enable class unloading.
JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
- for (jweak weak_root : class_loaders_) {
- mirror::Object* class_loader =
- down_cast<mirror::ClassLoader*>(vm->DecodeWeakGlobal(self, weak_root));
+ for (const ClassLoaderData& data : class_loaders_) {
+ mirror::Object* class_loader = vm->DecodeWeakGlobal(self, data.weak_root);
// Don't need to update anything since the class loaders will be updated by SweepSystemWeaks.
visitor->VisitRootIfNonNull(&class_loader, RootInfo(kRootVMInternal));
}
@@ -1503,13 +1502,10 @@ ClassLinker::~ClassLinker() {
STLDeleteElements(&oat_files_);
Thread* const self = Thread::Current();
JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
- for (jweak weak_root : class_loaders_) {
- auto* const class_loader = down_cast<mirror::ClassLoader*>(
- vm->DecodeWeakGlobalDuringShutdown(self, weak_root));
- if (class_loader != nullptr) {
- delete class_loader->GetClassTable();
- }
- vm->DeleteWeakGlobalRef(self, weak_root);
+ for (const ClassLoaderData& data : class_loaders_) {
+ vm->DecodeWeakGlobalDuringShutdown(self, data.weak_root);
+ delete data.allocator;
+ delete data.class_table;
}
class_loaders_.clear();
}
@@ -2375,21 +2371,25 @@ void ClassLinker::LoadClass(Thread* self,
}
}
-LengthPrefixedArray<ArtField>* ClassLinker::AllocArtFieldArray(Thread* self, size_t length) {
+LengthPrefixedArray<ArtField>* ClassLinker::AllocArtFieldArray(Thread* self,
+ LinearAlloc* allocator,
+ size_t length) {
if (length == 0) {
return nullptr;
}
// If the ArtField alignment changes, review all uses of LengthPrefixedArray<ArtField>.
static_assert(alignof(ArtField) == 4, "ArtField alignment is expected to be 4.");
size_t storage_size = LengthPrefixedArray<ArtField>::ComputeSize(length);
- void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ void* array_storage = allocator->Alloc(self, storage_size);
auto* ret = new(array_storage) LengthPrefixedArray<ArtField>(length);
CHECK(ret != nullptr);
std::uninitialized_fill_n(&ret->At(0), length, ArtField());
return ret;
}
-LengthPrefixedArray<ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self, size_t length) {
+LengthPrefixedArray<ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self,
+ LinearAlloc* allocator,
+ size_t length) {
if (length == 0) {
return nullptr;
}
@@ -2397,7 +2397,7 @@ LengthPrefixedArray<ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self, s
const size_t method_size = ArtMethod::Size(image_pointer_size_);
const size_t storage_size =
LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size, method_alignment);
- void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ void* array_storage = allocator->Alloc(self, storage_size);
auto* ret = new (array_storage) LengthPrefixedArray<ArtMethod>(length);
CHECK(ret != nullptr);
for (size_t i = 0; i < length; ++i) {
@@ -2406,6 +2406,15 @@ LengthPrefixedArray<ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self, s
return ret;
}
+LinearAlloc* ClassLinker::GetAllocatorForClassLoader(mirror::ClassLoader* class_loader) {
+ if (class_loader == nullptr) {
+ return Runtime::Current()->GetLinearAlloc();
+ }
+ LinearAlloc* allocator = class_loader->GetAllocator();
+ DCHECK(allocator != nullptr);
+ return allocator;
+}
+
void ClassLinker::LoadClassMembers(Thread* self,
const DexFile& dex_file,
const uint8_t* class_data,
@@ -2418,8 +2427,11 @@ void ClassLinker::LoadClassMembers(Thread* self,
// Load static fields.
// We allow duplicate definitions of the same field in a class_data_item
// but ignore the repeated indexes here, b/21868015.
+ LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
ClassDataItemIterator it(dex_file, class_data);
- LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, it.NumStaticFields());
+ LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self,
+ allocator,
+ it.NumStaticFields());
size_t num_sfields = 0;
uint32_t last_field_idx = 0u;
for (; it.HasNextStaticField(); it.Next()) {
@@ -2435,7 +2447,9 @@ void ClassLinker::LoadClassMembers(Thread* self,
klass->SetSFieldsPtr(sfields);
DCHECK_EQ(klass->NumStaticFields(), num_sfields);
// Load instance fields.
- LengthPrefixedArray<ArtField>* ifields = AllocArtFieldArray(self, it.NumInstanceFields());
+ LengthPrefixedArray<ArtField>* ifields = AllocArtFieldArray(self,
+ allocator,
+ it.NumInstanceFields());
size_t num_ifields = 0u;
last_field_idx = 0u;
for (; it.HasNextInstanceField(); it.Next()) {
@@ -2458,8 +2472,8 @@ void ClassLinker::LoadClassMembers(Thread* self,
klass->SetIFieldsPtr(ifields);
DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
// Load methods.
- klass->SetDirectMethodsPtr(AllocArtMethodArray(self, it.NumDirectMethods()));
- klass->SetVirtualMethodsPtr(AllocArtMethodArray(self, it.NumVirtualMethods()));
+ klass->SetDirectMethodsPtr(AllocArtMethodArray(self, allocator, it.NumDirectMethods()));
+ klass->SetVirtualMethodsPtr(AllocArtMethodArray(self, allocator, it.NumVirtualMethods()));
size_t class_def_method_index = 0;
uint32_t last_dex_method_index = DexFile::kDexNoIndex;
size_t last_class_def_method_index = 0;
@@ -3031,7 +3045,7 @@ void ClassLinker::MoveClassTableToPreZygote() {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
boot_class_table_.FreezeSnapshot();
MoveClassTableToPreZygoteVisitor visitor;
- VisitClassLoadersAndRemoveClearedLoaders(&visitor);
+ VisitClassLoaders(&visitor);
}
mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
@@ -3414,9 +3428,12 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), hash);
CHECK(existing == nullptr);
+ // Needs to be after we insert the class so that the allocator field is set.
+ LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
+
// Instance fields are inherited, but we add a couple of static fields...
const size_t num_fields = 2;
- LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, num_fields);
+ LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, num_fields);
klass->SetSFieldsPtr(sfields);
// 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
@@ -3433,7 +3450,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
throws_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// Proxies have 1 direct method, the constructor
- LengthPrefixedArray<ArtMethod>* directs = AllocArtMethodArray(self, 1);
+ LengthPrefixedArray<ArtMethod>* directs = AllocArtMethodArray(self, allocator, 1);
// Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
// want to throw OOM in the future.
if (UNLIKELY(directs == nullptr)) {
@@ -3448,7 +3465,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass())
<< PrettyClass(h_methods->GetClass());
const size_t num_virtual_methods = h_methods->GetLength();
- auto* virtuals = AllocArtMethodArray(self, num_virtual_methods);
+ auto* virtuals = AllocArtMethodArray(self, allocator, num_virtual_methods);
// Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
// want to throw OOM in the future.
if (UNLIKELY(virtuals == nullptr)) {
@@ -4166,9 +4183,14 @@ ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* cla
if (class_table == nullptr) {
class_table = new ClassTable;
Thread* const self = Thread::Current();
- class_loaders_.push_back(self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader));
+ ClassLoaderData data;
+ data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
+ data.class_table = class_table;
+ data.allocator = Runtime::Current()->CreateLinearAlloc();
+ class_loaders_.push_back(data);
// Don't already have a class table, add it to the class loader.
- class_loader->SetClassTable(class_table);
+ class_loader->SetClassTable(data.class_table);
+ class_loader->SetAllocator(data.allocator);
}
return class_table;
}
@@ -6158,7 +6180,10 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi
ArtMethod* ClassLinker::CreateRuntimeMethod() {
const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
const size_t method_size = ArtMethod::Size(image_pointer_size_);
- LengthPrefixedArray<ArtMethod>* method_array = AllocArtMethodArray(Thread::Current(), 1);
+ LengthPrefixedArray<ArtMethod>* method_array = AllocArtMethodArray(
+ Thread::Current(),
+ Runtime::Current()->GetLinearAlloc(),
+ 1);
ArtMethod* method = &method_array->At(0, method_size, method_alignment);
CHECK(method != nullptr);
method->SetDexMethodIndex(DexFile::kDexNoIndex);
@@ -6171,33 +6196,34 @@ void ClassLinker::DropFindArrayClassCache() {
find_array_class_cache_next_victim_ = 0;
}
-void ClassLinker::VisitClassLoadersAndRemoveClearedLoaders(ClassLoaderVisitor* visitor) {
+void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
Thread* const self = Thread::Current();
- Locks::classlinker_classes_lock_->AssertExclusiveHeld(self);
JavaVMExt* const vm = self->GetJniEnv()->vm;
- for (auto it = class_loaders_.begin(); it != class_loaders_.end();) {
- const jweak weak_root = *it;
- mirror::ClassLoader* const class_loader = down_cast<mirror::ClassLoader*>(
- vm->DecodeWeakGlobal(self, weak_root));
+ for (const ClassLoaderData& data : class_loaders_) {
+ auto* const class_loader = down_cast<mirror::ClassLoader*>(
+ vm->DecodeWeakGlobal(self, data.weak_root));
if (class_loader != nullptr) {
visitor->Visit(class_loader);
- ++it;
- } else {
- // Remove the cleared weak reference from the array.
- vm->DeleteWeakGlobalRef(self, weak_root);
- it = class_loaders_.erase(it);
}
}
}
-void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
+void ClassLinker::CleanupClassLoaders() {
Thread* const self = Thread::Current();
- JavaVMExt* const vm = self->GetJniEnv()->vm;
- for (jweak weak_root : class_loaders_) {
- mirror::ClassLoader* const class_loader = down_cast<mirror::ClassLoader*>(
- vm->DecodeWeakGlobal(self, weak_root));
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
+ for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
+ const ClassLoaderData& data = *it;
+ auto* const class_loader = down_cast<mirror::ClassLoader*>(
+ vm->DecodeWeakGlobal(self, data.weak_root));
if (class_loader != nullptr) {
- visitor->Visit(class_loader);
+ ++it;
+ } else {
+ // Weak reference was cleared, delete the data associated with this class loader.
+ delete data.class_table;
+ delete data.allocator;
+ vm->DeleteWeakGlobalRef(self, data.weak_root);
+ it = class_loaders_.erase(it);
}
}
}
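Note on the class_linker.cc change above: each class loader that gets a class table is now tracked by a ClassLoaderData record holding a JNI weak root, the native ClassTable, and a per-loader LinearAlloc, and CleanupClassLoaders() runs after SweepSystemWeaks(), so a weak root that decodes to null means the loader was unloaded and its native data can be freed. Below is a minimal standalone sketch of that lifecycle; WeakRef, ClassTable and LinearAlloc here are simplified stand-ins, not the real ART types.

#include <list>

struct ClassTable {};
struct LinearAlloc {};

// Stand-in for a JNI weak global: Decode() returns null once the referent
// has been collected, i.e. the class loader was unloaded.
struct WeakRef {
  void* referent = nullptr;
  void* Decode() const { return referent; }
};

struct ClassLoaderData {
  WeakRef weak_root;        // weak root to enable class unloading
  ClassTable* class_table;
  LinearAlloc* allocator;
};

class ClassLinkerSketch {
 public:
  // Mirrors InsertClassTableForClassLoader: native data is created once per
  // class loader and remembered in class_loaders_.
  void Register(void* loader) {
    ClassLoaderData data;
    data.weak_root.referent = loader;
    data.class_table = new ClassTable;
    data.allocator = new LinearAlloc;
    class_loaders_.push_back(data);
  }

  // Mirrors CleanupClassLoaders: must run after weak references were swept,
  // so a null Decode() means the loader is really unreachable.
  void CleanupClassLoaders() {
    for (auto it = class_loaders_.begin(); it != class_loaders_.end();) {
      if (it->weak_root.Decode() != nullptr) {
        ++it;
      } else {
        delete it->class_table;
        delete it->allocator;
        it = class_loaders_.erase(it);
      }
    }
  }

 private:
  std::list<ClassLoaderData> class_loaders_;
};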
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index fee706625b..f705330b14 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -403,9 +403,13 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- LengthPrefixedArray<ArtField>* AllocArtFieldArray(Thread* self, size_t length);
+ LengthPrefixedArray<ArtField>* AllocArtFieldArray(Thread* self,
+ LinearAlloc* allocator,
+ size_t length);
- LengthPrefixedArray<ArtMethod>* AllocArtMethodArray(Thread* self, size_t length);
+ LengthPrefixedArray<ArtMethod>* AllocArtMethodArray(Thread* self,
+ LinearAlloc* allocator,
+ size_t length);
mirror::PointerArray* AllocPointerArray(Thread* self, size_t length)
SHARED_REQUIRES(Locks::mutator_lock_)
@@ -546,17 +550,24 @@ class ClassLinker {
// entries are roots, but potentially not image classes.
void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
- private:
- // The RemoveClearedLoaders version removes cleared weak global class loaders and frees their
- // class tables. This version can only be called with reader access to the
- // classlinker_classes_lock_ since it modifies the class_loaders_ list.
- void VisitClassLoadersAndRemoveClearedLoaders(ClassLoaderVisitor* visitor)
- REQUIRES(Locks::classlinker_classes_lock_)
+ // Clean up class loaders, this needs to happen after JNI weak globals are cleared.
+ void CleanupClassLoaders()
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
+
+ static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_);
+
+ private:
+ struct ClassLoaderData {
+ jobject weak_root; // Weak root to enable class unloading.
+ ClassTable* class_table;
+ LinearAlloc* allocator;
+ };
+
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
-
void VisitClassesInternal(ClassVisitor* visitor)
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
@@ -826,8 +837,8 @@ class ClassLinker {
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
// This contains the class loaders which have class tables. It is populated by
- // InsertClassTableForClassLoader. Weak roots to enable class unloading.
- std::list<jweak> class_loaders_
+ // InsertClassTableForClassLoader.
+ std::list<ClassLoaderData> class_loaders_
GUARDED_BY(Locks::classlinker_classes_lock_);
// Boot class path table. Since the class loader for this is null.
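Note on the new AllocArtFieldArray/AllocArtMethodArray signatures above: callers now pass an explicit LinearAlloc, usually obtained via GetAllocatorForClassLoader(), so field and method storage lives in the defining class loader's allocator and can be reclaimed when that loader is unloaded; a null (boot) class loader falls back to the runtime-wide allocator. A simplified sketch of that selection follows, using hypothetical stand-in types rather than the real ART classes.

#include <cstddef>
#include <cstdlib>

// Stand-in for ART's LinearAlloc; the real one is arena-backed, not malloc.
struct LinearAlloc {
  void* Alloc(size_t n) { return std::malloc(n); }
};

// Stand-in for mirror::ClassLoader with its new allocator field.
struct ClassLoader {
  LinearAlloc* allocator = nullptr;
};

LinearAlloc* GetRuntimeLinearAlloc() {
  static LinearAlloc boot_allocator;  // stand-in for Runtime::GetLinearAlloc()
  return &boot_allocator;
}

// Boot classes (null loader) use the runtime-wide allocator; everything else
// uses the allocator owned by its defining class loader, so the storage goes
// away together with that loader.
LinearAlloc* GetAllocatorForClassLoader(ClassLoader* loader) {
  return (loader == nullptr) ? GetRuntimeLinearAlloc() : loader->allocator;
}

void* AllocFieldArrayStorage(ClassLoader* loader, size_t storage_size) {
  return GetAllocatorForClassLoader(loader)->Alloc(storage_size);
}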
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index b4ea3b3460..0926ce3f6a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -550,6 +550,7 @@ struct StackTraceElementOffsets : public CheckOffsets<mirror::StackTraceElement>
struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> {
ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, allocator_), "allocator");
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, class_table_), "classTable");
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages");
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent");
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 399591b93d..468179c9d5 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -457,6 +457,8 @@ void ConcurrentCopying::MarkingPhase() {
CheckEmptyMarkStack();
// Re-enable weak ref accesses.
ReenableWeakRefAccess(self);
+ // Free data for class loaders that we unloaded.
+ Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
// Marking is done. Disable marking.
DisableMarking();
CheckEmptyMarkStack();
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 60f833b349..f561764ce4 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -205,6 +205,7 @@ void MarkCompact::MarkingPhase() {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}
+ Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 56edcc9d09..e72277ffb2 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -29,7 +29,8 @@ namespace gc {
namespace collector {
template<typename MarkVisitor, typename ReferenceVisitor>
-inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
+inline void MarkSweep::ScanObjectVisit(mirror::Object* obj,
+ const MarkVisitor& visitor,
const ReferenceVisitor& ref_visitor) {
DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
obj->VisitReferences(visitor, ref_visitor);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 089f453888..77a288ba68 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -95,10 +95,13 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
: GarbageCollector(heap,
name_prefix +
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
- current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
+ current_space_bitmap_(nullptr),
+ mark_bitmap_(nullptr),
+ mark_stack_(nullptr),
gc_barrier_(new Barrier(0)),
mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
- is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
+ is_concurrent_(is_concurrent),
+ live_stack_freeze_size_(0) {
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(
"mark sweep sweep array free buffer", nullptr,
@@ -173,7 +176,10 @@ void MarkSweep::RunPhases() {
void MarkSweep::ProcessReferences(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
+ true,
+ GetTimings(),
+ GetCurrentIteration()->GetClearSoftReferences(),
+ this);
}
void MarkSweep::PausePhase() {
@@ -265,8 +271,9 @@ void MarkSweep::MarkingPhase() {
void MarkSweep::UpdateAndMarkModUnion() {
for (const auto& space : heap_->GetContinuousSpaces()) {
if (immune_region_.ContainsSpace(space)) {
- const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
- "UpdateAndMarkImageModUnionTable";
+ const char* name = space->IsZygoteSpace()
+ ? "UpdateAndMarkZygoteModUnionTable"
+ : "UpdateAndMarkImageModUnionTable";
TimingLogger::ScopedTiming t(name, GetTimings());
accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
CHECK(mod_union_table != nullptr);
@@ -283,11 +290,15 @@ void MarkSweep::MarkReachableObjects() {
void MarkSweep::ReclaimPhase() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
// Process the references concurrently.
ProcessReferences(self);
SweepSystemWeaks(self);
- Runtime::Current()->AllowNewSystemWeaks();
+ Runtime* const runtime = Runtime::Current();
+ runtime->AllowNewSystemWeaks();
+ // Clean up class loaders after system weaks are swept since that is how we know if class
+ // unloading occurred.
+ runtime->GetClassLinker()->CleanupClassLoaders();
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->RecordFreeRevoke();
@@ -361,10 +372,10 @@ bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref
class MarkSweepMarkObjectSlowPath {
public:
- explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr,
+ explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep,
+ mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
- : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
- }
+ : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {}
void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
@@ -441,7 +452,8 @@ class MarkSweepMarkObjectSlowPath {
MemberOffset offset_;
};
-inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder,
+inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
+ mirror::Object* holder,
MemberOffset offset) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
@@ -508,7 +520,8 @@ void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
}
// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
-inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder,
+inline void MarkSweep::MarkObject(mirror::Object* obj,
+ mirror::Object* holder,
MemberOffset offset) {
if (obj != nullptr) {
MarkObjectNonNull(obj, holder, offset);
@@ -530,14 +543,16 @@ class VerifyRootMarkedVisitor : public SingleRootVisitor {
MarkSweep* const collector_;
};
-void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
+void MarkSweep::VisitRoots(mirror::Object*** roots,
+ size_t count,
const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
MarkObjectNonNull(*roots[i]);
}
}
-void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
MarkObjectNonNull(roots[i]->AsMirrorPtr());
@@ -596,8 +611,10 @@ class ScanObjectVisitor {
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj) const ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj) const
+ ALWAYS_INLINE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -611,12 +628,11 @@ class ScanObjectVisitor {
class DelayReferenceReferentVisitor {
public:
- explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
- }
+ explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
@@ -627,7 +643,9 @@ class DelayReferenceReferentVisitor {
template <bool kUseFinger = false>
class MarkStackTask : public Task {
public:
- MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
+ MarkStackTask(ThreadPool* thread_pool,
+ MarkSweep* mark_sweep,
+ size_t mark_stack_size,
StackReference<mirror::Object>* mark_stack)
: mark_sweep_(mark_sweep),
thread_pool_(thread_pool),
@@ -652,8 +670,10 @@ class MarkStackTask : public Task {
MarkSweep* mark_sweep)
: chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
- ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void operator()(mirror::Object* obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
Mark(obj->GetFieldObject<mirror::Object>(offset));
}
@@ -674,7 +694,7 @@ class MarkStackTask : public Task {
}
private:
- void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void Mark(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) {
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -693,12 +713,13 @@ class MarkStackTask : public Task {
class ScanObjectParallelVisitor {
public:
- explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
+ ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
: chunk_task_(chunk_task) {}
// No thread safety analysis since multiple threads will use this visitor.
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj) const
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
DelayReferenceReferentVisitor ref_visitor(mark_sweep);
@@ -729,7 +750,9 @@ class MarkStackTask : public Task {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
- auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
+ auto* task = new MarkStackTask(thread_pool_,
+ mark_sweep_,
+ kMaxSize - mark_stack_pos_,
mark_stack_ + mark_stack_pos_);
thread_pool_->AddTask(Thread::Current(), task);
}
@@ -743,9 +766,9 @@ class MarkStackTask : public Task {
}
// Scans all of the objects
- virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
- UNUSED(self);
+ virtual void Run(Thread* self ATTRIBUTE_UNUSED)
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
@@ -778,16 +801,21 @@ class MarkStackTask : public Task {
class CardScanTask : public MarkStackTask<false> {
public:
- CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
+ CardScanTask(ThreadPool* thread_pool,
+ MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap,
- uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
- StackReference<mirror::Object>* mark_stack_obj, bool clear_card)
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t minimum_age,
+ size_t mark_stack_size,
+ StackReference<mirror::Object>* mark_stack_obj,
+ bool clear_card)
: MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
bitmap_(bitmap),
begin_(begin),
end_(end),
- minimum_age_(minimum_age), clear_card_(clear_card) {
- }
+ minimum_age_(minimum_age),
+ clear_card_(clear_card) {}
protected:
accounting::ContinuousSpaceBitmap* const bitmap_;
@@ -803,9 +831,9 @@ class CardScanTask : public MarkStackTask<false> {
virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
ScanObjectParallelVisitor visitor(this);
accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
- size_t cards_scanned = clear_card_ ?
- card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) :
- card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
+ size_t cards_scanned = clear_card_
+ ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
+ : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
<< reinterpret_cast<void*>(end_) << " = " << cards_scanned;
// Finish by emptying our local mark stack.
@@ -873,9 +901,15 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
DCHECK_EQ(mark_stack_end, mark_stack_->End());
// Add the new task to the thread pool.
- auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
- card_begin + card_increment, minimum_age,
- mark_stack_increment, mark_stack_end, clear_card);
+ auto* task = new CardScanTask(thread_pool,
+ this,
+ space->GetMarkBitmap(),
+ card_begin,
+ card_begin + card_increment,
+ minimum_age,
+ mark_stack_increment,
+ mark_stack_end,
+ clear_card);
thread_pool->AddTask(self, task);
card_begin += card_increment;
}
@@ -911,10 +945,16 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
ScanObjectVisitor visitor(this);
bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
if (clear_card) {
- card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
+ card_table->Scan<true>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
minimum_age);
} else {
- card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
+ card_table->Scan<false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
minimum_age);
}
}
@@ -924,11 +964,15 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
class RecursiveMarkTask : public MarkStackTask<false> {
public:
- RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
- accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
- : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
- end_(end) {
- }
+ RecursiveMarkTask(ThreadPool* thread_pool,
+ MarkSweep* mark_sweep,
+ accounting::ContinuousSpaceBitmap* bitmap,
+ uintptr_t begin,
+ uintptr_t end)
+ : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
+ bitmap_(bitmap),
+ begin_(begin),
+ end_(end) {}
protected:
accounting::ContinuousSpaceBitmap* const bitmap_;
@@ -985,7 +1029,10 @@ void MarkSweep::RecursiveMark() {
delta = RoundUp(delta, KB);
if (delta < 16 * KB) delta = end - begin;
begin += delta;
- auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
+ auto* task = new RecursiveMarkTask(thread_pool,
+ this,
+ current_space_bitmap_,
+ start,
begin);
thread_pool->AddTask(self, task);
}
@@ -1032,7 +1079,8 @@ class VerifySystemWeakVisitor : public IsMarkedVisitor {
public:
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
- virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* obj)
+ OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
@@ -1073,7 +1121,8 @@ class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
}
}
- void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
@@ -1247,7 +1296,8 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
TimingLogger::ScopedTiming split(
- alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
+ alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
+ GetTimings());
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
}
@@ -1270,12 +1320,13 @@ void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference*
class MarkVisitor {
public:
- explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
- }
+ ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ ALWAYS_INLINE void operator()(mirror::Object* obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -1284,14 +1335,16 @@ class MarkVisitor {
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 371bba531d..8f7df78d53 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -33,9 +33,9 @@
namespace art {
namespace mirror {
- class Class;
- class Object;
- class Reference;
+class Class;
+class Object;
+class Reference;
} // namespace mirror
class Thread;
@@ -46,8 +46,8 @@ namespace gc {
class Heap;
namespace accounting {
- template<typename T> class AtomicStack;
- typedef AtomicStack<mirror::Object> ObjectStack;
+template<typename T> class AtomicStack;
+typedef AtomicStack<mirror::Object> ObjectStack;
} // namespace accounting
namespace collector {
@@ -60,12 +60,14 @@ class MarkSweep : public GarbageCollector {
virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
void InitializePhase();
- void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
- void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void MarkingPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void ReclaimPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void FinishPhase();
virtual void MarkReachableObjects()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsConcurrent() const {
return is_concurrent_;
@@ -87,20 +89,30 @@ class MarkSweep : public GarbageCollector {
// Marks all objects in the root set at the start of a garbage collection.
void MarkRoots(Thread* self)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void MarkNonThreadRoots()
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void MarkConcurrentRoots(VisitRootFlags flags)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Builds a mark stack and recursively mark until it empties.
void RecursiveMark()
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
@@ -108,26 +120,35 @@ class MarkSweep : public GarbageCollector {
// Builds a mark stack with objects on dirty cards and recursively mark until it empties.
void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Remarks the root set after completing the concurrent mark.
void ReMarkRoots()
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ProcessReferences(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Update and mark references from immune spaces.
void UpdateAndMarkModUnion()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Pre clean cards to reduce how much work is needed in the pause.
void PreCleanCards()
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
// all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
- virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_)
+ virtual void Sweep(bool swap_bitmaps)
+ REQUIRES(Locks::heap_bitmap_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
@@ -135,20 +156,27 @@ class MarkSweep : public GarbageCollector {
// Sweep only pointers within an array. WARNING: Trashes objects.
void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Blackens an object.
void ScanObject(mirror::Object* obj)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// No thread safety analysis due to lambdas.
template<typename MarkVisitor, typename ReferenceVisitor>
- void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
+ void ScanObjectVisit(mirror::Object* obj,
+ const MarkVisitor& visitor,
const ReferenceVisitor& ref_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
+ REQUIRES(!Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -161,22 +189,36 @@ class MarkSweep : public GarbageCollector {
SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Marks an object.
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
Barrier& GetBarrier() {
return *gc_barrier_;
@@ -191,13 +233,17 @@ class MarkSweep : public GarbageCollector {
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
SHARED_REQUIRES(Locks::heap_bitmap_lock_);
- void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
+ void MarkObjectNonNull(mirror::Object* obj,
+ mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -208,9 +254,12 @@ class MarkSweep : public GarbageCollector {
NO_THREAD_SAFETY_ANALYSIS;
// Expand mark stack to 2x its current size.
- void ExpandMarkStack() REQUIRES(mark_stack_lock_)
+ void ExpandMarkStack()
+ REQUIRES(mark_stack_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- void ResizeMarkStack(size_t new_size) REQUIRES(mark_stack_lock_)
+
+ void ResizeMarkStack(size_t new_size)
+ REQUIRES(mark_stack_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns how many threads we should use for the current GC phase based on if we are paused,
@@ -218,24 +267,34 @@ class MarkSweep : public GarbageCollector {
size_t GetThreadCount(bool paused) const;
// Push a single reference on a mark stack.
- void PushOnMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_);
+ void PushOnMarkStack(mirror::Object* obj)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Blackens objects grayed during a garbage collection.
void ScanGrayObjects(bool paused, uint8_t minimum_age)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
- virtual void ProcessMarkStack() OVERRIDE REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_)
+ virtual void ProcessMarkStack()
+ OVERRIDE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
SHARED_REQUIRES(Locks::mutator_lock_) {
ProcessMarkStack(false);
}
// Recursively blackens objects on the mark stack.
void ProcessMarkStack(bool paused)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void ProcessMarkStackParallel(size_t thread_count)
- REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(!mark_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Used to Get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
@@ -293,23 +352,15 @@ class MarkSweep : public GarbageCollector {
std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
private:
- friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
friend class CardScanTask;
friend class CheckBitmapVisitor;
friend class CheckReferenceVisitor;
friend class CheckpointMarkThreadRoots;
- friend class art::gc::Heap;
+ friend class Heap;
friend class FifoMarkStackChunk;
friend class MarkObjectVisitor;
template<bool kUseFinger> friend class MarkStackTask;
friend class MarkSweepMarkObjectSlowPath;
- friend class ModUnionCheckReferences;
- friend class ModUnionClearCardVisitor;
- friend class ModUnionReferenceVisitor;
- friend class ModUnionScanImageRootVisitor;
- friend class ModUnionTableBitmap;
- friend class ModUnionTableReferenceCache;
- friend class ModUnionVisitor;
friend class VerifyRootMarkedVisitor;
friend class VerifyRootVisitor;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index ed63ed049f..7f57f30b27 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -248,6 +248,7 @@ void SemiSpace::MarkingPhase() {
ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}
+ Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 5be3db712b..6c32658e43 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -25,8 +25,7 @@ namespace gc {
namespace collector {
StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
- : PartialMarkSweep(heap, is_concurrent,
- name_prefix.empty() ? "sticky " : name_prefix) {
+ : PartialMarkSweep(heap, is_concurrent, name_prefix.empty() ? "sticky " : name_prefix) {
cumulative_timings_.SetName(GetName());
}
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index e8f0672426..abaf97845d 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -38,13 +38,15 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
// alloc space will be marked as immune.
void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
- void MarkReachableObjects() OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_);
-
- void Sweep(bool swap_bitmaps) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_);
+ void MarkReachableObjects()
+ OVERRIDE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void Sweep(bool swap_bitmaps)
+ OVERRIDE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
index a6cbb710af..c76dc1110a 100644
--- a/runtime/jit/jit_code_cache_test.cc
+++ b/runtime/jit/jit_code_cache_test.cc
@@ -49,8 +49,11 @@ TEST_F(JitCodeCacheTest, TestCoverage) {
ASSERT_TRUE(reserved_code != nullptr);
ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
ASSERT_EQ(code_cache->NumMethods(), 1u);
- ClassLinker* const cl = Runtime::Current()->GetClassLinker();
- ArtMethod* method = &cl->AllocArtMethodArray(soa.Self(), 1)->At(0);
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ ArtMethod* method = &class_linker->AllocArtMethodArray(soa.Self(),
+ runtime->GetLinearAlloc(),
+ 1)->At(0);
ASSERT_FALSE(code_cache->ContainsMethod(method));
method->SetEntryPointFromQuickCompiledCode(reserved_code);
ASSERT_TRUE(code_cache->ContainsMethod(method));
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index f27b6155ce..c2a65d62e2 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -35,18 +35,31 @@ class MANAGED ClassLoader : public Object {
static constexpr uint32_t InstanceSize() {
return sizeof(ClassLoader);
}
+
ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
}
+
ClassTable* GetClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
return reinterpret_cast<ClassTable*>(
GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
}
+
void SetClassTable(ClassTable* class_table) SHARED_REQUIRES(Locks::mutator_lock_) {
SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_),
reinterpret_cast<uint64_t>(class_table));
}
+ LinearAlloc* GetAllocator() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return reinterpret_cast<LinearAlloc*>(
+ GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_)));
+ }
+
+ void SetAllocator(LinearAlloc* allocator) SHARED_REQUIRES(Locks::mutator_lock_) {
+ SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_),
+ reinterpret_cast<uint64_t>(allocator));
+ }
+
private:
// Visit instance fields of the class loader as well as its associated classes.
// Null class loader is handled by ClassLinker::VisitClassRoots.
@@ -61,6 +74,7 @@ class MANAGED ClassLoader : public Object {
HeapReference<Object> proxyCache_;
// Native pointer to class table, need to zero this out when image writing.
uint32_t padding_ ATTRIBUTE_UNUSED;
+ uint64_t allocator_;
uint64_t class_table_;
friend struct art::ClassLoaderOffsets; // for verifying offset information
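Note on the mirror::ClassLoader change above: allocator_, like class_table_, is a native pointer stored in a 64-bit managed field so the object layout stays identical on 32-bit and 64-bit runtimes, with the accessors converting between the pointer and the raw uint64_t. A small sketch of that pattern, with stand-in types rather than the real mirror object:

#include <cstdint>

struct LinearAlloc {};

// Stand-in for the relevant mirror::ClassLoader fields: the pointer is
// widened to uint64_t so the managed layout does not depend on word size.
struct ClassLoaderFields {
  uint64_t allocator_ = 0;

  void SetAllocator(LinearAlloc* allocator) {
    allocator_ = reinterpret_cast<std::uintptr_t>(allocator);
  }
  LinearAlloc* GetAllocator() const {
    // Narrow through uintptr_t so the sketch also builds on 32-bit targets.
    return reinterpret_cast<LinearAlloc*>(static_cast<std::uintptr_t>(allocator_));
  }
};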
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6b144cf48b..8cba1a91d7 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -274,9 +274,6 @@ Runtime::~Runtime() {
VLOG(jit) << "Deleting jit";
jit_.reset(nullptr);
}
- linear_alloc_.reset();
- arena_pool_.reset();
- low_4gb_arena_pool_.reset();
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
@@ -290,7 +287,13 @@ Runtime::~Runtime() {
Thread::Shutdown();
QuasiAtomic::Shutdown();
verifier::MethodVerifier::Shutdown();
+
+ // Destroy allocators before shutting down the MemMap because they may use it.
+ linear_alloc_.reset();
+ low_4gb_arena_pool_.reset();
+ arena_pool_.reset();
MemMap::Shutdown();
+
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
@@ -941,13 +944,11 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
// can't be trimmed as easily.
const bool use_malloc = IsAotCompiler();
arena_pool_.reset(new ArenaPool(use_malloc, false));
- if (IsCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
+ if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
// 4gb, no malloc. Explanation in header.
low_4gb_arena_pool_.reset(new ArenaPool(false, true));
- linear_alloc_.reset(new LinearAlloc(low_4gb_arena_pool_.get()));
- } else {
- linear_alloc_.reset(new LinearAlloc(arena_pool_.get()));
}
+ linear_alloc_.reset(CreateLinearAlloc());
BlockSignals();
InitPlatformSignalHandlers();
@@ -1788,4 +1789,10 @@ bool Runtime::IsVerificationSoftFail() const {
return verify_ == verifier::VerifyMode::kSoftFail;
}
+LinearAlloc* Runtime::CreateLinearAlloc() {
+ return (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA))
+ ? new LinearAlloc(low_4gb_arena_pool_.get())
+ : new LinearAlloc(arena_pool_.get());
+}
+
} // namespace art
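Note on the runtime.cc change above: CreateLinearAlloc() centralizes the choice between the low-4GB pool (64-bit AOT compiler) and the normal arena pool, and the allocators are now reset before MemMap::Shutdown() because their arenas are MemMap-backed. A toy sketch of why that teardown order matters, using stand-in types only:

#include <memory>

struct MemMapSubsystem {
  static void Shutdown() { /* would unmap global bookkeeping here */ }
};
struct ArenaPool {};                       // hands out MemMap-backed arenas
struct LinearAlloc { ArenaPool* pool; };   // bump-allocates from its pool

int main() {
  std::unique_ptr<ArenaPool> arena_pool(new ArenaPool);
  std::unique_ptr<LinearAlloc> linear_alloc(new LinearAlloc{arena_pool.get()});

  // Teardown mirrors Runtime::~Runtime(): consumers first, then the pools,
  // and only then the global MemMap shutdown.
  linear_alloc.reset();
  arena_pool.reset();
  MemMapSubsystem::Shutdown();
  return 0;
}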
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a35eac1af8..6154c34ec5 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -570,6 +570,9 @@ class Runtime {
// Called from class linker.
void SetSentinel(mirror::Object* sentinel) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
+ LinearAlloc* CreateLinearAlloc();
+
private:
static void InitPlatformSignalHandlers();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index d739743151..7f72f8ab61 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -840,23 +840,30 @@ void StackVisitor::SanityCheckFrame() const {
} else {
CHECK(declaring_class == nullptr);
}
- auto* runtime = Runtime::Current();
- auto* la = runtime->GetLinearAlloc();
- if (!la->Contains(method)) {
- // Check image space.
- bool in_image = false;
- for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
- if (space->IsImageSpace()) {
- auto* image_space = space->AsImageSpace();
- const auto& header = image_space->GetImageHeader();
- const auto* methods = &header.GetMethodsSection();
- if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
- in_image = true;
- break;
+ Runtime* const runtime = Runtime::Current();
+ LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
+ if (!linear_alloc->Contains(method)) {
+ // Check class linker linear allocs.
+ mirror::Class* klass = method->GetDeclaringClass();
+ LinearAlloc* const class_linear_alloc = (klass != nullptr)
+ ? ClassLinker::GetAllocatorForClassLoader(klass->GetClassLoader())
+ : linear_alloc;
+ if (!class_linear_alloc->Contains(method)) {
+ // Check image space.
+ bool in_image = false;
+ for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ auto* image_space = space->AsImageSpace();
+ const auto& header = image_space->GetImageHeader();
+ const auto* methods = &header.GetMethodsSection();
+ if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
+ in_image = true;
+ break;
+ }
}
}
+ CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
}
- CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
}
if (cur_quick_frame_ != nullptr) {
method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
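Note on the stack.cc change above: with per-class-loader allocators, SanityCheckFrame() now accepts a method if it is contained in the runtime LinearAlloc, in the declaring class loader's LinearAlloc, or in the image's methods section. A simplified sketch of that three-way containment check, with a hypothetical Range helper standing in for LinearAlloc::Contains() and the image section check:

#include <cstdint>

struct Range {
  std::uintptr_t begin;
  std::uintptr_t end;
  bool Contains(const void* p) const {
    const std::uintptr_t v = reinterpret_cast<std::uintptr_t>(p);
    return begin <= v && v < end;
  }
};

bool IsSaneMethod(const void* method,
                  const Range& runtime_linear_alloc,
                  const Range& loader_linear_alloc,     // new: per-class-loader alloc
                  const Range& image_methods_section) {
  return runtime_linear_alloc.Contains(method) ||
         loader_linear_alloc.Contains(method) ||
         image_methods_section.Contains(method);
}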
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index d8f80fa690..0527d3ae14 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -16,7 +16,9 @@
#include "thread_pool.h"
+#include "base/bit_utils.h"
#include "base/casts.h"
+#include "base/logging.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "runtime.h"
@@ -30,10 +32,15 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
size_t stack_size)
: thread_pool_(thread_pool),
name_(name) {
+ // Add an inaccessible page to catch stack overflow.
+ stack_size += kPageSize;
std::string error_msg;
stack_.reset(MemMap::MapAnonymous(name.c_str(), nullptr, stack_size, PROT_READ | PROT_WRITE,
false, false, &error_msg));
CHECK(stack_.get() != nullptr) << error_msg;
+ CHECK_ALIGNED(stack_->Begin(), kPageSize);
+ int mprotect_result = mprotect(stack_->Begin(), kPageSize, PROT_NONE);
+ CHECK_EQ(mprotect_result, 0) << "Failed to mprotect() bottom page of thread pool worker stack.";
const char* reason = "new thread pool worker thread";
pthread_attr_t attr;
CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
@@ -92,7 +99,8 @@ ThreadPool::ThreadPool(const char* name, size_t num_threads)
while (GetThreadCount() < num_threads) {
const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
GetThreadCount());
- threads_.push_back(new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
+ threads_.push_back(
+ new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
}
// Wait for all of the threads to attach.
creation_barier_.Wait(self);
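Note on the thread_pool.cc change above: the worker stack mapping is grown by one page and its lowest page is mprotect()-ed to PROT_NONE, so a stack overflow faults immediately instead of silently corrupting adjacent memory. A standalone POSIX sketch of the same guard-page trick (not the ART ThreadPoolWorker code):

#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>

void* MapWorkerStack(size_t stack_size) {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  stack_size += page_size;  // room for the inaccessible guard page
  void* stack = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(stack != MAP_FAILED);
  // Stacks grow down, so protecting the lowest page turns an overflow into
  // an immediate fault instead of silent corruption.
  const int rc = mprotect(stack, page_size, PROT_NONE);
  assert(rc == 0);
  (void)rc;
  return stack;
}

int main() {
  void* stack = MapWorkerStack(64 * 1024);
  (void)stack;  // a real pool would hand this to pthread_attr_setstack()
  return 0;
}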