author     2017-07-12 19:32:55 -0700
committer  2017-07-18 00:08:52 -0700
commit     351c44765279142d15333e2ae02b8a423d195b1b (patch)
tree       6c926a887efe27e9f11934d34fdf3b53bc0952ca
parent     1847704d0422dcc75ac30ae0d3caaf210c05a755 (diff)
ART: Add templated VisitObjects
Add templated versions of VisitObjects that accept visitors. This
allows callers to use modern lambdas instead of ObjectCallback
function pointers.
Test: m
Change-Id: I71a7f59bcae02090b9493bf8b477bb8b6ba649de
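
As a usage illustration (not part of the change itself): with the templated
overloads a caller can pass a lambda straight to Heap::VisitObjects instead of
defining an ObjectCallback and threading a void* cookie through it. The snippet
below is a hypothetical sketch; it assumes runtime code that already includes
gc/heap.h and runtime.h and that holds the mutator lock shared, as VisitObjects
requires. The counting logic is invented for the example.

    // Hypothetical call site: count the objects reported by the heap walk.
    // VisitObjects() requires the caller to hold the mutator lock shared.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    size_t object_count = 0;
    heap->VisitObjects([&object_count](mirror::Object* /* obj */) {
      ++object_count;  // The lambda replaces an ObjectCallback + void* pair.
    });

    // The equivalent pre-change form needed a separate static callback:
    //   static void CountCallback(mirror::Object* obj, void* arg) {
    //     ++*reinterpret_cast<size_t*>(arg);
    //   }
    //   heap->VisitObjects(CountCallback, &object_count);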
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h         |   2
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h             |   2
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h        |   5
-rw-r--r--  runtime/gc/accounting/space_bitmap.h            |   2
-rw-r--r--  runtime/gc/heap-visit-objects-inl.h             | 169
-rw-r--r--  runtime/gc/heap.cc                              |  14
-rw-r--r--  runtime/gc/heap.h                               |  17
-rw-r--r--  runtime/gc/space/bump_pointer_space-walk-inl.h  |  87
-rw-r--r--  runtime/gc/space/bump_pointer_space.h           |   3
-rw-r--r--  runtime/gc/space/region_space-inl.h             |  16
-rw-r--r--  runtime/gc/space/region_space.h                 |   7
11 files changed, 304 insertions(+), 20 deletions(-)
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 8fcc87d3a1..edf2e5bd65 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -26,7 +26,7 @@ namespace gc {
 namespace accounting {
 
 template <typename Visitor>
-inline void HeapBitmap::Visit(const Visitor& visitor) {
+inline void HeapBitmap::Visit(Visitor&& visitor) {
   for (const auto& bitmap : continuous_space_bitmaps_) {
     bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
   }
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 7097f87e91..2007bef0fb 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -51,7 +51,7 @@ class HeapBitmap {
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
   template <typename Visitor>
-  void Visit(const Visitor& visitor)
+  ALWAYS_INLINE void Visit(Visitor&& visitor)
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 9feaf415a5..20508c1d29 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -62,8 +62,9 @@ inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
 }
 
 template<size_t kAlignment> template<typename Visitor>
-inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
-                                                      const Visitor& visitor) const {
+inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
+                                                      uintptr_t visit_end,
+                                                      Visitor&& visitor) const {
   DCHECK_LE(visit_begin, visit_end);
 #if 0
   for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 889f57b333..6188c9fa09 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -134,7 +134,7 @@ class SpaceBitmap {
   // TODO: Use lock annotations when clang is fixed.
   // REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename Visitor>
-  void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
+  void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
       NO_THREAD_SAFETY_ANALYSIS;
 
   // Visits set bits in address order. The callback is not permitted to change the bitmap bits or
diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h
new file mode 100644
index 0000000000..b6ccb277cd
--- /dev/null
+++ b/runtime/gc/heap-visit-objects-inl.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+#define ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+
+#include "heap.h"
+
+#include "base/mutex-inl.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/space/bump_pointer_space-walk-inl.h"
+#include "gc/space/region_space-inl.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+
+namespace art {
+namespace gc {
+
+// Visit objects when threads aren't suspended. If concurrent moving
+// GC, disable moving GC and suspend threads and then visit objects.
+template <typename Visitor>
+inline void Heap::VisitObjects(Visitor&& visitor) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertSharedHeld(self);
+  DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
+  if (IsGcConcurrentAndMoving()) {
+    // Concurrent moving GC. Just suspending threads isn't sufficient
+    // because a collection isn't one big pause and we could suspend
+    // threads in the middle (between phases) of a concurrent moving
+    // collection where it's not easily known which objects are alive
+    // (both the region space and the non-moving space) or which
+    // copies of objects to visit, and the to-space invariant could be
+    // easily broken. Visit objects while GC isn't running by using
+    // IncrementDisableMovingGC() and threads are suspended.
+    IncrementDisableMovingGC(self);
+    {
+      ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
+      ScopedSuspendAll ssa(__FUNCTION__);
+      VisitObjectsInternalRegionSpace(visitor);
+      VisitObjectsInternal(visitor);
+    }
+    DecrementDisableMovingGC(self);
+  } else {
+    // Since concurrent moving GC has thread suspension, also poison ObjPtr the normal case to
+    // catch bugs.
+    self->PoisonObjectPointers();
+    // GCs can move objects, so don't allow this.
+    ScopedAssertNoThreadSuspension ants("Visiting objects");
+    DCHECK(region_space_ == nullptr);
+    VisitObjectsInternal(visitor);
+    self->PoisonObjectPointers();
+  }
+}
+
+template <typename Visitor>
+inline void Heap::VisitObjectsPaused(Visitor&& visitor) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  VisitObjectsInternalRegionSpace(visitor);
+  VisitObjectsInternal(visitor);
+}
+
+// Visit objects in the region spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternalRegionSpace(Visitor&& visitor) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  if (region_space_ != nullptr) {
+    DCHECK(IsGcConcurrentAndMoving());
+    if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
+      // Exclude the pre-zygote fork time where the semi-space collector
+      // calls VerifyHeapReferences() as part of the zygote compaction
+      // which then would call here without the moving GC disabled,
+      // which is fine.
+      bool is_thread_running_gc = false;
+      if (kIsDebugBuild) {
+        MutexLock mu(self, *gc_complete_lock_);
+        is_thread_running_gc = self == thread_running_gc_;
+      }
+      // If we are not the thread running the GC on in a GC exclusive region, then moving GC
+      // must be disabled.
+      DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
+    }
+    region_space_->Walk(visitor);
+  }
+}
+
+// Visit objects in the other spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternal(Visitor&& visitor) {
+  if (bump_pointer_space_ != nullptr) {
+    // Visit objects in bump pointer space.
+    bump_pointer_space_->Walk(visitor);
+  }
+  // TODO: Switch to standard begin and end to use ranged a based loop.
+  for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
+    mirror::Object* const obj = it->AsMirrorPtr();
+
+    mirror::Class* kls = nullptr;
+    if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
+      // Below invariant is safe regardless of what space the Object is in.
+      // For speed reasons, only perform it when Rosalloc could possibly be used.
+      // (Disabled for read barriers because it never uses Rosalloc).
+      // (See the DCHECK in RosAllocSpace constructor).
+      if (!kUseReadBarrier) {
+        // Rosalloc has a race in allocation. Objects can be written into the allocation
+        // stack before their header writes are visible to this thread.
+        // See b/28790624 for more details.
+        //
+        // obj.class will either be pointing to a valid Class*, or it will point
+        // to a rosalloc free buffer.
+        //
+        // If it's pointing to a valid Class* then that Class's Class will be the
+        // ClassClass (whose Class is itself).
+        //
+        // A rosalloc free buffer will point to another rosalloc free buffer
+        // (or to null), and never to itself.
+        //
+        // Either way dereferencing while its not-null is safe because it will
+        // always point to another valid pointer or to null.
+        mirror::Class* klsClass = kls->GetClass();
+
+        if (klsClass == nullptr) {
+          continue;
+        } else if (klsClass->GetClass() != klsClass) {
+          continue;
+        }
+      } else {
+        // Ensure the invariant is not broken for non-rosalloc cases.
+        DCHECK(Heap::rosalloc_space_ == nullptr)
+            << "unexpected rosalloc with read barriers";
+        DCHECK(kls->GetClass() != nullptr)
+            << "invalid object: class does not have a class";
+        DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
+            << "invalid object: class's class is not ClassClass";
+      }
+
+      // Avoid the race condition caused by the object not yet being written into the allocation
+      // stack or the class not yet being written in the object. Or, if
+      // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
+      visitor(obj);
+    }
+  }
+  {
+    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    GetLiveBitmap()->Visit<Visitor>(visitor);
+  }
+}
+
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ad4c0d5b2d..dfa3ff9433 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -65,6 +65,7 @@
 #include "gc_pause_listener.h"
 #include "gc_root.h"
 #include "heap-inl.h"
+#include "heap-visit-objects-inl.h"
 #include "image.h"
 #include "intern_table.h"
 #include "java_vm_ext.h"
@@ -2935,7 +2936,7 @@ class ScanVisitor {
 class VerifyReferenceVisitor : public SingleRootVisitor {
  public:
   VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
 
   size_t GetFailureCount() const {
@@ -3089,8 +3090,7 @@ class VerifyObjectVisitor {
   VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
 
-  void operator()(mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+  void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Note: we are verifying the references in obj but not obj itself, this is because obj must
     // be live or else how did we find it in the live bitmap?
     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3098,12 +3098,6 @@ class VerifyObjectVisitor {
     obj->VisitReferences(visitor, visitor);
   }
 
-  static void VisitCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
-    visitor->operator()(obj);
-  }
-
   void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3175,7 +3169,7 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) {
   // 2. Allocated during the GC (pre sweep GC verification).
   // We don't want to verify the objects in the live stack since they themselves may be
   // pointing to dead objects if they are not reachable.
-  VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
+  VisitObjectsPaused(visitor);
   // Verify the roots:
   visitor.VerifyRoots();
   if (visitor.GetFailureCount() > 0) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9e55081b63..78a21de256 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
 #include "allocator_type.h"
 #include "arch/instruction_set.h"
 #include "atomic.h"
+#include "base/mutex.h"
 #include "base/time_utils.h"
 #include "gc/gc_cause.h"
 #include "gc/collector/gc_type.h"
@@ -256,6 +257,14 @@ class Heap {
   void VisitObjectsPaused(ObjectCallback callback, void* arg)
       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
+      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+
   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1057,6 +1066,14 @@ class Heap {
   void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
+      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+
   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
 
   // GC stress mode attempts to do one GC per unique backtrace.
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
new file mode 100644
index 0000000000..611b3d043e
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+
+#include "bump_pointer_space.h"
+
+#include "base/bit_utils.h"
+#include "mirror/object-inl.h"
+#include "thread-current-inl.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+template <typename Visitor>
+inline void BumpPointerSpace::Walk(Visitor&& visitor) {
+  uint8_t* pos = Begin();
+  uint8_t* end = End();
+  uint8_t* main_end = pos;
+  {
+    MutexLock mu(Thread::Current(), block_lock_);
+    // If we have 0 blocks then we need to update the main header since we have bump pointer style
+    // allocation into an unbounded region (actually bounded by Capacity()).
+    if (num_blocks_ == 0) {
+      UpdateMainBlock();
+    }
+    main_end = Begin() + main_block_size_;
+    if (num_blocks_ == 0) {
+      // We don't have any other blocks, this means someone else may be allocating into the main
+      // block. In this case, we don't want to try and visit the other blocks after the main block
+      // since these could actually be part of the main block.
+      end = main_end;
+    }
+  }
+  // Walk all of the objects in the main block first.
+  while (pos < main_end) {
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+    // No read barrier because obj may not be a valid object.
+    if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
+      // There is a race condition where a thread has just allocated an object but not set the
+      // class. We can't know the size of this object, so we don't visit it and exit the function
+      // since there is guaranteed to be not other blocks.
+      return;
+    } else {
+      visitor(obj);
+      pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
+    }
+  }
+  // Walk the other blocks (currently only TLABs).
+  while (pos < end) {
+    BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
+    size_t block_size = header->size_;
+    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+    const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+    CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
+    // We don't know how many objects are allocated in the current block. When we hit a null class
+    // assume its the end. TODO: Have a thread update the header when it flushes the block?
+    // No read barrier because obj may not be a valid object.
+    while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+      visitor(obj);
+      obj = GetNextObject(obj);
+    }
+    pos += block_size;
+  }
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 566dc5dc40..cf152e182f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -151,6 +151,9 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
   // Go through all of the blocks and visit the continuous objects.
   void Walk(ObjectCallback* callback, void* arg)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
+  template <typename Visitor>
+  ALWAYS_INLINE void Walk(Visitor&& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
 
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 2fba4a8bd1..34b552bc9b 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -186,6 +186,14 @@ uint64_t RegionSpace::GetObjectsAllocatedInternal() {
 
 template<bool kToSpaceOnly>
 void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
+  auto visitor = [callback, arg](mirror::Object* obj) {
+    callback(obj, arg);
+  };
+  WalkInternal<kToSpaceOnly>(visitor);
+}
+
+template<bool kToSpaceOnly, typename Visitor>
+void RegionSpace::WalkInternal(Visitor&& visitor) {
   // TODO: MutexLock on region_lock_ won't work due to lock order
   // issues (the classloader classes lock and the monitor lock). We
   // call this with threads suspended.
@@ -201,7 +209,7 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
       DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
       mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
       DCHECK(obj->GetClass() != nullptr);
-      callback(obj, arg);
+      visitor(obj);
     } else if (r->IsLargeTail()) {
       // Do nothing.
     } else {
@@ -215,14 +223,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
         GetLiveBitmap()->VisitMarkedRange(
             reinterpret_cast<uintptr_t>(pos),
             reinterpret_cast<uintptr_t>(top),
-            [callback, arg](mirror::Object* obj) {
-              callback(obj, arg);
-            });
+            visitor);
       } else {
         while (pos < top) {
           mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
           if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
-            callback(obj, arg);
+            visitor(obj);
             pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
           } else {
             break;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 6412158a77..54a56b31f9 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -156,6 +156,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
       REQUIRES(Locks::mutator_lock_) {
     WalkInternal<false>(callback, arg);
   }
+  template <typename Visitor>
+  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
+    WalkInternal<false /* kToSpaceOnly */>(visitor);
+  }
 
   void WalkToSpace(ObjectCallback* callback, void* arg)
       REQUIRES(Locks::mutator_lock_) {
@@ -250,6 +254,9 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   template<bool kToSpaceOnly>
   void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;
 
+  template<bool kToSpaceOnly, typename Visitor>
+  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
+
   class Region {
    public:
     Region()