 runtime/art_field-inl.h                    |   3
 runtime/art_method-inl.h                   |   9
 runtime/gc/collector/concurrent_copying.cc | 234
 runtime/gc/collector/concurrent_copying.h  |   6
 runtime/gc_root-inl.h                      |   4
 runtime/gc_root.h                          |  36
 runtime/lock_word.h                        |   4
 runtime/read_barrier-inl.h                 |  25
 runtime/read_barrier.h                     |  10
 9 files changed, 234 insertions(+), 97 deletions(-)
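The patch threads an optional GcRootSource through the GC-root read-barrier path so that, when the concurrent copying collector's to-space invariant fails on a root, the fatal log can name the exact ArtField or ArtMethod that held the stale reference and dump its root slots, the referent's lock word, the region space state, and the process maps.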
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index ee51ec9f1c..73beb1f168 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -34,7 +34,8 @@ namespace art {
 
 inline mirror::Class* ArtField::GetDeclaringClass() {
-  mirror::Class* result = declaring_class_.Read();
+  GcRootSource gc_root_source(this);
+  mirror::Class* result = declaring_class_.Read(&gc_root_source);
   DCHECK(result != nullptr);
   DCHECK(result->IsLoaded() || result->IsErroneous());
   return result;
 }
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 5cfce41cc0..8712bdbbf5 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -36,7 +36,8 @@ namespace art {
 
 inline mirror::Class* ArtMethod::GetDeclaringClassUnchecked() {
-  return declaring_class_.Read();
+  GcRootSource gc_root_source(this);
+  return declaring_class_.Read(&gc_root_source);
 }
 
 inline mirror::Class* ArtMethod::GetDeclaringClassNoBarrier() {
@@ -84,7 +85,8 @@ inline uint32_t ArtMethod::GetDexMethodIndex() {
 }
 
 inline mirror::PointerArray* ArtMethod::GetDexCacheResolvedMethods() {
-  return dex_cache_resolved_methods_.Read();
+  GcRootSource gc_root_source(this);
+  return dex_cache_resolved_methods_.Read(&gc_root_source);
 }
 
 inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index, size_t ptr_size) {
@@ -118,7 +120,8 @@ inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other) {
 }
 
 inline mirror::ObjectArray<mirror::Class>* ArtMethod::GetDexCacheResolvedTypes() {
-  return dex_cache_resolved_types_.Read();
+  GcRootSource gc_root_source(this);
+  return dex_cache_resolved_types_.Read(&gc_root_source);
 }
 
 template <bool kWithCheck>
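All four accessors follow the same pattern: build a stack-local GcRootSource naming the holder (the field or method itself) and hand it to GcRoot<T>::Read(). Because the new parameter defaults to nullptr (see the gc_root.h hunk below), untouched call sites keep compiling. A minimal sketch of the two call shapes, assuming a hypothetical GcRoot<mirror::Class> root and a non-null ArtField* field (neither taken from the patch):

    // Sketch only: untagged and attributed reads of the same root.
    mirror::Class* plain = root.Read();            // gc_root_source defaults to nullptr
    GcRootSource gc_root_source(field);            // stack-local tag naming the holder
    mirror::Class* tagged = root.Read(&gc_root_source);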
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 658390dd2d..6984c1624f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1002,97 +1002,167 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset
   } else if (region_space_->IsInFromSpace(ref)) {
     // Not OK. Do extra logging.
     if (obj != nullptr) {
-      if (kUseBakerReadBarrier) {
-        LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
-                  << " holder rb_ptr=" << obj->GetReadBarrierPointer();
-      } else {
-        LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
-      }
-      if (region_space_->IsInFromSpace(obj)) {
-        LOG(INFO) << "holder is in the from-space.";
-      } else if (region_space_->IsInToSpace(obj)) {
-        LOG(INFO) << "holder is in the to-space.";
-      } else if (region_space_->IsInUnevacFromSpace(obj)) {
-        LOG(INFO) << "holder is in the unevac from-space.";
-        if (region_space_bitmap_->Test(obj)) {
-          LOG(INFO) << "holder is marked in the region space bitmap.";
-        } else {
-          LOG(INFO) << "holder is not marked in the region space bitmap.";
-        }
-      } else {
-        // In a non-moving space.
-        if (immune_region_.ContainsObject(obj)) {
-          LOG(INFO) << "holder is in the image or the zygote space.";
-          accounting::ContinuousSpaceBitmap* cc_bitmap =
-              cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
-          CHECK(cc_bitmap != nullptr)
-              << "An immune space object must have a bitmap.";
-          if (cc_bitmap->Test(obj)) {
-            LOG(INFO) << "holder is marked in the bit map.";
-          } else {
-            LOG(INFO) << "holder is NOT marked in the bit map.";
-          }
-        } else {
-          LOG(INFO) << "holder is in a non-moving (or main) space.";
-          accounting::ContinuousSpaceBitmap* mark_bitmap =
-              heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
-          accounting::LargeObjectBitmap* los_bitmap =
-              heap_mark_bitmap_->GetLargeObjectBitmap(obj);
-          CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
-          bool is_los = mark_bitmap == nullptr;
-          if (!is_los && mark_bitmap->Test(obj)) {
-            LOG(INFO) << "holder is marked in the mark bit map.";
-          } else if (is_los && los_bitmap->Test(obj)) {
-            LOG(INFO) << "holder is marked in the los bit map.";
-          } else {
-            // If ref is on the allocation stack, then it is considered
-            // mark/alive (but not necessarily on the live stack.)
-            if (IsOnAllocStack(obj)) {
-              LOG(INFO) << "holder is on the alloc stack.";
-            } else {
-              LOG(INFO) << "holder is not marked or on the alloc stack.";
-            }
-          }
-        }
-      }
-      LOG(INFO) << "offset=" << offset.SizeValue();
+      LogFromSpaceRefHolder(obj, offset);
     }
+    ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
     CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
   } else {
-    // In a non-moving spaces. Check that the ref is marked.
-    if (immune_region_.ContainsObject(ref)) {
-      accounting::ContinuousSpaceBitmap* cc_bitmap =
-          cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
-      CHECK(cc_bitmap != nullptr)
-          << "An immune space ref must have a bitmap. " << ref;
-      if (kUseBakerReadBarrier) {
-        CHECK(cc_bitmap->Test(ref))
-            << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
-            << obj->GetReadBarrierPointer() << " ref=" << ref;
-      } else {
-        CHECK(cc_bitmap->Test(ref))
-            << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
-      }
+    AssertToSpaceInvariantInNonMovingSpace(obj, ref);
+  }
+}
+
+class RootPrinter {
+ public:
+  RootPrinter() { }
+
+  template <class MirrorType>
+  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  template <class MirrorType>
+  void VisitRoot(mirror::Object** root)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
+  }
+
+  template <class MirrorType>
+  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
+  }
+};
+
+void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
+                                               mirror::Object* ref) {
+  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
+  if (is_asserting_to_space_invariant_) {
+    if (region_space_->IsInToSpace(ref)) {
+      // OK.
+      return;
+    } else if (region_space_->IsInUnevacFromSpace(ref)) {
+      CHECK(region_space_bitmap_->Test(ref)) << ref;
+    } else if (region_space_->IsInFromSpace(ref)) {
+      // Not OK. Do extra logging.
+      if (gc_root_source == nullptr) {
+        // No info.
+      } else if (gc_root_source->HasArtField()) {
+        ArtField* field = gc_root_source->GetArtField();
+        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
+        RootPrinter root_printer;
+        field->VisitRoots(root_printer);
+      } else if (gc_root_source->HasArtMethod()) {
+        ArtMethod* method = gc_root_source->GetArtMethod();
+        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
+        RootPrinter root_printer;
+        method->VisitRoots(root_printer);
+      }
+      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
+      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
+      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
+      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
+      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
+    } else {
+      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
+    }
+  }
+}
+
+void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
+  if (kUseBakerReadBarrier) {
+    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
+              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
+  } else {
+    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
+  }
+  if (region_space_->IsInFromSpace(obj)) {
+    LOG(INFO) << "holder is in the from-space.";
+  } else if (region_space_->IsInToSpace(obj)) {
+    LOG(INFO) << "holder is in the to-space.";
+  } else if (region_space_->IsInUnevacFromSpace(obj)) {
+    LOG(INFO) << "holder is in the unevac from-space.";
+    if (region_space_bitmap_->Test(obj)) {
+      LOG(INFO) << "holder is marked in the region space bitmap.";
+    } else {
+      LOG(INFO) << "holder is not marked in the region space bitmap.";
+    }
+  } else {
+    // In a non-moving space.
+    if (immune_region_.ContainsObject(obj)) {
+      LOG(INFO) << "holder is in the image or the zygote space.";
+      accounting::ContinuousSpaceBitmap* cc_bitmap =
+          cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
+      CHECK(cc_bitmap != nullptr)
+          << "An immune space object must have a bitmap.";
+      if (cc_bitmap->Test(obj)) {
+        LOG(INFO) << "holder is marked in the bit map.";
       } else {
-        accounting::ContinuousSpaceBitmap* mark_bitmap =
-            heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
-        accounting::LargeObjectBitmap* los_bitmap =
-            heap_mark_bitmap_->GetLargeObjectBitmap(ref);
-        CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
-        bool is_los = mark_bitmap == nullptr;
-        if ((!is_los && mark_bitmap->Test(ref)) ||
-            (is_los && los_bitmap->Test(ref))) {
-          // OK.
+        LOG(INFO) << "holder is NOT marked in the bit map.";
+      }
+    } else {
+      LOG(INFO) << "holder is in a non-moving (or main) space.";
+      accounting::ContinuousSpaceBitmap* mark_bitmap =
+          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
+      accounting::LargeObjectBitmap* los_bitmap =
+          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
+      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
+      bool is_los = mark_bitmap == nullptr;
+      if (!is_los && mark_bitmap->Test(obj)) {
+        LOG(INFO) << "holder is marked in the mark bit map.";
+      } else if (is_los && los_bitmap->Test(obj)) {
+        LOG(INFO) << "holder is marked in the los bit map.";
+      } else {
+        // If ref is on the allocation stack, then it is considered
+        // marked/alive (but not necessarily on the live stack).
+        if (IsOnAllocStack(obj)) {
+          LOG(INFO) << "holder is on the alloc stack.";
         } else {
-          // If ref is on the allocation stack, then it may not be
-          // marked live, but considered marked/alive (but not
-          // necessarily on the live stack).
-          CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
-                                     << "obj=" << obj << " ref=" << ref;
+          LOG(INFO) << "holder is not marked or on the alloc stack.";
         }
       }
     }
   }
+  LOG(INFO) << "offset=" << offset.SizeValue();
+}
+
+void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
+                                                               mirror::Object* ref) {
+  // In a non-moving space. Check that the ref is marked.
+  if (immune_region_.ContainsObject(ref)) {
+    accounting::ContinuousSpaceBitmap* cc_bitmap =
+        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
+    CHECK(cc_bitmap != nullptr)
+        << "An immune space ref must have a bitmap. " << ref;
+    if (kUseBakerReadBarrier) {
+      CHECK(cc_bitmap->Test(ref))
+          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
+          << obj->GetReadBarrierPointer() << " ref=" << ref;
+    } else {
+      CHECK(cc_bitmap->Test(ref))
+          << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
+    }
+  } else {
+    accounting::ContinuousSpaceBitmap* mark_bitmap =
+        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
+    accounting::LargeObjectBitmap* los_bitmap =
+        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
+    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
+    bool is_los = mark_bitmap == nullptr;
+    if ((!is_los && mark_bitmap->Test(ref)) ||
+        (is_los && los_bitmap->Test(ref))) {
+      // OK.
+    } else {
+      // If ref is on the allocation stack, then it may not be
+      // marked live, but considered marked/alive (but not
+      // necessarily on the live stack).
+      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
+                                 << "obj=" << obj << " ref=" << ref;
+    }
+  }
 }
 
 // Used to scan ref fields of an object.
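Note that RootPrinter does not derive from RootVisitor: as the calls above imply, ArtField::VisitRoots() and ArtMethod::VisitRoots() accept any visitor that provides VisitRootIfNonNull()/VisitRoot() for compressed-reference slots, which is what lets a file-local helper class work here. It also logs at INTERNAL_FATAL without aborting, so the trailing CHECK(false) still controls the crash. A hypothetical visitor of the same shape, counting roots instead of logging them (sketch, not in the patch):

    class RootCounter {
     public:
      template <class MirrorType>
      ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
        if (!root->IsNull()) {
          VisitRoot(root);
        }
      }

      template <class MirrorType>
      void VisitRoot(mirror::CompressedReference<MirrorType>* /* root */)
          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
        ++count_;  // tally each non-null root slot instead of logging it
      }

      size_t count_ = 0;
    };

    // Used the same way as RootPrinter above:
    //   RootCounter counter;
    //   field->VisitRoots(counter);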
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 60ea6b6444..b1897b82f4 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -169,6 +169,8 @@ class ConcurrentCopying : public GarbageCollector {
   }
   void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(ref != nullptr);
     return IsMarked(ref) == ref;
@@ -236,6 +238,10 @@ class ConcurrentCopying : public GarbageCollector {
   void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void RecordLiveStackFreezeSize(Thread* self);
   void ComputeUnevacFromSpaceLiveRatio();
+  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   space::RegionSpace* region_space_;  // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;
diff --git a/runtime/gc_root-inl.h b/runtime/gc_root-inl.h
index 57d5689e56..ae8a38f43e 100644
--- a/runtime/gc_root-inl.h
+++ b/runtime/gc_root-inl.h
@@ -27,9 +27,9 @@ namespace art {
 template<class MirrorType>
 template<ReadBarrierOption kReadBarrierOption>
-inline MirrorType* GcRoot<MirrorType>::Read() const {
+inline MirrorType* GcRoot<MirrorType>::Read(GcRootSource* gc_root_source) const {
   return down_cast<MirrorType*>(
-      ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(&root_));
+      ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(&root_, gc_root_source));
 }
 
 template<class MirrorType>
 inline GcRoot<MirrorType>::GcRoot(MirrorType* ref)
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index b67e9c29b4..d6146f348f 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -22,6 +22,8 @@
 #include "mirror/object_reference.h"
 
 namespace art {
+class ArtField;
+class ArtMethod;
 
 namespace mirror {
 class Object;
@@ -129,11 +131,43 @@ class SingleRootVisitor : public RootVisitor {
   virtual void VisitRoot(mirror::Object* root, const RootInfo& info) = 0;
 };
 
+class GcRootSource {
+ public:
+  GcRootSource()
+      : field_(nullptr), method_(nullptr) {
+  }
+  explicit GcRootSource(ArtField* field)
+      : field_(field), method_(nullptr) {
+  }
+  explicit GcRootSource(ArtMethod* method)
+      : field_(nullptr), method_(method) {
+  }
+  ArtField* GetArtField() const {
+    return field_;
+  }
+  ArtMethod* GetArtMethod() const {
+    return method_;
+  }
+  bool HasArtField() const {
+    return field_ != nullptr;
+  }
+  bool HasArtMethod() const {
+    return method_ != nullptr;
+  }
+
+ private:
+  ArtField* const field_;
+  ArtMethod* const method_;
+
+  DISALLOW_COPY_AND_ASSIGN(GcRootSource);
+};
+
 template<class MirrorType>
 class GcRoot {
  public:
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE MirrorType* Read() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index aafbfe4159..a290575bac 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -210,6 +210,10 @@ class LockWord {
     return lw1.GetValueWithoutReadBarrierState() == lw2.GetValueWithoutReadBarrierState();
   }
 
+  void Dump(std::ostream& os) {
+    os << "LockWord:" << std::hex << value_;
+  }
+
  private:
   // Default constructor with no lock ownership.
   LockWord();
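Two details of the new types are easy to miss. GcRootSource keeps at most one holder behind const members plus DISALLOW_COPY_AND_ASSIGN, so an instance is an immutable, stack-only tag: HasArtField() and HasArtMethod() can never both be true, and a default-constructed instance means "no holder information". And LockWord::Dump() just streams the raw word in hex; the sticky std::hex manipulator should be harmless at the call sites above, since each LOG(INTERNAL_FATAL) produces a fresh stream. A sketch of the three states, assuming hypothetical non-null field/method pointers (not from the patch):

    void GcRootSourceStates(ArtField* field, ArtMethod* method) {
      GcRootSource none;                  // default: no holder information
      CHECK(!none.HasArtField() && !none.HasArtMethod());
      GcRootSource from_field(field);     // reported via PrettyField() on failure
      CHECK(from_field.HasArtField());
      GcRootSource from_method(method);   // reported via PrettyMethod() on failure
      CHECK(from_method.HasArtMethod());
    }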
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 8d84c35bd9..701481392f 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -74,7 +74,8 @@ inline MirrorType* ReadBarrier::Barrier(
 }
 
 template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
-inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
+inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
+                                               GcRootSource* gc_root_source) {
   MirrorType* ref = *root;
   const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
   if (with_read_barrier && kUseBakerReadBarrier) {
@@ -87,7 +88,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
     if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
       ref = reinterpret_cast<MirrorType*>(Mark(ref));
     }
-    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+    AssertToSpaceInvariant(gc_root_source, ref);
     return ref;
   } else if (with_read_barrier && kUseBrooksReadBarrier) {
     // To be implemented.
@@ -105,7 +106,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
       Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
       atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
     }
-    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+    AssertToSpaceInvariant(gc_root_source, ref);
     return ref;
   } else {
     return ref;
@@ -114,7 +115,8 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
 
 // TODO: Reduce copy paste
 template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
-inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root) {
+inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
+                                               GcRootSource* gc_root_source) {
   MirrorType* ref = root->AsMirrorPtr();
   const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
   if (with_read_barrier && kUseBakerReadBarrier) {
@@ -127,7 +129,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<Mirro
     if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
     }
-    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+    AssertToSpaceInvariant(gc_root_source, ref);
     return ref;
   } else if (with_read_barrier && kUseBrooksReadBarrier) {
     // To be implemented.
@@ -147,7 +149,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<Mirro
           reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
       atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
     }
-    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+    AssertToSpaceInvariant(gc_root_source, ref);
     return ref;
   } else {
     return ref;
@@ -183,6 +185,17 @@ inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffse
   }
 }
 
+inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
+                                                mirror::Object* ref) {
+  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
+    if (ref == nullptr || IsDuringStartup()) {
+      return;
+    }
+    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
+        AssertToSpaceInvariant(gc_root_source, ref);
+  }
+}
+
 inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
   return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
 }
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index aa72e97328..55cef6826a 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -19,6 +19,7 @@
 
 #include "base/mutex.h"
 #include "base/macros.h"
+#include "gc_root.h"
 #include "jni.h"
 #include "mirror/object_reference.h"
 #include "offsets.h"
@@ -54,14 +55,16 @@ class ReadBarrier {
   // whereas the return value must be an updated reference.
   template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
             bool kMaybeDuringStartup = false>
-  ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root)
+  ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root,
+                                                  GcRootSource* gc_root_source = nullptr)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // It's up to the implementation whether the given root gets updated
   // whereas the return value must be an updated reference.
   template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
             bool kMaybeDuringStartup = false>
-  ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root)
+  ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
+                                                  GcRootSource* gc_root_source = nullptr)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static bool IsDuringStartup();
@@ -75,6 +78,9 @@ class ReadBarrier {
   static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                      mirror::Object* ref)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // With GcRootSource.
+  static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static mirror::Object* Mark(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
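Taken together, the plumbing reads: accessor -> GcRoot<T>::Read(&gc_root_source) -> ReadBarrier::BarrierForRoot(root, gc_root_source) -> ReadBarrier::AssertToSpaceInvariant(gc_root_source, ref) -> ConcurrentCopying::AssertToSpaceInvariant(gc_root_source, ref). The new assertion body is compiled in only when kEnableToSpaceInvariantChecks || kIsDebugBuild, so release builds pay little beyond forwarding one extra (usually nullptr) pointer; on a from-space root, the collector names the holding field or method via PrettyField()/PrettyMethod(), prints its root slots with RootPrinter, and dumps the lock word, the non-free regions, and the memory maps before CHECK-failing.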