Diffstat (limited to 'runtime')
 runtime/debugger.cc                            | 17
 runtime/gc/accounting/space_bitmap-inl.h       | 14
 runtime/gc/accounting/space_bitmap.cc          | 23
 runtime/gc/accounting/space_bitmap.h           |  4
 runtime/gc/collector/concurrent_copying-inl.h  | 48
 runtime/gc/collector/concurrent_copying.cc     | 27
 runtime/gc/collector/concurrent_copying.h      |  4
 runtime/mirror/object-inl.h                    | 19
 runtime/mirror/object.h                        |  9
 9 files changed, 113 insertions(+), 52 deletions(-)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index cbdf3dc636..a5b0689473 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4058,7 +4058,7 @@ void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInv
   // Prepare JDWP ids for the reply.
   JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
   const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
-  StackHandleScope<2> hs(soa.Self());
+  StackHandleScope<3> hs(soa.Self());
   Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
   Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
   soa.Self()->ClearException();
@@ -4097,10 +4097,17 @@ void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInv
     // unless we threw, in which case we return null.
     DCHECK_EQ(JDWP::JT_VOID, result_tag);
     if (exceptionObjectId == 0) {
-      // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
-      // object registry.
-      result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
-      result_tag = TagFromObject(soa, pReq->receiver.Read());
+      if (m->GetDeclaringClass()->IsStringClass()) {
+        // For string constructors, the new string is remapped to the receiver (stored in ref).
+        Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get()));
+        result_value = gRegistry->Add(decoded_ref);
+        result_tag = TagFromObject(soa, decoded_ref.Get());
+      } else {
+        // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
+        // object registry.
+        result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
+        result_tag = TagFromObject(soa, pReq->receiver.Read());
+      }
     } else {
       result_value = 0;
       result_tag = JDWP::JT_OBJECT;
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 4cf5b4f643..9feaf415a5 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -36,7 +36,7 @@ inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj)
   const uintptr_t offset = addr - heap_begin_;
   const size_t index = OffsetToIndex(offset);
   const uintptr_t mask = OffsetToMask(offset);
-  Atomic<uintptr_t>* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[index]);
+  Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
   DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
   uintptr_t old_word;
   do {
@@ -58,7 +58,7 @@ inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
   DCHECK(bitmap_begin_ != nullptr);
   DCHECK_GE(addr, heap_begin_);
   const uintptr_t offset = addr - heap_begin_;
-  return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
+  return (bitmap_begin_[OffsetToIndex(offset)].LoadRelaxed() & OffsetToMask(offset)) != 0;
 }
 
 template<size_t kAlignment> template<typename Visitor>
@@ -116,7 +116,7 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uin
 
   // Traverse the middle, full part.
   for (size_t i = index_start + 1; i < index_end; ++i) {
-    uintptr_t w = bitmap_begin_[i];
+    uintptr_t w = bitmap_begin_[i].LoadRelaxed();
     if (w != 0) {
       const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       do {
@@ -164,8 +164,8 @@ inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
   const size_t index = OffsetToIndex(offset);
   const uintptr_t mask = OffsetToMask(offset);
   DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
-  uintptr_t* address = &bitmap_begin_[index];
-  uintptr_t old_word = *address;
+  Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
+  uintptr_t old_word = atomic_entry->LoadRelaxed();
   if (kSetBit) {
     // Check the bit before setting the word incase we are trying to mark a read only bitmap
     // like an image space bitmap. This bitmap is mapped as read only and will fault if we
@@ -173,10 +173,10 @@ inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
     // occur if we check before setting the bit. This also prevents dirty pages that would
     // occur if the bitmap was read write and we did not check the bit.
     if ((old_word & mask) == 0) {
-      *address = old_word | mask;
+      atomic_entry->StoreRelaxed(old_word | mask);
     }
   } else {
-    *address = old_word & ~mask;
+    atomic_entry->StoreRelaxed(old_word & ~mask);
   }
   DCHECK_EQ(Test(obj), kSetBit);
   return (old_word & mask) != 0;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index b43f77f6b7..3df02ed443 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -51,7 +51,9 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin,
                                      size_t bitmap_size, const void* heap_begin)
-    : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
+    : mem_map_(mem_map),
+      bitmap_begin_(reinterpret_cast<Atomic<uintptr_t>*>(bitmap_begin)),
+      bitmap_size_(bitmap_size),
       heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
       name_(name) {
   CHECK(bitmap_begin_ != nullptr);
@@ -104,7 +106,12 @@ void SpaceBitmap<kAlignment>::Clear() {
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
   DCHECK_EQ(Size(), source_bitmap->Size());
-  std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / sizeof(intptr_t), Begin());
+  const size_t count = source_bitmap->Size() / sizeof(intptr_t);
+  Atomic<uintptr_t>* const src = source_bitmap->Begin();
+  Atomic<uintptr_t>* const dest = Begin();
+  for (size_t i = 0; i < count; ++i) {
+    dest[i].StoreRelaxed(src[i].LoadRelaxed());
+  }
 }
 
 template<size_t kAlignment>
@@ -113,9 +120,9 @@ void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
   CHECK(callback != nullptr);
 
   uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
-  uintptr_t* bitmap_begin = bitmap_begin_;
+  Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
   for (uintptr_t i = 0; i <= end; ++i) {
-    uintptr_t w = bitmap_begin[i];
+    uintptr_t w = bitmap_begin[i].LoadRelaxed();
     if (w != 0) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       do {
@@ -160,10 +167,10 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm
   size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
   size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
   CHECK_LT(end, live_bitmap.Size() / sizeof(intptr_t));
-  uintptr_t* live = live_bitmap.bitmap_begin_;
-  uintptr_t* mark = mark_bitmap.bitmap_begin_;
+  Atomic<uintptr_t>* live = live_bitmap.bitmap_begin_;
+  Atomic<uintptr_t>* mark = mark_bitmap.bitmap_begin_;
   for (size_t i = start; i <= end; i++) {
-    uintptr_t garbage = live[i] & ~mark[i];
+    uintptr_t garbage = live[i].LoadRelaxed() & ~mark[i].LoadRelaxed();
     if (UNLIKELY(garbage != 0)) {
       uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
       do {
@@ -251,7 +258,7 @@ void SpaceBitmap<kAlignment>::InOrderWalk(ObjectCallback* callback, void* arg) {
   uintptr_t end = Size() / sizeof(intptr_t);
   for (uintptr_t i = 0; i < end; ++i) {
     // Need uint for unsigned shift.
-    uintptr_t w = bitmap_begin_[i];
+    uintptr_t w = bitmap_begin_[i].LoadRelaxed();
     if (UNLIKELY(w != 0)) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       while (w != 0) {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index b8ff471c69..829b1b1644 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -147,7 +147,7 @@ class SpaceBitmap {
   void CopyFrom(SpaceBitmap* source_bitmap);
 
   // Starting address of our internal storage.
-  uintptr_t* Begin() {
+  Atomic<uintptr_t>* Begin() {
     return bitmap_begin_;
   }
 
@@ -215,7 +215,7 @@ class SpaceBitmap {
   std::unique_ptr<MemMap> mem_map_;
 
   // This bitmap itself, word sized for efficiency in scanning.
-  uintptr_t* const bitmap_begin_;
+  Atomic<uintptr_t>* const bitmap_begin_;
 
   // Size of this bitmap.
   size_t bitmap_size_;
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index fb774a4d1e..76f500c204 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -34,32 +34,27 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
   // to gray even though the object has already been marked through. This happens if a mutator
   // thread gets preempted before the AtomicSetReadBarrierPointer below, GC marks through the
   // object (changes it from white to gray and back to white), and the thread runs and
-  // incorrectly changes it from white to gray. We need to detect such "false gray" cases and
-  // change the objects back to white at the end of marking.
+  // incorrectly changes it from white to gray. If this happens, the object will get added to the
+  // mark stack again and get changed back to white after it is processed.
   if (kUseBakerReadBarrier) {
-    // Test the bitmap first to reduce the chance of false gray cases.
+    // Test the bitmap first to avoid graying an object that has already been marked through most
+    // of the time.
     if (bitmap->Test(ref)) {
       return ref;
     }
   }
   // This may or may not succeed, which is ok because the object may already be gray.
-  bool cas_success = false;
+  bool success = false;
   if (kUseBakerReadBarrier) {
-    cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
-                                                   ReadBarrier::GrayPtr());
-  }
-  if (bitmap->AtomicTestAndSet(ref)) {
-    // Already marked.
-    if (kUseBakerReadBarrier &&
-        cas_success &&
-        // The object could be white here if a thread gets preempted after a success at the
-        // above AtomicSetReadBarrierPointer, GC has marked through it, and the thread runs up
-        // to this point.
-        ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
-      // Register a "false-gray" object to change it from gray to white at the end of marking.
-      PushOntoFalseGrayStack(ref);
-    }
+    // GC will mark the bitmap when popping from mark stack. If only the GC is touching the bitmap
+    // we can avoid an expensive CAS.
+    // For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
+    // set.
+    success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
   } else {
+    success = !bitmap->AtomicTestAndSet(ref);
+  }
+  if (success) {
     // Newly marked.
     if (kUseBakerReadBarrier) {
       DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
@@ -99,13 +94,16 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
   return ref;
 }
 
-template<bool kGrayImmuneObject>
+template<bool kGrayImmuneObject, bool kFromGCThread>
 inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
   if (from_ref == nullptr) {
     return nullptr;
   }
   DCHECK(heap_->collector_type_ == kCollectorTypeCC);
-  if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
+  if (kFromGCThread) {
+    DCHECK(is_active_);
+    DCHECK_EQ(Thread::Current(), thread_running_gc_);
+  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
     // In the lock word forward address state, the read barrier bits
     // in the lock word are part of the stored forwarding address and
     // invalid. This is usually OK as the from-space copy of objects
@@ -192,6 +190,16 @@ inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
   }
 }
 
+inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
+  // Use load acquire on the read barrier pointer to ensure that we never see a white read barrier
+  // pointer with an unmarked bit due to reordering.
+  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
+  if (kUseBakerReadBarrier && from_ref->GetReadBarrierPointerAcquire() == ReadBarrier::GrayPtr()) {
+    return true;
+  }
+  return region_space_bitmap_->Test(from_ref);
+}
+
 }  // namespace collector
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 42816a04f1..651669e325 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1302,8 +1302,19 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
         << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
         << " is_marked=" << IsMarked(to_ref);
   }
-  // Scan ref fields.
-  Scan(to_ref);
+  bool add_to_live_bytes = false;
+  if (region_space_->IsInUnevacFromSpace(to_ref)) {
+    // Mark the bitmap only in the GC thread here so that we don't need a CAS.
+    if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
+      // It may be already marked if we accidentally pushed the same object twice due to the racy
+      // bitmap read in MarkUnevacFromSpaceRegion.
+      Scan(to_ref);
+      // Only add to the live bytes if the object was not already marked.
+      add_to_live_bytes = true;
+    }
+  } else {
+    Scan(to_ref);
+  }
   if (kUseBakerReadBarrier) {
     DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
         << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
@@ -1332,7 +1343,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
   DCHECK(!kUseBakerReadBarrier);
 #endif
 
-  if (region_space_->IsInUnevacFromSpace(to_ref)) {
+  if (add_to_live_bytes) {
     // Add to the live bytes per unevacuated from space. Note this code is always run by the
     // GC-running thread (no synchronization required).
     DCHECK(region_space_bitmap_->Test(to_ref));
@@ -1567,7 +1578,7 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset
       // OK.
      return;
     } else if (region_space_->IsInUnevacFromSpace(ref)) {
-      CHECK(region_space_bitmap_->Test(ref)) << ref;
+      CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
       // Not OK. Do extra logging.
       if (obj != nullptr) {
@@ -1614,7 +1625,7 @@ void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
       // OK.
       return;
     } else if (region_space_->IsInUnevacFromSpace(ref)) {
-      CHECK(region_space_bitmap_->Test(ref)) << ref;
+      CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
       // Not OK. Do extra logging.
       if (gc_root_source == nullptr) {
@@ -1654,7 +1665,7 @@ void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset
     LOG(INFO) << "holder is in the to-space.";
   } else if (region_space_->IsInUnevacFromSpace(obj)) {
     LOG(INFO) << "holder is in the unevac from-space.";
-    if (region_space_bitmap_->Test(obj)) {
+    if (IsMarkedInUnevacFromSpace(obj)) {
       LOG(INFO) << "holder is marked in the region space bitmap.";
     } else {
       LOG(INFO) << "holder is not marked in the region space bitmap.";
@@ -1783,7 +1794,7 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   mirror::Object* ref = obj->GetFieldObject<
       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
-  mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
+  mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(ref);
   if (to_ref == ref) {
     return;
   }
@@ -2126,7 +2137,7 @@ mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
            heap_->non_moving_space_->HasAddress(to_ref))
         << "from_ref=" << from_ref << " to_ref=" << to_ref;
   } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
-    if (region_space_bitmap_->Test(from_ref)) {
+    if (IsMarkedInUnevacFromSpace(from_ref)) {
       to_ref = from_ref;
     } else {
       to_ref = nullptr;
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 5b0e2d6274..97f45551cf 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -104,7 +104,7 @@ class ConcurrentCopying : public GarbageCollector {
     DCHECK(ref != nullptr);
     return IsMarked(ref) == ref;
   }
-  template<bool kGrayImmuneObject = true>
+  template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
   ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref)
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
@@ -179,6 +179,8 @@ class ConcurrentCopying : public GarbageCollector {
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
       SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
       SHARED_REQUIRES(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 0495c957c6..27f8bd72d6 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -147,6 +147,18 @@ inline Object* Object::GetReadBarrierPointer() {
 #endif
 }
 
+inline Object* Object::GetReadBarrierPointerAcquire() {
+#ifdef USE_BAKER_READ_BARRIER
+  DCHECK(kUseBakerReadBarrier);
+  LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+  return reinterpret_cast<Object*>(lw.ReadBarrierState());
+#else
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
+#endif
+}
+
+
 inline uint32_t Object::GetMarkBit() {
 #ifdef USE_READ_BARRIER
   return GetLockWord(false).MarkBitState();
@@ -814,6 +826,13 @@ inline kSize Object::GetField(MemberOffset field_offset) {
   }
 }
 
+template<typename kSize>
+inline kSize Object::GetFieldAcquire(MemberOffset field_offset) {
+  const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
+  const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+  return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadAcquire();
+}
+
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset,
                                                          int64_t old_value, int64_t new_value) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 5b129bf2ba..864929444a 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -93,9 +93,12 @@ class MANAGED LOCKABLE Object {
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // TODO: Clean this up and change to return int32_t
+  // TODO: Clean these up and change to return int32_t
   Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Get the read barrier pointer with release semantics, only supported for baker.
+  Object* GetReadBarrierPointerAcquire() SHARED_REQUIRES(Locks::mutator_lock_);
+
 #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
   NO_RETURN
 #endif
@@ -574,6 +577,10 @@ class MANAGED LOCKABLE Object {
   template<typename kSize, bool kIsVolatile>
   ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  // Get a field with acquire semantics.
+  template<typename kSize>
+  ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Verify the type correctness of stores to fields.
   // TODO: This can cause thread suspension and isn't moving GC safe.
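The part of the change most sensitive to memory ordering is the new IsMarkedInUnevacFromSpace()/GetReadBarrierPointerAcquire() pair: per the in-code comment, the acquire load guarantees the caller never observes a white read barrier pointer together with an unmarked bitmap bit. Below is a standalone C++ sketch of that release/acquire pairing. It is not ART code; it assumes the GC-side transition from gray to white is published with release ordering (which the acquire on the reader side presumes), and every name in it (bitmap_word, rb_state, IsMarkedSketch) is illustrative only.

// Standalone sketch (not ART code) of the ordering IsMarkedInUnevacFromSpace() relies on.
// Assumption: the GC sets the bitmap bit first and then flips gray -> white with a release
// store; the reader pairs that with an acquire load, so seeing white implies the bit is visible.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

namespace {

constexpr int kGray = 1;   // queued for marking / being marked
constexpr int kWhite = 0;  // marking of this object finished

std::atomic<uintptr_t> bitmap_word{0};  // stands in for one region-space bitmap word
std::atomic<int> rb_state{kGray};       // stands in for the object's read barrier state

// GC thread: mark the object in the bitmap, then change its state from gray to white.
void GcThread() {
  bitmap_word.fetch_or(1u, std::memory_order_relaxed);
  rb_state.store(kWhite, std::memory_order_release);  // publishes the bitmap update
}

// Analogue of IsMarkedInUnevacFromSpace(): gray counts as marked; otherwise fall back to
// the bitmap, which the acquire load above makes visible.
bool IsMarkedSketch() {
  if (rb_state.load(std::memory_order_acquire) == kGray) {
    return true;
  }
  // Because the load above is acquire and the GC's store is release, reaching this point
  // with kWhite guarantees the bit set in GcThread() is visible here.
  return (bitmap_word.load(std::memory_order_relaxed) & 1u) != 0;
}

}  // namespace

int main() {
  std::thread gc(GcThread);
  std::thread mutator([] { assert(IsMarkedSketch()); });
  gc.join();
  mutator.join();
  return 0;
}

If the reader's first load were relaxed instead of acquire, nothing would forbid it from observing kWhite while the bitmap update is still invisible, which is exactly the reordering the patch's comment warns against.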