-rw-r--r--   runtime/gc/reference_processor.cc          | 14
-rw-r--r--   runtime/gc/reference_processor.h           | 11
-rw-r--r--   runtime/gc/reference_queue.cc              | 34
-rw-r--r--   runtime/gc/reference_queue.h               |  9
-rw-r--r--   runtime/gc/reference_queue_test.cc         |  4
-rw-r--r--   runtime/interpreter/unstarted_runtime.cc   |  4
-rw-r--r--   runtime/mirror/reference-inl.h             | 20
-rw-r--r--   runtime/mirror/reference.cc                |  4
-rw-r--r--   runtime/mirror/reference.h                 | 21
-rw-r--r--   runtime/native/java_lang_ref_Reference.cc  |  4
10 files changed, 72 insertions(+), 53 deletions(-)
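The change repeated throughout this diff replaces raw mirror pointers (mirror::Object*, mirror::Reference*, ...) in GC and runtime signatures with the ObjPtr<T> value wrapper. The sketch below shows the rough shape of such a wrapper; it is a minimal illustration under stated assumptions, not ART's actual obj_ptr.h (which, in debug builds, also encodes a thread-local cookie in the unused upper address bits so stale pointers held across a suspend point can be detected).

#include <cstddef>
#include <cstdint>

// Minimal ObjPtr-style wrapper (illustrative; not ART's obj_ptr.h).
template <typename MirrorType>
class ObjPtrSketch {
 public:
  // Implicit conversion from a raw pointer keeps most call sites unchanged.
  ObjPtrSketch(MirrorType* ptr)  // NOLINT: implicit on purpose.
      : reference_(reinterpret_cast<uintptr_t>(ptr)) {}
  ObjPtrSketch(std::nullptr_t) : reference_(0u) {}  // NOLINT

  // Explicit escape hatch back to a raw pointer, needed at boundaries such as
  // raw-pointer containers or fields shared across threads (see the list_
  // field and the test changes below).
  MirrorType* Ptr() const { return reinterpret_cast<MirrorType*>(reference_); }

  MirrorType* operator->() const { return Ptr(); }
  bool operator==(std::nullptr_t) const { return reference_ == 0u; }
  bool operator!=(std::nullptr_t) const { return reference_ != 0u; }

 private:
  uintptr_t reference_;  // Real ObjPtr reserves high bits for validity checks.
};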
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 96945978af..4b8f38d709 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,12 +60,13 @@ void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
   condition_.Broadcast(self);
 }
 
-mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
+ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
+                                                       ObjPtr<mirror::Reference> reference) {
   if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
     // Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
     // weak ref access is disabled as the call includes a read barrier which may push a ref onto the
     // mark stack and interfere with termination of marking.
-    mirror::Object* const referent = reference->GetReferent();
+    ObjPtr<mirror::Object> const referent = reference->GetReferent();
     // If the referent is null then it is already cleared, we can just return null since there is no
     // scenario where it becomes non-null during the reference processing phase.
     if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
@@ -116,7 +117,8 @@ void ReferenceProcessor::StopPreservingReferences(Thread* self) {
 }
 
 // Process reference class instances and schedule finalizations.
-void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
+void ReferenceProcessor::ProcessReferences(bool concurrent,
+                                           TimingLogger* timings,
                                            bool clear_soft_references,
                                            collector::GarbageCollector* collector) {
   TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
@@ -188,7 +190,8 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
 
 // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
-void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+                                                ObjPtr<mirror::Reference> ref,
                                                 collector::GarbageCollector* collector) {
   // klass can be the class of the old object if the visitor already updated the class of ref.
   DCHECK(klass != nullptr);
@@ -260,7 +263,8 @@ void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
   }
 }
 
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+    ObjPtr<mirror::FinalizerReference> reference) {
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::reference_processor_lock_);
   // Wait until we are done processing reference.
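Note on the GetReferent() fast path above: with the concurrent copying collector, reading a referent is only safe while weak-reference access is enabled; otherwise the caller must wait, and BroadcastForSlowPath() (first hunk) wakes those waiters once processing finishes. The standalone analogue below models just that gate with a condition variable; the class and method names are invented for illustration and do not exist in ART.

#include <condition_variable>
#include <mutex>

// Standalone analogue of the slow-path gating (illustrative only): readers of
// a weak referent block while processing is in progress, and the processor
// broadcasts when it finishes, mirroring condition_.Broadcast(self) above.
class WeakRefGate {
 public:
  void StartProcessing() {
    std::lock_guard<std::mutex> lock(mu_);
    processing_ = true;
  }
  void FinishProcessing() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      processing_ = false;
    }
    cv_.notify_all();  // Analogous to BroadcastForSlowPath().
  }
  template <typename ReadFn>
  auto ReadReferent(ReadFn read) {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return !processing_; });
    return read();  // Only read the referent once processing is done.
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool processing_ = false;
};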
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 4788f8a3c0..759b7e129c 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -46,7 +46,9 @@ class Heap;
 class ReferenceProcessor {
  public:
   explicit ReferenceProcessor();
-  void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
+  void ProcessReferences(bool concurrent,
+                         TimingLogger* timings,
+                         bool clear_soft_references,
                          gc::collector::GarbageCollector* collector)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_)
@@ -57,16 +59,17 @@ class ReferenceProcessor {
   void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
   void BroadcastForSlowPath(Thread* self);
   // Decode the referent, may block if references are being processed.
-  mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
+  ObjPtr<mirror::Object> GetReferent(Thread* self, ObjPtr<mirror::Reference> reference)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
   void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
-  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+                              ObjPtr<mirror::Reference> ref,
                               collector::GarbageCollector* collector)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void UpdateRoots(IsMarkedVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   // Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
-  bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+  bool MakeCircularListIfUnenqueued(ObjPtr<mirror::FinalizerReference> reference)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::reference_processor_lock_,
                !Locks::reference_queue_finalizer_references_lock_);
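A side note on the REQUIRES / REQUIRES_SHARED clauses decorating these declarations: they are compile-time lock contracts checked by Clang's thread-safety analysis, and a negated argument such as !Locks::reference_processor_lock_ asserts that the caller must not already hold that lock. A minimal self-contained sketch of the underlying attributes follows; the macro and lock names are illustrative rather than ART's exact definitions.

// Compile with: clang++ -std=c++14 -Wthread-safety -c tsa_sketch.cc
#include <mutex>

#define CAPABILITY(x)        __attribute__((capability(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Lock {
 public:
  void Acquire() ACQUIRE() { mu_.lock(); }
  void Release() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

Lock heap_lock;

// Clang warns at compile time if a caller cannot prove it holds heap_lock.
void MustHoldHeapLock() REQUIRES(heap_lock) {}

void Caller() {
  heap_lock.Acquire();
  MustHoldHeapLock();  // OK: the lock is provably held here.
  heap_lock.Release();
}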
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 62625c41b4..4e6f7da5f0 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -29,7 +29,7 @@ namespace gc {
 ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
 }
 
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
   DCHECK(ref != nullptr);
   MutexLock mu(self, *lock_);
   if (ref->IsUnprocessed()) {
@@ -37,16 +37,16 @@ void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference*
   }
 }
 
-void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
   DCHECK(ref != nullptr);
   CHECK(ref->IsUnprocessed());
   if (IsEmpty()) {
     // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
-    list_ = ref;
+    list_ = ref.Ptr();
   } else {
     // The list is owned by the GC, everything that has been inserted must already be at least
     // gray.
-    mirror::Reference* head = list_->GetPendingNext<kWithoutReadBarrier>();
+    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
     DCHECK(head != nullptr);
     ref->SetPendingNext(head);
   }
@@ -54,16 +54,16 @@ void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
   list_->SetPendingNext(ref);
 }
 
-mirror::Reference* ReferenceQueue::DequeuePendingReference() {
+ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
   DCHECK(!IsEmpty());
-  mirror::Reference* ref = list_->GetPendingNext<kWithoutReadBarrier>();
+  ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
   DCHECK(ref != nullptr);
   // Note: the following code is thread-safe because it is only called from ProcessReferences which
   // is single threaded.
   if (list_ == ref) {
     list_ = nullptr;
   } else {
-    mirror::Reference* next = ref->GetPendingNext<kWithoutReadBarrier>();
+    ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
     list_->SetPendingNext(next);
   }
   ref->SetPendingNext(nullptr);
@@ -83,10 +83,10 @@ mirror::Reference* ReferenceQueue::DequeuePendingReference() {
       // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
       // find it here, which is OK.
       CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
-      mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
+      ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
       // The referent could be null if it's cleared by a mutator (Reference.clear()).
       if (referent != nullptr) {
-        CHECK(concurrent_copying->IsInToSpace(referent))
+        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
             << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer()
             << " referent=" << referent;
       }
@@ -96,13 +96,13 @@ mirror::Reference* ReferenceQueue::DequeuePendingReference() {
 }
 
 void ReferenceQueue::Dump(std::ostream& os) const {
-  mirror::Reference* cur = list_;
+  ObjPtr<mirror::Reference> cur = list_;
   os << "Reference starting at list_=" << list_ << "\n";
   if (cur == nullptr) {
     return;
   }
   do {
-    mirror::Reference* pending_next = cur->GetPendingNext();
+    ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
     os << "Reference= " << cur << " PendingNext=" << pending_next;
     if (cur->IsFinalizerReferenceInstance()) {
       os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
@@ -114,7 +114,7 @@ void ReferenceQueue::Dump(std::ostream& os) const {
 
 size_t ReferenceQueue::GetLength() const {
   size_t count = 0;
-  mirror::Reference* cur = list_;
+  ObjPtr<mirror::Reference> cur = list_;
   if (cur != nullptr) {
     do {
       ++count;
@@ -127,7 +127,7 @@ size_t ReferenceQueue::GetLength() const {
 void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                           collector::GarbageCollector* collector) {
   while (!IsEmpty()) {
-    mirror::Reference* ref = DequeuePendingReference();
+    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     if (referent_addr->AsMirrorPtr() != nullptr &&
         !collector->IsMarkedHeapReference(referent_addr)) {
@@ -145,11 +145,11 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
 void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                 collector::GarbageCollector* collector) {
   while (!IsEmpty()) {
-    mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     if (referent_addr->AsMirrorPtr() != nullptr &&
         !collector->IsMarkedHeapReference(referent_addr)) {
-      mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
+      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
       // Move the updated referent to the zombie field.
       if (Runtime::Current()->IsActiveTransaction()) {
         ref->SetZombie<true>(forward_address);
@@ -167,8 +167,8 @@ void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
   if (UNLIKELY(IsEmpty())) {
     return;
   }
-  mirror::Reference* const head = list_;
-  mirror::Reference* ref = head;
+  ObjPtr<mirror::Reference> const head = list_;
+  ObjPtr<mirror::Reference> ref = head;
   do {
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     if (referent_addr->AsMirrorPtr() != nullptr) {
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 1de1aa11db..b5ec1e5341 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -26,6 +26,7 @@
 #include "base/timing_logger.h"
 #include "globals.h"
 #include "jni.h"
+#include "obj_ptr.h"
 #include "object_callbacks.h"
 #include "offsets.h"
 #include "thread_pool.h"
@@ -54,15 +55,15 @@ class ReferenceQueue {
   // Enqueue a reference if it is unprocessed. Thread safe to call from multiple
   // threads since it uses a lock to avoid a race between checking for the references presence and
   // adding it.
-  void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
+  void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
 
   // Enqueue a reference. The reference must be unprocessed.
   // Not thread safe, used when mutators are paused to minimize lock overhead.
-  void EnqueueReference(mirror::Reference* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Dequeue a reference from the queue and return that dequeued reference.
-  mirror::Reference* DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Enqueues finalizer references with white referents. White referents are blackened, moved to
   // the zombie field, and the referent field is cleared.
@@ -104,7 +105,7 @@ class ReferenceQueue {
   // calling AtomicEnqueueIfNotEnqueued.
   Mutex* const lock_;
   // The actual reference list. Only a root for the mark compact GC since it will be null for other
-  // GC types.
+  // GC types. Not an ObjPtr since it is accessed from multiple threads.
   mirror::Reference* list_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
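For orientation in the two files above: list_ points into an intrusive circular singly-linked list threaded through the pendingNext field; both insertion and removal happen at list_->GetPendingNext(), and a null pendingNext marks a reference as unprocessed. The new comment keeps list_ a raw mirror::Reference* because the field is touched from multiple threads, presumably because ObjPtr's debug-mode validity checking is per-thread (an assumption, not stated in the diff). The standalone model below reproduces the enqueue/dequeue logic so the invariants are easier to see; the names are illustrative, not ART code.

#include <cassert>
#include <cstddef>

// Standalone model of ReferenceQueue's intrusive circular list (illustrative).
struct Node {
  Node* pending_next = nullptr;
  bool IsUnprocessed() const { return pending_next == nullptr; }
};

class ModelQueue {
 public:
  void Enqueue(Node* ref) {
    assert(ref->IsUnprocessed());
    if (list_ == nullptr) {
      list_ = ref;  // 1-element cycle: ref->pending_next = ref, set below.
    } else {
      ref->pending_next = list_->pending_next;  // Splice in at the "head".
    }
    list_->pending_next = ref;
  }
  Node* Dequeue() {
    Node* ref = list_->pending_next;
    if (list_ == ref) {
      list_ = nullptr;  // Removing the last element empties the queue.
    } else {
      list_->pending_next = ref->pending_next;
    }
    ref->pending_next = nullptr;  // Mark unprocessed again.
    return ref;
  }
  bool IsEmpty() const { return list_ == nullptr; }

 private:
  Node* list_ = nullptr;
};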
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index 5b8a3c2963..3ca3353562 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -52,10 +52,10 @@ TEST_F(ReferenceQueueTest, EnqueueDequeue) {
 
   std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
   std::set<mirror::Reference*> dequeued;
-  dequeued.insert(queue.DequeuePendingReference());
+  dequeued.insert(queue.DequeuePendingReference().Ptr());
   ASSERT_TRUE(!queue.IsEmpty());
   ASSERT_EQ(queue.GetLength(), 1U);
-  dequeued.insert(queue.DequeuePendingReference());
+  dequeued.insert(queue.DequeuePendingReference().Ptr());
   ASSERT_EQ(queue.GetLength(), 0U);
   ASSERT_TRUE(queue.IsEmpty());
   ASSERT_EQ(refs, dequeued);
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 46b9e80792..4a3654be3e 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1186,13 +1186,13 @@ void UnstartedRuntime::UnstartedStringToCharArray(
 // This allows statically initializing ConcurrentHashMap and SynchronousQueue.
 void UnstartedRuntime::UnstartedReferenceGetReferent(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
-  mirror::Reference* const ref = down_cast<mirror::Reference*>(
+  ObjPtr<mirror::Reference> const ref = down_cast<mirror::Reference*>(
       shadow_frame->GetVRegReference(arg_offset));
   if (ref == nullptr) {
     AbortTransactionOrFail(self, "Reference.getReferent() with null object");
     return;
   }
-  mirror::Object* const referent =
+  ObjPtr<mirror::Object> const referent =
       Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
   result->SetL(referent);
 }
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 039989bcb4..a449b41087 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -19,6 +19,8 @@
 
 #include "reference.h"
 
+#include "obj_ptr-inl.h"
+
 namespace art {
 namespace mirror {
 
@@ -27,6 +29,24 @@ inline uint32_t Reference::ClassSize(PointerSize pointer_size) {
   return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
 }
 
+template<bool kTransactionActive>
+inline void Reference::SetReferent(ObjPtr<Object> referent) {
+  SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
+}
+
+inline void Reference::SetPendingNext(ObjPtr<Reference> pending_next) {
+  if (Runtime::Current()->IsActiveTransaction()) {
+    SetFieldObject<true>(PendingNextOffset(), pending_next);
+  } else {
+    SetFieldObject<false>(PendingNextOffset(), pending_next);
+  }
+}
+
+template<bool kTransactionActive>
+inline void FinalizerReference::SetZombie(ObjPtr<Object> zombie) {
+  return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
+}
+
 } // namespace mirror
 } // namespace art
 
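The definitions moved into reference-inl.h show two transaction-dispatch styles side by side: SetReferent and SetZombie take kTransactionActive as a compile-time template flag, while SetPendingNext branches on Runtime::Current()->IsActiveTransaction() at run time. A minimal standalone sketch of that split follows; RecordWrite and the global flag are placeholders invented for illustration, not ART APIs.

#include <iostream>

// Placeholder for transaction logging (assumption: ART records old values so
// an aborted transaction can roll stores back; this stand-in just prints).
void RecordWrite(const char* field) { std::cout << "logged " << field << "\n"; }

bool g_transaction_active = false;  // Stand-in for IsActiveTransaction().

struct Object {
  Object* referent = nullptr;
  Object* pending_next = nullptr;

  // Compile-time dispatch: callers that statically know the transaction state
  // instantiate the right variant and pay nothing for the check.
  template <bool kTransactionActive>
  void SetReferent(Object* value) {
    if (kTransactionActive) {  // Resolved at compile time.
      RecordWrite("referent");
    }
    referent = value;
  }

  // Run-time dispatch: used where the transaction state is not known
  // statically, mirroring SetPendingNext() in the hunk above.
  void SetPendingNext(Object* value) {
    if (g_transaction_active) {
      SetPendingNextImpl<true>(value);
    } else {
      SetPendingNextImpl<false>(value);
    }
  }

 private:
  template <bool kTransactionActive>
  void SetPendingNextImpl(Object* value) {
    if (kTransactionActive) {
      RecordWrite("pendingNext");
    }
    pending_next = value;
  }
};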
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 3c7f8c8e63..1d0b4c5b27 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "reference.h"
+#include "reference-inl.h"
 
 #include "art_method.h"
 #include "gc_root-inl.h"
@@ -24,7 +24,7 @@ namespace mirror {
 
 GcRoot<Class> Reference::java_lang_ref_Reference_;
 
-void Reference::SetClass(Class* java_lang_ref_Reference) {
+void Reference::SetClass(ObjPtr<Class> java_lang_ref_Reference) {
   CHECK(java_lang_ref_Reference_.IsNull());
   CHECK(java_lang_ref_Reference != nullptr);
   java_lang_ref_Reference_ = GcRoot<Class>(java_lang_ref_Reference);
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 6a8b32b62d..f2fa589500 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -20,6 +20,7 @@
 #include "base/enums.h"
 #include "class.h"
 #include "gc_root.h"
+#include "obj_ptr.h"
 #include "object.h"
 #include "object_callbacks.h"
 #include "read_barrier_option.h"
@@ -69,9 +70,7 @@ class MANAGED Reference : public Object {
                                                               ReferentOffset());
   }
   template<bool kTransactionActive>
-  void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
-    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
-  }
+  void SetReferent(ObjPtr<Object> referent) REQUIRES_SHARED(Locks::mutator_lock_);
   template<bool kTransactionActive>
   void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
@@ -82,14 +81,7 @@ class MANAGED Reference : public Object {
     return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
   }
 
-  void SetPendingNext(Reference* pending_next)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (Runtime::Current()->IsActiveTransaction()) {
-      SetFieldObject<true>(PendingNextOffset(), pending_next);
-    } else {
-      SetFieldObject<false>(PendingNextOffset(), pending_next);
-    }
-  }
+  void SetPendingNext(ObjPtr<Reference> pending_next) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if the reference's pendingNext is null, indicating it is
   // okay to process this reference.
@@ -112,7 +104,7 @@ class MANAGED Reference : public Object {
     DCHECK(!java_lang_ref_Reference_.IsNull());
     return java_lang_ref_Reference_.Read<kReadBarrierOption>();
   }
-  static void SetClass(Class* klass);
+  static void SetClass(ObjPtr<Class> klass);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -144,9 +136,8 @@ class MANAGED FinalizerReference : public Reference {
   }
 
   template<bool kTransactionActive>
-  void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
-    return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
-  }
+  void SetZombie(ObjPtr<Object> zombie) REQUIRES_SHARED(Locks::mutator_lock_);
+
   Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(ZombieOffset());
   }
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 95f6d517e5..bedca109aa 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -28,8 +28,8 @@ namespace art {
 static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) {
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::Reference> ref = soa.Decode<mirror::Reference>(javaThis);
-  mirror::Object* const referent =
-      Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref.Ptr());
+  ObjPtr<mirror::Object> const referent =
+      Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref);
   return soa.AddLocalReference<jobject>(referent);
 }
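Taken together, the call sites illustrate the migration discipline: ObjPtr values flow by value through GC, interpreter, and JNI code, while .Ptr() appears only at raw-pointer boundaries such as the std::set in the test, the IsInToSpace() check, and the multi-threaded list_ field. A short recap using the ObjPtrSketch wrapper from the first example (illustrative only):

#include <set>

struct MirrorRef {};  // Stand-in for mirror::Reference.

void Boundaries(ObjPtrSketch<MirrorRef> ref, MirrorRef* raw) {
  ObjPtrSketch<MirrorRef> wrapped = raw;    // Implicit: raw pointer in.
  std::set<MirrorRef*> seen;
  seen.insert(ref.Ptr());                   // Explicit .Ptr() out, for a raw-pointer container.
  MirrorRef* shared_field = wrapped.Ptr();  // Raw pointer for a cross-thread field.
  (void)shared_field;
}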