diff options
-rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 18
-rw-r--r-- | runtime/gc/reference_queue.cc | 8
-rw-r--r-- | runtime/mirror/class.h | 2
-rw-r--r-- | runtime/mirror/reference.h | 5
4 files changed, 22 insertions, 11 deletions
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index d413a50534..bd07bb8711 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -1714,12 +1714,19 @@ class ConcurrentCopying::RefFieldsVisitor { // Scan ref fields of an object. inline void ConcurrentCopying::Scan(mirror::Object* to_ref) { + if (kIsDebugBuild) { + // Avoid all read barriers during visit references to help performance. + Thread::Current()->ModifyDebugDisallowReadBarrier(1); + } DCHECK(!region_space_->IsInFromSpace(to_ref)); DCHECK_EQ(Thread::Current(), thread_running_gc_); RefFieldsVisitor visitor(this); // Disable the read barrier for a performance reason. to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( visitor, visitor); + if (kIsDebugBuild) { + Thread::Current()->ModifyDebugDisallowReadBarrier(-1); + } } // Process a field. @@ -1836,7 +1843,7 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by mirror::Class* int_array_class = mirror::IntArray::GetArrayClass(); CHECK(int_array_class != nullptr); AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class); - size_t component_size = int_array_class->GetComponentSize(); + size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>(); CHECK_EQ(component_size, sizeof(int32_t)); size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue(); if (data_offset > byte_size) { @@ -1849,13 +1856,14 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by } else { // Use an int array. 
dummy_obj->SetClass(int_array_class); - CHECK(dummy_obj->IsArrayInstance()); + CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>())); int32_t length = (byte_size - data_offset) / component_size; - dummy_obj->AsArray()->SetLength(length); - CHECK_EQ(dummy_obj->AsArray()->GetLength(), length) + mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>(); + dummy_arr->SetLength(length); + CHECK_EQ(dummy_arr->GetLength(), length) << "byte_size=" << byte_size << " length=" << length << " component_size=" << component_size << " data_offset=" << data_offset; - CHECK_EQ(byte_size, dummy_obj->SizeOf()) + CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>())) << "byte_size=" << byte_size << " length=" << length << " component_size=" << component_size << " data_offset=" << data_offset; } diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc index 6088a43ab1..62625c41b4 100644 --- a/runtime/gc/reference_queue.cc +++ b/runtime/gc/reference_queue.cc @@ -44,7 +44,9 @@ void ReferenceQueue::EnqueueReference(mirror::Reference* ref) { // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref; list_ = ref; } else { - mirror::Reference* head = list_->GetPendingNext(); + // The list is owned by the GC, everything that has been inserted must already be at least + // gray. + mirror::Reference* head = list_->GetPendingNext<kWithoutReadBarrier>(); DCHECK(head != nullptr); ref->SetPendingNext(head); } @@ -54,14 +56,14 @@ void ReferenceQueue::EnqueueReference(mirror::Reference* ref) { mirror::Reference* ReferenceQueue::DequeuePendingReference() { DCHECK(!IsEmpty()); - mirror::Reference* ref = list_->GetPendingNext(); + mirror::Reference* ref = list_->GetPendingNext<kWithoutReadBarrier>(); DCHECK(ref != nullptr); // Note: the following code is thread-safe because it is only called from ProcessReferences which // is single threaded. 
if (list_ == ref) { list_ = nullptr; } else { - mirror::Reference* next = ref->GetPendingNext(); + mirror::Reference* next = ref->GetPendingNext<kWithoutReadBarrier>(); list_->SetPendingNext(next); } ref->SetPendingNext(nullptr); diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 32ed337cca..3ba9e1aaab 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -471,7 +471,7 @@ class MANAGED Class FINAL : public Object { template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) { - return 1U << GetComponentSizeShift(); + return 1U << GetComponentSizeShift<kReadBarrierOption>(); } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h index 3baa12e40b..e8ad5fa1c8 100644 --- a/runtime/mirror/reference.h +++ b/runtime/mirror/reference.h @@ -76,8 +76,9 @@ class MANAGED Reference : public Object { SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr); } + template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier> Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) { - return GetFieldObject<Reference>(PendingNextOffset()); + return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset()); } void SetPendingNext(Reference* pending_next) @@ -102,7 +103,7 @@ class MANAGED Reference : public Object { // removed from the list after having determined the reference is not ready // to be enqueued on a java ReferenceQueue. bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) { - return GetPendingNext() == nullptr; + return GetPendingNext<kWithoutReadBarrier>() == nullptr; } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> |