Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/bitmap.h                |   1
-rw-r--r--  runtime/gc/accounting/card_table.cc           |   2
-rw-r--r--  runtime/gc/accounting/card_table.h            |  11
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h           |   1
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc      |   8
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc |   2
-rw-r--r--  runtime/gc/accounting/remembered_set.h        |   1
-rw-r--r--  runtime/gc/accounting/space_bitmap.h          |   4
-rw-r--r--  runtime/gc/allocation_listener.h              |   3
-rw-r--r--  runtime/gc/allocation_record.cc               |   1
-rw-r--r--  runtime/gc/allocation_record.h                |   4
-rw-r--r--  runtime/gc/allocator/rosalloc.cc              |   2
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h |   6
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc    | 216
-rw-r--r--  runtime/gc/collector/concurrent_copying.h     |  26
-rw-r--r--  runtime/gc/collector/garbage_collector.cc     |   3
-rw-r--r--  runtime/gc/collector/garbage_collector.h      |  81
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc    |   4
-rw-r--r--  runtime/gc/collector/iteration.h              | 100
-rw-r--r--  runtime/gc/collector/mark_compact.cc          |   4
-rw-r--r--  runtime/gc/collector/mark_compact.h           |   1
-rw-r--r--  runtime/gc/collector/mark_sweep.cc            |   4
-rw-r--r--  runtime/gc/collector/mark_sweep.h             |   1
-rw-r--r--  runtime/gc/collector/object_byte_pair.h       |  44
-rw-r--r--  runtime/gc/collector/partial_mark_sweep.cc    |   2
-rw-r--r--  runtime/gc/collector/semi_space.cc            |   1
-rw-r--r--  runtime/gc/collector/semi_space.h             |   1
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc     |   8
-rw-r--r--  runtime/gc/gc_cause.cc                        |   3
-rw-r--r--  runtime/gc/gc_cause.h                         |  10
-rw-r--r--  runtime/gc/heap-inl.h                         |   1
-rw-r--r--  runtime/gc/heap.cc                            |  84
-rw-r--r--  runtime/gc/heap.h                             |  40
-rw-r--r--  runtime/gc/heap_verification_test.cc          |  39
-rw-r--r--  runtime/gc/reference_processor-inl.h          |   2
-rw-r--r--  runtime/gc/reference_processor.cc             |   1
-rw-r--r--  runtime/gc/reference_processor.h              |   2
-rw-r--r--  runtime/gc/reference_queue.cc                 |   1
-rw-r--r--  runtime/gc/reference_queue.h                  |   4
-rw-r--r--  runtime/gc/scoped_gc_critical_section.cc      |   2
-rw-r--r--  runtime/gc/space/bump_pointer_space-inl.h     |  12
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc        |   8
-rw-r--r--  runtime/gc/space/bump_pointer_space.h         |   9
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc            |   1
-rw-r--r--  runtime/gc/space/image_space.cc               |   4
-rw-r--r--  runtime/gc/space/image_space.h                |   2
-rw-r--r--  runtime/gc/space/large_object_space.cc        |   9
-rw-r--r--  runtime/gc/space/region_space-inl.h           |  27
-rw-r--r--  runtime/gc/space/region_space.cc              |  15
-rw-r--r--  runtime/gc/space/region_space.h               |  10
-rw-r--r--  runtime/gc/space/rosalloc_space-inl.h         |  36
-rw-r--r--  runtime/gc/space/rosalloc_space.cc            |  34
-rw-r--r--  runtime/gc/space/space.cc                     |   3
-rw-r--r--  runtime/gc/space/space.h                      |   3
-rw-r--r--  runtime/gc/space/zygote_space.cc              |   4
-rw-r--r--  runtime/gc/task_processor_test.cc             |   2
-rw-r--r--  runtime/gc/verification.cc                    |  54
-rw-r--r--  runtime/gc/verification.h                     |  11
58 files changed, 670 insertions, 305 deletions
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index eb004726df..d039d88770 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -25,7 +25,6 @@
#include "base/mutex.h"
#include "globals.h"
-#include "object_callbacks.h"
namespace art {
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 450659791d..01b5896650 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -16,6 +16,8 @@
#include "card_table.h"
+#include <sys/mman.h>
+
#include "base/logging.h"
#include "base/systrace.h"
#include "card_table-inl.h"
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 68ef15d0cf..17acc763d1 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -47,10 +47,11 @@ template<size_t kAlignment> class SpaceBitmap;
// WriteBarrier, and from there to here.
class CardTable {
public:
- static constexpr size_t kCardShift = 7;
+ static constexpr size_t kCardShift = 10;
static constexpr size_t kCardSize = 1 << kCardShift;
static constexpr uint8_t kCardClean = 0x0;
static constexpr uint8_t kCardDirty = 0x70;
+ static constexpr uint8_t kCardAged = kCardDirty - 1;
static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
~CardTable();
@@ -154,6 +155,14 @@ class CardTable {
};
} // namespace accounting
+
+class AgeCardVisitor {
+ public:
+ uint8_t operator()(uint8_t card) const {
+ return (card == accounting::CardTable::kCardDirty) ? card - 1 : 0;
+ }
+};
+
} // namespace gc
} // namespace art
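
The widened kCardShift makes each card byte cover 1 << 10 = 1024 bytes of heap, and the new kCardAged value sits one below kCardDirty so a single byte can record three states. A minimal standalone sketch of the resulting state machine (constant values copied from the patch; the function name and test harness are illustrative, not ART's API):

#include <cassert>
#include <cstdint>

constexpr uint8_t kCardClean = 0x0;
constexpr uint8_t kCardDirty = 0x70;
constexpr uint8_t kCardAged  = kCardDirty - 1;  // 0x6f

// Mirrors AgeCardVisitor: a dirty card ages by one step, anything else clears.
uint8_t AgeCard(uint8_t card) {
  return (card == kCardDirty) ? card - 1 : 0;
}

int main() {
  assert(AgeCard(kCardDirty) == kCardAged);   // dirty survives one aging pass
  assert(AgeCard(kCardAged)  == kCardClean);  // aged clears on the next pass
  assert(AgeCard(kCardClean) == kCardClean);  // clean stays clean
  return 0;
}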
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 76247bce84..7097f87e91 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -19,7 +19,6 @@
#include "base/allocator.h"
#include "base/logging.h"
-#include "object_callbacks.h"
#include "space_bitmap.h"
namespace art {
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 34e30c177f..57c290ea94 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -28,7 +28,7 @@
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "space_bitmap-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
@@ -391,7 +391,7 @@ void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* a
uintptr_t end = start + CardTable::kCardSize;
live_bitmap->VisitMarkedRange(start,
end,
- [this, callback, arg](mirror::Object* obj) {
+ [callback, arg](mirror::Object* obj) {
callback(obj, arg);
});
}
@@ -402,7 +402,7 @@ void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* a
uintptr_t end = start + CardTable::kCardSize;
live_bitmap->VisitMarkedRange(start,
end,
- [this, callback, arg](mirror::Object* obj) {
+ [callback, arg](mirror::Object* obj) {
callback(obj, arg);
});
}
@@ -560,7 +560,7 @@ void ModUnionTableCardCache::VisitObjects(ObjectCallback* callback, void* arg) {
<< start << " " << *space_;
space_->GetLiveBitmap()->VisitMarkedRange(start,
start + CardTable::kCardSize,
- [this, callback, arg](mirror::Object* obj) {
+ [callback, arg](mirror::Object* obj) {
callback(obj, arg);
});
});
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 48a8742cc8..e5b8ea5609 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -21,7 +21,7 @@
#include "gc/space/space-inl.h"
#include "mirror/array-inl.h"
#include "space_bitmap-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 5594781672..c332f969ad 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -19,7 +19,6 @@
#include "base/allocator.h"
#include "globals.h"
-#include "object_callbacks.h"
#include "safe_map.h"
#include <set>
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index b13648894d..889f57b333 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -25,7 +25,6 @@
#include "base/mutex.h"
#include "globals.h"
-#include "object_callbacks.h"
namespace art {
@@ -35,6 +34,9 @@ namespace mirror {
} // namespace mirror
class MemMap;
+// Same as in object_callbacks.h. Just avoid the include.
+typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
+
namespace gc {
namespace accounting {
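
The typedef added above duplicates just the one signature this header needs, so space_bitmap.h no longer drags object_callbacks.h into every includer. A compilable sketch of the pattern (only the typedef shape matches the patch; VisitAll and the demo harness are hypothetical):

#include <cstdio>

namespace art {
namespace mirror { class Object; }  // forward declaration instead of an include

// Re-declares the callback signature locally, as the patch does.
typedef void (ObjectCallback)(mirror::Object* obj, void* arg);

// A consumer can now take the callback without ever seeing object_callbacks.h.
void VisitAll(ObjectCallback* callback, void* arg) {
  callback(nullptr, arg);  // demo only; a real visitor walks a bitmap
}
}  // namespace art

int main() {
  int visits = 0;
  art::VisitAll([](art::mirror::Object*, void* arg) { ++*static_cast<int*>(arg); },
                &visits);
  std::printf("visited %d object(s)\n", visits);
  return 0;
}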
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index f60bc0c834..21fa2142df 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -23,14 +23,13 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "gc_root.h"
namespace art {
namespace mirror {
class Object;
-}
+} // namespace mirror
class Thread;
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 122f7799df..2257b81e09 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
+#include "object_callbacks.h"
#include "stack.h"
#ifdef ART_TARGET_ANDROID
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 90cff6a8c4..d31e442cc9 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -22,18 +22,18 @@
#include "base/mutex.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "gc_root.h"
namespace art {
class ArtMethod;
+class IsMarkedVisitor;
class Thread;
namespace mirror {
class Class;
class Object;
-}
+} // namespace mirror
namespace gc {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 35a251fda8..d5d3540b1f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -30,7 +30,7 @@
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index d5c36bfb19..85a656ec51 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -19,11 +19,12 @@
#include "concurrent_copying.h"
+#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
-#include "mirror/object-readbarrier-inl.h"
#include "lock_word.h"
+#include "mirror/object-readbarrier-inl.h"
namespace art {
namespace gc {
@@ -152,7 +153,8 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
mirror::Object* ret;
- if (from_ref == nullptr) {
+ // We can get here before marking starts since we gray immune objects before the marking phase.
+ if (from_ref == nullptr || !Thread::Current()->GetIsGcMarking()) {
return from_ref;
}
// TODO: Consider removing this check when we are done investigating slow paths. b/30162165
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index ab2146a6b8..458e830eda 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -77,6 +77,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
is_marking_(false),
+ is_using_read_barrier_entrypoints_(false),
is_active_(false),
is_asserting_to_space_invariant_(false),
region_space_bitmap_(nullptr),
@@ -163,6 +164,15 @@ void ConcurrentCopying::RunPhases() {
ReaderMutexLock mu(self, *Locks::mutator_lock_);
InitializePhase();
}
+ if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
+ // Switch to read barrier mark entrypoints before we gray the objects. This is required in case
+ // a mutator sees a gray bit and dispatches on the entrypoint. (b/37876887).
+ ActivateReadBarrierEntrypoints();
+ // Gray dirty immune objects concurrently to reduce GC pause times. We re-process gray cards in
+ // the pause.
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ GrayAllDirtyImmuneObjects();
+ }
FlipThreadRoots();
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
@@ -192,6 +202,59 @@ void ConcurrentCopying::RunPhases() {
thread_running_gc_ = nullptr;
}
+class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closure {
+ public:
+ explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
+ : concurrent_copying_(concurrent_copying) {}
+
+ void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ // Note: self is not necessarily equal to thread since thread may be suspended.
+ Thread* self = Thread::Current();
+ DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+ << thread->GetState() << " thread " << thread << " self " << self;
+ // Switch to the read barrier entrypoints.
+ thread->SetReadBarrierEntrypoints();
+ // If thread is a running mutator, then act on behalf of the garbage collector.
+ // See the code in ThreadList::RunCheckpoint.
+ concurrent_copying_->GetBarrier().Pass(self);
+ }
+
+ private:
+ ConcurrentCopying* const concurrent_copying_;
+};
+
+class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure {
+ public:
+ explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
+ : concurrent_copying_(concurrent_copying) {}
+
+ void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
+ // to avoid a race with ThreadList::Register().
+ CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
+ concurrent_copying_->is_using_read_barrier_entrypoints_ = true;
+ }
+
+ private:
+ ConcurrentCopying* const concurrent_copying_;
+};
+
+void ConcurrentCopying::ActivateReadBarrierEntrypoints() {
+ Thread* const self = Thread::Current();
+ ActivateReadBarrierEntrypointsCheckpoint checkpoint(this);
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ gc_barrier_->Init(self, 0);
+ ActivateReadBarrierEntrypointsCallback callback(this);
+ const size_t barrier_count = thread_list->RunCheckpoint(&checkpoint, &callback);
+ // If there are no threads to wait for, which implies that all the checkpoint functions are
+ // finished, then there is no need to release the mutator lock.
+ if (barrier_count == 0) {
+ return;
+ }
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ gc_barrier_->Increment(self, barrier_count);
+}
+
void ConcurrentCopying::BindBitmaps() {
Thread* self = Thread::Current();
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
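
ActivateReadBarrierEntrypoints() above follows ART's standard checkpoint idiom: run a closure on every thread, have each thread pass a barrier, and have the initiator block until the count drains. A reduced sketch of that idiom in portable C++ (CountingBarrier and the worker setup are illustrative stand-ins for Barrier and ThreadList::RunCheckpoint, not the real API):

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class CountingBarrier {
 public:
  void Init(int count) { std::lock_guard<std::mutex> lk(mu_); count_ = count; }
  void Pass() {  // called by each checkpointed thread when its work is done
    std::lock_guard<std::mutex> lk(mu_);
    if (--count_ == 0) cv_.notify_all();
  }
  void Wait() {  // called by the initiating (GC) thread
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return count_ == 0; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_ = 0;
};

int main() {
  const int kThreads = 4;
  CountingBarrier barrier;
  barrier.Init(kThreads);  // like gc_barrier_->Init(self, 0) + barrier_count
  std::vector<std::thread> workers;
  for (int i = 0; i < kThreads; ++i) {
    workers.emplace_back([&barrier] {
      // ... per-thread checkpoint work, e.g. switching entrypoints ...
      barrier.Pass();
    });
  }
  barrier.Wait();  // like gc_barrier_->Increment(self, barrier_count)
  for (auto& t : workers) t.join();
  return 0;
}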
@@ -236,7 +299,7 @@ void ConcurrentCopying::InitializePhase() {
objects_moved_.StoreRelaxed(0);
GcCause gc_cause = GetCurrentIteration()->GetGcCause();
if (gc_cause == kGcCauseExplicit ||
- gc_cause == kGcCauseForNativeAlloc ||
+ gc_cause == kGcCauseForNativeAllocBlocking ||
gc_cause == kGcCauseCollectorTransition ||
GetCurrentIteration()->GetClearSoftReferences()) {
force_evacuate_all_ = true;
@@ -296,7 +359,7 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
// only.
- thread->VisitRoots(this);
+ thread->VisitRoots(this, kVisitRootFlagAllRoots);
concurrent_copying_->GetBarrier().Pass(self);
}
@@ -352,9 +415,12 @@ class ConcurrentCopying::FlipCallback : public Closure {
if (kVerifyNoMissingCardMarks) {
cc->VerifyNoMissingCardMarks();
}
- CHECK(thread == self);
+ CHECK_EQ(thread, self);
Locks::mutator_lock_->AssertExclusiveHeld(self);
- cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
+ {
+ TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
+ cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
+ }
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
cc->RecordLiveStackFreezeSize(self);
@@ -368,18 +434,25 @@ class ConcurrentCopying::FlipCallback : public Closure {
}
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
CHECK(Runtime::Current()->IsAotCompiler());
- TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
+ TimingLogger::ScopedTiming split3("(Paused)VisitTransactionRoots", cc->GetTimings());
Runtime::Current()->VisitTransactionRoots(cc);
}
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
- cc->GrayAllDirtyImmuneObjects();
+ cc->GrayAllNewlyDirtyImmuneObjects();
if (kIsDebugBuild) {
// Check that all non-gray immune objects only reference immune objects.
cc->VerifyGrayImmuneObjects();
}
}
- cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(
- WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
+ // May be null during runtime creation; in this case, leave java_lang_Object_ null.
+ // This is safe since single-threaded behavior should mean FillWithDummyObject does not
+ // happen when java_lang_Object_ is null.
+ if (WellKnownClasses::java_lang_Object != nullptr) {
+ cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(
+ WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
+ } else {
+ cc->java_lang_Object_ = nullptr;
+ }
}
private:
@@ -512,8 +585,8 @@ class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- // Objects not on dirty cards should never have references to newly allocated regions.
- if (!collector->heap_->GetCardTable()->IsDirty(obj)) {
+ // Objects not on dirty or aged cards should never have references to newly allocated regions.
+ if (collector->heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
visitor,
@@ -543,25 +616,8 @@ void ConcurrentCopying::FlipThreadRoots() {
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
- // This is the point where Concurrent-Copying will pause all threads. We report a pause here, if
- // necessary. This is slightly over-reporting, as this includes the time to actually suspend
- // threads.
- {
- GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->StartPause();
- }
- }
-
- size_t barrier_count = Runtime::Current()->FlipThreadRoots(
- &thread_flip_visitor, &flip_callback, this);
-
- {
- GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->EndPause();
- }
- }
+ size_t barrier_count = Runtime::Current()->GetThreadList()->FlipThreadRoots(
+ &thread_flip_visitor, &flip_callback, this, GetHeap()->GetGcPauseListener());
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
@@ -576,53 +632,100 @@ void ConcurrentCopying::FlipThreadRoots() {
}
}
+template <bool kConcurrent>
class ConcurrentCopying::GrayImmuneObjectVisitor {
public:
- explicit GrayImmuneObjectVisitor() {}
+ explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {}
ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kUseBakerReadBarrier) {
- if (kIsDebugBuild) {
- Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::WhiteState()) {
+ if (kConcurrent) {
+ Locks::mutator_lock_->AssertSharedHeld(self_);
+ obj->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
+ // Mod union table VisitObjects may visit the same object multiple times so we can't check
+ // the result of the atomic set.
+ } else {
+ Locks::mutator_lock_->AssertExclusiveHeld(self_);
+ obj->SetReadBarrierState(ReadBarrier::GrayState());
}
- obj->SetReadBarrierState(ReadBarrier::GrayState());
}
}
static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
- reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
+ reinterpret_cast<GrayImmuneObjectVisitor<kConcurrent>*>(arg)->operator()(obj);
}
+
+ private:
+ Thread* const self_;
};
void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
- TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
- gc::Heap* const heap = Runtime::Current()->GetHeap();
- accounting::CardTable* const card_table = heap->GetCardTable();
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ Thread* const self = Thread::Current();
+ using VisitorType = GrayImmuneObjectVisitor</* kConcurrent */ true>;
+ VisitorType visitor(self);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
- GrayImmuneObjectVisitor visitor;
- accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
// Mark all the objects on dirty cards since these may point to objects in other spaces.
// Once these are marked, the GC will eventually clear them later.
// Table is non null for boot image and zygote spaces. It is only null for application image
// spaces.
if (table != nullptr) {
- // TODO: Consider adding precleaning outside the pause.
table->ProcessCards();
- table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
- // Since the cards are recorded in the mod-union table and this is paused, we can clear
- // the cards for the space (to madvise).
+ table->VisitObjects(&VisitorType::Callback, &visitor);
+ // Don't clear cards here since we need to rescan in the pause. If we cleared the cards here,
+ // there would be races with the mutator marking new cards.
+ } else {
+ // Keep cards aged if we don't have a mod-union table since we may need to scan them in future
+ // GCs. This case is for app images.
+ card_table->ModifyCardsAtomic(
+ space->Begin(),
+ space->End(),
+ [](uint8_t card) {
+ return (card != gc::accounting::CardTable::kCardClean)
+ ? gc::accounting::CardTable::kCardAged
+ : card;
+ },
+ /* card modified visitor */ VoidFunctor());
+ card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardAged);
+ }
+ }
+}
+
+void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
+ TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ using VisitorType = GrayImmuneObjectVisitor</* kConcurrent */ false>;
+ Thread* const self = Thread::Current();
+ VisitorType visitor(self);
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+
+ // Don't need to scan aged cards since we did these before the pause. Note that scanning cards
+ // also handles the mod-union table cards.
+ card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardDirty);
+ if (table != nullptr) {
+ // Add the cards to the mod-union table so that we can clear cards to save RAM.
+ table->ProcessCards();
TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
card_table->ClearCardRange(space->Begin(),
AlignDown(space->End(), accounting::CardTable::kCardSize));
- } else {
- // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
- // pause because app image spaces are all dirty pages anyways.
- card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
}
}
- // Since all of the objects that may point to other spaces are marked, we can avoid all the read
+ // Since all of the objects that may point to other spaces are gray, we can avoid all the read
// barriers in the immune spaces.
updated_all_immune_objects_.StoreRelaxed(true);
}
@@ -651,6 +754,7 @@ class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
+ // Only need to scan gray objects.
if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
collector_->ScanImmuneObject(obj);
// Done scanning the object, go back to white.
@@ -700,6 +804,7 @@ void ConcurrentCopying::MarkingPhase() {
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
} else {
+ // TODO: Scan only the aged cards.
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->Limit()),
visitor);
@@ -869,6 +974,12 @@ class ConcurrentCopying::DisableMarkingCallback : public Closure {
// to avoid a race with ThreadList::Register().
CHECK(concurrent_copying_->is_marking_);
concurrent_copying_->is_marking_ = false;
+ if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
+ CHECK(concurrent_copying_->is_using_read_barrier_entrypoints_);
+ concurrent_copying_->is_using_read_barrier_entrypoints_ = false;
+ } else {
+ CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
+ }
}
private:
@@ -2082,6 +2193,7 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by
size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
if (data_offset > byte_size) {
// An int array is too big. Use java.lang.Object.
+ CHECK(java_lang_Object_ != nullptr);
AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
dummy_obj->SetClass(java_lang_Object_);
@@ -2175,7 +2287,9 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
// Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
// objects, but it's ok and necessary.
size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
- size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+ size_t region_space_alloc_size = (obj_size <= space::RegionSpace::kRegionSize)
+ ? RoundUp(obj_size, space::RegionSpace::kAlignment)
+ : RoundUp(obj_size, space::RegionSpace::kRegionSize);
size_t region_space_bytes_allocated = 0U;
size_t non_moving_space_bytes_allocated = 0U;
size_t bytes_allocated = 0U;
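
The new size computation in Copy() keeps small objects rounded to kAlignment but rounds anything larger than one region up to a whole number of regions, matching how the region space can only hand out large allocations region by region. A sketch of the arithmetic with illustrative constants (ART's real values live in space::RegionSpace):

#include <cassert>
#include <cstddef>

constexpr size_t kAlignment  = 8;           // illustrative object alignment
constexpr size_t kRegionSize = 256 * 1024;  // illustrative region size

constexpr size_t RoundUp(size_t x, size_t n) {  // n must be a power of two
  return (x + n - 1) & ~(n - 1);
}

size_t RegionSpaceAllocSize(size_t obj_size) {
  return (obj_size <= kRegionSize) ? RoundUp(obj_size, kAlignment)
                                   : RoundUp(obj_size, kRegionSize);
}

int main() {
  assert(RegionSpaceAllocSize(20) == 24);                    // small: align only
  assert(RegionSpaceAllocSize(kRegionSize) == kRegionSize);  // exactly one region
  assert(RegionSpaceAllocSize(kRegionSize + 1) == 2 * kRegionSize);  // two regions
  return 0;
}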
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index e4099c8a57..7b4340ee09 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -21,10 +21,7 @@
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
-#include "object_callbacks.h"
#include "offsets.h"
-#include "gc/accounting/space_bitmap.h"
-#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"
@@ -35,11 +32,16 @@ namespace art {
class Closure;
class RootInfo;
+namespace mirror {
+class Object;
+} // namespace mirror
+
namespace gc {
namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
+ template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
@@ -118,6 +120,11 @@ class ConcurrentCopying : public GarbageCollector {
bool IsMarking() const {
return is_marking_;
}
+ // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
+ // creates a small window where we might dispatch on these entrypoints.
+ bool IsUsingReadBarrierEntrypoints() const {
+ return is_using_read_barrier_entrypoints_;
+ }
bool IsActive() const {
return is_active_;
}
@@ -168,6 +175,9 @@ class ConcurrentCopying : public GarbageCollector {
void GrayAllDirtyImmuneObjects()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ void GrayAllNewlyDirtyImmuneObjects()
+ REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
void VerifyGrayImmuneObjects()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
@@ -253,6 +263,8 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
+ // Set the read barrier mark entrypoints to non-null.
+ void ActivateReadBarrierEntrypoints();
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
@@ -269,10 +281,12 @@ class ConcurrentCopying : public GarbageCollector {
GUARDED_BY(mark_stack_lock_);
Thread* thread_running_gc_;
bool is_marking_; // True while marking is ongoing.
+ // True while we might dispatch on the read barrier entrypoints.
+ bool is_using_read_barrier_entrypoints_;
bool is_active_; // True while the collection is ongoing.
bool is_asserting_to_space_invariant_; // True while asserting the to-space invariant.
ImmuneSpaces immune_spaces_;
- accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
+ accounting::ContinuousSpaceBitmap* region_space_bitmap_;
// A cache of Heap::GetMarkBitmap().
accounting::HeapBitmap* heap_mark_bitmap_;
size_t live_stack_freeze_size_;
@@ -331,6 +345,8 @@ class ConcurrentCopying : public GarbageCollector {
// ObjPtr since the GC may transition to suspended and runnable between phases.
mirror::Class* java_lang_Object_;
+ class ActivateReadBarrierEntrypointsCallback;
+ class ActivateReadBarrierEntrypointsCheckpoint;
class AssertToSpaceInvariantFieldVisitor;
class AssertToSpaceInvariantObjectVisitor;
class AssertToSpaceInvariantRefsVisitor;
@@ -340,7 +356,7 @@ class ConcurrentCopying : public GarbageCollector {
class DisableMarkingCheckpoint;
class DisableWeakRefAccessCallback;
class FlipCallback;
- class GrayImmuneObjectVisitor;
+ template <bool kConcurrent> class GrayImmuneObjectVisitor;
class ImmuneSpaceScanObjVisitor;
class LostCopyVisitor;
class RefFieldsVisitor;
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 1e4196b1ac..c5a341fc80 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -31,7 +31,8 @@
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 14d049971f..dec206be30 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -27,6 +27,8 @@
#include "gc/gc_cause.h"
#include "gc_root.h"
#include "gc_type.h"
+#include "iteration.h"
+#include "object_byte_pair.h"
#include "object_callbacks.h"
namespace art {
@@ -43,85 +45,6 @@ class Heap;
namespace collector {
-struct ObjectBytePair {
- explicit ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0)
- : objects(num_objects), bytes(num_bytes) {}
- void Add(const ObjectBytePair& other) {
- objects += other.objects;
- bytes += other.bytes;
- }
- // Number of objects which were freed.
- uint64_t objects;
- // Freed bytes are signed since the GC can free negative bytes if it promotes objects to a space
- // which has a larger allocation size.
- int64_t bytes;
-};
-
-// A information related single garbage collector iteration. Since we only ever have one GC running
-// at any given time, we can have a single iteration info.
-class Iteration {
- public:
- Iteration();
- // Returns how long the mutators were paused in nanoseconds.
- const std::vector<uint64_t>& GetPauseTimes() const {
- return pause_times_;
- }
- TimingLogger* GetTimings() {
- return &timings_;
- }
- // Returns how long the GC took to complete in nanoseconds.
- uint64_t GetDurationNs() const {
- return duration_ns_;
- }
- int64_t GetFreedBytes() const {
- return freed_.bytes;
- }
- int64_t GetFreedLargeObjectBytes() const {
- return freed_los_.bytes;
- }
- uint64_t GetFreedObjects() const {
- return freed_.objects;
- }
- uint64_t GetFreedLargeObjects() const {
- return freed_los_.objects;
- }
- uint64_t GetFreedRevokeBytes() const {
- return freed_bytes_revoke_;
- }
- void SetFreedRevoke(uint64_t freed) {
- freed_bytes_revoke_ = freed;
- }
- void Reset(GcCause gc_cause, bool clear_soft_references);
- // Returns the estimated throughput of the iteration.
- uint64_t GetEstimatedThroughput() const;
- bool GetClearSoftReferences() const {
- return clear_soft_references_;
- }
- void SetClearSoftReferences(bool clear_soft_references) {
- clear_soft_references_ = clear_soft_references;
- }
- GcCause GetGcCause() const {
- return gc_cause_;
- }
-
- private:
- void SetDurationNs(uint64_t duration) {
- duration_ns_ = duration;
- }
-
- GcCause gc_cause_;
- bool clear_soft_references_;
- uint64_t duration_ns_;
- TimingLogger timings_;
- ObjectBytePair freed_;
- ObjectBytePair freed_los_;
- uint64_t freed_bytes_revoke_; // see Heap::num_bytes_freed_revoke_.
- std::vector<uint64_t> pause_times_;
-
- friend class GarbageCollector;
- DISALLOW_COPY_AND_ASSIGN(Iteration);
-};
-
class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
public:
class SCOPED_LOCKABLE ScopedPause {
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index cf93ec614d..9823708606 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -14,12 +14,14 @@
* limitations under the License.
*/
+#include <sys/mman.h>
+
#include "common_runtime_test.h"
#include "gc/collector/immune_spaces.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "oat_file.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/gc/collector/iteration.h b/runtime/gc/collector/iteration.h
new file mode 100644
index 0000000000..fbe41664f7
--- /dev/null
+++ b/runtime/gc/collector/iteration.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_ITERATION_H_
+#define ART_RUNTIME_GC_COLLECTOR_ITERATION_H_
+
+#include <inttypes.h>
+#include <vector>
+
+#include "android-base/macros.h"
+#include "base/timing_logger.h"
+#include "object_byte_pair.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+// Information related to a single garbage collector iteration. Since we only ever have one GC
+// at any given time, we can have a single iteration info.
+class Iteration {
+ public:
+ Iteration();
+ // Returns how long the mutators were paused in nanoseconds.
+ const std::vector<uint64_t>& GetPauseTimes() const {
+ return pause_times_;
+ }
+ TimingLogger* GetTimings() {
+ return &timings_;
+ }
+ // Returns how long the GC took to complete in nanoseconds.
+ uint64_t GetDurationNs() const {
+ return duration_ns_;
+ }
+ int64_t GetFreedBytes() const {
+ return freed_.bytes;
+ }
+ int64_t GetFreedLargeObjectBytes() const {
+ return freed_los_.bytes;
+ }
+ uint64_t GetFreedObjects() const {
+ return freed_.objects;
+ }
+ uint64_t GetFreedLargeObjects() const {
+ return freed_los_.objects;
+ }
+ uint64_t GetFreedRevokeBytes() const {
+ return freed_bytes_revoke_;
+ }
+ void SetFreedRevoke(uint64_t freed) {
+ freed_bytes_revoke_ = freed;
+ }
+ void Reset(GcCause gc_cause, bool clear_soft_references);
+ // Returns the estimated throughput of the iteration.
+ uint64_t GetEstimatedThroughput() const;
+ bool GetClearSoftReferences() const {
+ return clear_soft_references_;
+ }
+ void SetClearSoftReferences(bool clear_soft_references) {
+ clear_soft_references_ = clear_soft_references;
+ }
+ GcCause GetGcCause() const {
+ return gc_cause_;
+ }
+
+ private:
+ void SetDurationNs(uint64_t duration) {
+ duration_ns_ = duration;
+ }
+
+ GcCause gc_cause_;
+ bool clear_soft_references_;
+ uint64_t duration_ns_;
+ TimingLogger timings_;
+ ObjectBytePair freed_;
+ ObjectBytePair freed_los_;
+ uint64_t freed_bytes_revoke_; // see Heap::num_bytes_freed_revoke_.
+ std::vector<uint64_t> pause_times_;
+
+ friend class GarbageCollector;
+ DISALLOW_COPY_AND_ASSIGN(Iteration);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_ITERATION_H_
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index cab293f23c..aef98dee58 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -32,7 +32,7 @@
#include "mirror/object-refvisitor-inl.h"
#include "runtime.h"
#include "stack.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
@@ -140,7 +140,7 @@ inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
}
} else {
DCHECK(!space_->HasAddress(obj));
- auto slow_path = [this](const mirror::Object* ref)
+ auto slow_path = [](const mirror::Object* ref)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Marking a large object, make sure it's aligned as a sanity check.
if (!IsAligned<kPageSize>(ref)) {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 85727c25c2..0bf4095ac3 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -28,7 +28,6 @@
#include "gc/accounting/heap_bitmap.h"
#include "immune_spaces.h"
#include "lock_word.h"
-#include "object_callbacks.h"
#include "offsets.h"
namespace art {
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index f591cf09ca..fb82b4d270 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -42,7 +42,7 @@
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
@@ -1141,7 +1141,7 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
Thread* const self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
- thread->VisitRoots(this);
+ thread->VisitRoots(this, kVisitRootFlagAllRoots);
if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 5a9b9f8765..b9e06f9688 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -27,7 +27,6 @@
#include "gc_root.h"
#include "gc/accounting/heap_bitmap.h"
#include "immune_spaces.h"
-#include "object_callbacks.h"
#include "offsets.h"
namespace art {
diff --git a/runtime/gc/collector/object_byte_pair.h b/runtime/gc/collector/object_byte_pair.h
new file mode 100644
index 0000000000..16ef06b6dd
--- /dev/null
+++ b/runtime/gc/collector/object_byte_pair.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_OBJECT_BYTE_PAIR_H_
+#define ART_RUNTIME_GC_COLLECTOR_OBJECT_BYTE_PAIR_H_
+
+#include <inttypes.h>
+
+namespace art {
+namespace gc {
+namespace collector {
+
+struct ObjectBytePair {
+ explicit ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0)
+ : objects(num_objects), bytes(num_bytes) {}
+ void Add(const ObjectBytePair& other) {
+ objects += other.objects;
+ bytes += other.bytes;
+ }
+ // Number of objects which were freed.
+ uint64_t objects;
+ // Freed bytes are signed since the GC can free negative bytes if it promotes objects to a space
+ // which has a larger allocation size.
+ int64_t bytes;
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_OBJECT_BYTE_PAIR_H_
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index 984779484e..f6ca867e69 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -19,7 +19,7 @@
#include "gc/heap.h"
#include "gc/space/space.h"
#include "partial_mark_sweep.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 41e605104c..d3798924ee 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -193,6 +193,7 @@ void SemiSpace::MarkingPhase() {
if (generational_) {
if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+ GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9d6e74dde4..d3858baaf5 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -27,7 +27,6 @@
#include "gc/accounting/heap_bitmap.h"
#include "immune_spaces.h"
#include "mirror/object_reference.h"
-#include "object_callbacks.h"
#include "offsets.h"
namespace art {
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index a2dbe3f7a0..98fdfac17b 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -14,11 +14,15 @@
* limitations under the License.
*/
+#include "sticky_mark_sweep.h"
+
+#include "gc/accounting/atomic_stack.h"
+#include "gc/accounting/card_table.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "sticky_mark_sweep.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 65861167ce..a3a2051934 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -25,11 +25,12 @@ namespace gc {
const char* PrettyCause(GcCause cause) {
switch (cause) {
+ case kGcCauseNone: return "None";
case kGcCauseForAlloc: return "Alloc";
case kGcCauseBackground: return "Background";
case kGcCauseExplicit: return "Explicit";
case kGcCauseForNativeAlloc: return "NativeAlloc";
- case kGcCauseForNativeAllocBackground: return "NativeAllocBackground";
+ case kGcCauseForNativeAllocBlocking: return "NativeAllocBlocking";
case kGcCauseCollectorTransition: return "CollectorTransition";
case kGcCauseDisableMovingGc: return "DisableMovingGc";
case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index e3813928d8..78496f3ead 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -24,6 +24,8 @@ namespace gc {
// What caused the GC?
enum GcCause {
+ // Invalid GC cause used as a placeholder.
+ kGcCauseNone,
// GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
// retrying allocation.
kGcCauseForAlloc,
@@ -31,10 +33,12 @@ enum GcCause {
kGcCauseBackground,
// An explicit System.gc() call.
kGcCauseExplicit,
- // GC triggered for a native allocation.
+ // GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
+ // (This may be a blocking GC depending on whether we run a non-concurrent collector).
kGcCauseForNativeAlloc,
- // Background GC triggered for a native allocation.
- kGcCauseForNativeAllocBackground,
+ // GC triggered for a native allocation when NativeAllocationBlockingGcWatermark is exceeded.
+ // (This is always a blocking GC).
+ kGcCauseForNativeAllocBlocking,
// GC triggered for a collector transition.
kGcCauseCollectorTransition,
// Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 79086da703..060f12db33 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -21,6 +21,7 @@
#include "allocation_listener.h"
#include "base/time_utils.h"
+#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2534e32d32..a245e97e2a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -37,10 +37,10 @@
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "dex_file-inl.h"
-#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
+#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
@@ -63,6 +63,7 @@
#include "gc/verification.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc_pause_listener.h"
+#include "gc_root.h"
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
@@ -150,8 +151,13 @@ static constexpr bool kUsePartialTlabs = true;
static uint8_t* const kPreferredAllocSpaceBegin =
reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
#else
-// For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
+#ifdef __ANDROID__
+// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
+#else
+// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
+static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
+#endif
#endif
static inline bool CareAboutPauseTimes() {
@@ -210,6 +216,7 @@ Heap::Heap(size_t initial_size,
disable_thread_flip_count_(0),
thread_flip_running_(false),
collector_type_running_(kCollectorTypeNone),
+ last_gc_cause_(kGcCauseNone),
thread_running_gc_(nullptr),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -558,6 +565,7 @@ Heap::Heap(size_t initial_size,
native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
*native_blocking_gc_lock_));
+ native_blocking_gc_is_assigned_ = false;
native_blocking_gc_in_progress_ = false;
native_blocking_gcs_finished_ = 0;
@@ -975,7 +983,46 @@ void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
// TODO: Switch to standard begin and end to use a range-based loop.
for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
mirror::Object* const obj = it->AsMirrorPtr();
- if (obj != nullptr && obj->GetClass() != nullptr) {
+
+ mirror::Class* kls = nullptr;
+ if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
+ // Below invariant is safe regardless of what space the Object is in.
+ // For speed reasons, only perform it when Rosalloc could possibly be used.
+ // (Disabled for read barriers because it never uses Rosalloc).
+ // (See the DCHECK in RosAllocSpace constructor).
+ if (!kUseReadBarrier) {
+ // Rosalloc has a race in allocation. Objects can be written into the allocation
+ // stack before their header writes are visible to this thread.
+ // See b/28790624 for more details.
+ //
+ // obj.class will either be pointing to a valid Class*, or it will point
+ // to a rosalloc free buffer.
+ //
+ // If it's pointing to a valid Class* then that Class's Class will be the
+ // ClassClass (whose Class is itself).
+ //
+ // A rosalloc free buffer will point to another rosalloc free buffer
+ // (or to null), and never to itself.
+ //
+ // Either way, dereferencing while it's non-null is safe because it will
+ // always point to another valid pointer or to null.
+ mirror::Class* klsClass = kls->GetClass();
+
+ if (klsClass == nullptr) {
+ continue;
+ } else if (klsClass->GetClass() != klsClass) {
+ continue;
+ }
+ } else {
+ // Ensure the invariant is not broken for non-rosalloc cases.
+ DCHECK(Heap::rosalloc_space_ == nullptr)
+ << "unexpected rosalloc with read barriers";
+ DCHECK(kls->GetClass() != nullptr)
+ << "invalid object: class does not have a class";
+ DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
+ << "invalid object: class's class is not ClassClass";
+ }
+
// Avoid the race condition caused by the object not yet being written into the allocation
// stack or the class not yet being written in the object. Or, if
// kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
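
The filter above relies on a self-referential invariant: for any valid object, its class's class is java.lang.Class, which is its own class; a rosalloc free buffer instead chains to another free buffer or to null and can never be self-referential. A toy model of the check under that stated invariant (plain structs, not ART's mirror types):

#include <cassert>

struct Klass { Klass* klass; };  // toy stand-in for mirror::Class
struct Obj   { Klass* klass; };  // toy stand-in for mirror::Object

// True only when obj's class chain satisfies the Class-of-Class invariant;
// a torn header pointing into a rosalloc free list fails the null/self test.
bool LooksLikeValidObject(const Obj* obj) {
  if (obj == nullptr || obj->klass == nullptr) return false;
  Klass* kls_class = obj->klass->klass;
  return kls_class != nullptr && kls_class->klass == kls_class;
}

int main() {
  Klass class_class{nullptr};
  class_class.klass = &class_class;  // java.lang.Class is its own class
  Klass string_class{&class_class};
  Obj valid{&string_class};
  assert(LooksLikeValidObject(&valid));

  Klass free_buf2{nullptr};          // "free buffer" chain: next -> null
  Klass free_buf1{&free_buf2};
  Obj torn{&free_buf1};              // header still points into the free list
  assert(!LooksLikeValidObject(&torn));
  return 0;
}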
@@ -1414,6 +1461,7 @@ void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(cause, self);
collector_type_running_ = collector_type;
+ last_gc_cause_ = cause;
thread_running_gc_ = self;
}
@@ -2688,6 +2736,10 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
// old_native_bytes_allocated_ now that GC has been triggered, resetting
// new_native_bytes_allocated_ to zero in the process.
old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+ if (gc_cause == kGcCauseForNativeAllocBlocking) {
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ native_blocking_gc_in_progress_ = true;
+ }
}
DCHECK_LT(gc_type, collector::kGcTypeMax);
@@ -3489,6 +3541,7 @@ collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
collector::GcType last_gc_type = collector::kGcTypeNone;
+ GcCause last_gc_cause = kGcCauseNone;
uint64_t wait_start = NanoTime();
while (collector_type_running_ != kCollectorTypeNone) {
if (self != task_processor_->GetRunningThread()) {
@@ -3503,12 +3556,13 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
// We must wait, change thread state then sleep on gc_complete_cond_;
gc_complete_cond_->Wait(self);
last_gc_type = last_gc_type_;
+ last_gc_cause = last_gc_cause_;
}
uint64_t wait_time = NanoTime() - wait_start;
total_wait_time_ += wait_time;
if (wait_time > long_pause_log_threshold_) {
- LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
- << " for cause " << cause;
+ LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
+ << PrettyDuration(wait_time);
}
if (self != task_processor_->GetRunningThread()) {
// The current thread is about to run a collection. If the thread
@@ -3519,6 +3573,7 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
// it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
if (cause == kGcCauseForAlloc ||
cause == kGcCauseForNativeAlloc ||
+ cause == kGcCauseForNativeAllocBlocking ||
cause == kGcCauseDisableMovingGc) {
VLOG(gc) << "Starting a blocking GC " << cause;
}
@@ -3920,33 +3975,36 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// finish before addressing the fact that we exceeded the blocking
// watermark again.
do {
+ ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
native_blocking_gc_cond_->Wait(self);
} while (native_blocking_gcs_finished_ == initial_gcs_finished);
initial_gcs_finished++;
}
// It's possible multiple threads have seen that we exceeded the
- // blocking watermark. Ensure that only one of those threads runs the
- // blocking GC. The rest of the threads should instead wait for the
- // blocking GC to complete.
+ // blocking watermark. Ensure that only one of those threads is assigned
+ // to run the blocking GC. The rest of the threads should instead wait
+ // for the blocking GC to complete.
if (native_blocking_gcs_finished_ == initial_gcs_finished) {
- if (native_blocking_gc_in_progress_) {
+ if (native_blocking_gc_is_assigned_) {
do {
+ ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
native_blocking_gc_cond_->Wait(self);
} while (native_blocking_gcs_finished_ == initial_gcs_finished);
} else {
- native_blocking_gc_in_progress_ = true;
+ native_blocking_gc_is_assigned_ = true;
run_gc = true;
}
}
}
if (run_gc) {
- CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
RunFinalization(env, kNativeAllocationFinalizeTimeout);
CHECK(!env->ExceptionCheck());
MutexLock mu(self, *native_blocking_gc_lock_);
+ native_blocking_gc_is_assigned_ = false;
native_blocking_gc_in_progress_ = false;
native_blocking_gcs_finished_++;
native_blocking_gc_cond_->Broadcast(self);
@@ -3956,7 +4014,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAllocBackground, /*force_full*/true);
+ RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
@@ -3998,7 +4056,7 @@ void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte
<< " IsVariableSize=" << c->IsVariableSize()
<< " ObjectSize=" << c->GetObjectSize()
<< " sizeof(Class)=" << sizeof(mirror::Class)
- << " klass=" << c.Ptr();
+ << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index aa123d8736..3484e0297d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -26,17 +26,14 @@
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/time_utils.h"
-#include "gc/accounting/atomic_stack.h"
-#include "gc/accounting/card_table.h"
-#include "gc/accounting/read_barrier_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
+#include "gc/collector/iteration.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
#include "handle.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "offsets.h"
#include "process_state.h"
#include "safe_map.h"
@@ -45,13 +42,18 @@
namespace art {
class ConditionVariable;
+class IsMarkedVisitor;
class Mutex;
+class RootVisitor;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;
+// Same as in object_callbacks.h. Just avoid the include.
+typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
+
namespace mirror {
class Class;
class Object;
@@ -67,8 +69,12 @@ class TaskProcessor;
class Verification;
namespace accounting {
+ template <typename T> class AtomicStack;
+ typedef AtomicStack<mirror::Object> ObjectStack;
+ class CardTable;
class HeapBitmap;
class ModUnionTable;
+ class ReadBarrierTable;
class RememberedSet;
} // namespace accounting
@@ -99,13 +105,6 @@ namespace space {
class ZygoteSpace;
} // namespace space
-class AgeCardVisitor {
- public:
- uint8_t operator()(uint8_t card) const {
- return (card == accounting::CardTable::kCardDirty) ? card - 1 : 0;
- }
-};
-
enum HomogeneousSpaceCompactResult {
// Success.
kSuccess,
@@ -1190,9 +1189,12 @@ class Heap {
// Task processor, proxies heap trim requests to the daemon threads.
std::unique_ptr<TaskProcessor> task_processor_;
- // True while the garbage collector is running.
+ // Collector type of the running GC.
volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
+ // Cause of the last running GC.
+ volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
+
// The thread currently running the GC.
volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
@@ -1237,10 +1239,20 @@ class Heap {
// old_native_bytes_allocated_ and new_native_bytes_allocated_.
Atomic<size_t> old_native_bytes_allocated_;
- // Used for synchronization of blocking GCs triggered by
- // RegisterNativeAllocation.
+ // Used for synchronization when multiple threads call into
+ // RegisterNativeAllocation and require a blocking GC.
+ // * If a previous blocking GC is in progress, all threads will wait for
+ // that GC to complete, then wait for one of the threads to complete another
+ // blocking GC.
+ // * If a blocking GC is assigned but not in progress, a thread has been
+ // assigned to run a blocking GC but has not started yet. Threads will wait
+ // for the assigned blocking GC to complete.
+ // * If a blocking GC is neither assigned nor in progress, the first thread
+ // will run a blocking GC and signal to other threads that a blocking GC
+ // has been assigned.
Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
+ bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
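
For readers tracing the handshake these fields implement (together with the RegisterNativeAllocation hunk earlier in heap.cc), here is a minimal sketch of the three-state protocol described in the comment above, using std::mutex and std::condition_variable as stand-ins for ART's Mutex and ConditionVariable. Names and structure are simplified, not the actual implementation:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    struct NativeBlockingGcState {
      std::mutex lock;               // stand-in for native_blocking_gc_lock_
      std::condition_variable cond;  // stand-in for native_blocking_gc_cond_
      bool assigned = false;         // native_blocking_gc_is_assigned_
      bool in_progress = false;      // native_blocking_gc_in_progress_
      uint32_t gcs_finished = 0;     // native_blocking_gcs_finished_
    };

    void RegisterNativeAllocationBlocking(NativeBlockingGcState& s) {
      bool run_gc = false;
      {
        std::unique_lock<std::mutex> lk(s.lock);
        uint32_t initial = s.gcs_finished;
        if (s.in_progress) {
          // State 1: a blocking GC is already running; wait for it to finish.
          s.cond.wait(lk, [&] { return s.gcs_finished != initial; });
          ++initial;
        }
        if (s.assigned) {
          // State 2: another thread owns the next blocking GC; wait for it.
          s.cond.wait(lk, [&] { return s.gcs_finished != initial; });
        } else {
          // State 3: nothing assigned or in progress; this thread takes the job.
          s.assigned = true;
          run_gc = true;
        }
      }
      if (run_gc) {
        // ... run the collection (cf. kGcCauseForNativeAllocBlocking) ...
        std::lock_guard<std::mutex> lk(s.lock);
        s.assigned = false;
        s.in_progress = false;
        ++s.gcs_finished;
        s.cond.notify_all();
      }
    }

Note the sketch never sets in_progress: the diff does not show where native_blocking_gc_in_progress_ becomes true (presumably once the assigned GC actually starts), so that transition is deliberately omitted here.
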
diff --git a/runtime/gc/heap_verification_test.cc b/runtime/gc/heap_verification_test.cc
index c8233e37ab..30714ba09e 100644
--- a/runtime/gc/heap_verification_test.cc
+++ b/runtime/gc/heap_verification_test.cc
@@ -16,7 +16,8 @@
#include "common_runtime_test.h"
-#include "class_linker.h"
+#include "base/memory_tool.h"
+#include "class_linker-inl.h"
#include "handle_scope-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -53,6 +54,11 @@ TEST_F(VerificationTest, IsValidHeapObjectAddress) {
Handle<mirror::String> string(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "test")));
EXPECT_TRUE(v->IsValidHeapObjectAddress(string.Get()));
+ // Address in the heap that isn't aligned.
+ const void* unaligned_address =
+ reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(string.Get()) + 1);
+ EXPECT_TRUE(v->IsAddressInHeapSpace(unaligned_address));
+ EXPECT_FALSE(v->IsValidHeapObjectAddress(unaligned_address));
EXPECT_TRUE(v->IsValidHeapObjectAddress(string->GetClass()));
const uintptr_t uint_klass = reinterpret_cast<uintptr_t>(string->GetClass());
// Not actually a valid object but the verification can't know that. Guaranteed to be inside a
@@ -63,7 +69,7 @@ TEST_F(VerificationTest, IsValidHeapObjectAddress) {
reinterpret_cast<const void*>(&uint_klass)));
}
-TEST_F(VerificationTest, IsValidClass) {
+TEST_F(VerificationTest, IsValidClassOrNotInHeap) {
ScopedObjectAccess soa(Thread::Current());
VariableSizedHandleScope hs(soa.Self());
Handle<mirror::String> string(
@@ -72,14 +78,35 @@ TEST_F(VerificationTest, IsValidClass) {
EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(1)));
EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(4)));
EXPECT_FALSE(v->IsValidClass(nullptr));
- EXPECT_FALSE(v->IsValidClass(string.Get()));
EXPECT_TRUE(v->IsValidClass(string->GetClass()));
+ EXPECT_FALSE(v->IsValidClass(string.Get()));
+}
+
+TEST_F(VerificationTest, IsValidClassInHeap) {
+ TEST_DISABLED_FOR_MEMORY_TOOL();
+ ScopedObjectAccess soa(Thread::Current());
+ VariableSizedHandleScope hs(soa.Self());
+ Handle<mirror::String> string(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "test")));
+ const Verification* const v = Runtime::Current()->GetHeap()->GetVerification();
const uintptr_t uint_klass = reinterpret_cast<uintptr_t>(string->GetClass());
EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(uint_klass - kObjectAlignment)));
EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(&uint_klass)));
}
-TEST_F(VerificationTest, DumpObjectInfo) {
+TEST_F(VerificationTest, DumpInvalidObjectInfo) {
+ ScopedLogSeverity sls(LogSeverity::INFO);
+ ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
+ VariableSizedHandleScope hs(soa.Self());
+ const Verification* const v = runtime->GetHeap()->GetVerification();
+ LOG(INFO) << v->DumpObjectInfo(reinterpret_cast<const void*>(1), "obj");
+ LOG(INFO) << v->DumpObjectInfo(reinterpret_cast<const void*>(4), "obj");
+ LOG(INFO) << v->DumpObjectInfo(nullptr, "obj");
+}
+
+TEST_F(VerificationTest, DumpValidObjectInfo) {
+ TEST_DISABLED_FOR_MEMORY_TOOL();
ScopedLogSeverity sls(LogSeverity::INFO);
ScopedObjectAccess soa(Thread::Current());
Runtime* const runtime = Runtime::Current();
@@ -89,9 +116,6 @@ TEST_F(VerificationTest, DumpObjectInfo) {
Handle<mirror::ObjectArray<mirror::Object>> arr(
hs.NewHandle(AllocObjectArray<mirror::Object>(soa.Self(), 256)));
const Verification* const v = runtime->GetHeap()->GetVerification();
- LOG(INFO) << v->DumpObjectInfo(reinterpret_cast<const void*>(1), "obj");
- LOG(INFO) << v->DumpObjectInfo(reinterpret_cast<const void*>(4), "obj");
- LOG(INFO) << v->DumpObjectInfo(nullptr, "obj");
LOG(INFO) << v->DumpObjectInfo(string.Get(), "test");
LOG(INFO) << v->DumpObjectInfo(string->GetClass(), "obj");
const uintptr_t uint_klass = reinterpret_cast<uintptr_t>(string->GetClass());
@@ -102,6 +126,7 @@ TEST_F(VerificationTest, DumpObjectInfo) {
}
TEST_F(VerificationTest, LogHeapCorruption) {
+ TEST_DISABLED_FOR_MEMORY_TOOL();
ScopedLogSeverity sls(LogSeverity::INFO);
ScopedObjectAccess soa(Thread::Current());
Runtime* const runtime = Runtime::Current();
diff --git a/runtime/gc/reference_processor-inl.h b/runtime/gc/reference_processor-inl.h
index f619a15f74..0f47d3dc9f 100644
--- a/runtime/gc/reference_processor-inl.h
+++ b/runtime/gc/reference_processor-inl.h
@@ -19,6 +19,8 @@
#include "reference_processor.h"
+#include "mirror/reference-inl.h"
+
namespace art {
namespace gc {
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 886c950710..52da7632f0 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -22,6 +22,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
+#include "object_callbacks.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 38b68cbbe8..a8135d9a3b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -20,11 +20,11 @@
#include "base/mutex.h"
#include "globals.h"
#include "jni.h"
-#include "object_callbacks.h"
#include "reference_queue.h"
namespace art {
+class IsMarkedVisitor;
class TimingLogger;
namespace mirror {
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index fd5dcf9de6..321d22a592 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -22,6 +22,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
+#include "object_callbacks.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index b73a880a8a..c48d48c530 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -27,7 +27,6 @@
#include "globals.h"
#include "jni.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "offsets.h"
#include "thread_pool.h"
@@ -36,6 +35,9 @@ namespace mirror {
class Reference;
} // namespace mirror
+class IsMarkedVisitor;
+class MarkObjectVisitor;
+
namespace gc {
namespace collector {
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index f937d2c778..2976dd0252 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -19,7 +19,7 @@
#include "gc/collector_type.h"
#include "gc/heap.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 45cea5a48c..1509bb027d 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -17,9 +17,10 @@
#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
-#include "base/bit_utils.h"
#include "bump_pointer_space.h"
+#include "base/bit_utils.h"
+
namespace art {
namespace gc {
namespace space {
@@ -86,15 +87,6 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
return ret;
}
-inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- size_t num_bytes = obj->SizeOf();
- if (usable_size != nullptr) {
- *usable_size = RoundUp(num_bytes, kAlignment);
- }
- return num_bytes;
-}
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 426b33218c..bb1ede15f2 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -271,6 +271,14 @@ void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
// Caller's job to print failed_alloc_bytes.
}
+size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
+ size_t num_bytes = obj->SizeOf();
+ if (usable_size != nullptr) {
+ *usable_size = RoundUp(num_bytes, kAlignment);
+ }
+ return num_bytes;
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index e9982e9d3c..566dc5dc40 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -17,10 +17,17 @@
#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
-#include "object_callbacks.h"
#include "space.h"
namespace art {
+
+namespace mirror {
+class Object;
+}
+
+// Same as in object_callbacks.h. Just avoid the include.
+typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
+
namespace gc {
namespace collector {
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 9282ec7944..7ec54f59fe 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -26,6 +26,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 0f51b87033..b6bff5b17c 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -42,6 +42,7 @@
#include "mirror/object-refvisitor-inl.h"
#include "oat_file.h"
#include "os.h"
+#include "runtime.h"
#include "space-inl.h"
#include "utils.h"
@@ -651,7 +652,8 @@ class ImageSpaceLoader {
bitmap_name,
image_bitmap_map.release(),
reinterpret_cast<uint8_t*>(map->Begin()),
- image_objects.End()));
+ // Make sure the bitmap's coverage ends at a card-size boundary, not just a bitmap-word boundary.
+ RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
if (bitmap == nullptr) {
*error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
return nullptr;
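
The fix rounds the bitmap's coverage end up to the card size, which is ordinary power-of-two alignment. A small self-checking sketch, assuming CardTable::kCardSize is ART's default of 128 bytes (1 << 7):

    #include <cstdint>

    constexpr uintptr_t kCardSize = 128;  // assumption: CardTable::kCardSize

    constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t n) {
      // n must be a power of two, as kCardSize is.
      return (x + n - 1) & ~(n - 1);
    }

    static_assert(RoundUp(4096, kCardSize) == 4096, "already card-aligned");
    static_assert(RoundUp(4097, kCardSize) == 4224, "rounded up to the next card");
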
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index aa3dd42416..3383d6b383 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -19,7 +19,7 @@
#include "arch/instruction_set.h"
#include "gc/accounting/space_bitmap.h"
-#include "runtime.h"
+#include "image.h"
#include "space.h"
namespace art {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 3988073de8..4597a96ce2 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,19 +16,22 @@
#include "large_object_space.h"
+#include <sys/mman.h>
+
#include <memory>
-#include "gc/accounting/heap_bitmap-inl.h"
-#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
#include "image.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 3910a03342..6a7c502b61 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -18,7 +18,7 @@
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#include "region_space.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
@@ -138,20 +138,6 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte
return reinterpret_cast<mirror::Object*>(old_top);
}
-inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
- size_t num_bytes = obj->SizeOf();
- if (usable_size != nullptr) {
- if (LIKELY(num_bytes <= kRegionSize)) {
- DCHECK(RefToRegion(obj)->IsAllocated());
- *usable_size = RoundUp(num_bytes, kAlignment);
- } else {
- DCHECK(RefToRegion(obj)->IsLarge());
- *usable_size = RoundUp(num_bytes, kRegionSize);
- }
- }
- return num_bytes;
-}
-
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
uint64_t bytes = 0;
@@ -315,18 +301,21 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK(first_reg->IsFree());
first_reg->UnfreeLarge(this, time_);
++num_non_free_regions_;
- first_reg->SetTop(first_reg->Begin() + num_bytes);
+ size_t allocated = num_regs * kRegionSize;
+ // Set 'top' to the end of the usable bytes, since the caller of this
+ // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
+ first_reg->SetTop(first_reg->Begin() + allocated);
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
regions_[p].UnfreeLargeTail(this, time_);
++num_non_free_regions_;
}
- *bytes_allocated = num_bytes;
+ *bytes_allocated = allocated;
if (usable_size != nullptr) {
- *usable_size = num_regs * kRegionSize;
+ *usable_size = allocated;
}
- *bytes_tl_bulk_allocated = num_bytes;
+ *bytes_tl_bulk_allocated = allocated;
return reinterpret_cast<mirror::Object*>(first_reg->Begin());
} else {
// right points to the non-free region. Start with the one after it.
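
The AllocLarge hunk changes what the three out-parameters report: a large allocation claims whole regions, and after the change every claimed byte is reported (matching the new 'top'), not just the requested count. A simplified sketch of the accounting; kRegionSize here is an assumed stand-in for RegionSpace::kRegionSize:

    #include <cstddef>

    constexpr size_t kRegionSize = 256 * 1024;  // assumed value

    void AccountLargeAlloc(size_t num_bytes, size_t* bytes_allocated,
                           size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
      const size_t num_regs = (num_bytes + kRegionSize - 1) / kRegionSize;
      const size_t allocated = num_regs * kRegionSize;  // whole regions are claimed
      *bytes_allocated = allocated;          // previously: num_bytes
      if (usable_size != nullptr) {
        *usable_size = allocated;            // unchanged: num_regs * kRegionSize
      }
      *bytes_tl_bulk_allocated = allocated;  // previously: num_bytes
    }
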
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index e792531bfe..8d8c4885ef 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -16,6 +16,7 @@
#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
+#include "gc/accounting/read_barrier_table.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"
@@ -528,6 +529,20 @@ void RegionSpace::Region::Dump(std::ostream& os) const {
<< " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_ << " thread=" << thread_ << "\n";
}
+size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
+ size_t num_bytes = obj->SizeOf();
+ if (usable_size != nullptr) {
+ if (LIKELY(num_bytes <= kRegionSize)) {
+ DCHECK(RefToRegion(obj)->IsAllocated());
+ *usable_size = RoundUp(num_bytes, kAlignment);
+ } else {
+ DCHECK(RefToRegion(obj)->IsLarge());
+ *usable_size = RoundUp(num_bytes, kRegionSize);
+ }
+ }
+ return num_bytes;
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 4dea0fa5c0..472c77bd3d 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -17,13 +17,17 @@
#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
-#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"
namespace art {
namespace gc {
+
+namespace accounting {
+class ReadBarrierTable;
+} // namespace accounting
+
namespace space {
// A space that consists of equal-sized regions.
@@ -411,7 +415,9 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
DCHECK(IsInUnevacFromSpace());
DCHECK(!IsLargeTail());
DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
- live_bytes_ += live_bytes;
+ // For large allocations, we always consider all bytes in the
+ // regions live.
+ live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
DCHECK_LE(live_bytes_, BytesAllocated());
}
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 8bff2b4c0f..09aa7cf8a3 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -17,49 +17,17 @@
#ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
+#include "rosalloc_space.h"
+
#include "base/memory_tool.h"
#include "gc/allocator/rosalloc-inl.h"
#include "gc/space/memory_tool_settings.h"
-#include "rosalloc_space.h"
#include "thread.h"
namespace art {
namespace gc {
namespace space {
-template<bool kMaybeIsRunningOnMemoryTool>
-inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
- // obj is a valid object. Use its class in the header to get the size.
- // Don't use verification since the object may be dead if we are sweeping.
- size_t size = obj->SizeOf<kVerifyNone>();
- bool add_redzones = false;
- if (kMaybeIsRunningOnMemoryTool) {
- add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
- if (add_redzones) {
- size += 2 * kDefaultMemoryToolRedZoneBytes;
- }
- } else {
- DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
- }
- size_t size_by_size = rosalloc_->UsableSize(size);
- if (kIsDebugBuild) {
- // On memory tool, the red zone has an impact...
- const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj);
- size_t size_by_ptr = rosalloc_->UsableSize(
- obj_ptr - (add_redzones ? kDefaultMemoryToolRedZoneBytes : 0));
- if (size_by_size != size_by_ptr) {
- LOG(INFO) << "Found a bad sized obj of size " << size
- << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
- << " size_by_size=" << size_by_size << " size_by_ptr=" << size_by_ptr;
- }
- DCHECK_EQ(size_by_size, size_by_ptr);
- }
- if (usable_size != nullptr) {
- *usable_size = size_by_size;
- }
- return size_by_size;
-}
-
template<bool kThreadSafe>
inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size,
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 8ccbfaa7a3..9e900e4558 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -24,6 +24,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
@@ -373,6 +374,39 @@ void RosAllocSpace::DumpStats(std::ostream& os) {
rosalloc_->DumpStats(os);
}
+template<bool kMaybeIsRunningOnMemoryTool>
+size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
+ // obj is a valid object. Use its class in the header to get the size.
+ // Don't use verification since the object may be dead if we are sweeping.
+ size_t size = obj->SizeOf<kVerifyNone>();
+ bool add_redzones = false;
+ if (kMaybeIsRunningOnMemoryTool) {
+ add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
+ if (add_redzones) {
+ size += 2 * kDefaultMemoryToolRedZoneBytes;
+ }
+ } else {
+ DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
+ }
+ size_t size_by_size = rosalloc_->UsableSize(size);
+ if (kIsDebugBuild) {
+ // On memory tool, the red zone has an impact...
+ const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj);
+ size_t size_by_ptr = rosalloc_->UsableSize(
+ obj_ptr - (add_redzones ? kDefaultMemoryToolRedZoneBytes : 0));
+ if (size_by_size != size_by_ptr) {
+ LOG(INFO) << "Found a bad sized obj of size " << size
+ << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
+ << " size_by_size=" << size_by_size << " size_by_ptr=" << size_by_ptr;
+ }
+ DCHECK_EQ(size_by_size, size_by_ptr);
+ }
+ if (usable_size != nullptr) {
+ *usable_size = size_by_size;
+ }
+ return size_by_size;
+}
+
} // namespace space
namespace allocator {
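
The moved RosAllocSpace::AllocationSizeNonvirtual centers on one adjustment: under a memory tool, rosalloc holds the object plus a red zone on each side, so usable-size queries must use the padded size (and the pointer-based cross-check rewinds past the leading red zone). A sketch of just that arithmetic; the red-zone constant's value is an assumption:

    #include <cstddef>

    constexpr size_t kDefaultMemoryToolRedZoneBytes = 8;  // assumed value

    // Pad an object size the way the function above does before asking
    // rosalloc for the usable size of the chunk it came from.
    size_t PaddedAllocationSize(size_t object_size, bool add_redzones) {
      return add_redzones ? object_size + 2 * kDefaultMemoryToolRedZoneBytes
                          : object_size;
    }
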
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index a2e2c1c7fb..74ce273abf 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -19,8 +19,9 @@
#include "base/logging.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index fc558cf8e4..2a4f830843 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -24,9 +24,8 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/space_bitmap.h"
-#include "gc/collector/garbage_collector.h"
+#include "gc/collector/object_byte_pair.h"
#include "globals.h"
-#include "image.h"
#include "mem_map.h"
namespace art {
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index bbfcb31ab1..fddb3f2dd2 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -16,10 +16,12 @@
#include "zygote_space.h"
+#include "base/mutex-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index f1d26d9a41..5a75b37b67 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -18,7 +18,7 @@
#include "common_runtime_test.h"
#include "task_processor.h"
#include "thread_pool.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index c14f250528..b007d1decd 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -25,6 +25,28 @@
namespace art {
namespace gc {
+std::string Verification::DumpRAMAroundAddress(uintptr_t addr, uintptr_t bytes) const {
+ const uintptr_t dump_start = addr - bytes;
+ const uintptr_t dump_end = addr + bytes;
+ std::ostringstream oss;
+ if (dump_start < dump_end &&
+ IsAddressInHeapSpace(reinterpret_cast<const void*>(dump_start)) &&
+ IsAddressInHeapSpace(reinterpret_cast<const void*>(dump_end - 1))) {
+ oss << " adjacent_ram=";
+ for (uintptr_t p = dump_start; p < dump_end; ++p) {
+ if (p == addr) {
+ // Marker of where the address is.
+ oss << "|";
+ }
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
+ oss << std::hex << std::setfill('0') << std::setw(2) << static_cast<uintptr_t>(*ptr);
+ }
+ } else {
+ oss << " <invalid address>";
+ }
+ return oss.str();
+}
+
std::string Verification::DumpObjectInfo(const void* addr, const char* tag) const {
std::ostringstream oss;
oss << tag << "=" << addr;
@@ -50,23 +72,7 @@ std::string Verification::DumpObjectInfo(const void* addr, const char* tag) cons
card_table->GetCard(reinterpret_cast<const mirror::Object*>(addr)));
}
// Dump adjacent RAM.
- const uintptr_t uint_addr = reinterpret_cast<uintptr_t>(addr);
- static constexpr size_t kBytesBeforeAfter = 2 * kObjectAlignment;
- const uintptr_t dump_start = uint_addr - kBytesBeforeAfter;
- const uintptr_t dump_end = uint_addr + kBytesBeforeAfter;
- if (dump_start < dump_end &&
- IsValidHeapObjectAddress(reinterpret_cast<const void*>(dump_start)) &&
- IsValidHeapObjectAddress(reinterpret_cast<const void*>(dump_end - kObjectAlignment))) {
- oss << " adjacent_ram=";
- for (uintptr_t p = dump_start; p < dump_end; ++p) {
- if (p == uint_addr) {
- // Marker of where the object is.
- oss << "|";
- }
- uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
- oss << std::hex << std::setfill('0') << std::setw(2) << static_cast<uintptr_t>(*ptr);
- }
- }
+ oss << DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(addr), 4 * kObjectAlignment);
} else {
oss << " <invalid address>";
}
@@ -90,12 +96,15 @@ void Verification::LogHeapCorruption(ObjPtr<mirror::Object> holder,
if (holder != nullptr) {
mirror::Class* holder_klass = holder->GetClass<kVerifyNone, kWithoutReadBarrier>();
if (IsValidClass(holder_klass)) {
- oss << "field_offset=" << offset.Uint32Value();
+ oss << " field_offset=" << offset.Uint32Value();
ArtField* field = holder->FindFieldByOffset(offset);
if (field != nullptr) {
oss << " name=" << field->GetName();
}
}
+ mirror::HeapReference<mirror::Object>* addr = holder->GetFieldObjectReferenceAddr(offset);
+ oss << " reference addr"
+ << DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(addr), 4 * kObjectAlignment);
}
if (fatal) {
@@ -105,10 +114,7 @@ void Verification::LogHeapCorruption(ObjPtr<mirror::Object> holder,
}
}
-bool Verification::IsValidHeapObjectAddress(const void* addr, space::Space** out_space) const {
- if (!IsAligned<kObjectAlignment>(addr)) {
- return false;
- }
+bool Verification::IsAddressInHeapSpace(const void* addr, space::Space** out_space) const {
space::Space* const space = heap_->FindSpaceFromAddress(addr);
if (space != nullptr) {
if (out_space != nullptr) {
@@ -119,6 +125,10 @@ bool Verification::IsValidHeapObjectAddress(const void* addr, space::Space** out
return false;
}
+bool Verification::IsValidHeapObjectAddress(const void* addr, space::Space** out_space) const {
+ return IsAligned<kObjectAlignment>(addr) && IsAddressInHeapSpace(addr, out_space);
+}
+
bool Verification::IsValidClass(const void* addr) const {
if (!IsValidHeapObjectAddress(addr)) {
return false;
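
For a concrete picture of the adjacent_ram format produced by the new DumpRAMAroundAddress: hex bytes over [addr - bytes, addr + bytes), with '|' marking addr itself. A standalone demo over a local buffer (illustrative values, no ART heap involved):

    #include <cstdint>
    #include <iomanip>
    #include <iostream>
    #include <sstream>

    int main() {
      uint8_t buf[8] = {0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0xba, 0xbe};
      const uintptr_t addr = reinterpret_cast<uintptr_t>(&buf[4]);
      std::ostringstream oss;
      for (uintptr_t p = addr - 4; p < addr + 4; ++p) {
        if (p == addr) {
          oss << "|";  // marker of where the address is
        }
        oss << std::hex << std::setfill('0') << std::setw(2)
            << static_cast<unsigned>(*reinterpret_cast<uint8_t*>(p));
      }
      std::cout << oss.str() << "\n";  // prints: deadbeef|cafebabe
      return 0;
    }
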
diff --git a/runtime/gc/verification.h b/runtime/gc/verification.h
index 3d95d93015..3b5eaf5f46 100644
--- a/runtime/gc/verification.h
+++ b/runtime/gc/verification.h
@@ -49,14 +49,21 @@ class Verification {
mirror::Object* ref,
bool fatal) const REQUIRES_SHARED(Locks::mutator_lock_);
-
// Return true if the klass is likely to be a valid mirror::Class.
bool IsValidClass(const void* klass) const REQUIRES_SHARED(Locks::mutator_lock_);
- // Does not allow null.
+ // Does not allow null; checks alignment.
bool IsValidHeapObjectAddress(const void* addr, space::Space** out_space = nullptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Does not check alignment; used by DumpRAMAroundAddress.
+ bool IsAddressInHeapSpace(const void* addr, space::Space** out_space = nullptr) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Dump bytes of RAM before and after an address.
+ std::string DumpRAMAroundAddress(uintptr_t addr, uintptr_t bytes) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
gc::Heap* const heap_;
};