-rw-r--r--  compiler/image_writer.cc | 109
-rw-r--r--  compiler/image_writer.h | 11
-rw-r--r--  oatdump/oatdump.cc | 46
-rw-r--r--  patchoat/patchoat.cc | 7
-rw-r--r--  patchoat/patchoat.h | 5
-rw-r--r--  runtime/class_linker.cc | 122
-rw-r--r--  runtime/debugger.cc | 18
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h | 2
-rw-r--r--  runtime/gc/accounting/heap_bitmap.cc | 9
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h | 5
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 5
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 15
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h | 25
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 21
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 12
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 110
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 5
-rw-r--r--  runtime/gc/heap-visit-objects-inl.h | 169
-rw-r--r--  runtime/gc/heap.cc | 361
-rw-r--r--  runtime/gc/heap.h | 19
-rw-r--r--  runtime/gc/space/bump_pointer_space-walk-inl.h | 100
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 52
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 9
-rw-r--r--  runtime/gc/space/region_space-inl.h | 12
-rw-r--r--  runtime/gc/space/region_space.h | 19
-rw-r--r--  runtime/hprof/hprof.cc | 15
-rw-r--r--  runtime/mirror/throwable.cc | 1
-rw-r--r--  runtime/object_callbacks.h | 3
-rw-r--r--  runtime/openjdkjvmti/ti_class.cc | 15
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc | 84
30 files changed, 641 insertions, 745 deletions
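
This change drops the C-style ObjectCallback plumbing (a function pointer plus an opaque void* state argument) in favor of templated visitors, so call sites can pass capturing lambdas directly. The following is a minimal self-contained sketch of the before/after shape, using made-up types rather than the ART classes:

// Made-up types; this only sketches the pattern the patch applies throughout.
#include <cstddef>
#include <vector>

struct Object { int id; };

// Before: a function pointer plus an opaque void* carries the caller's state.
using ObjectCallback = void(Object* obj, void* arg);

struct SpaceOld {
  std::vector<Object*> objects;
  void Walk(ObjectCallback* callback, void* arg) {
    for (Object* obj : objects) {
      (*callback)(obj, arg);
    }
  }
};

// After: a templated Walk accepts any callable, including a capturing lambda,
// so the void* plumbing and the static trampoline functions disappear.
struct SpaceNew {
  std::vector<Object*> objects;
  template <typename Visitor>
  void Walk(Visitor&& visitor) {
    for (Object* obj : objects) {
      visitor(obj);
    }
  }
};

int main() {
  Object a{1}, b{2};
  SpaceNew space{{&a, &b}};
  std::size_t count = 0;
  space.Walk([&](Object* obj) {
    if (obj != nullptr) {
      ++count;  // captured state replaces the old 'arg' parameter
    }
  });
  return count == 2 ? 0 : 1;
}

Because the visitor type is a template parameter, the compiler sees the lambda body at the call site and can inline it, instead of issuing an indirect call per visited object.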
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4f1fef9f58..f92bf95065 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -44,6 +44,7 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "gc/verification.h"
@@ -117,19 +118,17 @@ bool ImageWriter::IsInBootOatFile(const void* ptr) const {
return false;
}
-static void ClearDexFileCookieCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(obj != nullptr);
- Class* klass = obj->GetClass();
- if (klass == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_DexFile)) {
- ArtField* field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
- // Null out the cookie to enable determinism. b/34090128
- field->SetObject</*kTransactionActive*/false>(obj, nullptr);
- }
-}
-
static void ClearDexFileCookies() REQUIRES_SHARED(Locks::mutator_lock_) {
- Runtime::Current()->GetHeap()->VisitObjects(ClearDexFileCookieCallback, nullptr);
+ auto visitor = [](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ Class* klass = obj->GetClass();
+ if (klass == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_DexFile)) {
+ ArtField* field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ // Null out the cookie to enable determinism. b/34090128
+ field->SetObject</*kTransactionActive*/false>(obj, nullptr);
+ }
+ };
+ Runtime::Current()->GetHeap()->VisitObjects(visitor);
}
bool ImageWriter::PrepareImageAddressSpace() {
@@ -1176,21 +1175,19 @@ void ImageWriter::PruneNonImageClasses() {
void ImageWriter::CheckNonImageClassesRemoved() {
if (compiler_driver_.GetImageClasses() != nullptr) {
+ auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj->IsClass() && !IsInBootImage(obj)) {
+ Class* klass = obj->AsClass();
+ if (!KeepClass(klass)) {
+ DumpImageClasses();
+ std::string temp;
+ CHECK(KeepClass(klass))
+ << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
+ }
+ }
+ };
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
- }
-}
-
-void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
- ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
- if (obj->IsClass() && !image_writer->IsInBootImage(obj)) {
- Class* klass = obj->AsClass();
- if (!image_writer->KeepClass(klass)) {
- image_writer->DumpImageClasses();
- std::string temp;
- CHECK(image_writer->KeepClass(klass))
- << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
- }
+ heap->VisitObjects(visitor);
}
}
@@ -1532,26 +1529,6 @@ void ImageWriter::AssignMethodOffset(ArtMethod* method,
offset += ArtMethod::Size(target_ptr_size_);
}
-void ImageWriter::EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg) {
- ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
- DCHECK(writer != nullptr);
- if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
- CHECK(writer->IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
- }
-}
-
-void ImageWriter::DeflateMonitorCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) {
- Monitor::Deflate(Thread::Current(), obj);
-}
-
-void ImageWriter::UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) {
- ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
- DCHECK(writer != nullptr);
- if (!writer->IsInBootImage(obj)) {
- writer->UnbinObjectsIntoOffset(obj);
- }
-}
-
void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
DCHECK(!IsInBootImage(obj));
CHECK(obj != nullptr);
@@ -1686,7 +1663,12 @@ void ImageWriter::CalculateNewObjectOffsets() {
// Deflate monitors before we visit roots since deflating acquires the monitor lock. Acquiring
// this lock while holding other locks may cause lock order violations.
- heap->VisitObjects(DeflateMonitorCallback, this);
+ {
+ auto deflate_monitor = [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ Monitor::Deflate(Thread::Current(), obj);
+ };
+ heap->VisitObjects(deflate_monitor);
+ }
// Work list of <object, oat_index> for objects. Everything on the stack must already be
// assigned a bin slot.
@@ -1748,7 +1730,15 @@ void ImageWriter::CalculateNewObjectOffsets() {
}
// Verify that all objects have assigned image bin slots.
- heap->VisitObjects(EnsureBinSlotAssignedCallback, this);
+ {
+ auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+ CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
+ }
+ };
+ heap->VisitObjects(ensure_bin_slots_assigned);
+ }
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
@@ -1812,7 +1802,15 @@ void ImageWriter::CalculateNewObjectOffsets() {
}
// Transform each object's bin slot into an offset which will be used to do the final copy.
- heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
+ {
+ auto unbin_objects_into_offset = [&](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!IsInBootImage(obj)) {
+ UnbinObjectsIntoOffset(obj);
+ }
+ };
+ heap->VisitObjects(unbin_objects_into_offset);
+ }
size_t i = 0;
for (ImageInfo& image_info : image_infos_) {
@@ -2119,8 +2117,11 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
}
void ImageWriter::CopyAndFixupObjects() {
- gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->VisitObjects(CopyAndFixupObjectsCallback, this);
+ auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ CopyAndFixupObject(obj);
+ };
+ Runtime::Current()->GetHeap()->VisitObjects(visitor);
// Fix up the object previously had hash codes.
for (const auto& hash_pair : saved_hashcode_map_) {
Object* obj = hash_pair.first;
@@ -2130,12 +2131,6 @@ void ImageWriter::CopyAndFixupObjects() {
saved_hashcode_map_.clear();
}
-void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
- DCHECK(obj != nullptr);
- DCHECK(arg != nullptr);
- reinterpret_cast<ImageWriter*>(arg)->CopyAndFixupObject(obj);
-}
-
void ImageWriter::FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
mirror::Class* klass,
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index c42523b783..ee6fc1dff6 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -397,8 +397,6 @@ class ImageWriter FINAL {
// Verify unwanted classes removed.
void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
- static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_);
// Lays out where the image objects will be at runtime.
void CalculateNewObjectOffsets()
@@ -414,18 +412,9 @@ class ImageWriter FINAL {
void UnbinObjectsIntoOffset(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
- static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_);
- static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_);
- static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Creates the contiguous image in memory and adjusts pointers.
void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
- static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 066c66ac93..ae26e7dfcf 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -42,6 +42,7 @@
#include "dex_instruction-inl.h"
#include "disassembler.h"
#include "elf_builder.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
@@ -1930,9 +1931,12 @@ class ImageDumper {
}
}
}
+ auto dump_visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DumpObject(obj);
+ };
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Dump the normal objects before ArtMethods.
- image_space_.GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+ image_space_.GetLiveBitmap()->Walk(dump_visitor);
indent_os << "\n";
// TODO: Dump fields.
// Dump methods after.
@@ -1941,7 +1945,7 @@ class ImageDumper {
image_space_.Begin(),
image_header_.GetPointerSize());
// Dump the large objects separately.
- heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+ heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(dump_visitor);
indent_os << "\n";
}
os << "STATS:\n" << std::flush;
@@ -2156,20 +2160,18 @@ class ImageDumper {
return oat_code_begin + GetQuickOatCodeSize(m);
}
- static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ void DumpObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
- DCHECK(arg != nullptr);
- ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
- if (!state->InDumpSpace(obj)) {
+ if (!InDumpSpace(obj)) {
return;
}
size_t object_bytes = obj->SizeOf();
size_t alignment_bytes = RoundUp(object_bytes, kObjectAlignment) - object_bytes;
- state->stats_.object_bytes += object_bytes;
- state->stats_.alignment_bytes += alignment_bytes;
+ stats_.object_bytes += object_bytes;
+ stats_.alignment_bytes += alignment_bytes;
- std::ostream& os = state->vios_.Stream();
+ std::ostream& os = vios_.Stream();
mirror::Class* obj_class = obj->GetClass();
if (obj_class->IsArrayClass()) {
@@ -2186,9 +2188,9 @@ class ImageDumper {
} else {
os << StringPrintf("%p: %s\n", obj, obj_class->PrettyDescriptor().c_str());
}
- ScopedIndentation indent1(&state->vios_);
+ ScopedIndentation indent1(&vios_);
DumpFields(os, obj, obj_class);
- const PointerSize image_pointer_size = state->image_header_.GetPointerSize();
+ const PointerSize image_pointer_size = image_header_.GetPointerSize();
if (obj->IsObjectArray()) {
auto* obj_array = obj->AsObjectArray<mirror::Object>();
for (int32_t i = 0, length = obj_array->GetLength(); i < length; i++) {
@@ -2215,22 +2217,22 @@ class ImageDumper {
mirror::Class* klass = obj->AsClass();
if (klass->NumStaticFields() != 0) {
os << "STATICS:\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
for (ArtField& field : klass->GetSFields()) {
PrintField(os, &field, field.GetDeclaringClass());
}
}
} else {
- auto it = state->dex_caches_.find(obj);
- if (it != state->dex_caches_.end()) {
+ auto it = dex_caches_.find(obj);
+ if (it != dex_caches_.end()) {
auto* dex_cache = down_cast<mirror::DexCache*>(obj);
- const auto& field_section = state->image_header_.GetImageSection(
+ const auto& field_section = image_header_.GetImageSection(
ImageHeader::kSectionArtFields);
- const auto& method_section = state->image_header_.GetMethodsSection();
+ const auto& method_section = image_header_.GetMethodsSection();
size_t num_methods = dex_cache->NumResolvedMethods();
if (num_methods != 0u) {
os << "Methods (size=" << num_methods << "):\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0, length = dex_cache->NumResolvedMethods(); i < length; ++i) {
auto* elem = mirror::DexCache::GetElementPtrSize(resolved_methods,
@@ -2254,7 +2256,7 @@ class ImageDumper {
if (elem == nullptr) {
msg = "null";
} else if (method_section.Contains(
- reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin())) {
+ reinterpret_cast<uint8_t*>(elem) - image_space_.Begin())) {
msg = reinterpret_cast<ArtMethod*>(elem)->PrettyMethod();
} else {
msg = "<not in method section>";
@@ -2265,7 +2267,7 @@ class ImageDumper {
size_t num_fields = dex_cache->NumResolvedFields();
if (num_fields != 0u) {
os << "Fields (size=" << num_fields << "):\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
auto* resolved_fields = dex_cache->GetResolvedFields();
for (size_t i = 0, length = dex_cache->NumResolvedFields(); i < length; ++i) {
auto* elem = mirror::DexCache::GetNativePairPtrSize(
@@ -2288,7 +2290,7 @@ class ImageDumper {
if (elem == nullptr) {
msg = "null";
} else if (field_section.Contains(
- reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin())) {
+ reinterpret_cast<uint8_t*>(elem) - image_space_.Begin())) {
msg = reinterpret_cast<ArtField*>(elem)->PrettyField();
} else {
msg = "<not in field section>";
@@ -2299,7 +2301,7 @@ class ImageDumper {
size_t num_types = dex_cache->NumResolvedTypes();
if (num_types != 0u) {
os << "Types (size=" << num_types << "):\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
auto* resolved_types = dex_cache->GetResolvedTypes();
for (size_t i = 0; i < num_types; ++i) {
auto pair = resolved_types[i].load(std::memory_order_relaxed);
@@ -2331,7 +2333,7 @@ class ImageDumper {
}
}
std::string temp;
- state->stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
+ stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
}
void DumpMethod(ArtMethod* method, std::ostream& indent_os)
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 149960ee2c..a93969f0c0 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -614,7 +614,10 @@ bool PatchOat::PatchImage(bool primary_image) {
TimingLogger::ScopedTiming t("Walk Bitmap", timings_);
// Walk the bitmap.
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- bitmap_->Walk(PatchOat::BitmapCallback, this);
+ auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ VisitObject(obj);
+ };
+ bitmap_->Walk(visitor);
}
return true;
}
@@ -638,7 +641,7 @@ void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Class> cls ATTRIBUTE_UNU
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-// Called by BitmapCallback
+// Called by PatchImage.
void PatchOat::VisitObject(mirror::Object* object) {
mirror::Object* copy = RelocatedCopyOf(object);
CHECK(copy != nullptr);
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index e15a6bc695..182ce94a78 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -79,11 +79,6 @@ class PatchOat {
static bool ReplaceOatFileWithSymlink(const std::string& input_oat_filename,
const std::string& output_oat_filename);
- static void BitmapCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
- }
-
void VisitObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupMethod(ArtMethod* object, ArtMethod* copy)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1c3375c93a..41adae4c8a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -57,6 +57,7 @@
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -863,24 +864,6 @@ struct TrampolineCheckData {
bool error;
};
-static void CheckTrampolines(mirror::Object* obj, void* arg) NO_THREAD_SAFETY_ANALYSIS {
- if (obj->IsClass()) {
- ObjPtr<mirror::Class> klass = obj->AsClass();
- TrampolineCheckData* d = reinterpret_cast<TrampolineCheckData*>(arg);
- for (ArtMethod& m : klass->GetMethods(d->pointer_size)) {
- const void* entrypoint = m.GetEntryPointFromQuickCompiledCodePtrSize(d->pointer_size);
- if (entrypoint == d->quick_resolution_trampoline ||
- entrypoint == d->quick_imt_conflict_trampoline ||
- entrypoint == d->quick_generic_jni_trampoline ||
- entrypoint == d->quick_to_interpreter_bridge_trampoline) {
- d->m = &m;
- d->error = true;
- return;
- }
- }
- }
-}
-
bool ClassLinker::InitFromBootImage(std::string* error_msg) {
VLOG(startup) << __FUNCTION__ << " entering";
CHECK(!init_done_);
@@ -945,7 +928,24 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) {
data.quick_generic_jni_trampoline = ith_quick_generic_jni_trampoline;
data.quick_to_interpreter_bridge_trampoline = ith_quick_to_interpreter_bridge_trampoline;
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- spaces[i]->GetLiveBitmap()->Walk(CheckTrampolines, &data);
+ auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj->IsClass()) {
+ ObjPtr<mirror::Class> klass = obj->AsClass();
+ for (ArtMethod& m : klass->GetMethods(data.pointer_size)) {
+ const void* entrypoint =
+ m.GetEntryPointFromQuickCompiledCodePtrSize(data.pointer_size);
+ if (entrypoint == data.quick_resolution_trampoline ||
+ entrypoint == data.quick_imt_conflict_trampoline ||
+ entrypoint == data.quick_generic_jni_trampoline ||
+ entrypoint == data.quick_to_interpreter_bridge_trampoline) {
+ data.m = &m;
+ data.error = true;
+ return;
+ }
+ }
+ }
+ };
+ spaces[i]->GetLiveBitmap()->Walk(visitor);
if (data.error) {
ArtMethod* m = data.m;
LOG(ERROR) << "Found a broken ArtMethod: " << ArtMethod::PrettyMethod(m);
@@ -1620,7 +1620,46 @@ class ImageSanityChecks FINAL {
static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
ImageSanityChecks isc(heap, class_linker);
- heap->VisitObjects(ImageSanityChecks::SanityCheckObjectsCallback, &isc);
+ auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
+ CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
+ if (obj->IsClass()) {
+ auto klass = obj->AsClass();
+ for (ArtField& field : klass->GetIFields()) {
+ CHECK_EQ(field.GetDeclaringClass(), klass);
+ }
+ for (ArtField& field : klass->GetSFields()) {
+ CHECK_EQ(field.GetDeclaringClass(), klass);
+ }
+ const auto pointer_size = isc.pointer_size_;
+ for (auto& m : klass->GetMethods(pointer_size)) {
+ isc.SanityCheckArtMethod(&m, klass);
+ }
+ auto* vtable = klass->GetVTable();
+ if (vtable != nullptr) {
+ isc.SanityCheckArtMethodPointerArray(vtable, nullptr);
+ }
+ if (klass->ShouldHaveImt()) {
+ ImTable* imt = klass->GetImt(pointer_size);
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ isc.SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr);
+ }
+ }
+ if (klass->ShouldHaveEmbeddedVTable()) {
+ for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
+ isc.SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr);
+ }
+ }
+ mirror::IfTable* iftable = klass->GetIfTable();
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ if (iftable->GetMethodArrayCount(i) > 0) {
+ isc.SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr);
+ }
+ }
+ }
+ };
+ heap->VisitObjects(visitor);
}
static void CheckPointerArray(gc::Heap* heap,
@@ -1632,49 +1671,6 @@ class ImageSanityChecks FINAL {
isc.SanityCheckArtMethodPointerArray(arr, size);
}
- static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(obj != nullptr);
- CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
- CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
- if (obj->IsClass()) {
- ImageSanityChecks* isc = reinterpret_cast<ImageSanityChecks*>(arg);
-
- auto klass = obj->AsClass();
- for (ArtField& field : klass->GetIFields()) {
- CHECK_EQ(field.GetDeclaringClass(), klass);
- }
- for (ArtField& field : klass->GetSFields()) {
- CHECK_EQ(field.GetDeclaringClass(), klass);
- }
- const auto pointer_size = isc->pointer_size_;
- for (auto& m : klass->GetMethods(pointer_size)) {
- isc->SanityCheckArtMethod(&m, klass);
- }
- auto* vtable = klass->GetVTable();
- if (vtable != nullptr) {
- isc->SanityCheckArtMethodPointerArray(vtable, nullptr);
- }
- if (klass->ShouldHaveImt()) {
- ImTable* imt = klass->GetImt(pointer_size);
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- isc->SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr);
- }
- }
- if (klass->ShouldHaveEmbeddedVTable()) {
- for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
- isc->SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr);
- }
- }
- mirror::IfTable* iftable = klass->GetIfTable();
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- if (iftable->GetMethodArrayCount(i) > 0) {
- isc->SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr);
- }
- }
- }
- }
-
private:
ImageSanityChecks(gc::Heap* heap, ClassLinker* class_linker)
: spaces_(heap->GetBootImageSpaces()),
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 0f15e8b934..778b92851b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -39,6 +39,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/scoped_gc_critical_section.h"
+#include "gc/space/bump_pointer_space-walk-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope-inl.h"
@@ -4813,13 +4814,6 @@ class HeapChunkContext {
DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};
-static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
- const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
- HeapChunkContext::HeapChunkJavaCallback(
- obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
-}
-
void Dbg::DdmSendHeapSegments(bool native) {
Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
@@ -4839,6 +4833,12 @@ void Dbg::DdmSendHeapSegments(bool native) {
// Send a series of heap segment chunks.
HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
+ auto bump_pointer_space_visitor = [&](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
+ HeapChunkContext::HeapChunkJavaCallback(
+ obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, &context);
+ };
if (native) {
UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
} else {
@@ -4861,7 +4861,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
} else if (space->IsBumpPointerSpace()) {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
- space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
+ space->AsBumpPointerSpace()->Walk(bump_pointer_space_visitor);
HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
} else if (space->IsRegionSpace()) {
heap->IncrementDisableMovingGC(self);
@@ -4870,7 +4870,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
ScopedSuspendAll ssa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
- space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
+ space->AsRegionSpace()->Walk(bump_pointer_space_visitor);
HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
}
heap->DecrementDisableMovingGC(self);
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 8fcc87d3a1..edf2e5bd65 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -26,7 +26,7 @@ namespace gc {
namespace accounting {
template <typename Visitor>
-inline void HeapBitmap::Visit(const Visitor& visitor) {
+inline void HeapBitmap::Visit(Visitor&& visitor) {
for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index a5d59bfec2..1d729ff4b1 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -71,15 +71,6 @@ void HeapBitmap::RemoveLargeObjectBitmap(LargeObjectBitmap* bitmap) {
large_object_bitmaps_.erase(it);
}
-void HeapBitmap::Walk(ObjectCallback* callback, void* arg) {
- for (const auto& bitmap : continuous_space_bitmaps_) {
- bitmap->Walk(callback, arg);
- }
- for (const auto& bitmap : large_object_bitmaps_) {
- bitmap->Walk(callback, arg);
- }
-}
-
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 7097f87e91..36426e9b6c 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -47,11 +47,8 @@ class HeapBitmap {
ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_);
-
template <typename Visitor>
- void Visit(const Visitor& visitor)
+ ALWAYS_INLINE void Visit(Visitor&& visitor)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 57c290ea94..290199579b 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -27,6 +27,7 @@
#include "gc/space/space.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
+#include "object_callbacks.h"
#include "space_bitmap-inl.h"
#include "thread-current-inl.h"
@@ -383,7 +384,7 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) {
}
}
-void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* arg) {
+void ModUnionTableReferenceCache::VisitObjects(ObjectCallback callback, void* arg) {
CardTable* const card_table = heap_->GetCardTable();
ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
for (uint8_t* card : cleared_cards_) {
@@ -550,7 +551,7 @@ void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor)
0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, bit_visitor);
}
-void ModUnionTableCardCache::VisitObjects(ObjectCallback* callback, void* arg) {
+void ModUnionTableCardCache::VisitObjects(ObjectCallback callback, void* arg) {
card_bitmap_->VisitSetBits(
0,
RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize,
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 591365f33a..9e261fd8b5 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -21,21 +21,25 @@
#include "base/allocator.h"
#include "card_table.h"
#include "globals.h"
-#include "object_callbacks.h"
+#include "mirror/object_reference.h"
#include "safe_map.h"
#include <set>
#include <vector>
namespace art {
+
namespace mirror {
class Object;
} // namespace mirror
+class MarkObjectVisitor;
+
namespace gc {
namespace space {
class ContinuousSpace;
} // namespace space
+
class Heap;
namespace accounting {
@@ -44,6 +48,9 @@ namespace accounting {
// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
class ModUnionTable {
public:
+ // A callback for visiting an object in the heap.
+ using ObjectCallback = void (*)(mirror::Object*, void*);
+
typedef std::set<uint8_t*, std::less<uint8_t*>,
TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
typedef MemoryRangeBitmap<CardTable::kCardSize> CardBitmap;
@@ -72,7 +79,7 @@ class ModUnionTable {
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0;
// Visit all of the objects that may contain references to other spaces.
- virtual void VisitObjects(ObjectCallback* callback, void* arg) = 0;
+ virtual void VisitObjects(ObjectCallback callback, void* arg) = 0;
// Verification, sanity checks that we don't have clean cards which conflict with out cached data
// for said cards. Exclusive lock is required since verify sometimes uses
@@ -124,7 +131,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
- virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -171,7 +178,7 @@ class ModUnionTableCardCache : public ModUnionTable {
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
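
A side note on the mod_union_table.h hunk above: the old shared typedef in object_callbacks.h named a plain function type, which is why parameters were spelled `ObjectCallback* callback`, while the new nested alias names a pointer type, so the `*` disappears from the VisitObjects signatures. A tiny standalone illustration with made-up names:

// Hypothetical illustration; 'Object', 'FnType', and 'FnPtr' are invented names.
struct Object {};

using FnType = void(Object*, void*);      // names a function type
using FnPtr  = void (*)(Object*, void*);  // names a pointer-to-function type

void Example(Object* obj, void* arg) {
  (void)obj;
  (void)arg;
}

int main() {
  FnType* through_type = &Example;  // the caller must write the '*'
  FnPtr   through_ptr  = &Example;  // the alias is already a pointer
  Object o;
  through_type(&o, nullptr);
  through_ptr(&o, nullptr);
  return 0;
}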
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 9feaf415a5..b37dd965fc 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -62,8 +62,9 @@ inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
}
template<size_t kAlignment> template<typename Visitor>
-inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
- const Visitor& visitor) const {
+inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
+ uintptr_t visit_end,
+ Visitor&& visitor) const {
DCHECK_LE(visit_begin, visit_end);
#if 0
for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
@@ -155,6 +156,26 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uin
#endif
}
+template<size_t kAlignment> template<typename Visitor>
+void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) {
+ CHECK(bitmap_begin_ != nullptr);
+
+ uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
+ Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
+ for (uintptr_t i = 0; i <= end; ++i) {
+ uintptr_t w = bitmap_begin[i].LoadRelaxed();
+ if (w != 0) {
+ uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+ do {
+ const size_t shift = CTZ(w);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ w ^= (static_cast<uintptr_t>(1)) << shift;
+ } while (w != 0);
+ }
+ }
+}
+
template<size_t kAlignment> template<bool kSetBit>
inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index eb9f0395ac..317e2fc591 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -137,27 +137,6 @@ void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
}
template<size_t kAlignment>
-void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
- CHECK(bitmap_begin_ != nullptr);
- CHECK(callback != nullptr);
-
- uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
- Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
- for (uintptr_t i = 0; i <= end; ++i) {
- uintptr_t w = bitmap_begin[i].LoadRelaxed();
- if (w != 0) {
- uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
- do {
- const size_t shift = CTZ(w);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
- (*callback)(obj, arg);
- w ^= (static_cast<uintptr_t>(1)) << shift;
- } while (w != 0);
- }
- }
-}
-
-template<size_t kAlignment>
void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitmap,
const SpaceBitmap<kAlignment>& mark_bitmap,
uintptr_t sweep_begin, uintptr_t sweep_end,
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 889f57b333..2fe6394c0f 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -34,9 +34,6 @@ namespace mirror {
} // namespace mirror
class MemMap;
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
namespace gc {
namespace accounting {
@@ -108,8 +105,6 @@ class SpaceBitmap {
return index < bitmap_size_ / sizeof(intptr_t);
}
- void VisitRange(uintptr_t base, uintptr_t max, ObjectCallback* callback, void* arg) const;
-
class ClearVisitor {
public:
explicit ClearVisitor(SpaceBitmap* const bitmap)
@@ -134,13 +129,14 @@ class SpaceBitmap {
// TODO: Use lock annotations when clang is fixed.
// REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename Visitor>
- void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
+ void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
// max during the traversal.
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_);
+ template <typename Visitor>
+ void Walk(Visitor&& visitor)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Walk through the bitmaps in increasing address order, and find the object pointers that
// correspond to garbage objects. Call <callback> zero or more times with lists of these object
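
The parameter change from `const Visitor&` to `Visitor&&` in VisitMarkedRange, Walk, and HeapBitmap::Visit is what lets callers hand over temporaries and mutable (stateful) lambdas without copies. A small illustration, independent of the ART types (the function names here are invented):

#include <iostream>

// Made-up helpers, not ART code: they only demonstrate the parameter change.
template <typename Visitor>
void VisitConstRef(const Visitor& visitor) {  // old shape: const lvalue reference
  visitor(1);
}

template <typename Visitor>
void VisitForwardingRef(Visitor&& visitor) {  // new shape: forwarding reference
  visitor(1);
}

int main() {
  int sum = 0;
  auto counting = [&sum, calls = 0](int v) mutable { ++calls; sum += v; };
  // VisitConstRef(counting);        // would not compile: operator() is non-const
  VisitForwardingRef(counting);      // lvalue lambda binds to Visitor&
  VisitForwardingRef([&sum](int v) { sum += v; });  // temporary binds to Visitor&&
  std::cout << sum << std::endl;     // prints 2
  return 0;
}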
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8d3c62f3d0..9d672b1d34 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -583,23 +583,22 @@ class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
ObjPtr<mirror::Object> const holder_;
};
-void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
- auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- // Objects not on dirty or aged cards should never have references to newly allocated regions.
- if (collector->heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
- VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
- obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
- visitor,
- visitor);
- }
-}
-
void ConcurrentCopying::VerifyNoMissingCardMarks() {
+ auto visitor = [&](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_) {
+ // Objects not on dirty or aged cards should never have references to newly allocated regions.
+ if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
+ VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
+ obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+ internal_visitor, internal_visitor);
+ }
+ };
TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
- region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
+ region_space_->Walk(visitor);
{
ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
- heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
+ heap_->GetLiveBitmap()->Visit(visitor);
}
}
@@ -1212,34 +1211,6 @@ class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
- public:
- explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
- : collector_(collector) {}
- void operator()(mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjectCallback(obj, collector_);
- }
- static void ObjectCallback(mirror::Object* obj, void *arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(obj != nullptr);
- ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- space::RegionSpace* region_space = collector->RegionSpace();
- CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
- VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
- visitor,
- visitor);
- if (kUseBakerReadBarrier) {
- CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
- << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
- }
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
// Verify there's no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
Thread* self = Thread::Current();
@@ -1252,7 +1223,21 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
CHECK(!thread->GetIsGcMarking());
}
}
- VerifyNoFromSpaceRefsObjectVisitor visitor(this);
+
+ auto verify_no_from_space_refs_visitor = [&](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK(obj != nullptr);
+ space::RegionSpace* region_space = RegionSpace();
+ CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
+ VerifyNoFromSpaceRefsFieldVisitor visitor(this);
+ obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor,
+ visitor);
+ if (kUseBakerReadBarrier) {
+ CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
+ << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
+ }
+ };
// Roots.
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -1260,11 +1245,11 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
Runtime::Current()->VisitRoots(&ref_visitor);
}
// The to-space.
- region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
+ region_space_->WalkToSpace(verify_no_from_space_refs_visitor);
// Non-moving spaces.
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap_->GetMarkBitmap()->Visit(visitor);
+ heap_->GetMarkBitmap()->Visit(verify_no_from_space_refs_visitor);
}
// The alloc stack.
{
@@ -1275,7 +1260,7 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
if (obj != nullptr && obj->GetClass() != nullptr) {
// TODO: need to call this only if obj is alive?
ref_visitor(obj);
- visitor(obj);
+ verify_no_from_space_refs_visitor(obj);
}
}
}
@@ -1337,31 +1322,6 @@ class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
- public:
- explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
- : collector_(collector) {}
- void operator()(mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjectCallback(obj, collector_);
- }
- static void ObjectCallback(mirror::Object* obj, void *arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(obj != nullptr);
- ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- space::RegionSpace* region_space = collector->RegionSpace();
- CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
- collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
- AssertToSpaceInvariantFieldVisitor visitor(collector);
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
- visitor,
- visitor);
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
public:
RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
@@ -1599,8 +1559,14 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
region_space_->AddLiveBytes(to_ref, alloc_size);
}
if (ReadBarrier::kEnableToSpaceInvariantChecks) {
- AssertToSpaceInvariantObjectVisitor visitor(this);
- visitor(to_ref);
+ CHECK(to_ref != nullptr);
+ space::RegionSpace* region_space = RegionSpace();
+ CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
+ AssertToSpaceInvariantFieldVisitor visitor(this);
+ to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor,
+ visitor);
}
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 7b4340ee09..ab609906bf 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -181,9 +181,6 @@ class ConcurrentCopying : public GarbageCollector {
void VerifyGrayImmuneObjects()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- static void VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg)
- REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_);
void VerifyNoMissingCardMarks()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
@@ -348,7 +345,6 @@ class ConcurrentCopying : public GarbageCollector {
class ActivateReadBarrierEntrypointsCallback;
class ActivateReadBarrierEntrypointsCheckpoint;
class AssertToSpaceInvariantFieldVisitor;
- class AssertToSpaceInvariantObjectVisitor;
class AssertToSpaceInvariantRefsVisitor;
class ClearBlackPtrsVisitor;
class ComputeUnevacFromSpaceLiveRatioVisitor;
@@ -365,7 +361,6 @@ class ConcurrentCopying : public GarbageCollector {
class ThreadFlipVisitor;
class VerifyGrayImmuneObjectsVisitor;
class VerifyNoFromSpaceRefsFieldVisitor;
- class VerifyNoFromSpaceRefsObjectVisitor;
class VerifyNoFromSpaceRefsVisitor;
class VerifyNoMissingCardMarkVisitor;
diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h
new file mode 100644
index 0000000000..b6ccb277cd
--- /dev/null
+++ b/runtime/gc/heap-visit-objects-inl.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+#define ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+
+#include "heap.h"
+
+#include "base/mutex-inl.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/space/bump_pointer_space-walk-inl.h"
+#include "gc/space/region_space-inl.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+
+namespace art {
+namespace gc {
+
+// Visit objects when threads aren't suspended. If concurrent moving
+// GC, disable moving GC and suspend threads and then visit objects.
+template <typename Visitor>
+inline void Heap::VisitObjects(Visitor&& visitor) {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
+ if (IsGcConcurrentAndMoving()) {
+ // Concurrent moving GC. Just suspending threads isn't sufficient
+ // because a collection isn't one big pause and we could suspend
+ // threads in the middle (between phases) of a concurrent moving
+ // collection where it's not easily known which objects are alive
+ // (both the region space and the non-moving space) or which
+ // copies of objects to visit, and the to-space invariant could be
+ // easily broken. Visit objects while GC isn't running by using
+ // IncrementDisableMovingGC() and threads are suspended.
+ IncrementDisableMovingGC(self);
+ {
+ ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
+ ScopedSuspendAll ssa(__FUNCTION__);
+ VisitObjectsInternalRegionSpace(visitor);
+ VisitObjectsInternal(visitor);
+ }
+ DecrementDisableMovingGC(self);
+ } else {
+ // Since concurrent moving GC has thread suspension, also poison ObjPtr in the normal case to
+ // catch bugs.
+ self->PoisonObjectPointers();
+ // GCs can move objects, so don't allow this.
+ ScopedAssertNoThreadSuspension ants("Visiting objects");
+ DCHECK(region_space_ == nullptr);
+ VisitObjectsInternal(visitor);
+ self->PoisonObjectPointers();
+ }
+}
+
+template <typename Visitor>
+inline void Heap::VisitObjectsPaused(Visitor&& visitor) {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ VisitObjectsInternalRegionSpace(visitor);
+ VisitObjectsInternal(visitor);
+}
+
+// Visit objects in the region spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternalRegionSpace(Visitor&& visitor) {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ if (region_space_ != nullptr) {
+ DCHECK(IsGcConcurrentAndMoving());
+ if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
+ // Exclude the pre-zygote fork time where the semi-space collector
+ // calls VerifyHeapReferences() as part of the zygote compaction
+ // which then would call here without the moving GC disabled,
+ // which is fine.
+ bool is_thread_running_gc = false;
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *gc_complete_lock_);
+ is_thread_running_gc = self == thread_running_gc_;
+ }
+ // If we are not the thread running the GC in a GC exclusive region, then moving GC
+ // must be disabled.
+ DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
+ }
+ region_space_->Walk(visitor);
+ }
+}
+
+// Visit objects in the other spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternal(Visitor&& visitor) {
+ if (bump_pointer_space_ != nullptr) {
+ // Visit objects in bump pointer space.
+ bump_pointer_space_->Walk(visitor);
+ }
+ // TODO: Switch to standard begin and end to use a range-based loop.
+ for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
+ mirror::Object* const obj = it->AsMirrorPtr();
+
+ mirror::Class* kls = nullptr;
+ if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
+ // Below invariant is safe regardless of what space the Object is in.
+ // For speed reasons, only perform it when Rosalloc could possibly be used.
+ // (Disabled for read barriers because it never uses Rosalloc).
+ // (See the DCHECK in RosAllocSpace constructor).
+ if (!kUseReadBarrier) {
+ // Rosalloc has a race in allocation. Objects can be written into the allocation
+ // stack before their header writes are visible to this thread.
+ // See b/28790624 for more details.
+ //
+ // obj.class will either be pointing to a valid Class*, or it will point
+ // to a rosalloc free buffer.
+ //
+ // If it's pointing to a valid Class* then that Class's Class will be the
+ // ClassClass (whose Class is itself).
+ //
+ // A rosalloc free buffer will point to another rosalloc free buffer
+ // (or to null), and never to itself.
+ //
+ // Either way, dereferencing while it's not null is safe because it will
+ // always point to another valid pointer or to null.
+ mirror::Class* klsClass = kls->GetClass();
+
+ if (klsClass == nullptr) {
+ continue;
+ } else if (klsClass->GetClass() != klsClass) {
+ continue;
+ }
+ } else {
+ // Ensure the invariant is not broken for non-rosalloc cases.
+ DCHECK(Heap::rosalloc_space_ == nullptr)
+ << "unexpected rosalloc with read barriers";
+ DCHECK(kls->GetClass() != nullptr)
+ << "invalid object: class does not have a class";
+ DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
+ << "invalid object: class's class is not ClassClass";
+ }
+
+ // Avoid the race condition caused by the object not yet being written into the allocation
+ // stack or the class not yet being written in the object. Or, if
+ // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
+ visitor(obj);
+ }
+ }
+ {
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ GetLiveBitmap()->Visit<Visitor>(visitor);
+ }
+}
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
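
Further down, the heap.cc hunks delete helper classes such as InstanceCounter and InstanceCollector whose only purpose was to carry state across the void* boundary; with the templated VisitObjects above, that state lives in lambda captures. A reduced, self-contained sketch (MiniHeap and its fields are invented for illustration):

// Made-up MiniHeap; it stands in for the templated Heap::VisitObjects above.
#include <cstdint>
#include <vector>

struct Object { int klass; };

struct MiniHeap {
  std::vector<Object> objects;
  template <typename Visitor>
  void VisitObjects(Visitor&& visitor) {
    for (Object& obj : objects) {
      visitor(&obj);
    }
  }
};

int main() {
  MiniHeap heap{{{1}, {2}, {1}}};
  const int wanted = 1;
  uint64_t count = 0;
  // The captures hold what InstanceCounter used to smuggle through void*.
  heap.VisitObjects([&](Object* obj) {
    if (obj->klass == wanted) {
      ++count;
    }
  });
  return count == 2 ? 0 : 1;
}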
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ad4c0d5b2d..6ab98273ce 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -65,6 +65,7 @@
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "heap-inl.h"
+#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "java_vm_ext.h"
@@ -905,134 +906,6 @@ void Heap::CreateThreadPool() {
}
}
-// Visit objects when threads aren't suspended. If concurrent moving
-// GC, disable moving GC and suspend threads and then visit objects.
-void Heap::VisitObjects(ObjectCallback callback, void* arg) {
- Thread* self = Thread::Current();
- Locks::mutator_lock_->AssertSharedHeld(self);
- DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
- if (IsGcConcurrentAndMoving()) {
- // Concurrent moving GC. Just suspending threads isn't sufficient
- // because a collection isn't one big pause and we could suspend
- // threads in the middle (between phases) of a concurrent moving
- // collection where it's not easily known which objects are alive
- // (both the region space and the non-moving space) or which
- // copies of objects to visit, and the to-space invariant could be
- // easily broken. Visit objects while GC isn't running by using
- // IncrementDisableMovingGC() and threads are suspended.
- IncrementDisableMovingGC(self);
- {
- ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
- ScopedSuspendAll ssa(__FUNCTION__);
- VisitObjectsInternalRegionSpace(callback, arg);
- VisitObjectsInternal(callback, arg);
- }
- DecrementDisableMovingGC(self);
- } else {
- // Since concurrent moving GC has thread suspension, also poison ObjPtr the normal case to
- // catch bugs.
- self->PoisonObjectPointers();
- // GCs can move objects, so don't allow this.
- ScopedAssertNoThreadSuspension ants("Visiting objects");
- DCHECK(region_space_ == nullptr);
- VisitObjectsInternal(callback, arg);
- self->PoisonObjectPointers();
- }
-}
-
-// Visit objects when threads are already suspended.
-void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
- Thread* self = Thread::Current();
- Locks::mutator_lock_->AssertExclusiveHeld(self);
- VisitObjectsInternalRegionSpace(callback, arg);
- VisitObjectsInternal(callback, arg);
-}
-
-// Visit objects in the region spaces.
-void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
- Thread* self = Thread::Current();
- Locks::mutator_lock_->AssertExclusiveHeld(self);
- if (region_space_ != nullptr) {
- DCHECK(IsGcConcurrentAndMoving());
- if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
- // Exclude the pre-zygote fork time where the semi-space collector
- // calls VerifyHeapReferences() as part of the zygote compaction
- // which then would call here without the moving GC disabled,
- // which is fine.
- bool is_thread_running_gc = false;
- if (kIsDebugBuild) {
- MutexLock mu(self, *gc_complete_lock_);
- is_thread_running_gc = self == thread_running_gc_;
- }
- // If we are not the thread running the GC on in a GC exclusive region, then moving GC
- // must be disabled.
- DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
- }
- region_space_->Walk(callback, arg);
- }
-}
-
-// Visit objects in the other spaces.
-void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
- if (bump_pointer_space_ != nullptr) {
- // Visit objects in bump pointer space.
- bump_pointer_space_->Walk(callback, arg);
- }
- // TODO: Switch to standard begin and end to use ranged a based loop.
- for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
- mirror::Object* const obj = it->AsMirrorPtr();
-
- mirror::Class* kls = nullptr;
- if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
- // Below invariant is safe regardless of what space the Object is in.
- // For speed reasons, only perform it when Rosalloc could possibly be used.
- // (Disabled for read barriers because it never uses Rosalloc).
- // (See the DCHECK in RosAllocSpace constructor).
- if (!kUseReadBarrier) {
- // Rosalloc has a race in allocation. Objects can be written into the allocation
- // stack before their header writes are visible to this thread.
- // See b/28790624 for more details.
- //
- // obj.class will either be pointing to a valid Class*, or it will point
- // to a rosalloc free buffer.
- //
- // If it's pointing to a valid Class* then that Class's Class will be the
- // ClassClass (whose Class is itself).
- //
- // A rosalloc free buffer will point to another rosalloc free buffer
- // (or to null), and never to itself.
- //
- // Either way dereferencing while its not-null is safe because it will
- // always point to another valid pointer or to null.
- mirror::Class* klsClass = kls->GetClass();
-
- if (klsClass == nullptr) {
- continue;
- } else if (klsClass->GetClass() != klsClass) {
- continue;
- }
- } else {
- // Ensure the invariant is not broken for non-rosalloc cases.
- DCHECK(Heap::rosalloc_space_ == nullptr)
- << "unexpected rosalloc with read barriers";
- DCHECK(kls->GetClass() != nullptr)
- << "invalid object: class does not have a class";
- DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
- << "invalid object: class's class is not ClassClass";
- }
-
- // Avoid the race condition caused by the object not yet being written into the allocation
- // stack or the class not yet being written in the object. Or, if
- // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
- callback(obj, arg);
- }
- }
- {
- ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- GetLiveBitmap()->Walk(callback, arg);
- }
-}
-
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
space::ContinuousSpace* space2 = non_moving_space_;
@@ -1639,13 +1512,17 @@ void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
}
}
-void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
- reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
-}
-
void Heap::VerifyHeap() {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
+ auto visitor = [&](mirror::Object* obj) {
+ VerifyObjectBody(obj);
+ };
+ // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
+ // NO_THREAD_SAFETY_ANALYSIS.
+ auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
+ GetLiveBitmap()->Visit(visitor);
+ };
+ no_thread_safety_analysis();
}
void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
@@ -1918,138 +1795,84 @@ uint64_t Heap::GetBytesAllocatedEver() const {
return GetBytesFreedEver() + GetBytesAllocated();
}
-class InstanceCounter {
- public:
- InstanceCounter(const std::vector<Handle<mirror::Class>>& classes,
- bool use_is_assignable_from,
- uint64_t* counts)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
-
- static void Callback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
+void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
+ bool use_is_assignable_from,
+ uint64_t* counts) {
+ auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
- for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
- ObjPtr<mirror::Class> klass = instance_counter->classes_[i].Get();
- if (instance_counter->use_is_assignable_from_) {
+ for (size_t i = 0; i < classes.size(); ++i) {
+ ObjPtr<mirror::Class> klass = classes[i].Get();
+ if (use_is_assignable_from) {
if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
- ++instance_counter->counts_[i];
+ ++counts[i];
}
} else if (instance_class == klass) {
- ++instance_counter->counts_[i];
+ ++counts[i];
}
}
- }
-
- private:
- const std::vector<Handle<mirror::Class>>& classes_;
- bool use_is_assignable_from_;
- uint64_t* const counts_;
- DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
-};
-
-void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
- bool use_is_assignable_from,
- uint64_t* counts) {
- InstanceCounter counter(classes, use_is_assignable_from, counts);
- VisitObjects(InstanceCounter::Callback, &counter);
+ };
+ VisitObjects(instance_counter);
}
-class InstanceCollector {
- public:
- InstanceCollector(VariableSizedHandleScope& scope,
- Handle<mirror::Class> c,
- int32_t max_count,
- std::vector<Handle<mirror::Object>>& instances)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : scope_(scope),
- class_(c),
- max_count_(max_count),
- instances_(instances) {}
-
- static void Callback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- DCHECK(arg != nullptr);
- InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
- if (obj->GetClass() == instance_collector->class_.Get()) {
- if (instance_collector->max_count_ == 0 ||
- instance_collector->instances_.size() < instance_collector->max_count_) {
- instance_collector->instances_.push_back(instance_collector->scope_.NewHandle(obj));
- }
- }
- }
-
- private:
- VariableSizedHandleScope& scope_;
- Handle<mirror::Class> const class_;
- const uint32_t max_count_;
- std::vector<Handle<mirror::Object>>& instances_;
- DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
-};
-
void Heap::GetInstances(VariableSizedHandleScope& scope,
- Handle<mirror::Class> c,
+ Handle<mirror::Class> h_class,
int32_t max_count,
std::vector<Handle<mirror::Object>>& instances) {
- InstanceCollector collector(scope, c, max_count, instances);
- VisitObjects(&InstanceCollector::Callback, &collector);
-}
-
-class ReferringObjectsFinder {
- public:
- ReferringObjectsFinder(VariableSizedHandleScope& scope,
- Handle<mirror::Object> object,
- int32_t max_count,
- std::vector<Handle<mirror::Object>>& referring_objects)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : scope_(scope),
- object_(object),
- max_count_(max_count),
- referring_objects_(referring_objects) {}
-
- static void Callback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
- }
-
- // For bitmap Visit.
- // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
- // annotalysis on visitors.
- void operator()(ObjPtr<mirror::Object> o) const NO_THREAD_SAFETY_ANALYSIS {
- o->VisitReferences(*this, VoidFunctor());
- }
-
- // For Object::VisitReferences.
- void operator()(ObjPtr<mirror::Object> obj,
- MemberOffset offset,
- bool is_static ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
- if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
- referring_objects_.push_back(scope_.NewHandle(obj));
+ DCHECK_GE(max_count, 0);
+ auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj->GetClass() == h_class.Get()) {
+ if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
+ instances.push_back(scope.NewHandle(obj));
+ }
}
- }
-
- void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
- const {}
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
-
- private:
- VariableSizedHandleScope& scope_;
- Handle<mirror::Object> const object_;
- const uint32_t max_count_;
- std::vector<Handle<mirror::Object>>& referring_objects_;
- DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
-};
+ };
+ VisitObjects(instance_collector);
+}
void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
Handle<mirror::Object> o,
int32_t max_count,
std::vector<Handle<mirror::Object>>& referring_objects) {
+ class ReferringObjectsFinder {
+ public:
+ ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
+ Handle<mirror::Object> object_in,
+ int32_t max_count_in,
+ std::vector<Handle<mirror::Object>>& referring_objects_in)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ : scope_(scope_in),
+ object_(object_in),
+ max_count_(max_count_in),
+ referring_objects_(referring_objects_in) {}
+
+ // For Object::VisitReferences.
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
+ if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
+ referring_objects_.push_back(scope_.NewHandle(obj));
+ }
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+ private:
+ VariableSizedHandleScope& scope_;
+ Handle<mirror::Object> const object_;
+ const uint32_t max_count_;
+ std::vector<Handle<mirror::Object>>& referring_objects_;
+ DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
+ };
ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
- VisitObjects(&ReferringObjectsFinder::Callback, &finder);
+ auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ obj->VisitReferences(finder, VoidFunctor());
+ };
+ VisitObjects(referring_objects_finder);
}
void Heap::CollectGarbage(bool clear_soft_references) {
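
The InstanceCounter, InstanceCollector and ReferringObjectsFinder removals above all follow the same shape: a static trampoline taking (Object*, void*) plus a context struct becomes a capturing lambda handed to a template. A standalone before/after sketch of that conversion, using placeholder types rather than ART's:

#include <cstdint>
#include <vector>

struct Object {};  // Placeholder for mirror::Object.

// Old shape: a C-style callback plus an untyped context pointer.
using ObjectCallback = void(Object* obj, void* arg);
void VisitObjectsOld(ObjectCallback* callback, void* arg, const std::vector<Object*>& heap) {
  for (Object* obj : heap) {
    callback(obj, arg);
  }
}

// New shape: a template that accepts any callable, so per-visit state lives in the
// enclosing scope and is captured, instead of being smuggled through a void*.
template <typename Visitor>
void VisitObjectsNew(Visitor&& visitor, const std::vector<Object*>& heap) {
  for (Object* obj : heap) {
    visitor(obj);
  }
}

void CountNonNull(const std::vector<Object*>& heap) {
  uint64_t count = 0;
  VisitObjectsNew([&](Object* obj) { if (obj != nullptr) { ++count; } }, heap);
  (void)count;
}
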
@@ -2357,24 +2180,25 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
bin_mark_bitmap_(nullptr),
is_running_on_memory_tool_(is_running_on_memory_tool) {}
- void BuildBins(space::ContinuousSpace* space) {
+ void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
bin_live_bitmap_ = space->GetLiveBitmap();
bin_mark_bitmap_ = space->GetMarkBitmap();
- BinContext context;
- context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
- context.collector_ = this;
+ uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Note: This requires traversing the space in increasing order of object addresses.
- bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
+ auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
+ size_t bin_size = object_addr - prev;
+ // Add the bin spanning from the end of the previous object to the start of the current object.
+ AddBin(bin_size, prev);
+ prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
+ };
+ bin_live_bitmap_->Walk(visitor);
// Add the last bin which spans after the last object to the end of the space.
- AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
+ AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
}
private:
- struct BinContext {
- uintptr_t prev_; // The end of the previous object.
- ZygoteCompactingCollector* collector_;
- };
// Maps from bin sizes to locations.
std::multimap<size_t, uintptr_t> bins_;
// Live bitmap of the space which contains the bins.
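
BuildBins above drops the BinContext struct by capturing the running `prev` address by reference, so state carries across visits without a context pointer. A toy sketch of that idea (object addresses and sizes passed as plain vectors rather than walked from a real bitmap):

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

// Records the free gaps between consecutive (sorted) object addresses, plus the tail gap.
void BuildGapBins(const std::vector<uintptr_t>& addrs,   // object start addresses, ascending
                  const std::vector<size_t>& sizes,      // rounded-up object sizes
                  uintptr_t space_begin,
                  uintptr_t space_end,
                  std::multimap<size_t, uintptr_t>* bins) {
  uintptr_t prev = space_begin;
  auto visitor = [&](size_t i) {
    bins->emplace(addrs[i] - prev, prev);  // bin from the previous object's end to this start
    prev = addrs[i] + sizes[i];            // advance past the current object
  };
  for (size_t i = 0; i < addrs.size(); ++i) {
    visitor(i);
  }
  bins->emplace(space_end - prev, prev);   // last bin spans to the end of the space
}
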
@@ -2383,18 +2207,6 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
const bool is_running_on_memory_tool_;
- static void Callback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(arg != nullptr);
- BinContext* context = reinterpret_cast<BinContext*>(arg);
- ZygoteCompactingCollector* collector = context->collector_;
- uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
- size_t bin_size = object_addr - context->prev_;
- // Add the bin consisting of the end of the previous object to the start of the current object.
- collector->AddBin(bin_size, context->prev_);
- context->prev_ = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
- }
-
void AddBin(size_t size, uintptr_t position) {
if (is_running_on_memory_tool_) {
MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
@@ -2935,7 +2747,7 @@ class ScanVisitor {
class VerifyReferenceVisitor : public SingleRootVisitor {
public:
VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
size_t GetFailureCount() const {
@@ -3089,8 +2901,7 @@ class VerifyObjectVisitor {
VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
- void operator()(mirror::Object* obj)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3098,12 +2909,6 @@ class VerifyObjectVisitor {
obj->VisitReferences(visitor, visitor);
}
- static void VisitCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
- visitor->operator()(obj);
- }
-
void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3175,7 +2980,7 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) {
// 2. Allocated during the GC (pre sweep GC verification).
// We don't want to verify the objects in the live stack since they themselves may be
// pointing to dead objects if they are not reachable.
- VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
+ VisitObjectsPaused(visitor);
// Verify the roots:
visitor.VerifyRoots();
if (visitor.GetFailureCount() > 0) {
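
VerifyHeapReferences now passes the VerifyObjectVisitor object itself to VisitObjectsPaused; with the template signature, any callable with a suitable operator() works, not just function pointers. A minimal sketch with placeholder types:

#include <cstddef>
#include <vector>

struct Object {};  // Placeholder for mirror::Object.

template <typename Visitor>
void VisitObjectsPausedLike(Visitor&& visitor, const std::vector<Object*>& heap) {
  for (Object* obj : heap) {
    visitor(obj);
  }
}

// A stateful functor works just as well as a lambda; no VisitCallback trampoline is needed.
class Verifier {
 public:
  void operator()(Object* /*obj*/) { ++visited_; }
  size_t VisitedCount() const { return visited_; }
 private:
  size_t visited_ = 0;
};

void VerifyAll(const std::vector<Object*>& heap) {
  Verifier verifier;
  VisitObjectsPausedLike(verifier, heap);  // passed by lvalue reference; state survives the walk
  (void)verifier.VisitedCount();
}
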
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9e55081b63..e172d2d825 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
+#include "base/mutex.h"
#include "base/time_utils.h"
#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
@@ -51,9 +52,6 @@ class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
namespace mirror {
class Class;
class Object;
@@ -250,10 +248,12 @@ class Heap {
}
// Visit all of the live objects in the heap.
- void VisitObjects(ObjectCallback callback, void* arg)
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
- void VisitObjectsPaused(ObjectCallback callback, void* arg)
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
@@ -1007,9 +1007,6 @@ class Heap {
size_t GetPercentFree();
- static void VerificationCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_);
-
// Swap the allocation stack with the live stack.
void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1051,10 +1048,12 @@ class Heap {
// Trim 0 pages at the end of reference tables.
void TrimIndirectReferenceTables(Thread* self);
- void VisitObjectsInternal(ObjectCallback callback, void* arg)
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
- void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
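
Because VisitObjects and VisitObjectsPaused are now templates, their bodies can no longer live in heap.cc; they move into the new gc/heap-visit-objects-inl.h, which only the visiting translation units include (image_writer.cc, hprof.cc, ti_class.cc and ti_heap.cc in this change). A sketch of that header split, with made-up file and type names:

// --- widget.h: declaration only, cheap to include broadly ---
#ifndef WIDGET_H_
#define WIDGET_H_
#include <vector>

struct Obj {};

class Widget {
 public:
  template <typename Visitor>
  void VisitAll(Visitor&& visitor);  // Defined in widget-visit-inl.h.
 private:
  std::vector<Obj*> objects_;
};
#endif  // WIDGET_H_

// --- widget-visit-inl.h: included only where visiting actually happens ---
#ifndef WIDGET_VISIT_INL_H_
#define WIDGET_VISIT_INL_H_
#include "widget.h"

template <typename Visitor>
void Widget::VisitAll(Visitor&& visitor) {
  for (Obj* obj : objects_) {
    visitor(obj);  // Instantiated per call site, so the caller's lambda can be inlined.
  }
}
#endif  // WIDGET_VISIT_INL_H_
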
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
new file mode 100644
index 0000000000..5d05ea2d65
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+
+#include "bump_pointer_space.h"
+
+#include "base/bit_utils.h"
+#include "mirror/object-inl.h"
+#include "thread-current-inl.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+template <typename Visitor>
+inline void BumpPointerSpace::Walk(Visitor&& visitor) {
+ uint8_t* pos = Begin();
+ uint8_t* end = End();
+ uint8_t* main_end = pos;
+ // Internal indirection with NO_THREAD_SAFETY_ANALYSIS. Ideally, we'd like to have an annotation
+ // like
+ // REQUIRES_AS(visitor.operator(mirror::Object*))
+ // on Walk to expose the interprocedural nature of locks here without having to duplicate the
+ // function.
+ //
+ // NO_THREAD_SAFETY_ANALYSIS is a workaround. The downside, of course, is that the analysis
+ // no longer complains at the call site. However, that is strictly no worse than the
+ // ObjectCallback version it replaces.
+ auto no_thread_safety_analysis_visit = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+ visitor(obj);
+ };
+
+ {
+ MutexLock mu(Thread::Current(), block_lock_);
+ // If we have 0 blocks then we need to update the main header since we have bump pointer style
+ // allocation into an unbounded region (actually bounded by Capacity()).
+ if (num_blocks_ == 0) {
+ UpdateMainBlock();
+ }
+ main_end = Begin() + main_block_size_;
+ if (num_blocks_ == 0) {
+ // We don't have any other blocks; this means someone else may be allocating into the main
+ // block. In this case, we don't want to try to visit the other blocks after the main block,
+ // since these could actually be part of the main block.
+ end = main_end;
+ }
+ }
+ // Walk all of the objects in the main block first.
+ while (pos < main_end) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+ // No read barrier because obj may not be a valid object.
+ if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
+ // There is a race condition where a thread has just allocated an object but not set the
+ // class. We can't know the size of this object, so we don't visit it and exit the function
+ // since there is guaranteed to be no other blocks.
+ return;
+ } else {
+ no_thread_safety_analysis_visit(obj);
+ pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
+ }
+ }
+ // Walk the other blocks (currently only TLABs).
+ while (pos < end) {
+ BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
+ size_t block_size = header->size_;
+ pos += sizeof(BlockHeader); // Skip the header so that we know where the objects begin.
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+ const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+ CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
+ // We don't know how many objects are allocated in the current block. When we hit a null class
+ // assume it's the end. TODO: Have a thread update the header when it flushes the block?
+ // No read barrier because obj may not be a valid object.
+ while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ no_thread_safety_analysis_visit(obj);
+ obj = GetNextObject(obj);
+ }
+ pos += block_size;
+ }
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
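
A hedged usage sketch for the new template Walk follows. It assumes the ART headers and lock macros shown above; CountObjectsIn is a hypothetical helper written for illustration, not part of this change:

#include <cstddef>

#include "gc/space/bump_pointer_space-walk-inl.h"

// Counts the objects reachable by a straight walk of a bump-pointer space.
size_t CountObjectsIn(art::gc::space::BumpPointerSpace* space)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  size_t count = 0;
  space->Walk([&](art::mirror::Object* obj) {
    if (obj != nullptr) {
      ++count;
    }
  });
  return count;
}
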
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index bb1ede15f2..5d91f4bf8e 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -153,58 +153,6 @@ uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
return storage;
}
-void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
- uint8_t* pos = Begin();
- uint8_t* end = End();
- uint8_t* main_end = pos;
- {
- MutexLock mu(Thread::Current(), block_lock_);
- // If we have 0 blocks then we need to update the main header since we have bump pointer style
- // allocation into an unbounded region (actually bounded by Capacity()).
- if (num_blocks_ == 0) {
- UpdateMainBlock();
- }
- main_end = Begin() + main_block_size_;
- if (num_blocks_ == 0) {
- // We don't have any other blocks, this means someone else may be allocating into the main
- // block. In this case, we don't want to try and visit the other blocks after the main block
- // since these could actually be part of the main block.
- end = main_end;
- }
- }
- // Walk all of the objects in the main block first.
- while (pos < main_end) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- // No read barrier because obj may not be a valid object.
- if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
- // There is a race condition where a thread has just allocated an object but not set the
- // class. We can't know the size of this object, so we don't visit it and exit the function
- // since there is guaranteed to be not other blocks.
- return;
- } else {
- callback(obj, arg);
- pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
- }
- }
- // Walk the other blocks (currently only TLABs).
- while (pos < end) {
- BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
- size_t block_size = header->size_;
- pos += sizeof(BlockHeader); // Skip the header so that we know where the objects
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
- CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
- // We don't know how many objects are allocated in the current block. When we hit a null class
- // assume its the end. TODO: Have a thread update the header when it flushes the block?
- // No read barrier because obj may not be a valid object.
- while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
- callback(obj, arg);
- obj = GetNextObject(obj);
- }
- pos += block_size;
- }
-}
-
accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 566dc5dc40..4197d0cd3f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -25,9 +25,6 @@ namespace mirror {
class Object;
}
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
namespace gc {
namespace collector {
@@ -149,8 +146,10 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
}
// Go through all of the blocks and visit the continuous objects.
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void Walk(Visitor&& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!block_lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 2fba4a8bd1..a3b53b4cad 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -184,8 +184,8 @@ uint64_t RegionSpace::GetObjectsAllocatedInternal() {
return bytes;
}
-template<bool kToSpaceOnly>
-void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
+template<bool kToSpaceOnly, typename Visitor>
+void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
// issues (the classloader classes lock and the monitor lock). We
// call this with threads suspended.
@@ -201,7 +201,7 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
DCHECK(obj->GetClass() != nullptr);
- callback(obj, arg);
+ visitor(obj);
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
@@ -215,14 +215,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
GetLiveBitmap()->VisitMarkedRange(
reinterpret_cast<uintptr_t>(pos),
reinterpret_cast<uintptr_t>(top),
- [callback, arg](mirror::Object* obj) {
- callback(obj, arg);
- });
+ visitor);
} else {
while (pos < top) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
- callback(obj, arg);
+ visitor(obj);
pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
} else {
break;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 6412158a77..77d76fb93b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -17,7 +17,8 @@
#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
-#include "object_callbacks.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "space.h"
#include "thread.h"
@@ -152,14 +153,14 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
// Go through all of the blocks and visit the continuous objects.
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES(Locks::mutator_lock_) {
- WalkInternal<false>(callback, arg);
+ template <typename Visitor>
+ ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
+ WalkInternal<false /* kToSpaceOnly */>(visitor);
}
-
- void WalkToSpace(ObjectCallback* callback, void* arg)
+ template <typename Visitor>
+ ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
REQUIRES(Locks::mutator_lock_) {
- WalkInternal<true>(callback, arg);
+ WalkInternal<true /* kToSpaceOnly */>(visitor);
}
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
@@ -247,8 +248,8 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
private:
RegionSpace(const std::string& name, MemMap* mem_map);
- template<bool kToSpaceOnly>
- void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;
+ template<bool kToSpaceOnly, typename Visitor>
+ ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
class Region {
public:
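
WalkInternal above now carries the to-space filter as a compile-time bool alongside the forwarded visitor. A toy sketch of that shape, with placeholder region and object types:

#include <cstddef>
#include <vector>

struct Region {
  bool in_to_space;
  std::vector<int*> objects;
};

template <bool kToSpaceOnly, typename Visitor>
void WalkRegions(const std::vector<Region>& regions, Visitor&& visitor) {
  for (const Region& r : regions) {
    if (kToSpaceOnly && !r.in_to_space) {
      continue;  // When kToSpaceOnly is false, this test folds away at compile time.
    }
    for (int* obj : r.objects) {
      visitor(obj);
    }
  }
}

void Example(const std::vector<Region>& regions) {
  size_t visited = 0;
  WalkRegions</*kToSpaceOnly=*/true>(regions, [&](int* /*obj*/) { ++visited; });
  (void)visited;
}
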
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index ec860c76f2..f428bc2751 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -52,6 +52,7 @@
#include "gc/allocation_record.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
#include "gc/space/space.h"
#include "globals.h"
#include "jdwp/jdwp.h"
@@ -485,13 +486,6 @@ class Hprof : public SingleRootVisitor {
}
private:
- static void VisitObjectCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(obj != nullptr);
- DCHECK(arg != nullptr);
- reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj);
- }
-
void DumpHeapObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -534,8 +528,11 @@ class Hprof : public SingleRootVisitor {
simple_roots_.clear();
runtime->VisitRoots(this);
runtime->VisitImageRoots(this);
- runtime->GetHeap()->VisitObjectsPaused(VisitObjectCallback, this);
-
+ auto dump_object = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ DumpHeapObject(obj);
+ };
+ runtime->GetHeap()->VisitObjectsPaused(dump_object);
output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_END, kHprofTime);
output_->EndRecord();
}
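
The hprof hunk above swaps a static VisitObjectCallback, which cast its void* argument back to Hprof, for a lambda that captures `this` and calls the member directly. The same shape in isolation, with placeholder types:

#include <vector>

struct Obj {};  // Placeholder for mirror::Object.

class DumperLike {
 public:
  void DumpAll(const std::vector<Obj*>& heap) {
    // Captures `this`, so no reinterpret_cast from void* and no static trampoline.
    auto dump_object = [this](Obj* obj) { DumpOne(obj); };
    for (Obj* obj : heap) {
      dump_object(obj);
    }
  }
 private:
  void DumpOne(Obj* /*obj*/) {}
};
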
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 7027410ca6..aee4b19eeb 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -26,7 +26,6 @@
#include "object-inl.h"
#include "object_array.h"
#include "object_array-inl.h"
-#include "object_callbacks.h"
#include "stack_trace_element.h"
#include "string.h"
#include "utils.h"
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index ea5e69821b..9eccb5a280 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -25,9 +25,6 @@ namespace mirror {
template<class MirrorType> class HeapReference;
} // namespace mirror
-// A callback for visiting an object in the heap.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
class IsMarkedVisitor {
public:
virtual ~IsMarkedVisitor() {}
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index b8e79555ae..99dfcfe665 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -46,6 +46,7 @@
#include "events-inl.h"
#include "fixed_up_dex_file.h"
#include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
#include "gc_root.h"
#include "handle.h"
#include "jni_env_ext-inl.h"
@@ -544,21 +545,15 @@ struct ClassCallback : public art::ClassLoadCallback {
LOG(FATAL) << "Unreachable";
}
- static void AllObjectsCallback(art::mirror::Object* obj, void* arg)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- HeapFixupVisitor* hfv = reinterpret_cast<HeapFixupVisitor*>(arg);
-
- // Visit references, not native roots.
- obj->VisitReferences<false>(*hfv, *hfv);
- }
-
private:
const art::mirror::Class* input_;
art::mirror::Class* output_;
};
HeapFixupVisitor hfv(input, output);
- art::Runtime::Current()->GetHeap()->VisitObjectsPaused(HeapFixupVisitor::AllObjectsCallback,
- &hfv);
+ auto object_visitor = [&](art::mirror::Object* obj) {
+ obj->VisitReferences<false>(hfv, hfv); // Visit references, not native roots.
+ };
+ art::Runtime::Current()->GetHeap()->VisitObjectsPaused(object_visitor);
}
// A set of all the temp classes we have handed out. We have to fix up references to these.
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 29658d9154..91fdaca427 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -22,6 +22,7 @@
#include "base/mutex.h"
#include "class_linker.h"
#include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
#include "gc_root-inl.h"
#include "java_frame_root_info.h"
#include "jni_env_ext.h"
@@ -30,7 +31,6 @@
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "object_callbacks.h"
#include "object_tagging.h"
#include "obj_ptr-inl.h"
#include "primitive.h"
@@ -653,33 +653,25 @@ void HeapUtil::Unregister() {
art::Runtime::Current()->RemoveSystemWeakHolder(&gIndexCachingTable);
}
-template <typename Callback>
-struct IterateThroughHeapData {
- IterateThroughHeapData(Callback _cb,
- ObjectTagTable* _tag_table,
- jvmtiEnv* _env,
- art::ObjPtr<art::mirror::Class> klass,
- jint _heap_filter,
- const jvmtiHeapCallbacks* _callbacks,
- const void* _user_data)
- : cb(_cb),
- tag_table(_tag_table),
- heap_filter(_heap_filter),
- filter_klass(klass),
- env(_env),
- callbacks(_callbacks),
- user_data(_user_data),
- stop_reports(false) {
+template <typename T>
+static jvmtiError DoIterateThroughHeap(T fn,
+ jvmtiEnv* env,
+ ObjectTagTable* tag_table,
+ jint heap_filter_int,
+ jclass klass,
+ const jvmtiHeapCallbacks* callbacks,
+ const void* user_data) {
+ if (callbacks == nullptr) {
+ return ERR(NULL_POINTER);
}
- static void ObjectCallback(art::mirror::Object* obj, void* arg)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- IterateThroughHeapData* ithd = reinterpret_cast<IterateThroughHeapData*>(arg);
- ithd->ObjectCallback(obj);
- }
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self); // Now we know we have the shared lock.
- void ObjectCallback(art::mirror::Object* obj)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool stop_reports = false;
+ const HeapFilter heap_filter(heap_filter_int);
+ art::ObjPtr<art::mirror::Class> filter_klass = soa.Decode<art::mirror::Class>(klass);
+ auto visitor = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Early return, as we can't really stop visiting.
if (stop_reports) {
return;
@@ -713,7 +705,7 @@ struct IterateThroughHeapData {
}
jlong saved_tag = tag;
- jint ret = cb(obj, callbacks, class_tag, size, &tag, length, const_cast<void*>(user_data));
+ jint ret = fn(obj, callbacks, class_tag, size, &tag, length, const_cast<void*>(user_data));
if (tag != saved_tag) {
tag_table->Set(obj, tag);
@@ -734,44 +726,8 @@ struct IterateThroughHeapData {
if (!stop_reports) {
stop_reports = ReportPrimitiveField::Report(obj, tag_table, callbacks, user_data);
}
- }
-
- Callback cb;
- ObjectTagTable* tag_table;
- const HeapFilter heap_filter;
- art::ObjPtr<art::mirror::Class> filter_klass;
- jvmtiEnv* env;
- const jvmtiHeapCallbacks* callbacks;
- const void* user_data;
-
- bool stop_reports;
-};
-
-template <typename T>
-static jvmtiError DoIterateThroughHeap(T fn,
- jvmtiEnv* env,
- ObjectTagTable* tag_table,
- jint heap_filter,
- jclass klass,
- const jvmtiHeapCallbacks* callbacks,
- const void* user_data) {
- if (callbacks == nullptr) {
- return ERR(NULL_POINTER);
- }
-
- art::Thread* self = art::Thread::Current();
- art::ScopedObjectAccess soa(self); // Now we know we have the shared lock.
-
- using Iterator = IterateThroughHeapData<T>;
- Iterator ithd(fn,
- tag_table,
- env,
- soa.Decode<art::mirror::Class>(klass),
- heap_filter,
- callbacks,
- user_data);
-
- art::Runtime::Current()->GetHeap()->VisitObjects(Iterator::ObjectCallback, &ithd);
+ };
+ art::Runtime::Current()->GetHeap()->VisitObjects(visitor);
return ERR(NONE);
}
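
DoIterateThroughHeap above keeps its early-exit behaviour by capturing stop_reports by reference: the heap walk itself always runs to completion, but the visitor stops doing work once a callback asks it to. A toy sketch of that control flow, with placeholder types:

#include <vector>

struct Obj {};  // Placeholder for art::mirror::Object.

template <typename Visitor>
void VisitAll(const std::vector<Obj*>& heap, Visitor&& visitor) {
  for (Obj* obj : heap) {
    visitor(obj);  // The walk cannot be aborted from inside the visitor.
  }
}

int ReportUpTo(const std::vector<Obj*>& heap, int limit) {
  int reported = 0;
  bool stop_reports = false;
  VisitAll(heap, [&](Obj* obj) {
    if (stop_reports) {
      return;  // Keep visiting, but stop reporting.
    }
    (void)obj;
    ++reported;
    stop_reports = (reported >= limit);
  });
  return reported;
}
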