Clean up the locks around Heap::VisitObjects().
This is so that we can support suspending all threads when visiting
objects in the presence of a concurrent, moving collector.
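
For reference, the caller-side pattern this change converges on, sketched
once using the names from the first image_writer.cc hunk below (VisitObjects()
is assumed to now take care of any locking or thread suspension it needs
itself):

  // Before: each caller wrapped the walk in an explicit heap_bitmap_lock_
  // scope.
  {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap->VisitObjects(CountStringsCallback, &total_strings);
  }

  // After: a plain call; the heap is expected to perform whatever
  // synchronization a concurrent, moving collector will require.
  heap->VisitObjects(CountStringsCallback, &total_strings);
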
Bug: 12687968
Change-Id: Icc8e60630465afde948ebc6ea91d4ebaff5d7837
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 8c7d611..ba02338 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -501,19 +501,14 @@
size_t total_strings = 0;
gc::Heap* heap = Runtime::Current()->GetHeap();
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- {
- ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- heap->VisitObjects(CountStringsCallback, &total_strings); // Count the strings.
- }
+ // Count the strings.
+ heap->VisitObjects(CountStringsCallback, &total_strings);
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
auto strings = hs.NewHandle(cl->AllocStringArray(self, total_strings));
StringCollector string_collector(strings, 0U);
- {
- ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- // Read strings into the array.
- heap->VisitObjects(StringCollector::Callback, &string_collector);
- }
+ // Read strings into the array.
+ heap->VisitObjects(StringCollector::Callback, &string_collector);
// Some strings could have gotten freed if AllocStringArray caused a GC.
CHECK_LE(string_collector.GetIndex(), total_strings);
total_strings = string_collector.GetIndex();
@@ -595,7 +590,6 @@
}
void ImageWriter::ComputeEagerResolvedStrings() {
- ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
}
@@ -668,7 +662,6 @@
void ImageWriter::CheckNonImageClassesRemoved() {
if (compiler_driver_.GetImageClasses() != nullptr) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
}
}
@@ -869,17 +862,14 @@
// know where image_roots is going to end up
image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- {
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- // TODO: Image spaces only?
- DCHECK_LT(image_end_, image_->Size());
- image_objects_offset_begin_ = image_end_;
- // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
- heap->VisitObjects(WalkFieldsCallback, this);
- // Transform each object's bin slot into an offset which will be used to do the final copy.
- heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
- DCHECK(saved_hashes_map_.empty()); // All binslot hashes should've been put into vector by now.
- }
+ // TODO: Image spaces only?
+ DCHECK_LT(image_end_, image_->Size());
+ image_objects_offset_begin_ = image_end_;
+ // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
+ heap->VisitObjects(WalkFieldsCallback, this);
+ // Transform each object's bin slot into an offset which will be used to do the final copy.
+ heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
+ DCHECK(saved_hashes_map_.empty()); // All binslot hashes should've been put into vector by now.
DCHECK_GT(image_end_, GetBinSizeSum());
@@ -920,7 +910,6 @@
// TODO: heap validation can't handle this fix up pass
heap->DisableObjectValidation();
// TODO: Image spaces only?
- WriterMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
heap->VisitObjects(CopyAndFixupObjectsCallback, this);
// Fix up the objects that previously had hash codes.
for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {