-rw-r--r--  runtime/gc/collector/mark_compact.cc | 23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 336b143771..993e0dbb17 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -581,7 +581,7 @@ void MarkCompact::InitializePhase() {
   bytes_scanned_ = 0;
   freed_objects_ = 0;
   // The first buffer is used by gc-thread.
-  compaction_buffer_counter_ = 1;
+  compaction_buffer_counter_.store(1, std::memory_order_relaxed);
   from_space_slide_diff_ = from_space_begin_ - bump_pointer_space_->Begin();
   black_allocations_begin_ = bump_pointer_space_->Limit();
   walk_super_class_cache_ = nullptr;
@@ -2791,6 +2791,7 @@ void MarkCompact::CompactionPause() {
     RecordFree(ObjectBytePair(freed_objects_, freed_bytes));
   } else {
     DCHECK_EQ(compaction_in_progress_count_.load(std::memory_order_relaxed), 0u);
+    DCHECK_EQ(compaction_buffer_counter_.load(std::memory_order_relaxed), 1);
     if (!use_uffd_sigbus_) {
       // We must start worker threads before resuming mutators to avoid deadlocks.
       heap_->GetThreadPool()->StartWorkers(thread_running_gc_);
@@ -3036,16 +3037,8 @@ bool MarkCompact::SigbusHandler(siginfo_t* info) {
         ConcurrentlyProcessMovingPage<kMinorFaultMode>(
             fault_page, nullptr, nr_moving_space_used_pages);
       } else {
-        uint8_t* buf = self->GetThreadLocalGcBuffer();
-        if (buf == nullptr) {
-          uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
-          // The buffer-map is one page bigger as the first buffer is used by GC-thread.
-          CHECK_LE(idx, kMutatorCompactionBufferCount);
-          buf = compaction_buffers_map_.Begin() + idx * kPageSize;
-          DCHECK(compaction_buffers_map_.HasAddress(buf));
-          self->SetThreadLocalGcBuffer(buf);
-        }
-        ConcurrentlyProcessMovingPage<kCopyMode>(fault_page, buf, nr_moving_space_used_pages);
+        ConcurrentlyProcessMovingPage<kCopyMode>(
+            fault_page, self->GetThreadLocalGcBuffer(), nr_moving_space_used_pages);
       }
       return true;
     } else {
@@ -3154,6 +3147,14 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
   if (kMode == kMinorFaultMode) {
     DCHECK_EQ(buf, nullptr);
     buf = shadow_to_space_map_.Begin() + page_idx * kPageSize;
+  } else if (UNLIKELY(buf == nullptr)) {
+    DCHECK_EQ(kMode, kCopyMode);
+    uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
+    // The buffer-map is one page bigger as the first buffer is used by GC-thread.
+    CHECK_LE(idx, kMutatorCompactionBufferCount);
+    buf = compaction_buffers_map_.Begin() + idx * kPageSize;
+    DCHECK(compaction_buffers_map_.HasAddress(buf));
+    Thread::Current()->SetThreadLocalGcBuffer(buf);
+  }
   if (fault_page < post_compact_end_) {
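
In short, the patch moves the lazy hand-out of per-mutator compaction buffers out of SigbusHandler and into ConcurrentlyProcessMovingPage (copy mode only), replaces the plain assignment that resets compaction_buffer_counter_ in InitializePhase with an explicit relaxed store, and adds a DCHECK in CompactionPause that the counter is back at 1 when no compaction is in progress. For readers unfamiliar with the hand-out scheme, the standalone C++ sketch below illustrates the general pattern only; it is not ART code, and every name in it (kBufferBytes, kMutatorBuffers, gBufferCounter, GetOrAssignBuffer) is hypothetical.

    // Minimal sketch: hand out page-sized buffers from a pre-mapped region to
    // mutator threads on first use, caching the result in a thread-local.
    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kBufferBytes = 4096;      // stand-in for one page
    constexpr uint16_t kMutatorBuffers = 512;  // stand-in for the mutator buffer count

    // Backing region: one extra slot at index 0 is reserved for the GC thread,
    // so the region holds kMutatorBuffers + 1 buffers.
    std::vector<uint8_t> gBufferRegion((kMutatorBuffers + 1) * kBufferBytes);

    // Starts at 1 each cycle because slot 0 belongs to the GC thread.
    std::atomic<uint16_t> gBufferCounter{1};

    // Cached per-thread buffer, analogous in spirit to a thread-local GC buffer.
    thread_local uint8_t* tBuffer = nullptr;

    uint8_t* GetOrAssignBuffer() {
      if (tBuffer == nullptr) {
        // Relaxed ordering is enough: the index only has to be unique, and the
        // buffer's contents are never published to other threads through it.
        uint16_t idx = gBufferCounter.fetch_add(1, std::memory_order_relaxed);
        assert(idx <= kMutatorBuffers && "ran out of mutator compaction buffers");
        tBuffer = gBufferRegion.data() + static_cast<size_t>(idx) * kBufferBytes;
      }
      return tBuffer;
    }

    int main() {
      // The first call claims slot 1; later calls on the same thread reuse it.
      uint8_t* first = GetOrAssignBuffer();
      uint8_t* second = GetOrAssignBuffer();
      assert(first == second);
      return 0;
    }

Because the buffers are pre-mapped, the only cross-thread coordination required is a unique index, which is why relaxed ordering suffices both in this sketch and in the fetch_add used by the patch.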