summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--runtime/gc/collector/mark_compact-inl.h7
-rw-r--r--runtime/gc/collector/mark_compact.cc12
-rw-r--r--runtime/gc/collector/mark_compact.h5
3 files changed, 2 insertions, 22 deletions
diff --git a/runtime/gc/collector/mark_compact-inl.h b/runtime/gc/collector/mark_compact-inl.h
index 021d33f5d7..447dd256d0 100644
--- a/runtime/gc/collector/mark_compact-inl.h
+++ b/runtime/gc/collector/mark_compact-inl.h
@@ -192,13 +192,6 @@ uint32_t MarkCompact::LiveWordsBitmap<kAlignment>::FindNthLiveWordOffset(size_t
UNREACHABLE();
}
-inline bool MarkCompact::IsOnAllocStack(mirror::Object* ref) {
- // TODO: Explain why this is here. What release operation does it pair with?
- std::atomic_thread_fence(std::memory_order_acquire);
- accounting::ObjectStack* stack = heap_->GetAllocationStack();
- return stack->Contains(ref);
-}
-
inline void MarkCompact::UpdateRef(mirror::Object* obj,
MemberOffset offset,
uint8_t* begin,
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 25f30ec485..2a1bbc4738 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -453,7 +453,6 @@ MarkCompact::MarkCompact(Heap* heap)
uffd_(kFdUnused),
sigbus_in_progress_count_{kSigbusCounterCompactionDoneMask, kSigbusCounterCompactionDoneMask},
compacting_(false),
- marking_done_(false),
uffd_initialized_(false),
clamp_info_map_status_(ClampInfoStatus::kClampInfoNotDone) {
if (kIsDebugBuild) {
@@ -1108,7 +1107,6 @@ void MarkCompact::MarkingPause() {
// Enable the reference processing slow path, needs to be done with mutators
// paused since there is no lock in the GetReferent fast path.
heap_->GetReferenceProcessor()->EnableSlowPath();
- marking_done_ = true;
}
void MarkCompact::SweepSystemWeaks(Thread* self, Runtime* runtime, const bool paused) {
@@ -4043,9 +4041,7 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
}
return (is_black || moving_space_bitmap_->Test(obj)) ? obj : nullptr;
} else if (non_moving_space_bitmap_->HasAddress(obj)) {
- if (non_moving_space_bitmap_->Test(obj)) {
- return obj;
- }
+ return non_moving_space_bitmap_->Test(obj) ? obj : nullptr;
} else if (immune_spaces_.ContainsObject(obj)) {
return obj;
} else {
@@ -4055,9 +4051,7 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
accounting::LargeObjectBitmap* los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
if (los_bitmap->HasAddress(obj)) {
DCHECK(IsAlignedParam(obj, space::LargeObjectSpace::ObjectAlignment()));
- if (los_bitmap->Test(obj)) {
- return obj;
- }
+ return los_bitmap->Test(obj) ? obj : nullptr;
} else {
// The given obj is not in any of the known spaces, so return null. This could
// happen for instance in interpreter caches wherein a concurrent updation
@@ -4067,7 +4061,6 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
return nullptr;
}
}
- return marking_done_ && IsOnAllocStack(obj) ? obj : nullptr;
}
bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
@@ -4091,7 +4084,6 @@ void MarkCompact::FinishPhase() {
GetCurrentIteration()->SetScannedBytes(bytes_scanned_);
bool is_zygote = Runtime::Current()->IsZygote();
compacting_ = false;
- marking_done_ = false;
ZeroAndReleaseMemory(compaction_buffers_map_.Begin(), compaction_buffers_map_.Size());
info_map_.MadviseDontNeedAndZero();
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 56a6a17196..0ea8fb56e0 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -255,8 +255,6 @@ class MarkCompact final : public GarbageCollector {
+ from_space_slide_diff_);
}
- inline bool IsOnAllocStack(mirror::Object* ref)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Verifies that that given object reference refers to a valid object.
// Otherwise fataly dumps logs, including those from callback.
template <typename Callback>
@@ -776,9 +774,6 @@ class MarkCompact final : public GarbageCollector {
std::atomic<uint16_t> compaction_buffer_counter_;
// True while compacting.
bool compacting_;
- // Set to true in MarkingPause() to indicate when allocation_stack_ should be
- // checked in IsMarked() for black allocations.
- bool marking_done_;
// Flag indicating whether one-time uffd initialization has been done. It will
// be false on the first GC for non-zygote processes, and always for zygote.
// Its purpose is to minimize the userfaultfd overhead to the minimal in