Refactor and remove copy mark bits.
Refactor GC related code to be in a GC folder.
Remove copy mark bits by using pointer changing instead.
Enable concurrent sweeping of system weaks.
Fix non concurrent GC plan.
Change-Id: I9c71478be27d21a75f8a4e6af6faabe896e5e263
diff --git a/src/check_jni.cc b/src/check_jni.cc
index 5146039..e7590d3 100644
--- a/src/check_jni.cc
+++ b/src/check_jni.cc
@@ -23,7 +23,7 @@
#include "logging.h"
#include "object_utils.h"
#include "scoped_thread_state_change.h"
-#include "space.h"
+#include "gc/space.h"
#include "thread.h"
#include "runtime.h"
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 929582a..1853a59 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -48,8 +48,8 @@
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
-#include "space.h"
-#include "space_bitmap.h"
+#include "gc/space.h"
+#include "gc/space_bitmap.h"
#include "stack_indirect_reference_table.h"
#include "stl_util.h"
#include "thread.h"
diff --git a/src/compiler.cc b/src/compiler.cc
index 8909c36..c69c4ff 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -30,7 +30,7 @@
#include "oat/runtime/stub.h"
#include "object_utils.h"
#include "runtime.h"
-#include "space.h"
+#include "gc/space.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "stl_util.h"
diff --git a/src/compiler/Dalvik.h b/src/compiler/Dalvik.h
index d1b0e1c..5406f53 100644
--- a/src/compiler/Dalvik.h
+++ b/src/compiler/Dalvik.h
@@ -25,7 +25,7 @@
#include <stdint.h>
#include <stdio.h>
-#include "card_table.h"
+#include "gc/card_table.h"
#include "class_linker.h"
#include "compiler.h"
#include "dex_cache.h"
diff --git a/src/compiler/codegen/CodegenFactory.cc b/src/compiler/codegen/CodegenFactory.cc
index 9d380c7..ce25aad 100644
--- a/src/compiler/codegen/CodegenFactory.cc
+++ b/src/compiler/codegen/CodegenFactory.cc
@@ -280,7 +280,7 @@
newLIR2(cUnit, kX86Mov32RT, regCardBase,
Thread::CardTableOffset().Int32Value());
#endif
- opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
+ opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, CardTable::kCardShift);
storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
kUnsignedByte);
LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
diff --git a/src/debugger.cc b/src/debugger.cc
index 6f0ec58..1b89d9b 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -32,7 +32,7 @@
#include "ScopedPrimitiveArray.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
-#include "space.h"
+#include "gc/space.h"
#include "stack_indirect_reference_table.h"
#include "thread_list.h"
#include "well_known_classes.h"
diff --git a/src/atomic_stack.h b/src/gc/atomic_stack.h
similarity index 100%
rename from src/atomic_stack.h
rename to src/gc/atomic_stack.h
diff --git a/src/card_table.cc b/src/gc/card_table.cc
similarity index 77%
rename from src/card_table.cc
rename to src/gc/card_table.cc
index 8d0ddd1..5a1e9b5 100644
--- a/src/card_table.cc
+++ b/src/gc/card_table.cc
@@ -50,7 +50,7 @@
CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
/* Set up the card table */
- size_t capacity = heap_capacity / GC_CARD_SIZE;
+ size_t capacity = heap_capacity / kCardSize;
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous("dalvik-card-table", NULL,
capacity + 256, PROT_READ | PROT_WRITE));
@@ -61,7 +61,7 @@
}
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
- CHECK_EQ(GC_CARD_CLEAN, 0);
+ COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
byte* cardtable_begin = mem_map->Begin();
CHECK(cardtable_begin != NULL);
@@ -70,13 +70,13 @@
// GC_CARD_DIRTY, compute a offset value to make this the case
size_t offset = 0;
byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
- (reinterpret_cast<uintptr_t>(heap_begin) >> GC_CARD_SHIFT));
- if (((uintptr_t)biased_begin & 0xff) != GC_CARD_DIRTY) {
- int delta = GC_CARD_DIRTY - (reinterpret_cast<int>(biased_begin) & 0xff);
+ (reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
+ if (((uintptr_t)biased_begin & 0xff) != kCardDirty) {
+ int delta = kCardDirty - (reinterpret_cast<int>(biased_begin) & 0xff);
offset = delta + (delta < 0 ? 0x100 : 0);
biased_begin += offset;
}
- CHECK_EQ(reinterpret_cast<int>(biased_begin) & 0xff, GC_CARD_DIRTY);
+ CHECK_EQ(reinterpret_cast<int>(biased_begin) & 0xff, kCardDirty);
return new CardTable(mem_map.release(), biased_begin, offset);
}
@@ -92,20 +92,20 @@
// TODO: clear just the range of the table that has been modified
byte* card_start = CardFromAddr(space->Begin());
byte* card_end = CardFromAddr(space->End()); // Make sure to round up.
- memset(reinterpret_cast<void*>(card_start), GC_CARD_CLEAN, card_end - card_start);
+ memset(reinterpret_cast<void*>(card_start), kCardClean, card_end - card_start);
}
void CardTable::ClearCardTable() {
// TODO: clear just the range of the table that has been modified
- memset(mem_map_->Begin(), GC_CARD_CLEAN, mem_map_->Size());
+ memset(mem_map_->Begin(), kCardClean, mem_map_->Size());
}
void CardTable::PreClearCards(ContinuousSpace* space, std::vector<byte*>& out_cards) {
byte* card_end = CardFromAddr(space->End());
for (byte* card_cur = CardFromAddr(space->Begin()); card_cur < card_end; ++card_cur) {
- if (*card_cur == GC_CARD_DIRTY) {
+ if (*card_cur == kCardDirty) {
out_cards.push_back(card_cur);
- *card_cur = GC_CARD_CLEAN;
+ *card_cur = kCardClean;
}
}
}
@@ -113,22 +113,28 @@
void CardTable::GetDirtyCards(ContinuousSpace* space, std::vector<byte*>& out_cards) const {
byte* card_end = CardFromAddr(space->End());
for (byte* card_cur = CardFromAddr(space->Begin());card_cur < card_end; ++card_cur) {
- if (*card_cur == GC_CARD_DIRTY) {
+ if (*card_cur == kCardDirty) {
out_cards.push_back(card_cur);
}
}
}
+bool CardTable::AddrIsInCardTable(const void* addr) const {
+ return IsValidCard(biased_begin_ + ((uintptr_t)addr >> kCardShift));
+}
+
void CardTable::CheckAddrIsInCardTable(const byte* addr) const {
- byte* card_addr = biased_begin_ + ((uintptr_t)addr >> GC_CARD_SHIFT);
- if (!IsValidCard(card_addr)) {
- byte* begin = mem_map_->Begin() + offset_;
- byte* end = mem_map_->End();
- LOG(FATAL) << "Cardtable - begin: " << reinterpret_cast<void*>(begin)
- << " end: " << reinterpret_cast<void*>(end)
- << " addr: " << reinterpret_cast<const void*>(addr)
- << " card_addr: " << reinterpret_cast<void*>(card_addr);
- }
+ byte* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
+ byte* begin = mem_map_->Begin() + offset_;
+ byte* end = mem_map_->End();
+ CHECK(AddrIsInCardTable(addr))
+ << "Card table " << this
+ << " begin: " << reinterpret_cast<void*>(begin)
+ << " end: " << reinterpret_cast<void*>(end)
+ << " card_addr: " << reinterpret_cast<void*>(card_addr)
+ << " heap begin: " << AddrFromCard(begin)
+ << " heap end: " << AddrFromCard(end)
+ << " addr: " << reinterpret_cast<const void*>(addr);
}
void CardTable::VerifyCardTable() {
diff --git a/src/card_table.h b/src/gc/card_table.h
similarity index 85%
rename from src/card_table.h
rename to src/gc/card_table.h
index f19be7d..2715f52 100644
--- a/src/card_table.h
+++ b/src/gc/card_table.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef DALVIK_ALLOC_CARDTABLE_H_
-#define DALVIK_ALLOC_CARDTABLE_H_
+#ifndef ART_SRC_GC_CARDTABLE_H_
+#define ART_SRC_GC_CARDTABLE_H_
#include "globals.h"
#include "logging.h"
@@ -30,38 +30,38 @@
class SpaceBitmap;
class Object;
-#define GC_CARD_SHIFT 7
-#define GC_CARD_SIZE (1 << GC_CARD_SHIFT)
-#define GC_CARD_CLEAN 0
-#define GC_CARD_DIRTY 0x70
-
// Maintain a card table from the the write barrier. All writes of
// non-NULL values to heap addresses should go through an entry in
// WriteBarrier, and from there to here.
class CardTable {
public:
+ static const size_t kCardShift = 7;
+ static const size_t kCardSize = (1 << kCardShift);
+ static const uint8_t kCardClean = 0x0;
+ static const uint8_t kCardDirty = 0x70;
+
static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
// Set the card associated with the given address to GC_CARD_DIRTY.
void MarkCard(const void *addr) {
byte* card_addr = CardFromAddr(addr);
- *card_addr = GC_CARD_DIRTY;
+ *card_addr = kCardDirty;
}
// Is the object on a dirty card?
bool IsDirty(const Object* obj) const {
- return *CardFromAddr(obj) == GC_CARD_DIRTY;
+ return *CardFromAddr(obj) == kCardDirty;
}
- // Visit cards within memory range.
+ // Visit and clear cards within memory range, only visits dirty cards.
template <typename Visitor>
void VisitClear(const void* start, const void* end, const Visitor& visitor) {
byte* card_start = CardFromAddr(start);
byte* card_end = CardFromAddr(end);
- for (byte* cur = card_start; cur != card_end; ++cur) {
- if (*cur == GC_CARD_DIRTY) {
- *cur = GC_CARD_CLEAN;
- visitor(cur);
+ for (byte* it = card_start; it != card_end; ++it) {
+ if (*it == kCardDirty) {
+ *it = kCardClean;
+ visitor(it);
}
}
}
@@ -84,13 +84,13 @@
byte* card_end = CardFromAddr(scan_end);
while (card_cur < card_end) {
// Find the first dirty card.
- card_cur = reinterpret_cast<byte*>(memchr(card_cur, GC_CARD_DIRTY, card_end - card_cur));
+ card_cur = reinterpret_cast<byte*>(memchr(card_cur, kCardDirty, card_end - card_cur));
if (card_cur == NULL) {
break;
}
byte* run_start = card_cur++;
- while (*card_cur == GC_CARD_DIRTY && card_cur < card_end) {
+ while (*card_cur == kCardDirty && card_cur < card_end) {
card_cur++;
}
byte* run_end = card_cur;
@@ -123,18 +123,20 @@
<< " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
<< " end: " << reinterpret_cast<void*>(mem_map_->End());
uintptr_t offset = card_addr - biased_begin_;
- return reinterpret_cast<void*>(offset << GC_CARD_SHIFT);
+ return reinterpret_cast<void*>(offset << kCardShift);
}
// Returns the address of the relevant byte in the card table, given an address on the heap.
byte* CardFromAddr(const void *addr) const {
- byte *card_addr = biased_begin_ + ((uintptr_t)addr >> GC_CARD_SHIFT);
+ byte *card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
// Sanity check the caller was asking for address covered by the card table
DCHECK(IsValidCard(card_addr)) << "addr: " << addr
<< " card_addr: " << reinterpret_cast<void*>(card_addr);
return card_addr;
}
+ bool AddrIsInCardTable(const void* addr) const;
+
private:
CardTable(MemMap* begin, byte* biased_begin, size_t offset);
diff --git a/src/heap_bitmap.cc b/src/gc/heap_bitmap.cc
similarity index 100%
rename from src/heap_bitmap.cc
rename to src/gc/heap_bitmap.cc
diff --git a/src/heap_bitmap.h b/src/gc/heap_bitmap.h
similarity index 100%
rename from src/heap_bitmap.h
rename to src/gc/heap_bitmap.h
diff --git a/src/mark_sweep.cc b/src/gc/mark_sweep.cc
similarity index 86%
rename from src/mark_sweep.cc
rename to src/gc/mark_sweep.cc
index 34e5e2c..b82bc6e 100644
--- a/src/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -29,7 +29,6 @@
#include "logging.h"
#include "macros.h"
#include "monitor.h"
-#include "mutex.h"
#include "object.h"
#include "runtime.h"
#include "space.h"
@@ -74,23 +73,25 @@
void MarkSweep::Init() {
heap_ = Runtime::Current()->GetHeap();
mark_stack_->Reset();
-
- const Spaces& spaces = heap_->GetSpaces();
// TODO: C++0x auto
- for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if (current_mark_bitmap_ == NULL || (*cur)->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
- current_mark_bitmap_ = (*cur)->GetMarkBitmap();
- break;
- }
- }
- if (current_mark_bitmap_ == NULL) {
- GetHeap()->DumpSpaces();
- DCHECK(false) << "current_mark_bitmap_ == NULL";
- }
+ FindDefaultMarkBitmap();
// TODO: if concurrent, enable card marking in compiler
// TODO: check that the mark bitmap is entirely clear.
}
+void MarkSweep::FindDefaultMarkBitmap() {
+ const Spaces& spaces = heap_->GetSpaces();
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+ current_mark_bitmap_ = (*it)->GetMarkBitmap();
+ CHECK(current_mark_bitmap_ != NULL);
+ return;
+ }
+ }
+ GetHeap()->DumpSpaces();
+ LOG(FATAL) << "Could not find a default mark bitmap";
+}
+
inline void MarkSweep::MarkObject0(const Object* obj, bool check_finger) {
DCHECK(obj != NULL);
@@ -109,7 +110,8 @@
LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
if (!large_objects->Test(obj)) {
- CHECK(large_object_space->Contains(obj)) << "Attempting to mark object " << obj << " not in large object space";
+ CHECK(large_object_space->Contains(obj)) << "Attempting to mark object " << obj
+ << " not in large object space";
large_objects->Set(obj);
// Don't need to check finger since large objects never have any object references.
}
@@ -204,6 +206,16 @@
mark_bitmap->CopyFrom(live_bitmap);
}
+void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
+ CHECK(space->IsAllocSpace());
+ AllocSpace* alloc_space = space->AsAllocSpace();
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
+ GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+ alloc_space->temp_bitmap_.reset(mark_bitmap);
+ alloc_space->mark_bitmap_.reset(live_bitmap);
+}
+
class ScanImageRootVisitor {
public:
ScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
@@ -308,8 +320,8 @@
ScanObjectVisitor scan_visitor(this);
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
ContinuousSpace* space = *it;
- if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT ||
- (!partial && space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT)
+ if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+ (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
) {
current_mark_bitmap_ = space->GetMarkBitmap();
if (current_mark_bitmap_ == NULL) {
@@ -333,23 +345,25 @@
TimingLogger& timings) {
ScanImageRootVisitor image_root_visitor(this);
SetFingerVisitor finger_visitor(this);
- for (size_t i = 0;i < cards.size();) {
+ const size_t card_count = cards.size();
+ SpaceBitmap* active_bitmap = NULL;
+ for (size_t i = 0;i < card_count;) {
Object* start_obj = reinterpret_cast<Object*>(card_table->AddrFromCard(cards[i]));
uintptr_t begin = reinterpret_cast<uintptr_t>(start_obj);
- uintptr_t end = begin + GC_CARD_SIZE;
- for (++i; reinterpret_cast<uintptr_t>(cards[i]) == end && i < cards.size(); ++i) {
- end += GC_CARD_SIZE;
+ uintptr_t end = begin + CardTable::kCardSize;
+ for (++i; reinterpret_cast<uintptr_t>(cards[i]) == end && i < card_count; ++i) {
+ end += CardTable::kCardSize;
}
- if (current_mark_bitmap_ == NULL || !current_mark_bitmap_->HasAddress(start_obj)) {
- current_mark_bitmap_ = heap_->GetMarkBitmap()->GetSpaceBitmap(start_obj);
+ if (active_bitmap == NULL || !active_bitmap->HasAddress(start_obj)) {
+ active_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(start_obj);
#ifndef NDEBUG
- if (current_mark_bitmap_ == NULL) {
+ if (active_bitmap == NULL) {
GetHeap()->DumpSpaces();
LOG(FATAL) << "Object " << reinterpret_cast<const void*>(start_obj);
}
#endif
}
- current_mark_bitmap_->VisitMarkedRange(begin, end, image_root_visitor, finger_visitor);
+ active_bitmap->VisitMarkedRange(begin, end, image_root_visitor, finger_visitor);
}
timings.AddSplit("RecursiveMarkCards");
ProcessMarkStack();
@@ -362,12 +376,6 @@
!reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}
-bool MarkSweep::IsLiveCallback(const Object* object, void* arg) {
- return
- reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object) ||
- !reinterpret_cast<MarkSweep*>(arg)->IsMarked(object);
-}
-
void MarkSweep::RecursiveMarkDirtyObjects(bool update_finger) {
ScanGrayObjects(update_finger);
ProcessMarkStack();
@@ -377,38 +385,60 @@
Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this);
}
-void MarkSweep::SweepJniWeakGlobals(bool swap_bitmaps) {
- HeapBitmap* live_bitmap = GetHeap()->GetLiveBitmap();
- HeapBitmap* mark_bitmap = GetHeap()->GetMarkBitmap();
-
- if (swap_bitmaps) {
- std::swap(live_bitmap, mark_bitmap);
- }
-
+void MarkSweep::SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg) {
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
MutexLock mu(Thread::Current(), vm->weak_globals_lock);
IndirectReferenceTable* table = &vm->weak_globals;
typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
for (It it = table->begin(), end = table->end(); it != end; ++it) {
const Object** entry = *it;
- if (live_bitmap->Test(*entry) && !mark_bitmap->Test(*entry)) {
+ if (!is_marked(*entry, arg)) {
*entry = kClearedJniWeakGlobal;
}
}
}
-void MarkSweep::SweepSystemWeaks(bool swap_bitmaps) {
+struct ArrayMarkedCheck {
+ ObjectStack* live_stack;
+ MarkSweep* mark_sweep;
+};
+
+// Either marked or not live.
+bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
+ ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
+ if (array_check->mark_sweep->IsMarked(object)) {
+ return true;
+ }
+ ObjectStack* live_stack = array_check->live_stack;
+ return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
+}
+
+void MarkSweep::SweepSystemWeaksArray(ObjectStack* allocations) {
Runtime* runtime = Runtime::Current();
// The callbacks check
// !is_marked where is_marked is the callback but we want
// !IsMarked && IsLive
// So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
// Or for swapped (IsLive || !IsMarked).
- runtime->GetInternTable()->SweepInternTableWeaks(swap_bitmaps ? IsLiveCallback : IsMarkedCallback,
- this);
- runtime->GetMonitorList()->SweepMonitorList(swap_bitmaps ? IsLiveCallback : IsMarkedCallback,
- this);
- SweepJniWeakGlobals(swap_bitmaps);
+
+ ArrayMarkedCheck visitor;
+ visitor.live_stack = allocations;
+ visitor.mark_sweep = this;
+ runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
+ runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
+ SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
+}
+
+void MarkSweep::SweepSystemWeaks() {
+ Runtime* runtime = Runtime::Current();
+ // The callbacks check
+ // !is_marked where is_marked is the callback but we want
+ // !IsMarked && IsLive
+ // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
+ // Or for swapped (IsLive || !IsMarked).
+ runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
+ runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
+ SweepJniWeakGlobals(IsMarkedCallback, this);
}
bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
@@ -420,11 +450,14 @@
void MarkSweep::VerifyIsLive(const Object* obj) {
Heap* heap = GetHeap();
if (!heap->GetLiveBitmap()->Test(obj)) {
- if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
- heap->allocation_stack_->End()) {
- // Object not found!
- heap->DumpSpaces();
- LOG(FATAL) << "Found dead object " << obj;
+ LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ if (!large_object_space->GetLiveObjects()->Test(obj)) {
+ if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
+ heap->allocation_stack_->End()) {
+ // Object not found!
+ heap->DumpSpaces();
+ LOG(FATAL) << "Found dead object " << obj;
+ }
}
}
}
@@ -504,7 +537,8 @@
// If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
// bitmap, resulting in occasional frees of Weaks which are still in use.
// TODO: Fix when sweeping weaks works properly with mutators unpaused + allocation list.
- // SweepSystemWeaks(swap_bitmaps);
+ SweepSystemWeaksArray(allocations);
+ logger.AddSplit("SweepSystemWeaks");
// Newly allocated objects MUST be in the alloc space and those are the only objects which we are
// going to free.
@@ -561,7 +595,7 @@
// If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
// bitmap, resulting in occasional frees of Weaks which are still in use.
- // SweepSystemWeaks(swap_bitmaps);
+ SweepSystemWeaks();
const Spaces& spaces = heap_->GetSpaces();
SweepCallbackContext scc;
@@ -571,8 +605,8 @@
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
ContinuousSpace* space = *it;
if (
- space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT ||
- (!partial && space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT)
+ space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+ (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
) {
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
@@ -582,7 +616,7 @@
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
- if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
+ if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
// Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
&SweepCallback, reinterpret_cast<void*>(&scc));
@@ -879,6 +913,19 @@
ProcessMarkStack();
}
+inline bool MarkSweep::IsMarked(const Object* object) const
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ if (object >= immune_begin_ && object < immune_end_) {
+ return true;
+ }
+ DCHECK(current_mark_bitmap_ != NULL);
+ if (current_mark_bitmap_->HasAddress(object)) {
+ return current_mark_bitmap_->Test(object);
+ }
+ return heap_->GetMarkBitmap()->Test(object);
+}
+
+
// Unlink the reference list clearing references objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
@@ -964,6 +1011,25 @@
DCHECK(*phantom_references == NULL);
}
+void MarkSweep::UnBindBitmaps() {
+ const Spaces& spaces = heap_->GetSpaces();
+ // TODO: C++0x auto
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ Space* space = *it;
+ if (space->IsAllocSpace()) {
+ AllocSpace* alloc_space = space->AsAllocSpace();
+ if (alloc_space->temp_bitmap_.get() != NULL) {
+ // At this point, the temp_bitmap holds our old mark bitmap.
+ SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
+ GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
+ CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
+ alloc_space->mark_bitmap_.reset(new_bitmap);
+ DCHECK(alloc_space->temp_bitmap_.get() == NULL);
+ }
+ }
+ }
+}
+
MarkSweep::~MarkSweep() {
#ifndef NDEBUG
VLOG(heap) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_ << " other=" << other_count_;
@@ -975,8 +1041,9 @@
const Spaces& spaces = heap_->GetSpaces();
// TODO: C++0x auto
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
- (*it)->GetMarkBitmap()->Clear();
+ ContinuousSpace* space = *it;
+ if (space->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
+ space->GetMarkBitmap()->Clear();
}
}
mark_stack_->Reset();
diff --git a/src/mark_sweep.h b/src/gc/mark_sweep.h
similarity index 95%
rename from src/mark_sweep.h
rename to src/gc/mark_sweep.h
index 7b4ff6e..74b2aa7 100644
--- a/src/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -44,6 +44,9 @@
// Initializes internal structures.
void Init();
+ // Find the default mark bitmap.
+ void FindDefaultMarkBitmap();
+
// Marks the root set at the start of a garbage collection.
void MarkRoots()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -59,6 +62,13 @@
// Copies mark bits from live bitmap of ZygoteSpace to mark bitmap for partial GCs.
void CopyMarkBits(ContinuousSpace* space);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void BindLiveToMarkBitmap(ContinuousSpace* space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void UnBindBitmaps()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Builds a mark stack with objects on dirty cards and recursively mark
// until it empties.
@@ -138,7 +148,11 @@
immune_end_ = end;
}
- void SweepSystemWeaks(bool swap_bitmaps)
+ void SweepSystemWeaks()
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Only sweep the weaks which are inside of an allocation stack.
+ void SweepSystemWeaksArray(ObjectStack* allocations)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static bool VerifyIsLiveCallback(const Object* obj, void* arg)
@@ -168,19 +182,12 @@
private:
// Returns true if the object has its bit set in the mark bitmap.
- bool IsMarked(const Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- DCHECK(current_mark_bitmap_ != NULL);
- if (current_mark_bitmap_->HasAddress(object)) {
- return current_mark_bitmap_->Test(object);
- }
- return heap_->GetMarkBitmap()->Test(object);
- }
+ bool IsMarked(const Object* object) const;
static bool IsMarkedCallback(const Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool IsLiveCallback(const Object* object, void* arg)
+ static bool IsMarkedArrayCallback(const Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static void MarkObjectVisitor(const Object* root, void* arg)
@@ -363,7 +370,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepJniWeakGlobals(bool swap_bitmaps)
+ void SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Current space, we check this space first to avoid searching for the appropriate space for an object.
diff --git a/src/mod_union_table.cc b/src/gc/mod_union_table.cc
similarity index 86%
rename from src/mod_union_table.cc
rename to src/gc/mod_union_table.cc
index ee31ef6..967f795 100644
--- a/src/mod_union_table.cc
+++ b/src/gc/mod_union_table.cc
@@ -26,8 +26,8 @@
class MarkIfReachesAllocspaceVisitor {
public:
- explicit MarkIfReachesAllocspaceVisitor(MarkSweep* const mark_sweep, SpaceBitmap* bitmap)
- : mark_sweep_(mark_sweep),
+ explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, SpaceBitmap* bitmap)
+ : heap_(heap),
bitmap_(bitmap) {
}
@@ -35,7 +35,7 @@
void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const {
// TODO: Optimize?
// TODO: C++0x auto
- const Spaces& spaces = mark_sweep_->heap_->GetSpaces();
+ const Spaces& spaces = heap_->GetSpaces();
for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
bitmap_->Set(obj);
@@ -45,14 +45,14 @@
}
private:
- MarkSweep* const mark_sweep_;
+ Heap* const heap_;
SpaceBitmap* bitmap_;
};
class ModUnionVisitor {
public:
- explicit ModUnionVisitor(MarkSweep* const mark_sweep, SpaceBitmap* bitmap)
- : mark_sweep_(mark_sweep),
+ explicit ModUnionVisitor(Heap* const heap, SpaceBitmap* bitmap)
+ : heap_(heap),
bitmap_(bitmap) {
}
@@ -60,13 +60,13 @@
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
DCHECK(obj != NULL);
- // We don't have an early exit since we use the visitor pattern, an early
- // exit should significantly speed this up.
- MarkIfReachesAllocspaceVisitor visitor(mark_sweep_, bitmap_);
- mark_sweep_->VisitObjectReferences(obj, visitor);
+ // We don't have an early exit since we use the visitor pattern, an early exit should
+ // significantly speed this up.
+ MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
+ MarkSweep::VisitObjectReferences(obj, visitor);
}
private:
- MarkSweep* const mark_sweep_;
+ Heap* const heap_;
SpaceBitmap* bitmap_;
};
@@ -100,7 +100,7 @@
// Prevent fragmentation of the heap which is caused by resizing of the vector.
// TODO: Make a new vector which uses madvise (basically same as a mark stack).
cleared_cards_.reserve(32);
- const Spaces& spaces = mark_sweep_->GetHeap()->GetSpaces();
+ const Spaces& spaces = heap->GetSpaces();
// Create one heap bitmap per image space.
// TODO: C++0x auto
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
@@ -121,20 +121,20 @@
}
void ModUnionTableBitmap::ClearCards(ContinuousSpace* space) {
- CardTable* card_table = mark_sweep_->heap_->GetCardTable();
+ CardTable* card_table = heap_->GetCardTable();
ModUnionClearCardVisitor visitor(&cleared_cards_);
// Clear dirty cards in the this image space and update the corresponding mod-union bits.
card_table->VisitClear(space->Begin(), space->End(), visitor);
}
void ModUnionTableBitmap::Update() {
- CardTable* card_table = mark_sweep_->heap_->GetCardTable();
+ CardTable* card_table = heap_->GetCardTable();
while (!cleared_cards_.empty()) {
byte* card = cleared_cards_.back();
cleared_cards_.pop_back();
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + GC_CARD_SIZE;
+ uintptr_t end = start + CardTable::kCardSize;
ContinuousSpace* space = heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start));
SpaceBitmap* bitmap = space->GetLiveBitmap();
@@ -144,7 +144,7 @@
// At this point we need to update the mod-union bitmap to contain all the objects which reach
// the alloc space.
- ModUnionVisitor visitor(mark_sweep_, bitmap);
+ ModUnionVisitor visitor(heap_, bitmap);
space->GetLiveBitmap()->VisitMarkedRange(start, end, visitor, IdentityFunctor());
}
}
@@ -165,14 +165,14 @@
MarkSweep* const mark_sweep_;
};
-void ModUnionTableBitmap::MarkReferences() {
+void ModUnionTableBitmap::MarkReferences(MarkSweep* mark_sweep) {
// Some tests have no image space, and therefore no mod-union bitmap.
- ModUnionScanImageRootVisitor image_root_scanner(GetMarkSweep());
- for (BitmapMap::iterator cur = bitmaps_.begin(); cur != bitmaps_.end(); ++cur) {
- const ContinuousSpace* space = cur->first;
+ ModUnionScanImageRootVisitor image_root_scanner(mark_sweep);
+ for (BitmapMap::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+ const ContinuousSpace* space = it->first;
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- cur->second->VisitMarkedRange(begin, end, image_root_scanner, IdentityFunctor());
+ it->second->VisitMarkedRange(begin, end, image_root_scanner, IdentityFunctor());
}
}
@@ -185,9 +185,8 @@
}
-
void ModUnionTableReferenceCache::ClearCards(ContinuousSpace* space) {
- CardTable* card_table = GetMarkSweep()->GetHeap()->GetCardTable();
+ CardTable* card_table = GetHeap()->GetCardTable();
ModUnionClearCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in the this space and update the corresponding mod-union bits.
card_table->VisitClear(space->Begin(), space->End(), visitor);
@@ -232,7 +231,7 @@
// We don't have an early exit since we use the visitor pattern, an early
// exit should significantly speed this up.
AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
- mod_union_table_->GetMarkSweep()->VisitObjectReferences(obj, visitor);
+ MarkSweep::VisitObjectReferences(obj, visitor);
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
@@ -255,7 +254,7 @@
void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
bool /* is_static */) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- Heap* heap = mod_union_table_->GetMarkSweep()->GetHeap();
+ Heap* heap = mod_union_table_->GetHeap();
if (mod_union_table_->AddReference(obj, ref) && references_.find(ref) == references_.end()) {
ContinuousSpace* from_space = heap->FindSpaceFromObject(obj);
ContinuousSpace* to_space = heap->FindSpaceFromObject(ref);
@@ -288,9 +287,8 @@
void operator ()(Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != NULL);
- MarkSweep* mark_sweep = mod_union_table_->GetMarkSweep();
CheckReferenceVisitor visitor(mod_union_table_, references_);
- mark_sweep->VisitObjectReferences(obj, visitor);
+ MarkSweep::VisitObjectReferences(obj, visitor);
}
private:
@@ -300,7 +298,7 @@
void ModUnionTableReferenceCache::Verify() {
// Start by checking that everything in the mod union table is marked.
- Heap* heap = GetMarkSweep()->GetHeap();
+ Heap* heap = GetHeap();
for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
DCHECK(heap->GetLiveBitmap()->Test(*it_ref));
@@ -310,7 +308,7 @@
// Check the references of each clean card which is also in the mod union table.
for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
const byte* card = &*it->first;
- if (*card == GC_CARD_CLEAN) {
+ if (*card == CardTable::kCardClean) {
std::set<const Object*> reference_set;
for (ReferenceArray::const_iterator itr = it->second.begin(); itr != it->second.end();++itr) {
reference_set.insert(*itr);
@@ -318,7 +316,7 @@
ModUnionCheckReferences visitor(this, reference_set);
CardTable* card_table = heap->GetCardTable();
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + GC_CARD_SIZE;
+ uintptr_t end = start + CardTable::kCardSize;
SpaceBitmap* live_bitmap =
heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor, IdentityFunctor());
@@ -327,7 +325,7 @@
}
void ModUnionTableReferenceCache::Update() {
- Heap* heap = GetMarkSweep()->GetHeap();
+ Heap* heap = GetHeap();
CardTable* card_table = heap->GetCardTable();
ReferenceArray cards_references;
@@ -338,7 +336,7 @@
// Clear and re-compute alloc space references associated with this card.
cards_references.clear();
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + GC_CARD_SIZE;
+ uintptr_t end = start + CardTable::kCardSize;
SpaceBitmap* live_bitmap =
heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor, IdentityFunctor());
@@ -359,8 +357,7 @@
cleared_cards_.clear();
}
-void ModUnionTableReferenceCache::MarkReferences() {
- MarkSweep* mark_sweep = GetMarkSweep();
+void ModUnionTableReferenceCache::MarkReferences(MarkSweep* mark_sweep) {
// TODO: C++0x auto
size_t count = 0;
for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
@@ -383,20 +380,20 @@
}
void ModUnionTableCardCache::ClearCards(ContinuousSpace* space) {
- CardTable* card_table = GetMarkSweep()->GetHeap()->GetCardTable();
+ CardTable* card_table = GetHeap()->GetCardTable();
ModUnionClearCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in the this space and update the corresponding mod-union bits.
card_table->VisitClear(space->Begin(), space->End(), visitor);
}
// Mark all references to the alloc space(s).
-void ModUnionTableCardCache::MarkReferences() {
+void ModUnionTableCardCache::MarkReferences(MarkSweep* mark_sweep) {
CardTable* card_table = heap_->GetCardTable();
- ModUnionScanImageRootVisitor visitor(GetMarkSweep());
+ ModUnionScanImageRootVisitor visitor(mark_sweep);
for (ClearedCards::const_iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
byte* card = *it;
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + GC_CARD_SIZE;
+ uintptr_t end = start + CardTable::kCardSize;
SpaceBitmap* live_bitmap =
heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor, IdentityFunctor());
diff --git a/src/mod_union_table.h b/src/gc/mod_union_table.h
similarity index 88%
rename from src/mod_union_table.h
rename to src/gc/mod_union_table.h
index 7629665..5e4733a 100644
--- a/src/mod_union_table.h
+++ b/src/gc/mod_union_table.h
@@ -33,7 +33,7 @@
typedef std::vector<const Object*> ReferenceArray;
typedef std::set<byte*> ClearedCards;
- ModUnionTable(Heap* heap) : heap_(heap), mark_sweep_(0) {
+ ModUnionTable(Heap* heap) : heap_(heap) {
}
@@ -48,7 +48,7 @@
virtual void Update() = 0;
// Mark all references which are stored in the mod union table.
- virtual void MarkReferences() = 0;
+ virtual void MarkReferences(MarkSweep* mark_sweep) = 0;
// Verification, sanity checks that we don't have clean cards which conflict with out cached data
// for said cards. Exclusive lock is required since verify sometimes uses
@@ -56,22 +56,12 @@
// bitmap or not.
virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
- // Should probably clean this up later.
- void Init(MarkSweep* mark_sweep) {
- mark_sweep_ = mark_sweep;
- }
-
- MarkSweep* GetMarkSweep() {
- return mark_sweep_;
- }
-
Heap* GetHeap() {
return heap_;
}
protected:
Heap* heap_;
- MarkSweep* mark_sweep_;
};
// Bitmap implementation.
@@ -90,7 +80,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Mark all references to the alloc space(s).
- void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
protected:
// Cleared card array, used to update the mod-union table.
@@ -119,7 +109,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Mark all references to the alloc space(s).
- void MarkReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
@@ -151,7 +141,7 @@
void Update() {}
// Mark all references to the alloc space(s).
- void MarkReferences()
+ void MarkReferences(MarkSweep* mark_sweep)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -170,7 +160,7 @@
}
bool AddReference(const Object* /* obj */, const Object* ref) {
- const Spaces& spaces = Implementation::GetMarkSweep()->GetHeap()->GetSpaces();
+ const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
if ((*it)->Contains(ref)) {
return (*it)->IsAllocSpace();
@@ -189,10 +179,10 @@
}
bool AddReference(const Object* /* obj */, const Object* ref) {
- const Spaces& spaces = Implementation::GetMarkSweep()->GetHeap()->GetSpaces();
+ const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
if ((*it)->Contains(ref)) {
- return (*it)->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT;
+ return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
}
}
if (ref != NULL) {
diff --git a/src/space.cc b/src/gc/space.cc
similarity index 97%
rename from src/space.cc
rename to src/gc/space.cc
index c1f4384..9c8819b 100644
--- a/src/space.cc
+++ b/src/gc/space.cc
@@ -72,7 +72,7 @@
AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
byte* end, size_t growth_limit)
- : MemMapSpace(name, mem_map, end - begin, GCRP_ALWAYS_COLLECT),
+ : MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0), num_objects_allocated_(0),
lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
growth_limit_(growth_limit) {
@@ -80,10 +80,9 @@
size_t bitmap_index = bitmap_index_++;
- static const uintptr_t kGcCardSize = static_cast<uintptr_t>(GC_CARD_SIZE);
+ static const uintptr_t kGcCardSize = static_cast<uintptr_t>(CardTable::kCardSize);
CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);
-
live_bitmap_.reset(SpaceBitmap::Create(
StringPrintf("allocspace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
@@ -239,8 +238,8 @@
AllocSpace* AllocSpace::CreateZygoteSpace() {
end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
- DCHECK(IsAligned<GC_CARD_SIZE>(begin_));
- DCHECK(IsAligned<GC_CARD_SIZE>(end_));
+ DCHECK(IsAligned<CardTable::kCardSize>(begin_));
+ DCHECK(IsAligned<CardTable::kCardSize>(end_));
DCHECK(IsAligned<kPageSize>(begin_));
DCHECK(IsAligned<kPageSize>(end_));
size_t size = RoundUp(Size(), kPageSize);
@@ -254,11 +253,11 @@
// Remaining size is for the new alloc space.
const size_t growth_limit = growth_limit_ - size;
const size_t capacity = Capacity() - size;
- VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_);
- VLOG(heap) << "End " << reinterpret_cast<const void*>(end_);
- VLOG(heap) << "Size " << size;
- VLOG(heap) << "GrowthLimit " << growth_limit_;
- VLOG(heap) << "Capacity " << Capacity();
+ VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_) << "\n"
+ << "End " << reinterpret_cast<const void*>(end_) << "\n"
+ << "Size " << size << "\n"
+ << "GrowthLimit " << growth_limit_ << "\n"
+ << "Capacity " << Capacity();
SetGrowthLimit(RoundUp(size, kPageSize));
SetFootprintLimit(RoundUp(size, kPageSize));
// FIXME: Do we need reference counted pointers here?
@@ -417,7 +416,7 @@
size_t ImageSpace::bitmap_index_ = 0;
ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
- : MemMapSpace(name, mem_map, mem_map->Size(), GCRP_NEVER_COLLECT) {
+ : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
const size_t bitmap_index = bitmap_index_++;
live_bitmap_.reset(SpaceBitmap::Create(
StringPrintf("imagespace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
@@ -552,7 +551,7 @@
}
LargeObjectSpace::LargeObjectSpace(const std::string& name)
- : DiscontinuousSpace(name, GCRP_ALWAYS_COLLECT),
+ : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0),
num_objects_allocated_(0) {
live_objects_.reset(new SpaceSetMap("large live objects"));
diff --git a/src/space.h b/src/gc/space.h
similarity index 98%
rename from src/space.h
rename to src/gc/space.h
index af6ab3b..a543500 100644
--- a/src/space.h
+++ b/src/gc/space.h
@@ -19,11 +19,11 @@
#include <string>
+#include "../mutex.h"
#include "UniquePtr.h"
#include "globals.h"
#include "image.h"
#include "macros.h"
-#include "mutex.h"
#include "dlmalloc.h"
#include "mem_map.h"
@@ -36,9 +36,9 @@
class SpaceBitmap;
enum GcRetentionPolicy {
- GCRP_NEVER_COLLECT,
- GCRP_ALWAYS_COLLECT,
- GCRP_FULL_COLLECT,
+ kGcRetentionPolicyNeverCollect,
+ kGcRetentionPolicyAlwaysCollect,
+ kGcRetentionPolicyFullCollect, // Collect only for full GC
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
@@ -319,6 +319,7 @@
UniquePtr<SpaceBitmap> live_bitmap_;
UniquePtr<SpaceBitmap> mark_bitmap_;
+ UniquePtr<SpaceBitmap> temp_bitmap_;
// Approximate number of bytes which have been allocated into the space.
size_t num_bytes_allocated_;
@@ -351,6 +352,8 @@
// one time by a call to ClearGrowthLimit.
size_t growth_limit_;
+ friend class MarkSweep;
+
DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};
@@ -451,6 +454,7 @@
}
protected:
+
LargeObjectSpace(const std::string& name);
// Approximate number of bytes which have been allocated into the space.
diff --git a/src/space_bitmap.cc b/src/gc/space_bitmap.cc
similarity index 98%
rename from src/space_bitmap.cc
rename to src/gc/space_bitmap.cc
index b0b9d90..273ae4f 100644
--- a/src/space_bitmap.cc
+++ b/src/gc/space_bitmap.cc
@@ -50,7 +50,9 @@
}
// Clean up any resources associated with the bitmap.
-SpaceBitmap::~SpaceBitmap() {}
+SpaceBitmap::~SpaceBitmap() {
+
+}
void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
@@ -72,7 +74,7 @@
// will return zeroed memory.
int result = madvise(bitmap_begin_, bitmap_size_, MADV_DONTNEED);
if (result == -1) {
- PLOG(WARNING) << "madvise failed";
+ PLOG(FATAL) << "madvise failed";
}
}
}
diff --git a/src/space_bitmap.h b/src/gc/space_bitmap.h
similarity index 100%
rename from src/space_bitmap.h
rename to src/gc/space_bitmap.h
diff --git a/src/space_bitmap_test.cc b/src/gc/space_bitmap_test.cc
similarity index 100%
rename from src/space_bitmap_test.cc
rename to src/gc/space_bitmap_test.cc
diff --git a/src/space_test.cc b/src/gc/space_test.cc
similarity index 100%
rename from src/space_test.cc
rename to src/gc/space_test.cc
diff --git a/src/heap.cc b/src/heap.cc
index e2190ec..df0641d 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -22,20 +22,20 @@
#include <limits>
#include <vector>
-#include "atomic_stack.h"
-#include "card_table.h"
#include "debugger.h"
-#include "heap_bitmap.h"
+#include "gc/atomic_stack.h"
+#include "gc/card_table.h"
+#include "gc/heap_bitmap.h"
+#include "gc/mark_sweep.h"
+#include "gc/mod_union_table.h"
+#include "gc/space.h"
#include "image.h"
-#include "mark_sweep.h"
-#include "mod_union_table.h"
#include "object.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
-#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
@@ -149,7 +149,7 @@
verify_post_gc_heap_(false),
verify_mod_union_table_(false),
partial_gc_frequency_(10),
- min_alloc_space_size_for_sticky_gc_(4 * MB),
+ min_alloc_space_size_for_sticky_gc_(2 * MB),
min_remaining_space_for_sticky_gc_(1 * MB),
last_trim_time_(0),
try_running_gc_(false),
@@ -187,9 +187,7 @@
image_space = ImageSpace::Create(image_file_name);
}
if (image_space == NULL) {
- if (!GenerateImage(image_file_name)) {
- LOG(FATAL) << "Failed to generate image: " << image_file_name;
- }
+ CHECK(GenerateImage(image_file_name)) << "Failed to generate image: " << image_file_name;
image_space = ImageSpace::Create(image_file_name);
}
}
@@ -259,7 +257,7 @@
static const size_t default_mark_stack_size = 64 * KB;
mark_stack_.reset(ObjectStack::Create("dalvik-mark-stack", default_mark_stack_size));
allocation_stack_.reset(ObjectStack::Create("dalvik-allocation-stack",
- max_allocation_stack_size_));
+ max_allocation_stack_size_));
live_stack_.reset(ObjectStack::Create("dalvik-live-stack",
max_allocation_stack_size_));
@@ -479,9 +477,11 @@
// TODO: C++0x auto
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
ContinuousSpace* space = *it;
- LOG(INFO) << *space << "\n"
- << *space->GetLiveBitmap() << "\n"
- << *space->GetMarkBitmap();
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ LOG(INFO) << space << " " << *space << "\n"
+ << live_bitmap << " " << *live_bitmap << "\n"
+ << mark_bitmap << " " << *mark_bitmap;
}
// TODO: Dump large object space?
}
@@ -701,45 +701,37 @@
class InstanceCounter {
public:
- InstanceCounter(Class* c, bool count_assignable)
+ InstanceCounter(Class* c, bool count_assignable, size_t* const count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : class_(c), count_assignable_(count_assignable), count_(0) {
+ : class_(c), count_assignable_(count_assignable), count_(count) {
}
- size_t GetCount() {
- return count_;
- }
-
- static void Callback(Object* o, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
- }
-
- private:
- void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* instance_class = o->GetClass();
+ void operator()(const Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const Class* instance_class = o->GetClass();
if (count_assignable_) {
if (instance_class == class_) {
- ++count_;
+ ++*count_;
}
} else {
if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
- ++count_;
+ ++*count_;
}
}
}
+ private:
Class* class_;
bool count_assignable_;
- size_t count_;
+ size_t* const count_;
};
int64_t Heap::CountInstances(Class* c, bool count_assignable) {
+ size_t count = 0;
+ InstanceCounter counter(c, count_assignable, &count);
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- InstanceCounter counter(c, count_assignable);
- GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter);
- return counter.GetCount();
+ GetLiveBitmap()->Visit(counter);
+ return count;
}
void Heap::CollectGarbage(bool clear_soft_references) {
@@ -781,7 +773,7 @@
alloc_space_ = zygote_space->CreateZygoteSpace();
// Change the GC retention policy of the zygote space to only collect when full.
- zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);
+ zygote_space->SetGcRetentionPolicy(kGcRetentionPolicyFullCollect);
AddSpace(alloc_space_);
have_zygote_space_ = true;
break;
@@ -910,7 +902,6 @@
Object* cleared_references = NULL;
{
MarkSweep mark_sweep(mark_stack_.get());
-
mark_sweep.Init();
timings.AddSplit("Init");
@@ -922,10 +913,6 @@
timings.AddSplit("VerifyHeapReferencesPreGC");
}
- // Make sure that the tables have the correct pointer for the mark sweep.
- mod_union_table_->Init(&mark_sweep);
- zygote_mod_union_table_->Init(&mark_sweep);
-
// Swap allocation stack and live stack, enabling us to have new allocations during this GC.
SwapStacks();
@@ -939,19 +926,7 @@
}
// Clear image space cards and keep track of cards we cleared in the mod-union table.
- for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- ContinuousSpace* space = *it;
- if (space->IsImageSpace()) {
- mod_union_table_->ClearCards(*it);
- timings.AddSplit("ClearModUnionCards");
- } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
- zygote_mod_union_table_->ClearCards(space);
- timings.AddSplit("ClearZygoteCards");
- } else {
- card_table_->ClearSpaceCards(space);
- timings.AddSplit("ClearCards");
- }
- }
+ ClearCards(timings);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
if (gc_type == kGcTypePartial) {
@@ -959,34 +934,28 @@
// accidentally un-mark roots.
// Needed for scanning dirty objects.
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
- mark_sweep.CopyMarkBits(*it);
+ if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+ mark_sweep.BindLiveToMarkBitmap(*it);
}
}
- timings.AddSplit("CopyMarkBits");
+ timings.AddSplit("BindLiveToMarked");
// We can assume that everything from the start of the first space to the alloc space is marked.
mark_sweep.SetImmuneRange(reinterpret_cast<Object*>(spaces_[0]->Begin()),
reinterpret_cast<Object*>(alloc_space_->Begin()));
} else if (gc_type == kGcTypeSticky) {
for (Spaces::iterator it = spaces_.begin();it != spaces_.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
- mark_sweep.CopyMarkBits(*it);
+ if ((*it)->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
+ mark_sweep.BindLiveToMarkBitmap(*it);
}
}
+ timings.AddSplit("BindLiveToMarkBitmap");
large_object_space_->CopyLiveToMarked();
- timings.AddSplit("CopyMarkBits");
-
+ timings.AddSplit("CopyLiveToMarked");
mark_sweep.SetImmuneRange(reinterpret_cast<Object*>(spaces_[0]->Begin()),
reinterpret_cast<Object*>(alloc_space_->Begin()));
}
-
- MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
- live_stack_.get());
-
- if (gc_type != kGcTypeSticky) {
- live_stack_->Reset();
- }
+ mark_sweep.FindDefaultMarkBitmap();
mark_sweep.MarkRoots();
timings.AddSplit("MarkRoots");
@@ -994,7 +963,13 @@
// Roots are marked on the bitmap and the mark_stack is empty.
DCHECK(mark_stack_->IsEmpty());
- UpdateAndMarkModUnion(timings, gc_type);
+ UpdateAndMarkModUnion(&mark_sweep, timings, gc_type);
+
+ if (gc_type != kGcTypeSticky) {
+ MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
+ live_stack_.get());
+ timings.AddSplit("MarkStackAsLive");
+ }
if (verify_mod_union_table_) {
zygote_mod_union_table_->Update();
@@ -1005,7 +980,6 @@
// Recursively mark all the non-image bits set in the mark bitmap.
if (gc_type != kGcTypeSticky) {
- live_stack_->Reset();
mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
} else {
mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
@@ -1016,15 +990,6 @@
mark_sweep.ProcessReferences(clear_soft_references);
timings.AddSplit("ProcessReferences");
- // This doesn't work with mutators unpaused for some reason, TODO: Fix.
- mark_sweep.SweepSystemWeaks(false);
- timings.AddSplit("SweepSystemWeaks");
-
- const bool swap = true;
- if (swap) {
- SwapBitmaps(self);
- }
-
#ifndef NDEBUG
// Verify that we only reach marked objects from the image space
mark_sweep.VerifyImageRoots();
@@ -1032,13 +997,27 @@
#endif
if (gc_type != kGcTypeSticky) {
- mark_sweep.SweepLargeObjects(swap);
+ mark_sweep.Sweep(gc_type == kGcTypePartial, false);
+ timings.AddSplit("Sweep");
+ mark_sweep.SweepLargeObjects(false);
timings.AddSplit("SweepLargeObjects");
- mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
} else {
- mark_sweep.SweepArray(timings, live_stack_.get(), swap);
+ mark_sweep.SweepArray(timings, live_stack_.get(), false);
timings.AddSplit("SweepArray");
}
+ live_stack_->Reset();
+
+ // Unbind the live and mark bitmaps.
+ mark_sweep.UnBindBitmaps();
+
+ const bool swap = true;
+ if (swap) {
+ if (gc_type == kGcTypeSticky) {
+ SwapLargeObjects(self);
+ } else {
+ SwapBitmaps(self, gc_type);
+ }
+ }
if (verify_system_weaks_) {
mark_sweep.VerifySystemWeaks();
@@ -1073,7 +1052,7 @@
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetUsedMemorySize();
const size_t total_memory = GetTotalMemory();
- LOG(INFO) << gc_cause << " " << gc_type_str.str() << " "
+ LOG(INFO) << gc_cause << " " << gc_type_str.str()
<< "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
<< PrettySize(current_heap_size) << "/" << PrettySize(total_memory) << ", "
<< "paused " << PrettyDuration(duration);
@@ -1088,9 +1067,9 @@
logger->End(); // Next iteration.
}
-void Heap::UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type) {
+void Heap::UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, GcType gc_type) {
if (gc_type == kGcTypeSticky) {
- // Don't need to do anythign for mod union table in this case since we are only scanning dirty
+ // Don't need to do anything for mod union table in this case since we are only scanning dirty
// cards.
return;
}
@@ -1100,7 +1079,7 @@
zygote_mod_union_table_->Update();
timings.AddSplit("UpdateZygoteModUnionTable");
- zygote_mod_union_table_->MarkReferences();
+ zygote_mod_union_table_->MarkReferences(mark_sweep);
timings.AddSplit("ZygoteMarkReferences");
}
@@ -1109,7 +1088,7 @@
timings.AddSplit("UpdateModUnionTable");
// Scans all objects in the mod-union table.
- mod_union_table_->MarkReferences();
+ mod_union_table_->MarkReferences(mark_sweep);
timings.AddSplit("MarkImageToAllocSpaceReferences");
}
@@ -1147,31 +1126,28 @@
ObjectStack* live_stack = heap_->live_stack_.get();
byte* card_addr = card_table->CardFromAddr(obj);
- LOG(ERROR) << "Object " << obj << " references dead object " << ref << " on IsDirty = "
- << (*card_addr == GC_CARD_DIRTY);
- LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
- LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
+ LOG(ERROR) << "Object " << obj << " references dead object " << ref << "\n"
+ << "IsDirty = " << (*card_addr == CardTable::kCardDirty) << "\n"
+ << "Obj type " << PrettyTypeOf(obj) << "\n"
+ << "Ref type " << PrettyTypeOf(ref);
card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
void* cover_begin = card_table->AddrFromCard(card_addr);
void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
- GC_CARD_SIZE);
+ CardTable::kCardSize);
LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
- << "-" << cover_end;
+ << "-" << cover_end;
SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
// Print out how the object is live.
if (bitmap->Test(obj)) {
LOG(ERROR) << "Object " << obj << " found in live bitmap";
}
-
if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), obj)) {
LOG(ERROR) << "Object " << obj << " found in allocation stack";
}
-
if (std::binary_search(live_stack->Begin(), live_stack->End(), obj)) {
LOG(ERROR) << "Object " << obj << " found in live stack";
}
-
if (std::binary_search(live_stack->Begin(), live_stack->End(), ref)) {
LOG(ERROR) << "Reference " << ref << " found in live stack!";
}
@@ -1179,15 +1155,15 @@
// Attempt to see if the card table missed the reference.
ScanVisitor scan_visitor;
byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
- card_table->Scan(bitmap, byte_cover_begin, byte_cover_begin + GC_CARD_SIZE, scan_visitor,
- IdentityFunctor());
+ card_table->Scan(bitmap, byte_cover_begin, byte_cover_begin + CardTable::kCardSize,
+ scan_visitor, IdentityFunctor());
// Try and see if a mark sweep collector scans the reference.
ObjectStack* mark_stack = heap_->mark_stack_.get();
MarkSweep ms(mark_stack);
ms.Init();
mark_stack->Reset();
- ms.SetFinger(reinterpret_cast<Object*>(~size_t(0)));
+ ms.DisableFinger();
// All the references should end up in the mark stack.
ms.ScanRoot(obj);
@@ -1214,7 +1190,7 @@
if (bitmap->Test(obj)) {
return true;
}
- } else if (heap_->GetLargeObjectsSpace()->GetLiveObjects()->Test(obj)) {
+ } else if (heap_->GetLargeObjectsSpace()->Contains(obj)) {
return true;
} else {
heap_->DumpSpaces();
@@ -1288,11 +1264,14 @@
// analysis.
void operator ()(const Object* obj, const Object* ref, const MemberOffset& offset,
bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
- if (ref != NULL) {
+ if (ref != NULL && !obj->GetClass()->IsPrimitiveArray()) {
CardTable* card_table = heap_->GetCardTable();
// If the object is not dirty and it is referencing something in the live stack other than
// class, then it must be on a dirty card.
- if (!card_table->IsDirty(obj)) {
+ if (!card_table->AddrIsInCardTable(obj)) {
+ LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
+ *failed_ = true;
+ } else if (!card_table->IsDirty(obj)) {
ObjectStack* live_stack = heap_->live_stack_.get();
if (std::binary_search(live_stack->Begin(), live_stack->End(), ref) && !ref->IsClass()) {
if (std::binary_search(live_stack->Begin(), live_stack->End(), obj)) {
@@ -1379,22 +1358,30 @@
return true;
}
-void Heap::SwapBitmaps(Thread* self) {
+void Heap::SwapBitmaps(Thread* self, GcType gc_type) {
// Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
- // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
- // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark bit
- // instead, resulting in no new allocated objects being incorrectly freed by sweep.
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- ContinuousSpace* space = *it;
- // We never allocate into zygote spaces.
- if (space->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
- live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
- mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
- space->AsAllocSpace()->SwapBitmaps();
+ // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
+ // bits of dead objects in the live bitmap.
+ {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ ContinuousSpace* space = *it;
+ // We never allocate into zygote spaces.
+ if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+ (gc_type == kGcTypeFull &&
+ space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
+ live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
+ mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
+ space->AsAllocSpace()->SwapBitmaps();
+ }
}
}
+ SwapLargeObjects(self);
+}
+
+void Heap::SwapLargeObjects(Thread* self) {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
large_object_space_->SwapBitmaps();
live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
@@ -1411,6 +1398,23 @@
}
}
+void Heap::ClearCards(TimingLogger& timings) {
+ // Clear image space cards and keep track of cards we cleared in the mod-union table.
+ for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ ContinuousSpace* space = *it;
+ if (space->IsImageSpace()) {
+ mod_union_table_->ClearCards(*it);
+ timings.AddSplit("ModUnionClearCards");
+ } else if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+ zygote_mod_union_table_->ClearCards(space);
+ timings.AddSplit("ZygoteModUnionClearCards");
+ } else {
+ card_table_->ClearSpaceCards(space);
+ timings.AddSplit("ClearCards");
+ }
+ }
+}
+
void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_cause,
bool clear_soft_references) {
TimingLogger timings("ConcurrentCollectGarbageInternal", true);
@@ -1458,29 +1462,15 @@
// TODO: Investigate using a mark stack instead of a vector.
std::vector<byte*> dirty_cards;
if (gc_type == kGcTypeSticky) {
+ dirty_cards.reserve(4 * KB);
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
card_table_->GetDirtyCards(*it, dirty_cards);
}
+ timings.AddSplit("GetDirtyCards");
}
- // Make sure that the tables have the correct pointer for the mark sweep.
- mod_union_table_->Init(&mark_sweep);
- zygote_mod_union_table_->Init(&mark_sweep);
-
// Clear image space cards and keep track of cards we cleared in the mod-union table.
- for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- ContinuousSpace* space = *it;
- if (space->IsImageSpace()) {
- mod_union_table_->ClearCards(*it);
- timings.AddSplit("ModUnionClearCards");
- } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
- zygote_mod_union_table_->ClearCards(space);
- timings.AddSplit("ZygoteModUnionClearCards");
- } else {
- card_table_->ClearSpaceCards(space);
- timings.AddSplit("ClearCards");
- }
- }
+ ClearCards(timings);
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -1494,24 +1484,26 @@
// accidentally un-mark roots.
// Needed for scanning dirty objects.
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
- mark_sweep.CopyMarkBits(*it);
+ if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+ mark_sweep.BindLiveToMarkBitmap(*it);
}
}
- timings.AddSplit("CopyMarkBits");
+ timings.AddSplit("BindLiveToMark");
mark_sweep.SetImmuneRange(reinterpret_cast<Object*>(spaces_.front()->Begin()),
reinterpret_cast<Object*>(alloc_space_->Begin()));
} else if (gc_type == kGcTypeSticky) {
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
- mark_sweep.CopyMarkBits(*it);
+ if ((*it)->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
+ mark_sweep.BindLiveToMarkBitmap(*it);
}
}
+ timings.AddSplit("BindLiveToMark");
large_object_space_->CopyLiveToMarked();
- timings.AddSplit("CopyMarkBits");
+ timings.AddSplit("CopyLiveToMarked");
mark_sweep.SetImmuneRange(reinterpret_cast<Object*>(spaces_.front()->Begin()),
reinterpret_cast<Object*>(alloc_space_->Begin()));
}
+ mark_sweep.FindDefaultMarkBitmap();
// Marking roots is not necessary for sticky mark bits since we only actually require the
// remarking of roots.
@@ -1539,13 +1531,15 @@
timings.AddSplit("RootEnd");
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- UpdateAndMarkModUnion(timings, gc_type);
+ UpdateAndMarkModUnion(&mark_sweep, timings, gc_type);
- // Mark everything as live so that sweeping system weak works correctly for sticky mark bit
- // GCs.
- MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
- live_stack_.get());
- timings.AddSplit("MarkStackAsLive");
+ if (gc_type != kGcTypeSticky) {
+      // Mark everything allocated since the last GC as live so that we can sweep concurrently,
+ // knowing that new allocations won't be marked as live.
+ MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
+ live_stack_.get());
+ timings.AddSplit("MarkStackAsLive");
+ }
if (gc_type != kGcTypeSticky) {
// Recursively mark all the non-image bits set in the mark bitmap.
@@ -1568,13 +1562,6 @@
mark_sweep.ReMarkRoots();
timings.AddSplit("ReMarkRoots");
- if (verify_missing_card_marks_) {
- // Since verify missing card marks uses a sweep array to empty the allocation stack, we
- // need to make sure that we don't free weaks which wont get swept by SweepSystemWeaks.
- MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
- allocation_stack_.get());
- }
-
// Scan dirty objects, this is only required if we are not doing concurrent GC.
mark_sweep.RecursiveMarkDirtyObjects(false);
timings.AddSplit("RecursiveMarkDirtyObjects");
@@ -1585,32 +1572,36 @@
mark_sweep.ProcessReferences(clear_soft_references);
timings.AddSplit("ProcessReferences");
-
- // This doesn't work with mutators unpaused for some reason, TODO: Fix.
- mark_sweep.SweepSystemWeaks(false);
- timings.AddSplit("SweepSystemWeaks");
- }
-
- // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
- // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
- // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark
- // bit instead, resulting in no new allocated objects being incorrectly freed by sweep.
- const bool swap = true;
- if (swap) {
- SwapBitmaps(self);
}
// Only need to do this if we have the card mark verification on, and only during concurrent GC.
if (verify_missing_card_marks_) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- mark_sweep.SweepArray(timings, allocation_stack_.get(), swap);
+ mark_sweep.SweepArray(timings, allocation_stack_.get(), false);
} else {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// We only sweep over the live stack, and the live stack should not intersect with the
// allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
- UnMarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
- allocation_stack_.get());
+ UnMarkAllocStack(alloc_space_->GetMarkBitmap(), large_object_space_->GetMarkObjects(),
+ allocation_stack_.get());
timings.AddSplit("UnMarkAllocStack");
+#ifndef NDEBUG
+ if (gc_type == kGcTypeSticky) {
+ // Make sure everything in the live stack isn't something we unmarked.
+ std::sort(allocation_stack_->Begin(), allocation_stack_->End());
+ for (Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
+ if (std::binary_search(allocation_stack_->Begin(), allocation_stack_->End(), *it)) {
+ LOG(FATAL) << "Unmarked object " << *it << " in the live stack";
+ }
+ }
+ } else {
+ for (Object** it = allocation_stack_->Begin(); it != allocation_stack_->End(); ++it) {
+ if (GetLiveBitmap()->Test(*it)) {
+ LOG(FATAL) << "Object " << *it << " is marked as live";
+ }
+ }
+ }
+#endif
}
if (kIsDebugBuild) {
@@ -1621,10 +1612,14 @@
}
if (verify_post_gc_heap_) {
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- if (!VerifyHeapReferences()) {
- LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
+ SwapBitmaps(self, gc_type);
+ {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ if (!VerifyHeapReferences()) {
+ LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
+ }
}
+ SwapBitmaps(self, gc_type);
timings.AddSplit("VerifyHeapReferencesPostGC");
}
@@ -1636,16 +1631,33 @@
// TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above).
if (gc_type != kGcTypeSticky) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- mark_sweep.SweepLargeObjects(swap);
+ mark_sweep.Sweep(gc_type == kGcTypePartial, false);
+ timings.AddSplit("Sweep");
+ mark_sweep.SweepLargeObjects(false);
timings.AddSplit("SweepLargeObjects");
- mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
} else {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- mark_sweep.SweepArray(timings, live_stack_.get(), swap);
+ mark_sweep.SweepArray(timings, live_stack_.get(), false);
timings.AddSplit("SweepArray");
}
live_stack_->Reset();
- timings.AddSplit("Sweep");
+ }
+
+ {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ // Unbind the live and mark bitmaps.
+ mark_sweep.UnBindBitmaps();
+ }
+
+  // Swap the live and mark bitmaps for each space which we modified. This is an
+ // optimization that enables us to not clear live bits inside of the sweep.
+ const bool swap = true;
+ if (swap) {
+ if (gc_type == kGcTypeSticky) {
+ SwapLargeObjects(self);
+ } else {
+ SwapBitmaps(self, gc_type);
+ }
}
if (verify_system_weaks_) {
diff --git a/src/heap.h b/src/heap.h
index b4bc22f..1478290 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -21,12 +21,12 @@
#include <string>
#include <vector>
-#include "atomic_stack.h"
#include "atomic_integer.h"
-#include "card_table.h"
+#include "gc/atomic_stack.h"
+#include "gc/card_table.h"
+#include "gc/heap_bitmap.h"
#include "globals.h"
#include "gtest/gtest.h"
-#include "heap_bitmap.h"
#include "locks.h"
#include "offsets.h"
#include "safe_map.h"
@@ -45,6 +45,7 @@
class HeapBitmap;
class ImageSpace;
class LargeObjectSpace;
+class MarkSweep;
class ModUnionTable;
class Mutex;
class Object;
@@ -282,7 +283,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Update and mark mod union table based on gc type.
- void UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type)
+ void UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, GcType gc_type)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
@@ -316,7 +317,8 @@
void RequestConcurrentGC(Thread* self);
// Swap bitmaps (if we are a full Gc then we swap the zygote bitmap too).
- void SwapBitmaps(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);
+ void SwapBitmaps(Thread* self, GcType gc_type);
+ void SwapLargeObjects(Thread* self);
void RecordAllocation(size_t size, Object* object)
LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
@@ -358,6 +360,9 @@
// Swap the allocation stack with the live stack.
void SwapStacks();
+ // Clear cards and update the mod union table.
+ void ClearCards(TimingLogger& timings);
+
Spaces spaces_;
// A map that we use to temporarily reserve address range for the oat file.
diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc
index 125b19a..6965015 100644
--- a/src/hprof/hprof.cc
+++ b/src/hprof/hprof.cc
@@ -48,7 +48,7 @@
#include "os.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "space.h"
+#include "gc/space.h"
#include "stringprintf.h"
#include "thread_list.h"
diff --git a/src/image_test.cc b/src/image_test.cc
index c569b79..05ddc54 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -23,7 +23,7 @@
#include "image_writer.h"
#include "oat_writer.h"
#include "signal_catcher.h"
-#include "space.h"
+#include "gc/space.h"
#include "utils.h"
namespace art {
diff --git a/src/image_writer.cc b/src/image_writer.cc
index 0b9c8c0..13d9374 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -37,7 +37,7 @@
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
-#include "space.h"
+#include "gc/space.h"
#include "UniquePtr.h"
#include "utils.h"
diff --git a/src/image_writer.h b/src/image_writer.h
index 5a8da1b..638add7 100644
--- a/src/image_writer.h
+++ b/src/image_writer.h
@@ -30,7 +30,7 @@
#include "object.h"
#include "os.h"
#include "safe_map.h"
-#include "space.h"
+#include "gc/space.h"
#include "UniquePtr.h"
namespace art {
diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc
index b5e1c19..3ef5e5c 100644
--- a/src/native/dalvik_system_DexFile.cc
+++ b/src/native/dalvik_system_DexFile.cc
@@ -27,7 +27,7 @@
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
-#include "space.h"
+#include "gc/space.h"
#include "toStringArray.h"
#include "zip_archive.h"
diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc
index 91b7b5f..93f6d5b 100644
--- a/src/native/dalvik_system_VMRuntime.cc
+++ b/src/native/dalvik_system_VMRuntime.cc
@@ -22,7 +22,7 @@
#include "object.h"
#include "object_utils.h"
#include "scoped_thread_state_change.h"
-#include "space.h"
+#include "gc/space.h"
#include "thread.h"
#include "thread_list.h"
#include "toStringArray.h"
diff --git a/src/oat_writer.cc b/src/oat_writer.cc
index 2969c21..5e12299 100644
--- a/src/oat_writer.cc
+++ b/src/oat_writer.cc
@@ -24,7 +24,7 @@
#include "os.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "space.h"
+#include "gc/space.h"
#include "stl_util.h"
#include "verifier/method_verifier.h"
diff --git a/src/oatdump.cc b/src/oatdump.cc
index 01b2042..e4fc930 100644
--- a/src/oatdump.cc
+++ b/src/oatdump.cc
@@ -33,7 +33,7 @@
#include "runtime.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "space.h"
+#include "gc/space.h"
#include "stringpiece.h"
#include "gc_map.h"
diff --git a/src/runtime.cc b/src/runtime.cc
index 3444f78..dc37ff2 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -41,7 +41,7 @@
#include "signal_catcher.h"
#include "signal_set.h"
#include "sirt_ref.h"
-#include "space.h"
+#include "gc/space.h"
#include "thread.h"
#include "thread_list.h"
#include "trace.h"
diff --git a/src/thread.cc b/src/thread.cc
index 6e4a633..db8c39f 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -47,7 +47,7 @@
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "sirt_ref.h"
-#include "space.h"
+#include "gc/space.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread_list.h"