author    Mathieu Chartier <mathieuc@google.com>  2012-08-02 14:55:54 -0700
committer Mathieu Chartier <mathieuc@google.com>  2012-08-03 12:41:54 -0700
commit    dcf8d7283bd51714f3faa55b631ae4103dc98b51 (patch)
tree      ce4f6f1b836387d9b448fa1583295a38d7d743b8 /src
parent    259487a86ea29c840d586610f099e8177aab3f40 (diff)
Fix zygote live/mark bitmap size.
Fixed some errors with the sizes of the mark/live bitmaps after zygote space
creation; these occasionally caused the mark and live bitmaps to overlap.
Added a new verify-objects mode, VERIFY_OBJECT_FAST, which only checks objects
and not their classes. Refactored/optimized some of the scanning code to use
xor to clear bits instead of and+not.

Change-Id: Iec87d9157f69e6a558e300950b51d8781679e3f7
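The xor-for-and+not change mentioned above shows up in every bitmap walk loop
in this diff. Each bitmap word packs one bit per kAlignment bytes of heap, CLZ
finds the highest set bit, and because that bit is known to be set, a single
xor clears it where the old code needed a shift, a NOT, and an AND. A minimal
standalone sketch, using local stand-ins for the ART names (assumes 32-bit
words, 8-byte alignment, and the GCC/Clang __builtin_clz in place of ART's
CLZ):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kHighBit = 1u << 31;   // stand-in for kWordHighBitMask
  const size_t kAlignment = 8;          // bytes of heap per bitmap bit
  uintptr_t ptr_base = 0x1000;          // heap address covered by this word
  uint32_t w = 0x90000002;              // bits set for offsets 0, 3*8, 30*8

  while (w != 0) {
    const int shift = __builtin_clz(w);             // highest set bit, from the MSB
    uintptr_t obj = ptr_base + shift * kAlignment;  // recover the object address
    printf("object at %#lx\n", static_cast<unsigned long>(obj));
    // Old form: w &= ~(kHighBit >> shift);  // and+not
    w ^= kHighBit >> shift;  // the bit is known set, so xor alone clears it
  }
  return 0;
}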
Diffstat (limited to 'src')
-rw-r--r--  src/globals.h       |  1
-rw-r--r--  src/heap.cc         | 68
-rw-r--r--  src/heap.h          |  3
-rw-r--r--  src/space.cc        |  8
-rw-r--r--  src/space_bitmap.cc | 30
-rw-r--r--  src/space_bitmap.h  | 23
-rw-r--r--  src/space_test.cc   |  4
7 files changed, 75 insertions, 62 deletions
diff --git a/src/globals.h b/src/globals.h
index 0efa7eb2ac..1eeaca26aa 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -36,6 +36,7 @@ const int kPointerSize = sizeof(void*);
const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerWord = kWordSize * kBitsPerByte;
+const int kWordHighBitMask = 1 << (kBitsPerWord - 1);
// Required stack alignment
const int kStackAlignment = 16;
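One subtlety worth noting about kWordHighBitMask: it is declared as an int,
and with 32-bit words only the sign bit is set, i.e. the value is negative.
Right-shifting it as an int would arithmetic-shift on common implementations
and smear ones downward, which is presumably why every use site in this diff
casts it to size_t or uintptr_t before shifting. A small sketch of the
difference, assuming a 32-bit int:

#include <cstdint>
#include <cstdio>

int main() {
  const int kWordHighBitMask = 1 << 31;  // INT_MIN on common 32-bit-int platforms
  const int shift = 4;
  // Shifting the negative int right smears the sign bit (implementation-defined):
  printf("%#x\n", static_cast<uint32_t>(kWordHighBitMask >> shift));  // typically 0xf8000000
  // Casting to unsigned first yields the single-bit mask the walk loops need:
  printf("%#x\n", static_cast<uint32_t>(kWordHighBitMask) >> shift);  // 0x8000000
  return 0;
}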
diff --git a/src/heap.cc b/src/heap.cc
index a96472bc55..626adf9728 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -393,13 +393,15 @@ bool Heap::IsLiveObjectLocked(const Object* obj) {
#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
- if (this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
+ if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
Thread::Current() == NULL ||
Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
return;
}
- ScopedHeapLock heap_lock;
- Heap::VerifyObjectLocked(obj);
+ {
+ ScopedHeapLock heap_lock;
+ Heap::VerifyObjectLocked(obj);
+ }
}
#endif
@@ -412,39 +414,39 @@ void Heap::DumpSpaces() {
void Heap::VerifyObjectLocked(const Object* obj) {
lock_->AssertHeld();
- if (obj != NULL) {
- if (!IsAligned<kObjectAlignment>(obj)) {
- LOG(FATAL) << "Object isn't aligned: " << obj;
- } else if (!GetLiveBitmap()->Test(obj)) {
- Space* space = FindSpaceFromObject(obj);
- if (space == NULL) {
- DumpSpaces();
- LOG(FATAL) << "Object " << obj << " is not contained in any space";
- }
- LOG(FATAL) << "Object is dead: " << obj << " in space " << *space;
+ if (!IsAligned<kObjectAlignment>(obj)) {
+ LOG(FATAL) << "Object isn't aligned: " << obj;
+ } else if (!GetLiveBitmap()->Test(obj)) {
+ Space* space = FindSpaceFromObject(obj);
+ if (space == NULL) {
+ DumpSpaces();
+ LOG(FATAL) << "Object " << obj << " is not contained in any space";
}
- // Ignore early dawn of the universe verifications
- if (num_objects_allocated_ > 10) {
- const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
- Object::ClassOffset().Int32Value();
- const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
- if (c == NULL) {
- LOG(FATAL) << "Null class in object: " << obj;
- } else if (!IsAligned<kObjectAlignment>(c)) {
- LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
- } else if (!GetLiveBitmap()->Test(c)) {
- LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
- }
- // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
- // Note: we don't use the accessors here as they have internal sanity checks
- // that we don't want to run
- raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
- const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
- raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
- const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
- CHECK_EQ(c_c, c_c_c);
+ LOG(FATAL) << "Object is dead: " << obj << " in space " << *space;
+ }
+#if !VERIFY_OBJECT_FAST
+ // Ignore early dawn of the universe verifications
+ if (num_objects_allocated_ > 10) {
+ const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
+ Object::ClassOffset().Int32Value();
+ const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
+ if (c == NULL) {
+ LOG(FATAL) << "Null class in object: " << obj;
+ } else if (!IsAligned<kObjectAlignment>(c)) {
+ LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
+ } else if (!GetLiveBitmap()->Test(c)) {
+ LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
}
+ // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
+ // Note: we don't use the accessors here as they have internal sanity checks
+ // that we don't want to run
+ raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
+ const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
+ raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
+ const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
+ CHECK_EQ(c_c, c_c_c);
}
+#endif
}
void Heap::VerificationCallback(Object* obj, void* arg) {
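The CHECK_EQ(c_c, c_c_c) above works because the class hierarchy has a fixed
point: the class of java.lang.Class is java.lang.Class itself, so from any
healthy object, two hops through the class pointer must land on a value that a
third hop reproduces. A toy model of that invariant (illustrative types, not
the real ART Object/Class):

#include <cassert>

struct Obj { Obj* klass; };  // stand-in for the embedded class pointer

int main() {
  Obj java_lang_Class{nullptr};
  java_lang_Class.klass = &java_lang_Class;  // Class's class is itself
  Obj string_class{&java_lang_Class};        // some ordinary class
  Obj s{&string_class};                      // an instance of that class

  Obj* c     = s.klass;       // obj.getClass()
  Obj* c_c   = c->klass;      // obj.getClass().getClass()
  Obj* c_c_c = c_c->klass;    // obj.getClass().getClass().getClass()
  assert(c_c == c_c_c);       // a corrupt class pointer would break this
  return 0;
}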
diff --git a/src/heap.h b/src/heap.h
index a1b1bd9ba8..e908248f93 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -31,6 +31,9 @@
#define VERIFY_OBJECT_ENABLED 0
+// Fast verification means we do not verify the classes of objects.
+#define VERIFY_OBJECT_FAST 1
+
namespace art {
class AllocSpace;
diff --git a/src/space.cc b/src/space.cc
index 24eca26c39..02230e146d 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -206,7 +206,7 @@ AllocSpace* AllocSpace::CreateZygoteSpace() {
growth_limit_ = RoundUp(size, kPageSize);
// FIXME: Do we need reference counted pointers here?
// Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
- VLOG(heap) << "Creating new alloc space: ";
+ VLOG(heap) << "Creating new AllocSpace: ";
VLOG(heap) << "Size " << mem_map_->Size();
VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
VLOG(heap) << "Capacity " << PrettySize(capacity);
@@ -218,8 +218,10 @@ AllocSpace* AllocSpace::CreateZygoteSpace() {
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
}
AllocSpace* alloc_space = new AllocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
- live_bitmap_->Trim(Capacity()); // TODO - kPageSize?
- mark_bitmap_->Trim(Capacity()); // TODO - kPageSize?
+ live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
+ CHECK(live_bitmap_->HeapLimit() == reinterpret_cast<uintptr_t>(end_));
+ mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
+ CHECK(mark_bitmap_->HeapLimit() == reinterpret_cast<uintptr_t>(end_));
name_ += "-zygote-transformed";
VLOG(heap) << "zygote space creation done";
return alloc_space;
diff --git a/src/space_bitmap.cc b/src/space_bitmap.cc
index 74bc07c839..7da8146a14 100644
--- a/src/space_bitmap.cc
+++ b/src/space_bitmap.cc
@@ -38,8 +38,9 @@ SpaceBitmap* SpaceBitmap::Create(const std::string& name, byte* heap_begin, size
// Clean up any resources associated with the bitmap.
SpaceBitmap::~SpaceBitmap() {}
-void SpaceBitmap::Trim(size_t heap_capacity) {
- size_t new_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
+ DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
+ size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
if (new_size < bitmap_size_) {
bitmap_size_ = new_size;
}
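To make the new_size computation concrete: each bitmap bit covers kAlignment
bytes, so one bitmap word covers kAlignment * kBitsPerWord bytes of heap, and
OffsetToIndex divides by that. A worked sketch assuming kAlignment = 8 and
32-bit words (so 256 heap bytes per bitmap word), with OffsetToIndex
reimplemented locally from that definition:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t kAlignment = 8, kBitsPerWord = 32, kWordSize = 4;
  const uintptr_t heap_begin = 0x40000000;

  // One bitmap word covers kAlignment * kBitsPerWord = 256 heap bytes.
  auto OffsetToIndex = [&](uintptr_t offset) -> size_t {
    return offset / (kAlignment * kBitsPerWord);
  };

  size_t bitmap_size = OffsetToIndex(16 * 1024 * 1024) * kWordSize;
  printf("bitmap for 16 MB heap: %zu bytes\n", bitmap_size);  // 262144 (256 KB)

  // After the zygote split the original space ends 4 MB in, so shrink:
  uintptr_t new_end = heap_begin + 4 * 1024 * 1024;
  size_t new_size = OffsetToIndex(new_end - heap_begin) * kWordSize;
  if (new_size < bitmap_size) bitmap_size = new_size;
  printf("bitmap after SetHeapLimit: %zu bytes\n", bitmap_size);  // 65536 (64 KB)
  return 0;
}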
@@ -84,13 +85,12 @@ void SpaceBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
for (uintptr_t i = 0; i <= end; ++i) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
}
@@ -130,14 +130,13 @@ void SpaceBitmap::ScanWalk(uintptr_t scan_begin, uintptr_t scan_end, ScanCallbac
for (size_t i = start; i <= end; i++) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
void* finger = reinterpret_cast<void*>(IndexToOffset(i + 1) + heap_begin_);
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, finger, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
}
@@ -146,14 +145,13 @@ void SpaceBitmap::ScanWalk(uintptr_t scan_begin, uintptr_t scan_end, ScanCallbac
for (size_t i = start; i <= end; i++) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
void* finger = reinterpret_cast<void*>(IndexToOffset(i + 1) + heap_begin_);
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, finger, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
// update 'end' in case callback modified bitmap
@@ -194,11 +192,10 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap,
for (size_t i = start; i <= end; i++) {
word garbage = live[i] & ~mark[i];
if (UNLIKELY(garbage != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
while (garbage != 0) {
- int shift = CLZ(garbage);
- garbage &= ~(high_bit >> shift);
+ const size_t shift = CLZ(garbage);
+ garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
*pb++ = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
}
// Make sure that there are always enough slots available for an
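The garbage extraction above hinges on one line: an object is dead exactly
when its bit is set in the live bitmap but clear in the mark bitmap, so
live & ~mark yields a word holding only dead objects. A minimal sketch with
made-up bit patterns:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t live = 0xF0000001;         // objects allocated before this GC
  uint32_t mark = 0x90000001;         // objects reached during this GC
  uint32_t garbage = live & ~mark;    // 0x60000000: the dead objects only
  printf("garbage word %#x holds %d dead objects\n",
         garbage, __builtin_popcount(garbage));  // GCC/Clang builtin
  return 0;
}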
@@ -302,13 +299,12 @@ void SpaceBitmap::InOrderWalk(SpaceBitmap::Callback* callback, void* arg) {
for (uintptr_t i = 0; i <= end; ++i) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
WalkFieldsInOrder(visited.get(), callback, obj, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
}
diff --git a/src/space_bitmap.h b/src/space_bitmap.h
index 1e8a9a776b..adf1996afe 100644
--- a/src/space_bitmap.h
+++ b/src/space_bitmap.h
@@ -60,7 +60,7 @@ class SpaceBitmap {
// Pack the bits in backwards so they come out in address order when using CLZ.
static word OffsetToMask(uintptr_t offset_) {
- return 1 << (kBitsPerWord - 1 - (offset_ / kAlignment) % kBitsPerWord);
+ return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord);
}
inline void Set(const Object* obj) {
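The "backwards" packing above is what makes the CLZ walks visit objects in
ascending address order: the high bit stands for offset 0, so lower addresses
surface first. A sketch assuming a 32-bit word and kAlignment = 8, with
OffsetToMask restated locally:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kWordHighBitMask = 1u << 31;
  const size_t kAlignment = 8, kBitsPerWord = 32;

  auto OffsetToMask = [&](uintptr_t offset) -> uint32_t {
    return kWordHighBitMask >> ((offset / kAlignment) % kBitsPerWord);
  };

  uint32_t w = OffsetToMask(0) | OffsetToMask(64) | OffsetToMask(16);
  while (w != 0) {
    const int shift = __builtin_clz(w);
    printf("offset %d\n", shift * static_cast<int>(kAlignment));  // 0, 16, 64 in order
    w ^= kWordHighBitMask >> shift;
  }
  return 0;
}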
@@ -119,8 +119,7 @@ class SpaceBitmap {
size_t word_start = bit_index_start / kBitsPerWord;
size_t word_end = bit_index_end / kBitsPerWord;
-
- const size_t high_bit = 1 << (kBitsPerWord - 1);
+ DCHECK_LT(word_end * kWordSize, Size());
// Trim off left_bits of left bits.
size_t edge_word = bitmap_begin_[word_start];
@@ -138,7 +137,7 @@ class SpaceBitmap {
const size_t shift = CLZ(edge_word);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- edge_word &= ~(high_bit >> shift);
+ edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
} while (edge_word != 0);
}
word_start++;
@@ -151,7 +150,7 @@ class SpaceBitmap {
const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
} while (w != 0);
}
}
@@ -169,10 +168,10 @@ class SpaceBitmap {
edge_word &= ~((1 << trim_bits) - 1);
uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
while (edge_word != 0) {
- const int shift = CLZ(edge_word);
+ const size_t shift = CLZ(edge_word);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- edge_word &= ~(high_bit >> shift);
+ edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
@@ -202,11 +201,17 @@ class SpaceBitmap {
return IndexToOffset(Size() / kWordSize);
}
- uintptr_t HeapBegin() {
+ uintptr_t HeapBegin() const {
return heap_begin_;
}
- void Trim(size_t heap_capcity);
+ // The maximum address which the bitmap can span. (HeapBegin() <= object < HeapLimit()).
+ uintptr_t HeapLimit() const {
+ return HeapBegin() + static_cast<uintptr_t>(HeapSize());
+ }
+
+ // Set the max address which can be covered by the bitmap.
+ void SetHeapLimit(uintptr_t new_end);
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
diff --git a/src/space_test.cc b/src/space_test.cc
index f377a6142c..c1c1dca895 100644
--- a/src/space_test.cc
+++ b/src/space_test.cc
@@ -70,6 +70,10 @@ TEST_F(SpaceTest, Init) {
}
}
+// TODO: This test is not very good; we should improve it.
+// The test should do more allocations before the creation of the ZygoteSpace, and then do
+// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
+// the GC works with the ZygoteSpace.
TEST_F(SpaceTest, ZygoteSpace) {
AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
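A hedged sketch of the improved test the TODO above calls for. The allocation
entry point (AllocWithoutGrowth) and driving GCs through the runtime heap are
assumptions about this era's API, not taken from this diff:

TEST_F(SpaceTest, ZygoteSpaceAllocations) {
  AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
  ASSERT_TRUE(space != NULL);

  // Allocate before the split so the original space is non-empty.
  Object* pre = space->AllocWithoutGrowth(1 * MB);  // assumed allocation API
  ASSERT_TRUE(pre != NULL);

  // Split off the zygote space; per CreateZygoteSpace, the old live/mark
  // bitmaps should now end exactly at the old space's end_.
  AllocSpace* post_zygote_space = space->CreateZygoteSpace();
  ASSERT_TRUE(post_zygote_space != NULL);

  // Allocate after the split; the new space must service it without its
  // bitmaps overlapping the zygote space's trimmed bitmaps.
  Object* post = post_zygote_space->AllocWithoutGrowth(1 * MB);  // assumed API
  ASSERT_TRUE(post != NULL);

  // Remaining per the TODO: trigger some GCs (e.g. via the runtime heap)
  // and verify scanning/sweeping works across both spaces.
}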