 runtime/gc/collector/immune_spaces.cc      |  68 ++++----
 runtime/gc/collector/immune_spaces_test.cc | 338 +++++++++++++++++--------
 2 files changed, 304 insertions(+), 102 deletions(-)
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 26da4ca5a9..1e5f28382b 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -16,6 +16,10 @@
#include "immune_spaces.h"
+#include <algorithm>
+#include <tuple>
+#include <vector>
+
#include "gc/space/space-inl.h"
#include "mirror/object.h"
#include "oat_file.h"
@@ -32,11 +35,12 @@ void ImmuneSpaces::Reset() {
void ImmuneSpaces::CreateLargestImmuneRegion() {
uintptr_t best_begin = 0u;
uintptr_t best_end = 0u;
+ uintptr_t best_heap_size = 0u;
uintptr_t cur_begin = 0u;
uintptr_t cur_end = 0u;
- // TODO: If the last space is an image space, we may include its oat file in the immune region.
- // This could potentially hide heap corruption bugs if there is invalid pointers that point into
- // the boot oat code
+ uintptr_t cur_heap_size = 0u;
+ using Interval = std::tuple</*start*/uintptr_t, /*end*/uintptr_t, /*is_heap*/bool>;
+ std::vector<Interval> intervals;
for (space::ContinuousSpace* space : GetSpaces()) {
uintptr_t space_begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t space_end = reinterpret_cast<uintptr_t>(space->Limit());
@@ -50,29 +54,47 @@ void ImmuneSpaces::CreateLargestImmuneRegion() {
// creation, the actual oat file could be somewhere else.
const OatFile* const image_oat_file = image_space->GetOatFile();
if (image_oat_file != nullptr) {
- uintptr_t oat_begin = reinterpret_cast<uintptr_t>(image_oat_file->Begin());
- uintptr_t oat_end = reinterpret_cast<uintptr_t>(image_oat_file->End());
- if (space_end == oat_begin) {
- DCHECK_GE(oat_end, oat_begin);
- space_end = oat_end;
- }
+ intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
+ reinterpret_cast<uintptr_t>(image_oat_file->End()),
+ /*is_heap*/false));
}
}
- if (cur_begin == 0u) {
- cur_begin = space_begin;
- cur_end = space_end;
- } else if (cur_end == space_begin) {
- // Extend current region.
- cur_end = space_end;
- } else {
- // Reset.
- cur_begin = 0;
- cur_end = 0;
+ intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
+ }
+ std::sort(intervals.begin(), intervals.end());
+ // The intervals are sorted by begin address. If a new interval begins exactly at the end of
+ // the current region we extend the region; otherwise we restart it. Oat files must never
+ // start a region, so oat intervals that do not extend an existing region are skipped.
+ // Whenever the heap (image) bytes covered by the current region exceed those of the best
+ // region seen so far, the current region becomes the new best.
+ for (const Interval& interval : intervals) {
+ const uintptr_t begin = std::get<0>(interval);
+ const uintptr_t end = std::get<1>(interval);
+ const bool is_heap = std::get<2>(interval);
+ VLOG(collector) << "Interval " << reinterpret_cast<const void*>(begin) << "-"
+ << reinterpret_cast<const void*>(end) << " is_heap=" << is_heap;
+ DCHECK_GE(end, begin);
+ DCHECK_GE(begin, cur_end);
+ // The new interval does not begin at the end of the current region. If it is a heap
+ // interval, start a new region there; oat intervals never start a region, so skip them.
+ if (begin != cur_end) {
+ if (!is_heap) {
+ continue;
+ }
+ // Not extending, reset the region.
+ cur_begin = begin;
+ cur_heap_size = 0;
}
- if (cur_end - cur_begin > best_end - best_begin) {
- // Improvement, update the best range.
- best_begin = cur_begin;
- best_end = cur_end;
+ cur_end = end;
+ if (is_heap) {
+ // Only update the best region if the current region covers more heap bytes. Oat file
+ // bytes are not counted since they contain no Java objects.
+ cur_heap_size += end - begin;
+ if (cur_heap_size > best_heap_size) {
+ best_begin = cur_begin;
+ best_end = cur_end;
+ best_heap_size = cur_heap_size;
+ }
}
}
largest_immune_region_.SetBegin(reinterpret_cast<mirror::Object*>(best_begin));
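
The hunk above replaces the single linear pass over spaces with a sort-and-merge over
explicit (start, end, is_heap) intervals: adjacent intervals are coalesced, oat intervals
may extend a region but never start one, and candidate regions are ranked by the heap
bytes they cover rather than by their raw span. Below is a minimal standalone sketch of
that merge, assuming plain integer addresses in place of ART's space and oat types; the
names Interval and LargestRegionByHeapBytes are illustrative, not part of the ART API.

#include <algorithm>
#include <cstdint>
#include <tuple>
#include <utility>
#include <vector>

// (begin, end, is_heap), mirroring the tuples built in the hunk above.
using Interval = std::tuple<uint64_t, uint64_t, bool>;

// Returns the [begin, end) region covering the most heap bytes.
std::pair<uint64_t, uint64_t> LargestRegionByHeapBytes(std::vector<Interval> intervals) {
  std::sort(intervals.begin(), intervals.end());
  uint64_t best_begin = 0, best_end = 0, best_heap = 0;
  uint64_t cur_begin = 0, cur_end = 0, cur_heap = 0;
  for (const Interval& interval : intervals) {
    const uint64_t begin = std::get<0>(interval);
    const uint64_t end = std::get<1>(interval);
    const bool is_heap = std::get<2>(interval);
    if (begin != cur_end) {     // Gap: the interval does not extend the current region.
      if (!is_heap) {
        continue;               // Oat intervals never start a region.
      }
      cur_begin = begin;        // Restart the region on a heap interval.
      cur_heap = 0;
    }
    cur_end = end;
    if (is_heap) {
      cur_heap += end - begin;  // Only heap bytes count toward "best".
      if (cur_heap > best_heap) {
        best_begin = cur_begin;
        best_end = cur_end;
        best_heap = cur_heap;
      }
    }
  }
  return std::make_pair(best_begin, best_end);
}

Note that best_end only advances on heap intervals, so the chosen region never ends inside
an oat file; an oat file is swallowed only when a further heap interval sits directly
behind it.
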
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 56838f5c06..cf93ec614d 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -28,50 +28,6 @@ class Object;
namespace gc {
namespace collector {
-class ImmuneSpacesTest : public CommonRuntimeTest {};
-
-class DummySpace : public space::ContinuousSpace {
- public:
- DummySpace(uint8_t* begin, uint8_t* end)
- : ContinuousSpace("DummySpace",
- space::kGcRetentionPolicyNeverCollect,
- begin,
- end,
- /*limit*/end) {}
-
- space::SpaceType GetType() const OVERRIDE {
- return space::kSpaceTypeMallocSpace;
- }
-
- bool CanMoveObjects() const OVERRIDE {
- return false;
- }
-
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
- return nullptr;
- }
-
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
- return nullptr;
- }
-};
-
-TEST_F(ImmuneSpacesTest, AppendBasic) {
- ImmuneSpaces spaces;
- uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
- DummySpace a(base, base + 45 * KB);
- DummySpace b(a.Limit(), a.Limit() + 813 * KB);
- {
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- spaces.AddSpace(&a);
- spaces.AddSpace(&b);
- }
- EXPECT_TRUE(spaces.ContainsSpace(&a));
- EXPECT_TRUE(spaces.ContainsSpace(&b));
- EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), a.Begin());
- EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), b.Limit());
-}
-
class DummyOatFile : public OatFile {
public:
DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
@@ -84,23 +40,50 @@ class DummyImageSpace : public space::ImageSpace {
public:
DummyImageSpace(MemMap* map,
accounting::ContinuousSpaceBitmap* live_bitmap,
- std::unique_ptr<DummyOatFile>&& oat_file)
+ std::unique_ptr<DummyOatFile>&& oat_file,
+ std::unique_ptr<MemMap>&& oat_map)
: ImageSpace("DummyImageSpace",
/*image_location*/"",
map,
live_bitmap,
- map->End()) {
+ map->End()),
+ oat_map_(std::move(oat_map)) {
oat_file_ = std::move(oat_file);
oat_file_non_owned_ = oat_file_.get();
}
- // Size is the size of the image space, oat offset is where the oat file is located
- // after the end of image space. oat_size is the size of the oat file.
- static DummyImageSpace* Create(size_t size, size_t oat_offset, size_t oat_size) {
+ private:
+ std::unique_ptr<MemMap> oat_map_;
+};
+
+class ImmuneSpacesTest : public CommonRuntimeTest {
+ static constexpr size_t kMaxBitmaps = 10;
+
+ public:
+ ImmuneSpacesTest() {}
+
+ void ReserveBitmaps() {
+ // Pre-allocate a pool of dummy bitmaps, which are required to create image spaces. The
+ // bitmaps do not need to cover the image spaces themselves.
+ for (size_t i = 0; i < kMaxBitmaps; ++i) {
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
+ accounting::ContinuousSpaceBitmap::Create("bitmap",
+ reinterpret_cast<uint8_t*>(kPageSize),
+ kPageSize));
+ CHECK(bitmap != nullptr);
+ live_bitmaps_.push_back(std::move(bitmap));
+ }
+ }
+
+ // Create an image space with an oat file mapped at the requested addresses.
+ DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
+ size_t image_size,
+ uint8_t* oat_begin,
+ size_t oat_size) {
std::string error_str;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
- nullptr,
- size,
+ image_begin,
+ image_size,
PROT_READ | PROT_WRITE,
/*low_4gb*/true,
/*reuse*/false,
@@ -109,14 +92,21 @@ class DummyImageSpace : public space::ImageSpace {
LOG(ERROR) << error_str;
return nullptr;
}
- std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(
- accounting::ContinuousSpaceBitmap::Create("bitmap", map->Begin(), map->Size()));
- if (live_bitmap == nullptr) {
+ CHECK(!live_bitmaps_.empty());
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
+ live_bitmaps_.pop_back();
+ std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
+ oat_begin,
+ oat_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ &error_str));
+ if (oat_map == nullptr) {
+ LOG(ERROR) << error_str;
return nullptr;
}
- // The actual mapped oat file may not be directly after the image for the app image case.
- std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(map->End() + oat_offset,
- map->End() + oat_offset + oat_size));
+ std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
new (map->Begin()) ImageHeader(
@@ -126,10 +116,10 @@ class DummyImageSpace : public space::ImageSpace {
/*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
/*oat_checksum*/0u,
// The oat file data in the header is always right after the image space.
- /*oat_file_begin*/PointerToLowMemUInt32(map->End()),
- /*oat_data_begin*/PointerToLowMemUInt32(map->End()),
- /*oat_data_end*/PointerToLowMemUInt32(map->End() + oat_size),
- /*oat_file_end*/PointerToLowMemUInt32(map->End() + oat_size),
+ /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
/*boot_image_begin*/0u,
/*boot_image_size*/0u,
/*boot_oat_begin*/0u,
@@ -139,27 +129,113 @@ class DummyImageSpace : public space::ImageSpace {
/*is_pic*/false,
ImageHeader::kStorageModeUncompressed,
/*storage_size*/0u);
- return new DummyImageSpace(map.release(), live_bitmap.release(), std::move(oat_file));
+ return new DummyImageSpace(map.release(),
+ live_bitmap.release(),
+ std::move(oat_file),
+ std::move(oat_map));
}
+
+ // Returns the start of a free memory region; the mapping itself is released before
+ // returning, so the caller must ensure that no other thread maps at the returned address.
+ static uint8_t* GetContinuousMemoryRegion(size_t size) {
+ std::string error_str;
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
+ nullptr,
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ &error_str));
+ if (map == nullptr) {
+ LOG(ERROR) << "Failed to allocate memory region " << error_str;
+ return nullptr;
+ }
+ return map->Begin();
+ }
+
+ private:
+ // Pool of pre-allocated dummy bitmaps. They are allocated up front so that their mappings
+ // cannot land in address ranges where the tests want to place image spaces.
+ std::vector<std::unique_ptr<accounting::ContinuousSpaceBitmap>> live_bitmaps_;
};
+class DummySpace : public space::ContinuousSpace {
+ public:
+ DummySpace(uint8_t* begin, uint8_t* end)
+ : ContinuousSpace("DummySpace",
+ space::kGcRetentionPolicyNeverCollect,
+ begin,
+ end,
+ /*limit*/end) {}
+
+ space::SpaceType GetType() const OVERRIDE {
+ return space::kSpaceTypeMallocSpace;
+ }
+
+ bool CanMoveObjects() const OVERRIDE {
+ return false;
+ }
+
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ return nullptr;
+ }
+
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ return nullptr;
+ }
+};
+
+TEST_F(ImmuneSpacesTest, AppendBasic) {
+ ImmuneSpaces spaces;
+ uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
+ DummySpace a(base, base + 45 * KB);
+ DummySpace b(a.Limit(), a.Limit() + 813 * KB);
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ spaces.AddSpace(&a);
+ spaces.AddSpace(&b);
+ }
+ EXPECT_TRUE(spaces.ContainsSpace(&a));
+ EXPECT_TRUE(spaces.ContainsSpace(&b));
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), a.Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), b.Limit());
+}
+
+// Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest, AppendAfterImage) {
+ ReserveBitmaps();
ImmuneSpaces spaces;
- constexpr size_t image_size = 123 * kPageSize;
- constexpr size_t image_oat_size = 321 * kPageSize;
- std::unique_ptr<DummyImageSpace> image_space(DummyImageSpace::Create(image_size,
- 0,
- image_oat_size));
+ constexpr size_t kImageSize = 123 * kPageSize;
+ constexpr size_t kImageOatSize = 321 * kPageSize;
+ constexpr size_t kOtherSpaceSize = 100 * kPageSize;
+
+ uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
+
+ std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
+ kImageSize,
+ memory + kImageSize,
+ kImageOatSize));
ASSERT_TRUE(image_space != nullptr);
const ImageHeader& image_header = image_space->GetImageHeader();
- EXPECT_EQ(image_header.GetImageSize(), image_size);
+ DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
+
+ EXPECT_EQ(image_header.GetImageSize(), kImageSize);
EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
- image_oat_size);
- DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + 813 * kPageSize);
- EXPECT_NE(image_space->Limit(), space.Begin());
+ kImageOatSize);
+ EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
+ // Check that the oat file is not included when no heap space follows it.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
spaces.AddSpace(image_space.get());
+ }
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+ image_space->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ image_space->Limit());
+ // Add another space and ensure it gets appended.
+ EXPECT_NE(image_space->Limit(), space.Begin());
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
spaces.AddSpace(&space);
}
EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
@@ -170,18 +246,122 @@ TEST_F(ImmuneSpacesTest, AppendAfterImage) {
EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
image_space->Begin());
EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
- // Check that appending with a gap between the map does not include the oat file.
- image_space.reset(DummyImageSpace::Create(image_size, kPageSize, image_oat_size));
- spaces.Reset();
+}
+
+// Tests [image1][image2][image1 oat][image2 oat][image3][image3 oat] producing a single
+// large immune region.
+TEST_F(ImmuneSpacesTest, MultiImage) {
+ ReserveBitmaps();
+ // Image 2 needs to be smaller, or else it may get chosen for the immune region.
+ constexpr size_t kImage1Size = kPageSize * 17;
+ constexpr size_t kImage2Size = kPageSize * 13;
+ constexpr size_t kImage3Size = kPageSize * 3;
+ constexpr size_t kImage1OatSize = kPageSize * 5;
+ constexpr size_t kImage2OatSize = kPageSize * 8;
+ constexpr size_t kImage3OatSize = kPageSize;
+ constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
+ constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
+ uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
+ uint8_t* space1_begin = memory;
+ memory += kImage1Size;
+ uint8_t* space2_begin = memory;
+ memory += kImage2Size;
+ uint8_t* space1_oat_begin = memory;
+ memory += kImage1OatSize;
+ uint8_t* space2_oat_begin = memory;
+ memory += kImage2OatSize;
+ uint8_t* space3_begin = memory;
+
+ std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
+ kImage1Size,
+ space1_oat_begin,
+ kImage1OatSize));
+ ASSERT_TRUE(space1 != nullptr);
+
+ std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
+ kImage2Size,
+ space2_oat_begin,
+ kImage2OatSize));
+ ASSERT_TRUE(space2 != nullptr);
+
+ // Finally put a 3rd image space.
+ std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
+ kImage3Size,
+ space3_begin + kImage3Size,
+ kImage3OatSize));
+ ASSERT_TRUE(space3 != nullptr);
+
+ // Check that the oat files are not included when no heap space follows them.
+ ImmuneSpaces spaces;
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- spaces.AddSpace(image_space.get());
+ LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
+ spaces.AddSpace(space1.get());
+ LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
+ spaces.AddSpace(space2.get());
}
+ // Since no heap space follows the oat files, the immune region should cover only the first
+ // two image spaces and exclude both oat files.
EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
- image_space->Begin());
- // Size should be equal, we should not add the oat file since it is not adjacent to the image
- // space.
- EXPECT_EQ(spaces.GetLargestImmuneRegion().Size(), image_size);
+ space1->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ space2->Limit());
+
+ // Add another space after the oat files, now it should contain the entire memory region.
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
+ spaces.AddSpace(space3.get());
+ }
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+ space1->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ space3->Limit());
+
+ // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
+ // Image size is kImageBytes - kPageSize.
+ // Oat size is kPageSize.
+ // Guard pages ensure that the space is not adjacent to any existing immune region.
+ // Layout: [guard page][image][oat][guard page]
+ constexpr size_t kGuardSize = kPageSize;
+ constexpr size_t kImage4Size = kImageBytes - kPageSize;
+ constexpr size_t kImage4OatSize = kPageSize;
+ uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
+ std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
+ kImage4Size,
+ memory2 + kGuardSize + kImage4Size,
+ kImage4OatSize));
+ ASSERT_TRUE(space4 != nullptr);
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
+ spaces.AddSpace(space4.get());
+ }
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+ space1->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ space3->Limit());
+
+ // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
+ // Image size is kImageBytes + kPageSize.
+ // Oat size is kPageSize.
+ // Guard pages ensure that the space is not adjacent to any existing immune region.
+ // Layout: [guard page][image][oat][guard page]
+ constexpr size_t kImage5Size = kImageBytes + kPageSize;
+ constexpr size_t kImage5OatSize = kPageSize;
+ uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
+ std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
+ kImage5Size,
+ memory3 + kGuardSize + kImage5Size,
+ kImage5OatSize));
+ ASSERT_TRUE(space5 != nullptr);
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
+ spaces.AddSpace(space5.get());
+ }
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
}
} // namespace collector
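
As a worked check of the MultiImage expectations, the sketch after the first file's hunk
can be driven with the test's layout in page units:
[image1 17][image2 13][oat1 5][oat2 8][image3 3][oat3 1]. The driver below is a
hypothetical addition to that sketch (it reuses its Interval and
LargestRegionByHeapBytes), not part of the test itself.

#include <cassert>

int main() {
  std::vector<Interval> intervals;
  intervals.push_back(Interval(0, 17, true));    // image1
  intervals.push_back(Interval(17, 30, true));   // image2
  intervals.push_back(Interval(30, 35, false));  // image1 oat
  intervals.push_back(Interval(35, 43, false));  // image2 oat
  intervals.push_back(Interval(43, 46, true));   // image3
  intervals.push_back(Interval(46, 47, false));  // image3 oat
  std::pair<uint64_t, uint64_t> region = LargestRegionByHeapBytes(intervals);
  // The region spans image1 through image3 (17 + 13 + 3 = 33 heap pages) and excludes the
  // trailing oat file, matching the space1->Begin()..space3->Limit() expectation above.
  assert(region.first == 0 && region.second == 46);
  // A separate 32-page image (space4) covers fewer heap pages and loses; a separate
  // 34-page image (space5) covers more and becomes the new largest immune region.
  return 0;
}
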