Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/atomic_stack.h            |   3
-rw-r--r--  runtime/gc/accounting/bitmap.cc                 |   3
-rw-r--r--  runtime/gc/accounting/card_table.cc             |   3
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc   |   2
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h      |   3
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc           |   3
-rw-r--r--  runtime/gc/accounting/space_bitmap.h            |   6
-rw-r--r--  runtime/gc/allocator/rosalloc.cc                |   3
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc      |  67
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc      | 198
-rw-r--r--  runtime/gc/collector/mark_sweep.cc              |   3
-rw-r--r--  runtime/gc/heap.cc                              |  15
-rw-r--r--  runtime/gc/heap.h                               |   3
-rw-r--r--  runtime/gc/heap_test.cc                         |   2
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc          |   6
-rw-r--r--  runtime/gc/space/bump_pointer_space.h           |   2
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc              |  12
-rw-r--r--  runtime/gc/space/dlmalloc_space.h               |   7
-rw-r--r--  runtime/gc/space/dlmalloc_space_random_test.cc  |  10
-rw-r--r--  runtime/gc/space/dlmalloc_space_static_test.cc  |  10
-rw-r--r--  runtime/gc/space/image_space.cc                 |  26
-rw-r--r--  runtime/gc/space/large_object_space.cc          |  11
-rw-r--r--  runtime/gc/space/large_object_space.h           |   2
-rw-r--r--  runtime/gc/space/large_object_space_test.cc     |   4
-rw-r--r--  runtime/gc/space/malloc_space.cc                |   6
-rw-r--r--  runtime/gc/space/malloc_space.h                 |   3
-rw-r--r--  runtime/gc/space/region_space.cc                |   4
-rw-r--r--  runtime/gc/space/rosalloc_space.cc              |  15
-rw-r--r--  runtime/gc/space/rosalloc_space.h               |   7
-rw-r--r--  runtime/gc/space/rosalloc_space_random_test.cc  |  15
-rw-r--r--  runtime/gc/space/rosalloc_space_static_test.cc  |  14
-rw-r--r--  runtime/gc/space/space_create_test.cc           |  29
-rw-r--r--  runtime/gc/space/space_test.h                   |   8
33 files changed, 278 insertions, 227 deletions
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 313b2b4fe4..9431f80a10 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,9 @@ class AtomicStack {
void Init() {
std::string error_msg;
mem_map_ = MemMap::MapAnonymous(name_.c_str(),
- /* addr= */ nullptr,
capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_.Begin();
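
The pattern above repeats throughout this change: call sites that passed /* addr= */ nullptr move to a MemMap::MapAnonymous overload without the address parameter, and the parameter-name comments are normalized to the /*low_4gb=*/ style. A minimal sketch of the new call shape, inferred from these call sites (the header paths, helper name, and "scratch buffer" label are assumptions for illustration):

#include <string>
#include <sys/mman.h>      // PROT_READ, PROT_WRITE
#include "base/mem_map.h"  // assumed location of art::MemMap
#include "base/logging.h"  // assumed location of CHECK

// Allocate an anonymous mapping at an address chosen by the system.
art::MemMap AllocateScratchBuffer(size_t bytes) {
  std::string error_msg;
  art::MemMap map = art::MemMap::MapAnonymous("scratch buffer",  // hypothetical name
                                              bytes,
                                              PROT_READ | PROT_WRITE,
                                              /*low_4gb=*/ false,
                                              &error_msg);
  CHECK(map.IsValid()) << "Couldn't allocate scratch buffer: " << error_msg;
  return map;
}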
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 80c4c76bd3..8a15af2fbc 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,9 @@ MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 9a5bde86b1..fdf1615f5e 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,9 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("card table",
- /* addr= */ nullptr,
capacity + 256,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index a6177896e1..b39628b1dc 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -185,7 +185,7 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
ResetClass();
// Create another space that we can put references in.
std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create(
- "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false));
+ "other space", 128 * KB, 4 * MB, 4 * MB, /*can_move_objects=*/ false));
ASSERT_TRUE(other_space.get() != nullptr);
{
ScopedThreadSuspension sts(self, kSuspended);
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index b369a6685e..7eca792063 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,9 @@ class ReadBarrierTable {
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
mem_map_ = MemMap::MapAnonymous("read barrier table",
- /* addr= */ nullptr,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 76d5d9de7e..dc223dbb04 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,9 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 6a3faefe08..fcc3007acd 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -151,6 +151,12 @@ class SpaceBitmap {
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
+ // Visit all of the set bits in the range [HeapBegin(), HeapLimit()).
+ template <typename Visitor>
+ void VisitAllMarked(Visitor&& visitor) const {
+ VisitMarkedRange(HeapBegin(), HeapLimit(), visitor);
+ }
+
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
// max during the traversal.
template <typename Visitor>
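
VisitAllMarked is a thin convenience wrapper: it forwards to VisitMarkedRange over the bitmap's full [HeapBegin(), HeapLimit()) range. A hedged usage sketch, assuming the visitor is invoked with a mirror::Object* for each set bit, matching existing VisitMarkedRange callers (the counting helper itself is illustrative):

// Count every marked object in a space's live bitmap.
size_t CountMarkedObjects(const accounting::ContinuousSpaceBitmap* bitmap) {
  size_t count = 0;
  bitmap->VisitAllMarked([&count](mirror::Object* obj) {
    DCHECK(obj != nullptr);
    ++count;
  });
  return count;
}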
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 4e2cf2bf8c..b90a95d802 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -92,10 +92,9 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
- /* addr= */ nullptr,
RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 1c9d051989..e0bbf43622 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,9 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"concurrent copying sweep array free buffer",
- /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -1651,36 +1650,15 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc
inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
- auto find_space_from_ref = [this] (mirror::Object* ref)
- REQUIRES_SHARED(Locks::mutator_lock_) -> space::Space* {
- for (const auto& space : heap_->GetContinuousSpaces()) {
- if (space->Contains(ref)) {
- return space;
- }
- }
- for (const auto& space : heap_->GetDiscontinuousSpaces()) {
- if (space->Contains(ref)) {
- return space;
- }
- }
- return nullptr;
- };
- if (kUseBakerReadBarrier &&
- kIsDebugBuild &&
- to_ref->GetReadBarrierState() != ReadBarrier::GrayState()) {
- space::Space* space = find_space_from_ref(to_ref);
- LOG(FATAL_WITHOUT_ABORT) << " " << to_ref
- << " " << to_ref->GetReadBarrierState()
- << " is_marked=" << IsMarked(to_ref)
- << " type=" << to_ref->PrettyTypeOf()
- << " is_young_gc=" << young_gen_;
- if (space == region_space_) {
- LOG(FATAL) << " region_type=" << rtype;
- } else if (space != nullptr) {
- LOG(FATAL) << " space=" << space->GetName();
- } else {
- LOG(FATAL) << "no space";
- }
+ if (kUseBakerReadBarrier) {
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " to_ref=" << to_ref
+ << " rb_state=" << to_ref->GetReadBarrierState()
+ << " is_marked=" << IsMarked(to_ref)
+ << " type=" << to_ref->PrettyTypeOf()
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+ << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+ << " region_type=" << rtype;
}
bool add_to_live_bytes = false;
// Invariant: There should be no object from a newly-allocated
@@ -1716,22 +1694,15 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Scan<false>(to_ref);
}
}
- if (kUseBakerReadBarrier &&
- kIsDebugBuild &&
- to_ref->GetReadBarrierState() != ReadBarrier::GrayState()) {
- space::Space* space = find_space_from_ref(to_ref);
- LOG(FATAL_WITHOUT_ABORT) << " " << to_ref
- << " " << to_ref->GetReadBarrierState()
- << " is_marked=" << IsMarked(to_ref)
- << " type=" << to_ref->PrettyTypeOf()
- << " is_young_gc=" << young_gen_;
- if (space == region_space_) {
- LOG(FATAL) << " region_type=" << rtype;
- } else if (space != nullptr) {
- LOG(FATAL) << " space=" << space->GetName();
- } else {
- LOG(FATAL) << "no space";
- }
+ if (kUseBakerReadBarrier) {
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " to_ref=" << to_ref
+ << " rb_state=" << to_ref->GetReadBarrierState()
+ << " is_marked=" << IsMarked(to_ref)
+ << " type=" << to_ref->PrettyTypeOf()
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+ << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+ << " region_type=" << rtype;
}
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
mirror::Object* referent = nullptr;
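
Both hunks above replace a hand-rolled "if (kIsDebugBuild && condition) { LOG(FATAL) ... }" block with a single DCHECK whose streamed operands are evaluated only when the check actually fails, which is why the explicit kIsDebugBuild guard and the find_space_from_ref lambda can be dropped (the space lookup moves into Heap::DumpSpaceNameFromAddress, see heap.cc below). A minimal stand-alone sketch of that evaluation behavior, not ART's actual logging machinery:

#include <cstdlib>
#include <iostream>
#include <sstream>

static constexpr bool kIsDebugBuild = true;  // assumption; real builds key this off NDEBUG

// Hypothetical stand-in for the object behind a failed check: it collects the
// streamed message and aborts when destroyed at the end of the statement.
class FatalMessage {
 public:
  explicit FatalMessage(const char* check) { stream_ << "Check failed: " << check; }
  ~FatalMessage() { std::cerr << stream_.str() << std::endl; std::abort(); }
  std::ostream& stream() { return stream_; }
 private:
  std::ostringstream stream_;
};

// If the condition holds (or in a release build), the right-hand side of the
// << chain is never evaluated.
#define MY_DCHECK(cond) \
  if (kIsDebugBuild && !(cond)) FatalMessage(#cond).stream()

int main() {
  int rb_state = 1;
  int gray_state = 1;
  MY_DCHECK(rb_state == gray_state) << " rb_state=" << rb_state;  // passes silently
  return 0;
}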
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 0e5fac123e..c2a67bf9f6 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -78,18 +78,20 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
}
// Create an image space, the oat file is optional.
- DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
- size_t image_size,
- uint8_t* oat_begin,
- size_t oat_size) {
+ DummyImageSpace* CreateImageSpace(size_t image_size,
+ size_t oat_size,
+ MemMap* image_reservation,
+ MemMap* oat_reservation) {
+ DCHECK(image_reservation != nullptr);
+ DCHECK(oat_reservation != nullptr);
std::string error_str;
- MemMap map = MemMap::MapAnonymous("DummyImageSpace",
- image_begin,
- image_size,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/true,
- &error_str);
- if (!map.IsValid()) {
+ MemMap image_map = MemMap::MapAnonymous("DummyImageSpace",
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ /*reservation=*/ image_reservation,
+ &error_str);
+ if (!image_map.IsValid()) {
LOG(ERROR) << error_str;
return nullptr;
}
@@ -97,10 +99,10 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
live_bitmaps_.pop_back();
MemMap oat_map = MemMap::MapAnonymous("OatMap",
- oat_begin,
oat_size,
PROT_READ | PROT_WRITE,
- /*low_4gb=*/true,
+ /*low_4gb=*/ true,
+ /*reservation=*/ oat_reservation,
&error_str);
if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
@@ -109,17 +111,17 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
- new (map.Begin()) ImageHeader(
- /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
- /*image_size=*/map.Size(),
+ new (image_map.Begin()) ImageHeader(
+ /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
+ /*image_size=*/ image_map.Size(),
sections,
- /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
- /*oat_checksum=*/0u,
+ /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
+ /*oat_checksum=*/ 0u,
// The oat file data in the header is always right after the image space.
- /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+ /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+ /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+ /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
/*boot_image_begin=*/0u,
/*boot_image_size=*/0u,
/*boot_oat_begin=*/0u,
@@ -127,29 +129,12 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
/*pointer_size=*/sizeof(void*),
ImageHeader::kStorageModeUncompressed,
/*data_size=*/0u);
- return new DummyImageSpace(std::move(map),
+ return new DummyImageSpace(std::move(image_map),
std::move(live_bitmap),
std::move(oat_file),
std::move(oat_map));
}
- // Does not reserve the memory, the caller needs to be sure no other threads will map at the
- // returned address.
- static uint8_t* GetContinuousMemoryRegion(size_t size) {
- std::string error_str;
- MemMap map = MemMap::MapAnonymous("reserve",
- /* addr= */ nullptr,
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/ true,
- &error_str);
- if (!map.IsValid()) {
- LOG(ERROR) << "Failed to allocate memory region " << error_str;
- return nullptr;
- }
- return map.Begin();
- }
-
private:
// Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
// them to randomly get placed somewhere where we want an image space.
@@ -206,13 +191,25 @@ TEST_F(ImmuneSpacesTest, AppendAfterImage) {
constexpr size_t kImageOatSize = 321 * kPageSize;
constexpr size_t kOtherSpaceSize = 100 * kPageSize;
- uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
-
- std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
- kImageSize,
- memory + kImageSize,
- kImageOatSize));
+ std::string error_str;
+ MemMap reservation = MemMap::MapAnonymous("reserve",
+ kImageSize + kImageOatSize + kOtherSpaceSize,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+
+ std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize,
+ kImageOatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(image_space != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+
const ImageHeader& image_header = image_space->GetImageHeader();
DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
@@ -257,36 +254,44 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
constexpr size_t kImage3OatSize = kPageSize;
constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
- uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
- uint8_t* space1_begin = memory;
- memory += kImage1Size;
- uint8_t* space2_begin = memory;
- memory += kImage2Size;
- uint8_t* space1_oat_begin = memory;
- memory += kImage1OatSize;
- uint8_t* space2_oat_begin = memory;
- memory += kImage2OatSize;
- uint8_t* space3_begin = memory;
-
- std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
- kImage1Size,
- space1_oat_begin,
- kImage1OatSize));
+ std::string error_str;
+ MemMap reservation = MemMap::MapAnonymous("reserve",
+ kMemorySize,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+
+ std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size,
+ kImage1OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space1 != nullptr);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
-
- std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
- kImage2Size,
- space2_oat_begin,
- kImage2OatSize));
+ std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size,
+ kImage2OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space2 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
// Finally put a 3rd image space.
- std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
- kImage3Size,
- space3_begin + kImage3Size,
- kImage3OatSize));
+ image_reservation = reservation.TakeReservedMemory(kImage3Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size,
+ kImage3OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space3 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_FALSE(reservation.IsValid());
// Check that we do not include the oat if there is no space after.
ImmuneSpaces spaces;
@@ -323,12 +328,29 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
constexpr size_t kGuardSize = kPageSize;
constexpr size_t kImage4Size = kImageBytes - kPageSize;
constexpr size_t kImage4OatSize = kPageSize;
- uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
- std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
- kImage4Size,
- memory2 + kGuardSize + kImage4Size,
- kImage4OatSize));
+
+ reservation = MemMap::MapAnonymous("reserve",
+ kImage4Size + kImage4OatSize + kGuardSize * 2,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap guard = reservation.TakeReservedMemory(kGuardSize);
+ ASSERT_TRUE(guard.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ guard.Reset(); // Release the guard memory.
+ image_reservation = reservation.TakeReservedMemory(kImage4Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size,
+ kImage4OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space4 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_EQ(reservation.Size(), kGuardSize);
+ reservation.Reset(); // Release the guard memory.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
@@ -346,12 +368,28 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
// Layout: [guard page][image][oat][guard page]
constexpr size_t kImage5Size = kImageBytes + kPageSize;
constexpr size_t kImage5OatSize = kPageSize;
- uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
- std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
- kImage5Size,
- memory3 + kGuardSize + kImage5Size,
- kImage5OatSize));
+ reservation = MemMap::MapAnonymous("reserve",
+ kImage5Size + kImage5OatSize + kGuardSize * 2,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ guard = reservation.TakeReservedMemory(kGuardSize);
+ ASSERT_TRUE(guard.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ guard.Reset(); // Release the guard memory.
+ image_reservation = reservation.TakeReservedMemory(kImage5Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size,
+ kImage5OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space5 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_EQ(reservation.Size(), kGuardSize);
+ reservation.Reset(); // Release the guard memory.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 399f9ff301..9e5cb9c314 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,9 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"mark sweep sweep array free buffer",
- /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a31cbe755f..f0f81fc67e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -505,11 +505,11 @@ Heap::Heap(size_t initial_size,
// Create bump pointer spaces instead of a backup space.
main_mem_map_2.Reset();
bump_pointer_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
} else if (main_mem_map_2.IsValid()) {
@@ -529,8 +529,7 @@ Heap::Heap(size_t initial_size,
CHECK(!non_moving_space_->CanMoveObjects());
// Allocate the large object space.
if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
- large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
- capacity_);
+ large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
} else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
@@ -696,7 +695,9 @@ MemMap Heap::MapAnonymousPreferredAddress(const char* name,
request_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb=*/ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
out_error_str);
if (map.IsValid() || request_begin == nullptr) {
return map;
@@ -1270,6 +1271,10 @@ space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
return nullptr;
}
+std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
+ space::Space* space = FindSpaceFromAddress(addr);
+ return (space != nullptr) ? space->GetName() : "no space";
+}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
// If we're in a stack overflow, do not create a new exception. It would require running the
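
DumpSpaceNameFromAddress packages the space-lookup-for-logging idiom that ProcessMarkStackRef previously open-coded. A hedged sketch of how a collector-side debug check can use it (heap_ and IsMarked are assumed members of the surrounding collector class, as in concurrent_copying.cc above):

void CheckMarked(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(IsMarked(obj))
      << " obj=" << obj
      << " space=" << heap_->DumpSpaceNameFromAddress(obj);
}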
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6c4b9367d1..c3ee5267b5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -554,6 +554,9 @@ class Heap {
space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string DumpSpaceNameFromAddress(const void* addr) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
// Do a pending collector transition.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index f6db070fac..fa10150d46 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -39,6 +39,8 @@ class HeapTest : public CommonRuntimeTest {
16 * KB,
PROT_READ,
/*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
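
Call sites that do pass a preferred address now use the long-form overload and must spell out the reuse and reservation arguments explicitly, as in the hunk above and in heap.cc and region_space.cc. A hedged sketch of that call shape (parameter order taken from those call sites; the wrapper function is illustrative):

MemMap MapAtPreferredAddress(const char* name, uint8_t* request_begin, size_t size) {
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous(name,
                                    request_begin,
                                    size,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ true,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  // The requested address is only a hint; callers typically retry with an
  // arbitrary address if the preferred placement fails, as
  // MapAnonymousPreferredAddress does above.
  return map;
}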
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 497a0c2e5f..609ccee7b4 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -24,15 +24,13 @@ namespace art {
namespace gc {
namespace space {
-BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
- uint8_t* requested_begin) {
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 59d4d27626..383bf7abaa 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -46,7 +46,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// Create a bump pointer space with the requested sizes. The space is mapped at an
// address chosen by the system; the caller can query Begin on the returned space to
// learn its placement.
- static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+ static BumpPointerSpace* Create(const std::string& name, size_t capacity);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
// Allocate num_bytes, returns null if the space is full.
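
A hedged sketch of creating a bump pointer space under the narrowed signature, matching the heap.cc call sites above (the 64 * MB capacity and helper name are illustrative):

space::BumpPointerSpace* CreateSemiSpace() {
  space::BumpPointerSpace* space =
      space::BumpPointerSpace::Create("Bump pointer space 1", 64 * MB);
  CHECK(space != nullptr);
  return space;
}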
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 73582a00c0..7955ff92e6 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -108,8 +108,10 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map,
}
}
-DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, uint8_t* requested_begin,
+DlMallocSpace* DlMallocSpace::Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -117,8 +119,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
LOG(INFO) << "DlMallocSpace::Create entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
- << " capacity=" << PrettySize(capacity)
- << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+ << " capacity=" << PrettySize(capacity);
}
// Memory we promise to dlmalloc before it asks for morecore.
@@ -126,8 +127,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = kPageSize;
- MemMap mem_map =
- CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index c63ff71849..e91602f607 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -46,8 +46,11 @@ class DlMallocSpace : public MallocSpace {
// base address parameter has been removed; the space is mapped at an
// address chosen by the system, and the caller can query Begin on the
// returned space to learn its placement.
- static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin, bool can_move_objects);
+ static DlMallocSpace* Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool can_move_objects);
// Virtual to allow MemoryToolMallocSpace to intercept.
mirror::Object* AllocWithGrowth(Thread* self,
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index f9b41daad8..92b56bda22 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -22,14 +22,16 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return DlMallocSpace::Create(
+ name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index 5758e0cde9..550d1bbe77 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -22,14 +22,16 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return DlMallocSpace::Create(
+ name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9e679573bd..96a2cea39f 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -622,9 +622,9 @@ class ImageSpace::Loader {
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg) {
TimingLogger::ScopedTiming timing("MapImageFile", logger);
- uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+ uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
return MemMap::MapFileAtAddress(address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
@@ -649,11 +649,9 @@ class ImageSpace::Loader {
// Reserve output and decompress into it.
MemMap map = MemMap::MapAnonymous(image_location,
- address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
- /*reuse=*/ false,
image_reservation,
error_msg);
if (map.IsValid()) {
@@ -1172,6 +1170,19 @@ class ImageSpace::Loader {
}
dex_cache->FixupResolvedCallSites<kWithoutReadBarrier>(new_call_sites, fixup_adapter);
}
+
+ GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+ if (preresolved_strings != nullptr) {
+ GcRoot<mirror::String>* new_array = fixup_adapter.ForwardObject(preresolved_strings);
+ if (preresolved_strings != new_array) {
+ dex_cache->SetPreResolvedStrings(new_array);
+ }
+ const size_t num_preresolved_strings = dex_cache->NumPreResolvedStrings();
+ for (size_t j = 0; j < num_preresolved_strings; ++j) {
+ new_array[j] = GcRoot<mirror::String>(
+ fixup_adapter(new_array[j].Read<kWithoutReadBarrier>()));
+ }
+ }
}
}
{
@@ -1731,6 +1742,10 @@ class ImageSpace::BootImageLoader {
dex_cache,
mirror::DexCache::ResolvedCallSitesOffset(),
dex_cache->NumResolvedCallSites<kVerifyNone>());
+ FixupDexCacheArray<GcRoot<mirror::String>>(
+ dex_cache,
+ mirror::DexCache::PreResolvedStringsOffset(),
+ dex_cache->NumPreResolvedStrings<kVerifyNone>());
}
private:
@@ -1775,6 +1790,11 @@ class ImageSpace::BootImageLoader {
PatchGcRoot(diff_, &array[index]);
}
+ void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PatchGcRoot(diff_, &array[index]);
+ }
+
template <typename EntryType>
void FixupDexCacheArray(mirror::DexCache* dex_cache,
MemberOffset array_offset,
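
The two hunks above wire the new pre-resolved strings array of each DexCache into both relocation paths: the app-image path forwards the array pointer and each element through fixup_adapter, while the boot-image path patches each GcRoot in place by the load offset. A generic, hedged sketch of the underlying patch-by-offset idea (the names and raw-pointer representation are illustrative, not ART's GcRoot encoding):

#include <cstddef>
#include <cstdint>

// Shift every non-null pointer in an array by a relocation delta.
template <typename T>
void PatchArrayByOffset(T** array, size_t length, intptr_t delta) {
  for (size_t i = 0; i < length; ++i) {
    if (array[i] != nullptr) {
      array[i] = reinterpret_cast<T*>(
          reinterpret_cast<uintptr_t>(array[i]) + delta);
    }
  }
}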
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a7f82f6e36..1658dba413 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
- /* addr= */ nullptr,
num_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -346,14 +345,13 @@ inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}
-FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +370,9 @@ FreeListSpace::FreeListSpace(const std::string& name,
std::string error_msg;
allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- /* addr= */ nullptr,
alloc_info_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 47167faccc..a4d6a24263 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -184,7 +184,7 @@ class FreeListSpace final : public LargeObjectSpace {
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
- static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
+ static FreeListSpace* Create(const std::string& name, size_t capacity);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index d55ccd6e40..62bc26e09d 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -42,7 +42,7 @@ void LargeObjectSpaceTest::LargeObjectTest() {
if (i == 0) {
los = space::LargeObjectMapSpace::Create("large object space");
} else {
- los = space::FreeListSpace::Create("large object space", nullptr, capacity);
+ los = space::FreeListSpace::Create("large object space", capacity);
}
// Make sure the bitmap is not empty and actually covers at least how much we expect.
@@ -157,7 +157,7 @@ void LargeObjectSpaceTest::RaceTest() {
if (los_type == 0) {
los = space::LargeObjectMapSpace::Create("large object space");
} else {
- los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
+ los = space::FreeListSpace::Create("large object space", 128 * MB);
}
Thread* self = Thread::Current();
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 189aeb5297..b5e6b62bcd 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -82,8 +82,7 @@ MemMap MallocSpace::CreateMemMap(const std::string& name,
size_t starting_size,
size_t* initial_size,
size_t* growth_limit,
- size_t* capacity,
- uint8_t* requested_begin) {
+ size_t* capacity) {
// Sanity check arguments
if (starting_size > *initial_size) {
*initial_size = starting_size;
@@ -107,10 +106,9 @@ MemMap MallocSpace::CreateMemMap(const std::string& name,
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
*capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6bf2d71c7c..5dd8136dcb 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -157,8 +157,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
size_t starting_size,
size_t* initial_size,
size_t* growth_limit,
- size_t* capacity,
- uint8_t* requested_begin);
+ size_t* capacity);
// When true the low memory mode argument specifies that the heap wishes the created allocator to
// be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 31bbfb8f00..2774e26acd 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,9 @@ MemMap RegionSpace::CreateMemMap(const std::string& name,
requested_begin,
capacity + kRegionSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
if (mem_map.IsValid() || requested_begin == nullptr) {
break;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 10ff1c15b1..36fd864bf3 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -133,17 +133,19 @@ RosAllocSpace::~RosAllocSpace() {
delete rosalloc_;
}
-RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, uint8_t* requested_begin,
- bool low_memory_mode, bool can_move_objects) {
+RosAllocSpace* RosAllocSpace::Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
+ bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
VLOG(startup) << "RosAllocSpace::Create entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
- << " capacity=" << PrettySize(capacity)
- << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+ << " capacity=" << PrettySize(capacity);
}
// Memory we promise to rosalloc before it asks for morecore.
@@ -151,8 +153,7 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = Heap::kDefaultStartingSize;
- MemMap mem_map =
- CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 5162a064d1..9e95c16cb3 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -38,8 +38,11 @@ class RosAllocSpace : public MallocSpace {
// base address parameter has been removed; the space is mapped at an
// address chosen by the system, and the caller can query Begin on the
// returned space to learn its placement.
- static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
+ static RosAllocSpace* Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
bool can_move_objects);
static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
const std::string& name,
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index b50859b8e6..f0b3231b3a 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -22,15 +22,20 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return RosAllocSpace::Create(name,
+ initial_size,
+ growth_limit,
+ capacity,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+ /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index 5e7ced6e23..d7e7e90188 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -22,15 +22,19 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return RosAllocSpace::Create(name, initial_size,
+ growth_limit,
+ capacity,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+ /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/space_create_test.cc b/runtime/gc/space/space_create_test.cc
index ca5f306264..d3db679c29 100644
--- a/runtime/gc/space/space_create_test.cc
+++ b/runtime/gc/space/space_create_test.cc
@@ -34,25 +34,22 @@ class SpaceCreateTest : public SpaceTest<CommonRuntimeTestWithParam<MallocSpaceT
MallocSpace* CreateSpace(const std::string& name,
size_t initial_size,
size_t growth_limit,
- size_t capacity,
- uint8_t* requested_begin) {
+ size_t capacity) {
const MallocSpaceType type = GetParam();
if (type == kMallocSpaceDlMalloc) {
return DlMallocSpace::Create(name,
initial_size,
growth_limit,
capacity,
- requested_begin,
- false);
+ /*can_move_objects=*/ false);
}
DCHECK_EQ(static_cast<uint32_t>(type), static_cast<uint32_t>(kMallocSpaceRosAlloc));
return RosAllocSpace::Create(name,
initial_size,
growth_limit,
capacity,
- requested_begin,
Runtime::Current()->GetHeap()->IsLowMemoryMode(),
- false);
+ /*can_move_objects=*/ false);
}
};
@@ -62,25 +59,25 @@ TEST_P(SpaceCreateTest, InitTestBody) {
{
// Init < max == growth
- std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+ std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Init == max == growth
- space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB));
EXPECT_TRUE(space != nullptr);
// Init > max == growth
- space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB));
EXPECT_TRUE(space == nullptr);
// Growth == init < max
- space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Growth < init < max
- space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB));
EXPECT_TRUE(space == nullptr);
// Init < growth < max
- space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Init < max < growth
- space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB));
EXPECT_TRUE(space == nullptr);
}
}
@@ -91,7 +88,7 @@ TEST_P(SpaceCreateTest, InitTestBody) {
// the GC works with the ZygoteSpace.
TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
size_t dummy;
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -225,7 +222,7 @@ TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
size_t dummy = 0;
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -301,7 +298,7 @@ TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
}
TEST_P(SpaceCreateTest, AllocAndFreeListTestBody) {
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 5aac21721f..1b111e3496 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -123,8 +123,10 @@ class SpaceTest : public Super {
return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
}
- typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin);
+ typedef MallocSpace* (*CreateSpaceFn)(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity);
void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
int round, size_t growth_limit);
@@ -323,7 +325,7 @@ void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
size_t initial_size = 4 * MB;
size_t growth_limit = 8 * MB;
size_t capacity = 16 * MB;
- MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+ MallocSpace* space(create_space("test", initial_size, growth_limit, capacity));
ASSERT_TRUE(space != nullptr);
// Basic sanity