author    2018-10-26 14:22:59 +0100
committer 2018-10-30 10:20:32 +0000
commit    1130659b0d948806e7ca974ead8ad2bcc1951d13 (patch)
tree      9046a016fafc7f9bff4e34732aa8f89ed81ad46c
parent    1c1442a498fd3f9ddc5d2bb04baa7ccd2d3539c4 (diff)
Refactor MemMap::MapAnonymous().
Remove the address argument from the shortcut overload and
introduce one more shortcut overload. This makes it easier
to find all uses where we pass a non-null address hint.
Remove the `requested_begin` parameter from some constructors
where we were always passing null. Rewrite some tests to
use the reservation API.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 118408378
Change-Id: Ibbbb96667e7cc11cf7fea119892463d8dbc9a8b5
47 files changed, 349 insertions, 303 deletions
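
For context, the two shortcut overloads of MemMap::MapAnonymous() after this
change are called roughly as follows. This is a minimal sketch distilled from
the mem_map.h hunk below; the map names, byte counts, and protection flags are
illustrative, and error handling is abbreviated.

    std::string error_msg;

    // Shortcut without an address hint: the kernel chooses the placement.
    MemMap map = MemMap::MapAnonymous("illustrative map",
                                      /*byte_count=*/ 4 * kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ false,
                                      &error_msg);
    CHECK(map.IsValid()) << error_msg;

    // New shortcut taking a reservation: the mapping is carved out of the
    // beginning of `reservation` instead of being placed by the kernel.
    MemMap reservation = MemMap::MapAnonymous("illustrative reservation",
                                              /*byte_count=*/ 16 * kPageSize,
                                              PROT_NONE,
                                              /*low_4gb=*/ false,
                                              &error_msg);
    CHECK(reservation.IsValid()) << error_msg;
    MemMap chunk = MemMap::MapAnonymous("illustrative chunk",
                                        /*byte_count=*/ 4 * kPageSize,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        &reservation,
                                        &error_msg);
    CHECK(chunk.IsValid()) << error_msg;

Callers that still need an explicit address hint, a reuse flag, or both must
now spell out the full overload, which is what makes the remaining non-null
address hints easy to grep for.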
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index 586891a3ff..fc8cd528fa 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -328,6 +328,8 @@ void CommonCompilerTest::ReserveImageSpace() { (size_t)120 * 1024 * 1024, // 120MB PROT_NONE, false /* no need for 4gb flag with fixed mmap */, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); CHECK(image_reservation_.IsValid()) << error_msg; } diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc index 1d578ab9d1..b381765fe2 100644 --- a/dex2oat/linker/elf_writer_test.cc +++ b/dex2oat/linker/elf_writer_test.cc @@ -68,9 +68,9 @@ TEST_F(ElfWriterTest, dlsym) { { std::string error_msg; std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), - /* writable */ false, - /* program_header_only */ false, - /*low_4gb*/false, + /*writable=*/ false, + /*program_header_only=*/ false, + /*low_4gb=*/false, &error_msg)); CHECK(ef.get() != nullptr) << error_msg; EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false); @@ -80,9 +80,9 @@ TEST_F(ElfWriterTest, dlsym) { { std::string error_msg; std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), - /* writable */ false, - /* program_header_only */ false, - /* low_4gb */ false, + /*writable=*/ false, + /*program_header_only=*/ false, + /*low_4gb=*/ false, &error_msg)); CHECK(ef.get() != nullptr) << error_msg; EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true); @@ -92,24 +92,23 @@ TEST_F(ElfWriterTest, dlsym) { { std::string error_msg; std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), - /* writable */ false, - /* program_header_only */ true, - /* low_4gb */ false, + /*writable=*/ false, + /*program_header_only=*/ true, + /*low_4gb=*/ false, &error_msg)); CHECK(ef.get() != nullptr) << error_msg; size_t size; bool success = ef->GetLoadedSize(&size, &error_msg); CHECK(success) << error_msg; MemMap reservation = MemMap::MapAnonymous("ElfWriterTest#dlsym reservation", - /* addr */ nullptr, RoundUp(size, kPageSize), PROT_NONE, - /* low_4gb */ true, + /*low_4gb=*/ true, &error_msg); CHECK(reservation.IsValid()) << error_msg; uint8_t* base = reservation.Begin(); success = - ef->Load(file.get(), /* executable */ false, /* low_4gb */ false, &reservation, &error_msg); + ef->Load(file.get(), /*executable=*/ false, /*low_4gb=*/ false, &reservation, &error_msg); CHECK(success) << error_msg; CHECK(!reservation.IsValid()); EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatdata) + reinterpret_cast<uintptr_t>(base), @@ -131,9 +130,9 @@ TEST_F(ElfWriterTest, CheckBuildIdPresent) { { std::string error_msg; std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), - /* writable */ false, - /* program_header_only */ false, - /* low_4gb */ false, + /*writable=*/ false, + /*program_header_only=*/ false, + /*low_4gb=*/ false, &error_msg)); CHECK(ef.get() != nullptr) << error_msg; EXPECT_TRUE(ef->HasSection(".note.gnu.build-id")); diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc index 60a4a32e7e..d43a228438 100644 --- a/dex2oat/linker/image_writer.cc +++ b/dex2oat/linker/image_writer.cc @@ -1068,10 +1068,9 @@ bool ImageWriter::AllocMemory() { std::string error_msg; image_info.image_ = MemMap::MapAnonymous("image writer image", - /* addr */ nullptr, length, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /*low_4gb=*/ false, &error_msg); if (UNLIKELY(!image_info.image_.IsValid())) { LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg; diff --git a/libartbase/base/mem_map.h 
b/libartbase/base/mem_map.h index 4f92492643..525e622690 100644 --- a/libartbase/base/mem_map.h +++ b/libartbase/base/mem_map.h @@ -139,18 +139,32 @@ class MemMap { /*out*/std::string* error_msg, bool use_debug_name = true); static MemMap MapAnonymous(const char* name, - uint8_t* addr, size_t byte_count, int prot, bool low_4gb, /*out*/std::string* error_msg) { return MapAnonymous(name, - addr, + /*addr=*/ nullptr, + byte_count, + prot, + low_4gb, + /*reuse=*/ false, + /*reservation=*/ nullptr, + error_msg); + } + static MemMap MapAnonymous(const char* name, + size_t byte_count, + int prot, + bool low_4gb, + MemMap* reservation, + /*out*/std::string* error_msg) { + return MapAnonymous(name, + /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr, byte_count, prot, low_4gb, - /* reuse */ false, - /* reservation */ nullptr, + /*reuse=*/ false, + reservation, error_msg); } @@ -178,10 +192,10 @@ class MemMap { flags, fd, start, - /* low_4gb */ low_4gb, + /*low_4gb=*/ low_4gb, filename, - /* reuse */ false, - /* reservation */ nullptr, + /*reuse=*/ false, + /*reservation=*/ nullptr, error_msg); } diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc index 8239e997fd..0aec3a0d49 100644 --- a/libartbase/base/mem_map_test.cc +++ b/libartbase/base/mem_map_test.cc @@ -53,7 +53,6 @@ class MemMapTest : public CommonArtTest { // Find a valid map address and unmap it before returning. std::string error_msg; MemMap map = MemMap::MapAnonymous("temp", - /* addr= */ nullptr, size, PROT_READ, low_4gb, @@ -68,7 +67,6 @@ class MemMapTest : public CommonArtTest { const size_t page_size = static_cast<size_t>(kPageSize); // Map a two-page memory region. MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0", - /* addr= */ nullptr, 2 * page_size, PROT_READ | PROT_WRITE, low_4gb, @@ -165,17 +163,15 @@ TEST_F(MemMapTest, Start) { TEST_F(MemMapTest, ReplaceMapping_SameSize) { std::string error_msg; MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest", - /* addr= */ nullptr, kPageSize, PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(dest.IsValid()); MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source", - /* addr= */ nullptr, kPageSize, PROT_WRITE | PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(source.IsValid()); void* source_addr = source.Begin(); @@ -200,21 +196,19 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) { TEST_F(MemMapTest, ReplaceMapping_MakeLarger) { std::string error_msg; MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest", - /* addr= */ nullptr, 5 * kPageSize, // Need to make it larger // initially so we know // there won't be mappings // in the way we we move // source. 
PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(dest.IsValid()); MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source", - /* addr= */ nullptr, 3 * kPageSize, PROT_WRITE | PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(source.IsValid()); uint8_t* source_addr = source.Begin(); @@ -246,17 +240,15 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) { TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) { std::string error_msg; MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest", - /* addr= */ nullptr, 3 * kPageSize, PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(dest.IsValid()); MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source", - /* addr= */ nullptr, kPageSize, PROT_WRITE | PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(source.IsValid()); uint8_t* source_addr = source.Begin(); @@ -285,11 +277,10 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) { MemMap dest = MemMap::MapAnonymous( "MapAnonymousEmpty-atomic-replace-dest", - /* addr= */ nullptr, 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in // the way we we move source. PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(dest.IsValid()); // Resize down to 1 page so we can remap the rest. @@ -299,7 +290,9 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) { dest.Begin() + kPageSize, 2 * kPageSize, PROT_WRITE | PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); ASSERT_TRUE(source.IsValid()); ASSERT_EQ(dest.Begin() + kPageSize, source.Begin()); @@ -332,20 +325,18 @@ TEST_F(MemMapTest, MapAnonymousEmpty) { CommonInit(); std::string error_msg; MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty", - /* addr= */ nullptr, - 0, + /*byte_count=*/ 0, PROT_READ, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_FALSE(map.IsValid()) << error_msg; ASSERT_FALSE(error_msg.empty()); error_msg.clear(); map = MemMap::MapAnonymous("MapAnonymousNonEmpty", - /* addr= */ nullptr, kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(map.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); @@ -358,7 +349,9 @@ TEST_F(MemMapTest, MapAnonymousFailNullError) { reinterpret_cast<uint8_t*>(kPageSize), 0x20000, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, + /*reuse=*/ false, + /*reservation=*/ nullptr, nullptr); ASSERT_FALSE(map.IsValid()); } @@ -368,20 +361,18 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) { CommonInit(); std::string error_msg; MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty", - /* addr= */ nullptr, - 0, + /*byte_count=*/ 0, PROT_READ, - /* low_4gb= */ true, + /*low_4gb=*/ true, &error_msg); ASSERT_FALSE(map.IsValid()) << error_msg; ASSERT_FALSE(error_msg.empty()); error_msg.clear(); map = MemMap::MapAnonymous("MapAnonymousNonEmpty", - /* addr= */ nullptr, kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, &error_msg); ASSERT_TRUE(map.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); @@ -425,17 +416,18 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) { valid_address, kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); ASSERT_TRUE(map0.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); 
ASSERT_TRUE(map0.BaseBegin() == valid_address); // Map at an unspecified address, which should succeed. MemMap map1 = MemMap::MapAnonymous("MapAnonymous1", - /* addr= */ nullptr, kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(map1.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); @@ -445,7 +437,9 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) { reinterpret_cast<uint8_t*>(map1.BaseBegin()), kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); ASSERT_FALSE(map2.IsValid()) << error_msg; ASSERT_TRUE(!error_msg.empty()); @@ -529,6 +523,8 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) { size, PROT_READ | PROT_WRITE, /*low_4gb=*/ true, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); if (map.IsValid()) { break; @@ -549,7 +545,9 @@ TEST_F(MemMapTest, MapAnonymousOverflow) { reinterpret_cast<uint8_t*>(ptr), 2 * kPageSize, // brings it over the top. PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); ASSERT_FALSE(map.IsValid()); ASSERT_FALSE(error_msg.empty()); @@ -564,7 +562,9 @@ TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) { reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)), kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); ASSERT_FALSE(map.IsValid()); ASSERT_FALSE(error_msg.empty()); @@ -577,7 +577,9 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) { reinterpret_cast<uint8_t*>(0xF0000000), 0x20000000, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); ASSERT_FALSE(map.IsValid()); ASSERT_FALSE(error_msg.empty()); @@ -588,12 +590,9 @@ TEST_F(MemMapTest, MapAnonymousReuse) { CommonInit(); std::string error_msg; MemMap map = MemMap::MapAnonymous("MapAnonymousReserve", - nullptr, 0x20000, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, - /* reuse= */ false, - /* reservation= */ nullptr, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(map.IsValid()); ASSERT_TRUE(error_msg.empty()); @@ -601,9 +600,9 @@ TEST_F(MemMapTest, MapAnonymousReuse) { reinterpret_cast<uint8_t*>(map.BaseBegin()), 0x10000, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, - /* reuse= */ true, - /* reservation= */ nullptr, + /*low_4gb=*/ false, + /*reuse=*/ true, + /*reservation=*/ nullptr, &error_msg); ASSERT_TRUE(map2.IsValid()); ASSERT_TRUE(error_msg.empty()); @@ -614,45 +613,45 @@ TEST_F(MemMapTest, CheckNoGaps) { std::string error_msg; constexpr size_t kNumPages = 3; // Map a 3-page mem map. - MemMap map = MemMap::MapAnonymous("MapAnonymous0", - /* addr= */ nullptr, - kPageSize * kNumPages, - PROT_READ | PROT_WRITE, - /* low_4gb= */ false, - &error_msg); - ASSERT_TRUE(map.IsValid()) << error_msg; + MemMap reservation = MemMap::MapAnonymous("MapAnonymous0", + kPageSize * kNumPages, + PROT_READ | PROT_WRITE, + /*low_4gb=*/ false, + &error_msg); + ASSERT_TRUE(reservation.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); // Record the base address. - uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin()); - // Unmap it. - map.Reset(); + uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin()); - // Map at the same address, but in page-sized separate mem maps, - // assuming the space at the address is still available. 
+ // Map at the same address, taking from the `map` reservation. MemMap map0 = MemMap::MapAnonymous("MapAnonymous0", - map_base, kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, + &reservation, &error_msg); ASSERT_TRUE(map0.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); + ASSERT_EQ(map_base, map0.Begin()); MemMap map1 = MemMap::MapAnonymous("MapAnonymous1", - map_base + kPageSize, kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, + &reservation, &error_msg); ASSERT_TRUE(map1.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); + ASSERT_EQ(map_base + kPageSize, map1.Begin()); MemMap map2 = MemMap::MapAnonymous("MapAnonymous2", - map_base + kPageSize * 2, kPageSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, + &reservation, &error_msg); ASSERT_TRUE(map2.IsValid()) << error_msg; ASSERT_TRUE(error_msg.empty()); + ASSERT_EQ(map_base + 2 * kPageSize, map2.Begin()); + ASSERT_FALSE(reservation.IsValid()); // The entire reservation was used. // One-map cases. ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0)); @@ -678,10 +677,9 @@ TEST_F(MemMapTest, AlignBy) { const size_t page_size = static_cast<size_t>(kPageSize); // Map a region. MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0", - /* addr= */ nullptr, 14 * page_size, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(m0.IsValid()); uint8_t* base0 = m0.Begin(); @@ -784,10 +782,9 @@ TEST_F(MemMapTest, Reservation) { ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize)); MemMap reservation = MemMap::MapAnonymous("Test reservation", - /* addr= */ nullptr, kMapSize, PROT_NONE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); ASSERT_TRUE(reservation.IsValid()); ASSERT_TRUE(error_msg.empty()); @@ -797,14 +794,14 @@ TEST_F(MemMapTest, Reservation) { static_assert(kChunk1Size < kMapSize, "We want to split the reservation."); uint8_t* addr1 = reservation.Begin(); MemMap map1 = MemMap::MapFileAtAddress(addr1, - /* byte_count= */ kChunk1Size, + /*byte_count=*/ kChunk1Size, PROT_READ, MAP_PRIVATE, scratch_file.GetFd(), - /* start= */ 0, - /* low_4gb= */ false, + /*start=*/ 0, + /*low_4gb=*/ false, scratch_file.GetFilename().c_str(), - /* reuse= */ false, + /*reuse=*/ false, &reservation, &error_msg); ASSERT_TRUE(map1.IsValid()) << error_msg; @@ -822,10 +819,10 @@ TEST_F(MemMapTest, Reservation) { uint8_t* addr2 = reservation.Begin(); MemMap map2 = MemMap::MapAnonymous("MiddleReservation", addr2, - /* byte_count= */ kChunk2Size, + /*byte_count=*/ kChunk2Size, PROT_READ, - /* low_4gb= */ false, - /* reuse= */ false, + /*low_4gb=*/ false, + /*reuse=*/ false, &reservation, &error_msg); ASSERT_TRUE(map2.IsValid()) << error_msg; @@ -839,14 +836,14 @@ TEST_F(MemMapTest, Reservation) { const size_t kChunk3Size = reservation.Size() - 1u; uint8_t* addr3 = reservation.Begin(); MemMap map3 = MemMap::MapFileAtAddress(addr3, - /* byte_count= */ kChunk3Size, + /*byte_count=*/ kChunk3Size, PROT_READ, MAP_PRIVATE, scratch_file.GetFd(), - /* start= */ dchecked_integral_cast<size_t>(addr3 - addr1), - /* low_4gb= */ false, + /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1), + /*low_4gb=*/ false, scratch_file.GetFilename().c_str(), - /* reuse= */ false, + /*reuse=*/ false, &reservation, &error_msg); ASSERT_TRUE(map3.IsValid()) << error_msg; diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc index f5761cfbec..8ceea83be4 100644 --- a/libartbase/base/zip_archive.cc +++ 
b/libartbase/base/zip_archive.cc @@ -75,10 +75,9 @@ MemMap ZipEntry::ExtractToMemMap(const char* zip_filename, name += " extracted in memory from "; name += zip_filename; MemMap map = MemMap::MapAnonymous(name.c_str(), - /* addr= */ nullptr, GetUncompressedLength(), PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, error_msg); if (!map.IsValid()) { DCHECK(!error_msg->empty()); @@ -138,7 +137,7 @@ MemMap ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* erro MAP_PRIVATE, zip_fd, offset, - /* low_4gb= */ false, + /*low_4gb=*/ false, name.c_str(), error_msg); diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc index 9e8288f997..2a565127f6 100644 --- a/openjdkjvmti/ti_class_definition.cc +++ b/openjdkjvmti/ti_class_definition.cc @@ -246,14 +246,12 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original, mmap_name += name_; std::string error; dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(), - /* addr= */ nullptr, dequick_size, PROT_NONE, /*low_4gb=*/ false, &error); mmap_name += "-TEMP"; temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(), - /* addr= */ nullptr, dequick_size, PROT_READ | PROT_WRITE, /*low_4gb=*/ false, diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc index 5adfe42e56..a610fc78aa 100644 --- a/openjdkjvmti/ti_redefine.cc +++ b/openjdkjvmti/ti_redefine.cc @@ -306,7 +306,6 @@ art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location, std::string* error_msg) { art::MemMap map = art::MemMap::MapAnonymous( StringPrintf("%s-transformed", original_location.c_str()).c_str(), - /* addr= */ nullptr, data.size(), PROT_READ|PROT_WRITE, /*low_4gb=*/ false, diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc index 50b42d4f7b..ae7db45024 100644 --- a/runtime/base/mem_map_arena_pool.cc +++ b/runtime/base/mem_map_arena_pool.cc @@ -58,7 +58,6 @@ MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) { size = RoundUp(size, kPageSize); std::string error_msg; MemMap map = MemMap::MapAnonymous(name, - /* addr= */ nullptr, size, PROT_READ | PROT_WRITE, low_4gb, diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc index 13f5fcb20e..ed3a18db28 100644 --- a/runtime/dexopt_test.cc +++ b/runtime/dexopt_test.cc @@ -206,7 +206,9 @@ void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) { reinterpret_cast<uint8_t*>(start), end - start, PROT_NONE, - /* low_4gb=*/ false, + /*low_4gb=*/ false, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg)); ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg; LOG(INFO) << "Reserved space for image " << diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h index 313b2b4fe4..9431f80a10 100644 --- a/runtime/gc/accounting/atomic_stack.h +++ b/runtime/gc/accounting/atomic_stack.h @@ -253,10 +253,9 @@ class AtomicStack { void Init() { std::string error_msg; mem_map_ = MemMap::MapAnonymous(name_.c_str(), - /* addr= */ nullptr, capacity_ * sizeof(begin_[0]), PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg; uint8_t* addr = mem_map_.Begin(); diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc index 80c4c76bd3..8a15af2fbc 100644 --- a/runtime/gc/accounting/bitmap.cc +++ b/runtime/gc/accounting/bitmap.cc @@ -49,10 +49,9 @@ MemMap Bitmap::AllocateMemMap(const std::string& name, 
size_t num_bits) { RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize); std::string error_msg; MemMap mem_map = MemMap::MapAnonymous(name.c_str(), - /* addr= */ nullptr, bitmap_size, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); if (UNLIKELY(!mem_map.IsValid())) { LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg; diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc index 9a5bde86b1..fdf1615f5e 100644 --- a/runtime/gc/accounting/card_table.cc +++ b/runtime/gc/accounting/card_table.cc @@ -65,10 +65,9 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) { /* Allocate an extra 256 bytes to allow fixed low-byte of base */ std::string error_msg; MemMap mem_map = MemMap::MapAnonymous("card table", - /* addr= */ nullptr, capacity + 256, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg; // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc index a6177896e1..b39628b1dc 100644 --- a/runtime/gc/accounting/mod_union_table_test.cc +++ b/runtime/gc/accounting/mod_union_table_test.cc @@ -185,7 +185,7 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) { ResetClass(); // Create another space that we can put references in. std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create( - "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false)); + "other space", 128 * KB, 4 * MB, 4 * MB, /*can_move_objects=*/ false)); ASSERT_TRUE(other_space.get() != nullptr); { ScopedThreadSuspension sts(self, kSuspended); diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h index b369a6685e..7eca792063 100644 --- a/runtime/gc/accounting/read_barrier_table.h +++ b/runtime/gc/accounting/read_barrier_table.h @@ -40,10 +40,9 @@ class ReadBarrierTable { static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize))); std::string error_msg; mem_map_ = MemMap::MapAnonymous("read barrier table", - /* addr= */ nullptr, capacity, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr) << "couldn't allocate read barrier table: " << error_msg; diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 76d5d9de7e..dc223dbb04 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -85,10 +85,9 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create( const size_t bitmap_size = ComputeBitmapSize(heap_capacity); std::string error_msg; MemMap mem_map = MemMap::MapAnonymous(name.c_str(), - /* addr= */ nullptr, bitmap_size, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); if (UNLIKELY(!mem_map.IsValid())) { LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg; diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc index 4e2cf2bf8c..b90a95d802 100644 --- a/runtime/gc/allocator/rosalloc.cc +++ b/runtime/gc/allocator/rosalloc.cc @@ -92,10 +92,9 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity, size_t max_num_of_pages = max_capacity_ / kPageSize; std::string error_msg; 
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map", - /* addr= */ nullptr, RoundUp(max_num_of_pages, kPageSize), PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg; page_map_ = page_map_mem_map_.Begin(); diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 2ae4676cac..d728e7d5a2 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -135,10 +135,9 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, std::string error_msg; sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous( "concurrent copying sweep array free buffer", - /* addr= */ nullptr, RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); CHECK(sweep_array_free_buffer_mem_map_.IsValid()) << "Couldn't allocate sweep array free buffer: " << error_msg; diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc index 0e5fac123e..c2a67bf9f6 100644 --- a/runtime/gc/collector/immune_spaces_test.cc +++ b/runtime/gc/collector/immune_spaces_test.cc @@ -78,18 +78,20 @@ class ImmuneSpacesTest : public CommonRuntimeTest { } // Create an image space, the oat file is optional. - DummyImageSpace* CreateImageSpace(uint8_t* image_begin, - size_t image_size, - uint8_t* oat_begin, - size_t oat_size) { + DummyImageSpace* CreateImageSpace(size_t image_size, + size_t oat_size, + MemMap* image_reservation, + MemMap* oat_reservation) { + DCHECK(image_reservation != nullptr); + DCHECK(oat_reservation != nullptr); std::string error_str; - MemMap map = MemMap::MapAnonymous("DummyImageSpace", - image_begin, - image_size, - PROT_READ | PROT_WRITE, - /*low_4gb=*/true, - &error_str); - if (!map.IsValid()) { + MemMap image_map = MemMap::MapAnonymous("DummyImageSpace", + image_size, + PROT_READ | PROT_WRITE, + /*low_4gb=*/ true, + /*reservation=*/ image_reservation, + &error_str); + if (!image_map.IsValid()) { LOG(ERROR) << error_str; return nullptr; } @@ -97,10 +99,10 @@ class ImmuneSpacesTest : public CommonRuntimeTest { std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back())); live_bitmaps_.pop_back(); MemMap oat_map = MemMap::MapAnonymous("OatMap", - oat_begin, oat_size, PROT_READ | PROT_WRITE, - /*low_4gb=*/true, + /*low_4gb=*/ true, + /*reservation=*/ oat_reservation, &error_str); if (!oat_map.IsValid()) { LOG(ERROR) << error_str; @@ -109,17 +111,17 @@ class ImmuneSpacesTest : public CommonRuntimeTest { std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End())); // Create image header. ImageSection sections[ImageHeader::kSectionCount]; - new (map.Begin()) ImageHeader( - /*image_begin=*/PointerToLowMemUInt32(map.Begin()), - /*image_size=*/map.Size(), + new (image_map.Begin()) ImageHeader( + /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()), + /*image_size=*/ image_map.Size(), sections, - /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1, - /*oat_checksum=*/0u, + /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1, + /*oat_checksum=*/ 0u, // The oat file data in the header is always right after the image space. 
- /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin), - /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin), - /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size), - /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size), + /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()), + /*oat_data_begin=*/PointerToLowMemUInt32(oat_map.Begin()), + /*oat_data_end=*/PointerToLowMemUInt32(oat_map.Begin() + oat_size), + /*oat_file_end=*/PointerToLowMemUInt32(oat_map.Begin() + oat_size), /*boot_image_begin=*/0u, /*boot_image_size=*/0u, /*boot_oat_begin=*/0u, @@ -127,29 +129,12 @@ class ImmuneSpacesTest : public CommonRuntimeTest { /*pointer_size=*/sizeof(void*), ImageHeader::kStorageModeUncompressed, /*data_size=*/0u); - return new DummyImageSpace(std::move(map), + return new DummyImageSpace(std::move(image_map), std::move(live_bitmap), std::move(oat_file), std::move(oat_map)); } - // Does not reserve the memory, the caller needs to be sure no other threads will map at the - // returned address. - static uint8_t* GetContinuousMemoryRegion(size_t size) { - std::string error_str; - MemMap map = MemMap::MapAnonymous("reserve", - /* addr= */ nullptr, - size, - PROT_READ | PROT_WRITE, - /*low_4gb=*/ true, - &error_str); - if (!map.IsValid()) { - LOG(ERROR) << "Failed to allocate memory region " << error_str; - return nullptr; - } - return map.Begin(); - } - private: // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want // them to randomly get placed somewhere where we want an image space. @@ -206,13 +191,25 @@ TEST_F(ImmuneSpacesTest, AppendAfterImage) { constexpr size_t kImageOatSize = 321 * kPageSize; constexpr size_t kOtherSpaceSize = 100 * kPageSize; - uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize); - - std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory, - kImageSize, - memory + kImageSize, - kImageOatSize)); + std::string error_str; + MemMap reservation = MemMap::MapAnonymous("reserve", + kImageSize + kImageOatSize + kOtherSpaceSize, + PROT_READ | PROT_WRITE, + /*low_4gb=*/ true, + &error_str); + ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str; + MemMap image_reservation = reservation.TakeReservedMemory(kImageSize); + ASSERT_TRUE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + + std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize, + kImageOatSize, + &image_reservation, + &reservation)); ASSERT_TRUE(image_space != nullptr); + ASSERT_FALSE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + const ImageHeader& image_header = image_space->GetImageHeader(); DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize); @@ -257,36 +254,44 @@ TEST_F(ImmuneSpacesTest, MultiImage) { constexpr size_t kImage3OatSize = kPageSize; constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size; constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize; - uint8_t* memory = GetContinuousMemoryRegion(kMemorySize); - uint8_t* space1_begin = memory; - memory += kImage1Size; - uint8_t* space2_begin = memory; - memory += kImage2Size; - uint8_t* space1_oat_begin = memory; - memory += kImage1OatSize; - uint8_t* space2_oat_begin = memory; - memory += kImage2OatSize; - uint8_t* space3_begin = memory; - - std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin, - kImage1Size, - space1_oat_begin, - kImage1OatSize)); + 
std::string error_str; + MemMap reservation = MemMap::MapAnonymous("reserve", + kMemorySize, + PROT_READ | PROT_WRITE, + /*low_4gb=*/ true, + &error_str); + ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str; + MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size); + ASSERT_TRUE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + + std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size, + kImage1OatSize, + &image_reservation, + &reservation)); ASSERT_TRUE(space1 != nullptr); + ASSERT_TRUE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); - - std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin, - kImage2Size, - space2_oat_begin, - kImage2OatSize)); + std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size, + kImage2OatSize, + &image_reservation, + &reservation)); ASSERT_TRUE(space2 != nullptr); + ASSERT_FALSE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); // Finally put a 3rd image space. - std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin, - kImage3Size, - space3_begin + kImage3Size, - kImage3OatSize)); + image_reservation = reservation.TakeReservedMemory(kImage3Size); + ASSERT_TRUE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size, + kImage3OatSize, + &image_reservation, + &reservation)); ASSERT_TRUE(space3 != nullptr); + ASSERT_FALSE(image_reservation.IsValid()); + ASSERT_FALSE(reservation.IsValid()); // Check that we do not include the oat if there is no space after. ImmuneSpaces spaces; @@ -323,12 +328,29 @@ TEST_F(ImmuneSpacesTest, MultiImage) { constexpr size_t kGuardSize = kPageSize; constexpr size_t kImage4Size = kImageBytes - kPageSize; constexpr size_t kImage4OatSize = kPageSize; - uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2); - std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize, - kImage4Size, - memory2 + kGuardSize + kImage4Size, - kImage4OatSize)); + + reservation = MemMap::MapAnonymous("reserve", + kImage4Size + kImage4OatSize + kGuardSize * 2, + PROT_READ | PROT_WRITE, + /*low_4gb=*/ true, + &error_str); + ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str; + MemMap guard = reservation.TakeReservedMemory(kGuardSize); + ASSERT_TRUE(guard.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + guard.Reset(); // Release the guard memory. + image_reservation = reservation.TakeReservedMemory(kImage4Size); + ASSERT_TRUE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size, + kImage4OatSize, + &image_reservation, + &reservation)); ASSERT_TRUE(space4 != nullptr); + ASSERT_FALSE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + ASSERT_EQ(reservation.Size(), kGuardSize); + reservation.Reset(); // Release the guard memory. 
{ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin()); @@ -346,12 +368,28 @@ TEST_F(ImmuneSpacesTest, MultiImage) { // Layout: [guard page][image][oat][guard page] constexpr size_t kImage5Size = kImageBytes + kPageSize; constexpr size_t kImage5OatSize = kPageSize; - uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2); - std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize, - kImage5Size, - memory3 + kGuardSize + kImage5Size, - kImage5OatSize)); + reservation = MemMap::MapAnonymous("reserve", + kImage5Size + kImage5OatSize + kGuardSize * 2, + PROT_READ | PROT_WRITE, + /*low_4gb=*/ true, + &error_str); + ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str; + guard = reservation.TakeReservedMemory(kGuardSize); + ASSERT_TRUE(guard.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + guard.Reset(); // Release the guard memory. + image_reservation = reservation.TakeReservedMemory(kImage5Size); + ASSERT_TRUE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size, + kImage5OatSize, + &image_reservation, + &reservation)); ASSERT_TRUE(space5 != nullptr); + ASSERT_FALSE(image_reservation.IsValid()); + ASSERT_TRUE(reservation.IsValid()); + ASSERT_EQ(reservation.Size(), kGuardSize); + reservation.Reset(); // Release the guard memory. { WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin()); diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 399f9ff301..9e5cb9c314 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -105,10 +105,9 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre std::string error_msg; sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous( "mark sweep sweep array free buffer", - /* addr= */ nullptr, RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); CHECK(sweep_array_free_buffer_mem_map_.IsValid()) << "Couldn't allocate sweep array free buffer: " << error_msg; diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index a31cbe755f..467b22c509 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -505,11 +505,11 @@ Heap::Heap(size_t initial_size, // Create bump pointer spaces instead of a backup space. main_mem_map_2.Reset(); bump_pointer_space_ = space::BumpPointerSpace::Create( - "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr); + "Bump pointer space 1", kGSSBumpPointerSpaceCapacity); CHECK(bump_pointer_space_ != nullptr); AddSpace(bump_pointer_space_); temp_space_ = space::BumpPointerSpace::Create( - "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr); + "Bump pointer space 2", kGSSBumpPointerSpaceCapacity); CHECK(temp_space_ != nullptr); AddSpace(temp_space_); } else if (main_mem_map_2.IsValid()) { @@ -529,8 +529,7 @@ Heap::Heap(size_t initial_size, CHECK(!non_moving_space_->CanMoveObjects()); // Allocate the large object space. 
if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) { - large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr, - capacity_); + large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_); CHECK(large_object_space_ != nullptr) << "Failed to create large object space"; } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) { large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space"); @@ -696,7 +695,9 @@ MemMap Heap::MapAnonymousPreferredAddress(const char* name, request_begin, capacity, PROT_READ | PROT_WRITE, - /* low_4gb=*/ true, + /*low_4gb=*/ true, + /*reuse=*/ false, + /*reservation=*/ nullptr, out_error_str); if (map.IsValid() || request_begin == nullptr) { return map; diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc index a133a1058c..606228cf36 100644 --- a/runtime/gc/heap_test.cc +++ b/runtime/gc/heap_test.cc @@ -38,6 +38,8 @@ class HeapTest : public CommonRuntimeTest { 16 * KB, PROT_READ, /*low_4gb=*/ true, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); ASSERT_TRUE(reserved_.IsValid()) << error_msg; CommonRuntimeTest::SetUp(); diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc index 497a0c2e5f..609ccee7b4 100644 --- a/runtime/gc/space/bump_pointer_space.cc +++ b/runtime/gc/space/bump_pointer_space.cc @@ -24,15 +24,13 @@ namespace art { namespace gc { namespace space { -BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity, - uint8_t* requested_begin) { +BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) { capacity = RoundUp(capacity, kPageSize); std::string error_msg; MemMap mem_map = MemMap::MapAnonymous(name.c_str(), - requested_begin, capacity, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, &error_msg); if (!mem_map.IsValid()) { LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size " diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h index 59d4d27626..383bf7abaa 100644 --- a/runtime/gc/space/bump_pointer_space.h +++ b/runtime/gc/space/bump_pointer_space.h @@ -46,7 +46,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace { // Create a bump pointer space with the requested sizes. The requested base address is not // guaranteed to be granted, if it is required, the caller should call Begin on the returned // space to confirm the request was granted. - static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin); + static BumpPointerSpace* Create(const std::string& name, size_t capacity); static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map); // Allocate num_bytes, returns null if the space is full. 
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc index 73582a00c0..7955ff92e6 100644 --- a/runtime/gc/space/dlmalloc_space.cc +++ b/runtime/gc/space/dlmalloc_space.cc @@ -108,8 +108,10 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map, } } -DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size, - size_t growth_limit, size_t capacity, uint8_t* requested_begin, +DlMallocSpace* DlMallocSpace::Create(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity, bool can_move_objects) { uint64_t start_time = 0; if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { @@ -117,8 +119,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz LOG(INFO) << "DlMallocSpace::Create entering " << name << " initial_size=" << PrettySize(initial_size) << " growth_limit=" << PrettySize(growth_limit) - << " capacity=" << PrettySize(capacity) - << " requested_begin=" << reinterpret_cast<void*>(requested_begin); + << " capacity=" << PrettySize(capacity); } // Memory we promise to dlmalloc before it asks for morecore. @@ -126,8 +127,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the // size of the large allocation) will be greater than the footprint limit. size_t starting_size = kPageSize; - MemMap mem_map = - CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin); + MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity); if (!mem_map.IsValid()) { LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size " << PrettySize(capacity); diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h index c63ff71849..e91602f607 100644 --- a/runtime/gc/space/dlmalloc_space.h +++ b/runtime/gc/space/dlmalloc_space.h @@ -46,8 +46,11 @@ class DlMallocSpace : public MallocSpace { // base address is not guaranteed to be granted, if it is required, // the caller should call Begin on the returned space to confirm the // request was granted. - static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit, - size_t capacity, uint8_t* requested_begin, bool can_move_objects); + static DlMallocSpace* Create(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity, + bool can_move_objects); // Virtual to allow MemoryToolMallocSpace to intercept. 
mirror::Object* AllocWithGrowth(Thread* self, diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc index f9b41daad8..92b56bda22 100644 --- a/runtime/gc/space/dlmalloc_space_random_test.cc +++ b/runtime/gc/space/dlmalloc_space_random_test.cc @@ -22,14 +22,16 @@ namespace art { namespace gc { namespace space { -MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit, - size_t capacity, uint8_t* requested_begin) { - return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false); +MallocSpace* CreateDlMallocSpace(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity) { + return DlMallocSpace::Create( + name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false); } TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace) - } // namespace space } // namespace gc } // namespace art diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc index 5758e0cde9..550d1bbe77 100644 --- a/runtime/gc/space/dlmalloc_space_static_test.cc +++ b/runtime/gc/space/dlmalloc_space_static_test.cc @@ -22,14 +22,16 @@ namespace art { namespace gc { namespace space { -MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit, - size_t capacity, uint8_t* requested_begin) { - return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false); +MallocSpace* CreateDlMallocSpace(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity) { + return DlMallocSpace::Create( + name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false); } TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace) - } // namespace space } // namespace gc } // namespace art diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 9e679573bd..875efe2fbc 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -622,9 +622,9 @@ class ImageSpace::Loader { /*inout*/MemMap* image_reservation, /*out*/std::string* error_msg) { TimingLogger::ScopedTiming timing("MapImageFile", logger); - uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr; const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode(); if (storage_mode == ImageHeader::kStorageModeUncompressed) { + uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr; return MemMap::MapFileAtAddress(address, image_header.GetImageSize(), PROT_READ | PROT_WRITE, @@ -649,11 +649,9 @@ class ImageSpace::Loader { // Reserve output and decompress into it. 
MemMap map = MemMap::MapAnonymous(image_location, - address, image_header.GetImageSize(), PROT_READ | PROT_WRITE, /*low_4gb=*/ true, - /*reuse=*/ false, image_reservation, error_msg); if (map.IsValid()) { diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index a7f82f6e36..1658dba413 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -137,10 +137,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_tl_bulk_allocated) { std::string error_msg; MemMap mem_map = MemMap::MapAnonymous("large object space allocation", - /* addr= */ nullptr, num_bytes, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, &error_msg); if (UNLIKELY(!mem_map.IsValid())) { LOG(WARNING) << "Large object allocation failed: " << error_msg; @@ -346,14 +345,13 @@ inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a, return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b); } -FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) { +FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) { CHECK_EQ(size % kAlignment, 0U); std::string error_msg; MemMap mem_map = MemMap::MapAnonymous(name.c_str(), - requested_begin, size, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, &error_msg); CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg; return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End()); @@ -372,10 +370,9 @@ FreeListSpace::FreeListSpace(const std::string& name, std::string error_msg; allocation_info_map_ = MemMap::MapAnonymous("large object free list space allocation info map", - /* addr= */ nullptr, alloc_info_size, PROT_READ | PROT_WRITE, - /* low_4gb= */ false, + /*low_4gb=*/ false, &error_msg); CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg; allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin()); diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h index 47167faccc..a4d6a24263 100644 --- a/runtime/gc/space/large_object_space.h +++ b/runtime/gc/space/large_object_space.h @@ -184,7 +184,7 @@ class FreeListSpace final : public LargeObjectSpace { static constexpr size_t kAlignment = kPageSize; virtual ~FreeListSpace(); - static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity); + static FreeListSpace* Create(const std::string& name, size_t capacity); size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override REQUIRES(lock_); mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc index d55ccd6e40..62bc26e09d 100644 --- a/runtime/gc/space/large_object_space_test.cc +++ b/runtime/gc/space/large_object_space_test.cc @@ -42,7 +42,7 @@ void LargeObjectSpaceTest::LargeObjectTest() { if (i == 0) { los = space::LargeObjectMapSpace::Create("large object space"); } else { - los = space::FreeListSpace::Create("large object space", nullptr, capacity); + los = space::FreeListSpace::Create("large object space", capacity); } // Make sure the bitmap is not empty and actually covers at least how much we expect. 
@@ -157,7 +157,7 @@ void LargeObjectSpaceTest::RaceTest() { if (los_type == 0) { los = space::LargeObjectMapSpace::Create("large object space"); } else { - los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB); + los = space::FreeListSpace::Create("large object space", 128 * MB); } Thread* self = Thread::Current(); diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc index 189aeb5297..b5e6b62bcd 100644 --- a/runtime/gc/space/malloc_space.cc +++ b/runtime/gc/space/malloc_space.cc @@ -82,8 +82,7 @@ MemMap MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size, size_t* growth_limit, - size_t* capacity, - uint8_t* requested_begin) { + size_t* capacity) { // Sanity check arguments if (starting_size > *initial_size) { *initial_size = starting_size; @@ -107,10 +106,9 @@ MemMap MallocSpace::CreateMemMap(const std::string& name, std::string error_msg; MemMap mem_map = MemMap::MapAnonymous(name.c_str(), - requested_begin, *capacity, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, &error_msg); if (!mem_map.IsValid()) { LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size " diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h index 6bf2d71c7c..5dd8136dcb 100644 --- a/runtime/gc/space/malloc_space.h +++ b/runtime/gc/space/malloc_space.h @@ -157,8 +157,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace { size_t starting_size, size_t* initial_size, size_t* growth_limit, - size_t* capacity, - uint8_t* requested_begin); + size_t* capacity); // When true the low memory mode argument specifies that the heap wishes the created allocator to // be more aggressive in releasing unused pages. diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index 31bbfb8f00..2774e26acd 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -58,7 +58,9 @@ MemMap RegionSpace::CreateMemMap(const std::string& name, requested_begin, capacity + kRegionSize, PROT_READ | PROT_WRITE, - /* low_4gb= */ true, + /*low_4gb=*/ true, + /*reuse=*/ false, + /*reservation=*/ nullptr, &error_msg); if (mem_map.IsValid() || requested_begin == nullptr) { break; diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc index 10ff1c15b1..36fd864bf3 100644 --- a/runtime/gc/space/rosalloc_space.cc +++ b/runtime/gc/space/rosalloc_space.cc @@ -133,17 +133,19 @@ RosAllocSpace::~RosAllocSpace() { delete rosalloc_; } -RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, - size_t growth_limit, size_t capacity, uint8_t* requested_begin, - bool low_memory_mode, bool can_move_objects) { +RosAllocSpace* RosAllocSpace::Create(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity, + bool low_memory_mode, + bool can_move_objects) { uint64_t start_time = 0; if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { start_time = NanoTime(); VLOG(startup) << "RosAllocSpace::Create entering " << name << " initial_size=" << PrettySize(initial_size) << " growth_limit=" << PrettySize(growth_limit) - << " capacity=" << PrettySize(capacity) - << " requested_begin=" << reinterpret_cast<void*>(requested_begin); + << " capacity=" << PrettySize(capacity); } // Memory we promise to rosalloc before it asks for morecore. 
@@ -151,8 +153,7 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the // size of the large allocation) will be greater than the footprint limit. size_t starting_size = Heap::kDefaultStartingSize; - MemMap mem_map = - CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin); + MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity); if (!mem_map.IsValid()) { LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size " << PrettySize(capacity); diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h index 5162a064d1..9e95c16cb3 100644 --- a/runtime/gc/space/rosalloc_space.h +++ b/runtime/gc/space/rosalloc_space.h @@ -38,8 +38,11 @@ class RosAllocSpace : public MallocSpace { // base address is not guaranteed to be granted, if it is required, // the caller should call Begin on the returned space to confirm the // request was granted. - static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit, - size_t capacity, uint8_t* requested_begin, bool low_memory_mode, + static RosAllocSpace* Create(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity, + bool low_memory_mode, bool can_move_objects); static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map, const std::string& name, diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc index b50859b8e6..f0b3231b3a 100644 --- a/runtime/gc/space/rosalloc_space_random_test.cc +++ b/runtime/gc/space/rosalloc_space_random_test.cc @@ -22,15 +22,20 @@ namespace art { namespace gc { namespace space { -MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit, - size_t capacity, uint8_t* requested_begin) { - return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, - Runtime::Current()->GetHeap()->IsLowMemoryMode(), false); +MallocSpace* CreateRosAllocSpace(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity) { + return RosAllocSpace::Create(name, + initial_size, + growth_limit, + capacity, + Runtime::Current()->GetHeap()->IsLowMemoryMode(), + /*can_move_objects=*/ false); } TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace) - } // namespace space } // namespace gc } // namespace art diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc index 5e7ced6e23..d7e7e90188 100644 --- a/runtime/gc/space/rosalloc_space_static_test.cc +++ b/runtime/gc/space/rosalloc_space_static_test.cc @@ -22,15 +22,19 @@ namespace art { namespace gc { namespace space { -MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit, - size_t capacity, uint8_t* requested_begin) { - return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, - Runtime::Current()->GetHeap()->IsLowMemoryMode(), false); +MallocSpace* CreateRosAllocSpace(const std::string& name, + size_t initial_size, + size_t growth_limit, + size_t capacity) { + return RosAllocSpace::Create(name, initial_size, + growth_limit, + capacity, + Runtime::Current()->GetHeap()->IsLowMemoryMode(), + /*can_move_objects=*/ false); } TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace) - } // namespace space } // namespace 
gc } // namespace art diff --git a/runtime/gc/space/space_create_test.cc b/runtime/gc/space/space_create_test.cc index ca5f306264..d3db679c29 100644 --- a/runtime/gc/space/space_create_test.cc +++ b/runtime/gc/space/space_create_test.cc @@ -34,25 +34,22 @@ class SpaceCreateTest : public SpaceTest<CommonRuntimeTestWithParam<MallocSpaceT MallocSpace* CreateSpace(const std::string& name, size_t initial_size, size_t growth_limit, - size_t capacity, - uint8_t* requested_begin) { + size_t capacity) { const MallocSpaceType type = GetParam(); if (type == kMallocSpaceDlMalloc) { return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, - requested_begin, - false); + /*can_move_objects=*/ false); } DCHECK_EQ(static_cast<uint32_t>(type), static_cast<uint32_t>(kMallocSpaceRosAlloc)); return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, - requested_begin, Runtime::Current()->GetHeap()->IsLowMemoryMode(), - false); + /*can_move_objects=*/ false); } }; @@ -62,25 +59,25 @@ TEST_P(SpaceCreateTest, InitTestBody) { { // Init < max == growth - std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB, nullptr)); + std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB)); EXPECT_TRUE(space != nullptr); // Init == max == growth - space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB, nullptr)); + space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB)); EXPECT_TRUE(space != nullptr); // Init > max == growth - space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB, nullptr)); + space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB)); EXPECT_TRUE(space == nullptr); // Growth == init < max - space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB, nullptr)); + space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB)); EXPECT_TRUE(space != nullptr); // Growth < init < max - space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB, nullptr)); + space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB)); EXPECT_TRUE(space == nullptr); // Init < growth < max - space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB, nullptr)); + space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB)); EXPECT_TRUE(space != nullptr); // Init < max < growth - space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB, nullptr)); + space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB)); EXPECT_TRUE(space == nullptr); } } @@ -91,7 +88,7 @@ TEST_P(SpaceCreateTest, InitTestBody) { // the GC works with the ZygoteSpace. 
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 5aac21721f..1b111e3496 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -123,8 +123,10 @@ class SpaceTest : public Super {
     return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
   }
 
-  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
-                                        size_t capacity, uint8_t* requested_begin);
+  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name,
+                                        size_t initial_size,
+                                        size_t growth_limit,
+                                        size_t capacity);
 
   void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                            int round, size_t growth_limit);
@@ -323,7 +325,7 @@ void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
   size_t initial_size = 4 * MB;
   size_t growth_limit = 8 * MB;
   size_t capacity = 16 * MB;
-  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity));
   ASSERT_TRUE(space != nullptr);
 
   // Basic sanity
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 6db47903b2..361dccbd13 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -80,10 +80,9 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
   const size_t table_bytes = max_count * sizeof(IrtEntry);
   table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
-                                        /* addr= */ nullptr,
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ false,
+                                        /*low_4gb=*/ false,
                                         error_msg);
   if (!table_mem_map_.IsValid() && error_msg->empty()) {
     *error_msg = "Unable to map memory for indirect ref table";
@@ -223,10 +222,9 @@ bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
   const size_t table_bytes = new_size * sizeof(IrtEntry);
   MemMap new_map = MemMap::MapAnonymous("indirect ref table",
-                                        /* addr= */ nullptr,
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ false,
+                                        /*low_4gb=*/ false,
                                         error_msg);
   if (!new_map.IsValid()) {
     return false;
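As the call sites above show, the shortcut overload of MemMap::MapAnonymous() no longer takes an address hint. A minimal usage sketch, assuming the five-parameter shape used in these hunks; the name and byte count are illustrative:

    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("example table",          // debug name for the mapping
                                      table_bytes,              // hypothetical size in bytes
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ false,       // no 32-bit address constraint
                                      &error_msg);
    if (!map.IsValid()) {
      LOG(ERROR) << "Failed to map: " << error_msg;  // error_msg explains the failure
    }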
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 8239602b50..082b311fbe 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -304,12 +304,9 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
     base_flags = MAP_PRIVATE | MAP_ANON;
     data_pages = MemMap::MapAnonymous(
         "data-code-cache",
-        /* addr= */ nullptr,
         data_capacity + exec_capacity,
         kProtRW,
         /* low_4gb= */ true,
-        /* reuse= */ false,
-        /* reservation= */ nullptr,
         &error_str);
   }
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index f9f87d83f6..203d200be3 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -175,10 +175,9 @@ static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
   std::string error_message;
   size_t length = static_cast<size_t>(end - start);
   MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
-                                            /* addr= */ nullptr,
                                             length,
                                             PROT_READ | PROT_WRITE,
-                                            /* low_4gb= */ false,
+                                            /*low_4gb=*/ false,
                                             &error_message);
   if (!dex_mem_map.IsValid()) {
     ScopedObjectAccess soa(env);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7fa5607582..8ac7450b81 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1160,8 +1160,10 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
                                                reinterpret_cast<uint8_t*>(kSentinelAddr),
                                                kPageSize,
                                                PROT_NONE,
-                                               /* low_4gb= */ true,
-                                               /* error_msg= */ nullptr);
+                                               /*low_4gb=*/ true,
+                                               /*reuse=*/ false,
+                                               /*reservation=*/ nullptr,
+                                               /*error_msg=*/ nullptr);
   if (!protected_fault_page_.IsValid()) {
     LOG(WARNING) << "Could not reserve sentinel fault page";
   } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 20b33277b3..f2e5012991 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -191,10 +191,9 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava)
 TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
   std::string error_msg;
   MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
-                                      /* addr= */ nullptr,
                                       128 * kPageSize,  // Just some small stack.
                                       PROT_READ | PROT_WRITE,
-                                      /* low_4gb= */ false,
+                                      /*low_4gb=*/ false,
                                       &error_msg);
   ASSERT_TRUE(stack.IsValid()) << error_msg;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index a245f659d7..8723c99706 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -47,10 +47,9 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
   stack_size += kPageSize;
   std::string error_msg;
   stack_ = MemMap::MapAnonymous(name.c_str(),
-                                /* addr= */ nullptr,
                                 stack_size,
                                 PROT_READ | PROT_WRITE,
-                                /* low_4gb= */ false,
+                                /*low_4gb=*/ false,
                                 &error_msg);
   CHECK(stack_.IsValid()) << error_msg;
   CHECK_ALIGNED(stack_.Begin(), kPageSize);
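The overall shape of the refactoring, sketched under the assumption that the overloads are exactly as exercised above: hint-free callers use the short overload, while the few callers that pass a fixed address (such as the runtime.cc sentinel page) keep the long form and now spell out reuse and reservation explicitly.

    // Short form: no address hint (the common case after this change).
    MemMap stack = MemMap::MapAnonymous("worker stack",
                                        stack_size,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        &error_msg);

    // Long form: explicit address hint with reuse/reservation spelled out,
    // mirroring the runtime.cc sentinel-page call above.
    MemMap page = MemMap::MapAnonymous("sentinel fault page",
                                       reinterpret_cast<uint8_t*>(kSentinelAddr),
                                       kPageSize,
                                       PROT_NONE,
                                       /*low_4gb=*/ true,
                                       /*reuse=*/ false,
                                       /*reservation=*/ nullptr,
                                       /*error_msg=*/ nullptr);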