Merge "Refactor MemMap::MapAnonymous()."
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 586891a..fc8cd52 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -328,6 +328,8 @@
(size_t)120 * 1024 * 1024, // 120MB
PROT_NONE,
false /* no need for 4gb flag with fixed mmap */,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
CHECK(image_reservation_.IsValid()) << error_msg;
}
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index 1d578ab..b381765 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -68,9 +68,9 @@
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ false,
- /*low_4gb*/false,
+ /*writable=*/ false,
+ /*program_header_only=*/ false,
+ /*low_4gb=*/false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false);
@@ -80,9 +80,9 @@
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ false,
- /* low_4gb */ false,
+ /*writable=*/ false,
+ /*program_header_only=*/ false,
+ /*low_4gb=*/ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
@@ -92,24 +92,23 @@
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ true,
- /* low_4gb */ false,
+ /*writable=*/ false,
+ /*program_header_only=*/ true,
+ /*low_4gb=*/ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
size_t size;
bool success = ef->GetLoadedSize(&size, &error_msg);
CHECK(success) << error_msg;
MemMap reservation = MemMap::MapAnonymous("ElfWriterTest#dlsym reservation",
- /* addr */ nullptr,
RoundUp(size, kPageSize),
PROT_NONE,
- /* low_4gb */ true,
+ /*low_4gb=*/ true,
&error_msg);
CHECK(reservation.IsValid()) << error_msg;
uint8_t* base = reservation.Begin();
success =
- ef->Load(file.get(), /* executable */ false, /* low_4gb */ false, &reservation, &error_msg);
+ ef->Load(file.get(), /*executable=*/ false, /*low_4gb=*/ false, &reservation, &error_msg);
CHECK(success) << error_msg;
CHECK(!reservation.IsValid());
EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatdata) + reinterpret_cast<uintptr_t>(base),
@@ -131,9 +130,9 @@
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ false,
- /* low_4gb */ false,
+ /*writable=*/ false,
+ /*program_header_only=*/ false,
+ /*low_4gb=*/ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_TRUE(ef->HasSection(".note.gnu.build-id"));
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index be62058..4ddbf74 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -1069,10 +1069,9 @@
std::string error_msg;
image_info.image_ = MemMap::MapAnonymous("image writer image",
- /* addr */ nullptr,
length,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!image_info.image_.IsValid())) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 4f92492..525e622 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -139,18 +139,32 @@
/*out*/std::string* error_msg,
bool use_debug_name = true);
static MemMap MapAnonymous(const char* name,
- uint8_t* addr,
size_t byte_count,
int prot,
bool low_4gb,
/*out*/std::string* error_msg) {
return MapAnonymous(name,
- addr,
+ /*addr=*/ nullptr,
byte_count,
prot,
low_4gb,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
+ error_msg);
+ }
+ static MemMap MapAnonymous(const char* name,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ MemMap* reservation,
+ /*out*/std::string* error_msg) {
+ return MapAnonymous(name,
+ /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
+ byte_count,
+ prot,
+ low_4gb,
+ /*reuse=*/ false,
+ reservation,
error_msg);
}
@@ -178,10 +192,10 @@
flags,
fd,
start,
- /* low_4gb */ low_4gb,
+ /*low_4gb=*/ low_4gb,
filename,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
error_msg);
}
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index e4e227f..2d9cd59 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -54,7 +54,6 @@
// Find a valid map address and unmap it before returning.
std::string error_msg;
MemMap map = MemMap::MapAnonymous("temp",
- /* addr= */ nullptr,
size,
PROT_READ,
low_4gb,
@@ -69,7 +68,6 @@
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a two-page memory region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
- /* addr= */ nullptr,
2 * page_size,
PROT_READ | PROT_WRITE,
low_4gb,
@@ -166,17 +164,15 @@
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
kPageSize,
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
void* source_addr = source.Begin();
@@ -201,21 +197,19 @@
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
5 * kPageSize, // Need to make it larger
// initially so we know
// there won't be mappings
// in the way we we move
// source.
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr= */ nullptr,
3 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -247,17 +241,15 @@
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
3 * kPageSize,
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -286,11 +278,10 @@
MemMap dest =
MemMap::MapAnonymous(
"MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
// the way we we move source.
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
@@ -300,7 +291,9 @@
dest.Begin() + kPageSize,
2 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(source.IsValid());
ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
@@ -333,20 +326,18 @@
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr= */ nullptr,
- 0,
+ /*byte_count=*/ 0,
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -359,7 +350,9 @@
reinterpret_cast<uint8_t*>(kPageSize),
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
nullptr);
ASSERT_FALSE(map.IsValid());
}
@@ -369,20 +362,18 @@
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr= */ nullptr,
- 0,
+ /*byte_count=*/ 0,
PROT_READ,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -426,17 +417,18 @@
valid_address,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_TRUE(map0.BaseBegin() == valid_address);
// Map at an unspecified address, which should succeed.
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -446,7 +438,9 @@
reinterpret_cast<uint8_t*>(map1.BaseBegin()),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map2.IsValid()) << error_msg;
ASSERT_TRUE(!error_msg.empty());
@@ -530,6 +524,8 @@
size,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
if (map.IsValid()) {
break;
@@ -550,7 +546,9 @@
reinterpret_cast<uint8_t*>(ptr),
2 * kPageSize, // brings it over the top.
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -565,7 +563,9 @@
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -578,7 +578,9 @@
reinterpret_cast<uint8_t*>(0xF0000000),
0x20000000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -589,12 +591,9 @@
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
- nullptr,
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
- /* reuse= */ false,
- /* reservation= */ nullptr,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(map.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -602,9 +601,9 @@
reinterpret_cast<uint8_t*>(map.BaseBegin()),
0x10000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
- /* reuse= */ true,
- /* reservation= */ nullptr,
+ /*low_4gb=*/ false,
+ /*reuse=*/ true,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(map2.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -615,45 +614,45 @@
std::string error_msg;
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
- MemMap map = MemMap::MapAnonymous("MapAnonymous0",
- /* addr= */ nullptr,
- kPageSize * kNumPages,
- PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
- &error_msg);
- ASSERT_TRUE(map.IsValid()) << error_msg;
+ MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
+ kPageSize * kNumPages,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ false,
+ &error_msg);
+ ASSERT_TRUE(reservation.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
// Record the base address.
- uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
- // Unmap it.
- map.Reset();
+ uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin());
- // Map at the same address, but in page-sized separate mem maps,
- // assuming the space at the address is still available.
+ // Map at the same address, taking memory from the `reservation`.
MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
- map_base,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ &reservation,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map_base, map0.Begin());
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- map_base + kPageSize,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ &reservation,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map_base + kPageSize, map1.Begin());
MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
- map_base + kPageSize * 2,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ &reservation,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map_base + 2 * kPageSize, map2.Begin());
+ ASSERT_FALSE(reservation.IsValid()); // The entire reservation was used.
// One-map cases.
ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
@@ -679,10 +678,9 @@
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
- /* addr= */ nullptr,
14 * page_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(m0.IsValid());
uint8_t* base0 = m0.Begin();
@@ -785,10 +783,9 @@
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
MemMap reservation = MemMap::MapAnonymous("Test reservation",
- /* addr= */ nullptr,
kMapSize,
PROT_NONE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(reservation.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -798,14 +795,14 @@
static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
uint8_t* addr1 = reservation.Begin();
MemMap map1 = MemMap::MapFileAtAddress(addr1,
- /* byte_count= */ kChunk1Size,
+ /*byte_count=*/ kChunk1Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start= */ 0,
- /* low_4gb= */ false,
+ /*start=*/ 0,
+ /*low_4gb=*/ false,
scratch_file.GetFilename().c_str(),
- /* reuse= */ false,
+ /*reuse=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
@@ -823,10 +820,10 @@
uint8_t* addr2 = reservation.Begin();
MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
addr2,
- /* byte_count= */ kChunk2Size,
+ /*byte_count=*/ kChunk2Size,
PROT_READ,
- /* low_4gb= */ false,
- /* reuse= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
@@ -840,14 +837,14 @@
const size_t kChunk3Size = reservation.Size() - 1u;
uint8_t* addr3 = reservation.Begin();
MemMap map3 = MemMap::MapFileAtAddress(addr3,
- /* byte_count= */ kChunk3Size,
+ /*byte_count=*/ kChunk3Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start= */ dchecked_integral_cast<size_t>(addr3 - addr1),
- /* low_4gb= */ false,
+ /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1),
+ /*low_4gb=*/ false,
scratch_file.GetFilename().c_str(),
- /* reuse= */ false,
+ /*reuse=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map3.IsValid()) << error_msg;
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index f5761cf..8ceea83 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -75,10 +75,9 @@
name += " extracted in memory from ";
name += zip_filename;
MemMap map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
GetUncompressedLength(),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
error_msg);
if (!map.IsValid()) {
DCHECK(!error_msg->empty());
@@ -138,7 +137,7 @@
MAP_PRIVATE,
zip_fd,
offset,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
name.c_str(),
error_msg);
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 9e8288f..2a56512 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -246,14 +246,12 @@
mmap_name += name_;
std::string error;
dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr= */ nullptr,
dequick_size,
PROT_NONE,
/*low_4gb=*/ false,
&error);
mmap_name += "-TEMP";
temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr= */ nullptr,
dequick_size,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index f3f45d9..7cd1039 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -309,7 +309,6 @@
std::string* error_msg) {
art::MemMap map = art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
- /* addr= */ nullptr,
data.size(),
PROT_READ|PROT_WRITE,
/*low_4gb=*/ false,
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 50b42d4..ae7db45 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -58,7 +58,6 @@
size = RoundUp(size, kPageSize);
std::string error_msg;
MemMap map = MemMap::MapAnonymous(name,
- /* addr= */ nullptr,
size,
PROT_READ | PROT_WRITE,
low_4gb,
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 13f5fcb..ed3a18d 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -206,7 +206,9 @@
reinterpret_cast<uint8_t*>(start),
end - start,
PROT_NONE,
- /* low_4gb=*/ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 313b2b4..9431f80 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,9 @@
void Init() {
std::string error_msg;
mem_map_ = MemMap::MapAnonymous(name_.c_str(),
- /* addr= */ nullptr,
capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 80c4c76..8a15af2 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,9 @@
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 9a5bde8..fdf1615 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,9 @@
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("card table",
- /* addr= */ nullptr,
capacity + 256,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index a617789..b39628b 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -185,7 +185,7 @@
ResetClass();
// Create another space that we can put references in.
std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create(
- "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false));
+ "other space", 128 * KB, 4 * MB, 4 * MB, /*can_move_objects=*/ false));
ASSERT_TRUE(other_space.get() != nullptr);
{
ScopedThreadSuspension sts(self, kSuspended);
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index b369a66..7eca792 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,9 @@
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
mem_map_ = MemMap::MapAnonymous("read barrier table",
- /* addr= */ nullptr,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 76d5d9d..dc223db 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,9 @@
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 4e2cf2b..b90a95d 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -92,10 +92,9 @@
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
- /* addr= */ nullptr,
RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 1c9d051..be1014c 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,9 @@
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"concurrent copying sweep array free buffer",
- /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 0e5fac1..c2a67bf 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -78,18 +78,20 @@
}
// Create an image space, the oat file is optional.
- DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
- size_t image_size,
- uint8_t* oat_begin,
- size_t oat_size) {
+ DummyImageSpace* CreateImageSpace(size_t image_size,
+ size_t oat_size,
+ MemMap* image_reservation,
+ MemMap* oat_reservation) {
+ DCHECK(image_reservation != nullptr);
+ DCHECK(oat_reservation != nullptr);
std::string error_str;
- MemMap map = MemMap::MapAnonymous("DummyImageSpace",
- image_begin,
- image_size,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/true,
- &error_str);
- if (!map.IsValid()) {
+ MemMap image_map = MemMap::MapAnonymous("DummyImageSpace",
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ /*reservation=*/ image_reservation,
+ &error_str);
+ if (!image_map.IsValid()) {
LOG(ERROR) << error_str;
return nullptr;
}
@@ -97,10 +99,10 @@
std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
live_bitmaps_.pop_back();
MemMap oat_map = MemMap::MapAnonymous("OatMap",
- oat_begin,
oat_size,
PROT_READ | PROT_WRITE,
- /*low_4gb=*/true,
+ /*low_4gb=*/ true,
+ /*reservation=*/ oat_reservation,
&error_str);
if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
@@ -109,17 +111,17 @@
std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
- new (map.Begin()) ImageHeader(
- /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
- /*image_size=*/map.Size(),
+ new (image_map.Begin()) ImageHeader(
+ /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
+ /*image_size=*/ image_map.Size(),
sections,
- /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
- /*oat_checksum=*/0u,
+ /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
+ /*oat_checksum=*/ 0u,
// The oat file data in the header is always right after the image space.
- /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+ /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+ /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+ /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
/*boot_image_begin=*/0u,
/*boot_image_size=*/0u,
/*boot_oat_begin=*/0u,
@@ -127,29 +129,12 @@
/*pointer_size=*/sizeof(void*),
ImageHeader::kStorageModeUncompressed,
/*data_size=*/0u);
- return new DummyImageSpace(std::move(map),
+ return new DummyImageSpace(std::move(image_map),
std::move(live_bitmap),
std::move(oat_file),
std::move(oat_map));
}
- // Does not reserve the memory, the caller needs to be sure no other threads will map at the
- // returned address.
- static uint8_t* GetContinuousMemoryRegion(size_t size) {
- std::string error_str;
- MemMap map = MemMap::MapAnonymous("reserve",
- /* addr= */ nullptr,
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/ true,
- &error_str);
- if (!map.IsValid()) {
- LOG(ERROR) << "Failed to allocate memory region " << error_str;
- return nullptr;
- }
- return map.Begin();
- }
-
private:
// Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
// them to randomly get placed somewhere where we want an image space.
@@ -206,13 +191,25 @@
constexpr size_t kImageOatSize = 321 * kPageSize;
constexpr size_t kOtherSpaceSize = 100 * kPageSize;
- uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
+ std::string error_str;
+ MemMap reservation = MemMap::MapAnonymous("reserve",
+ kImageSize + kImageOatSize + kOtherSpaceSize,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
- kImageSize,
- memory + kImageSize,
- kImageOatSize));
+ std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize,
+ kImageOatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(image_space != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+
const ImageHeader& image_header = image_space->GetImageHeader();
DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
@@ -257,36 +254,44 @@
constexpr size_t kImage3OatSize = kPageSize;
constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
- uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
- uint8_t* space1_begin = memory;
- memory += kImage1Size;
- uint8_t* space2_begin = memory;
- memory += kImage2Size;
- uint8_t* space1_oat_begin = memory;
- memory += kImage1OatSize;
- uint8_t* space2_oat_begin = memory;
- memory += kImage2OatSize;
- uint8_t* space3_begin = memory;
+ std::string error_str;
+ MemMap reservation = MemMap::MapAnonymous("reserve",
+ kMemorySize,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
- std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
- kImage1Size,
- space1_oat_begin,
- kImage1OatSize));
+ std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size,
+ kImage1OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space1 != nullptr);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
-
- std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
- kImage2Size,
- space2_oat_begin,
- kImage2OatSize));
+ std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size,
+ kImage2OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space2 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
// Finally put a 3rd image space.
- std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
- kImage3Size,
- space3_begin + kImage3Size,
- kImage3OatSize));
+ image_reservation = reservation.TakeReservedMemory(kImage3Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size,
+ kImage3OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space3 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_FALSE(reservation.IsValid());
// Check that we do not include the oat if there is no space after.
ImmuneSpaces spaces;
@@ -323,12 +328,29 @@
constexpr size_t kGuardSize = kPageSize;
constexpr size_t kImage4Size = kImageBytes - kPageSize;
constexpr size_t kImage4OatSize = kPageSize;
- uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
- std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
- kImage4Size,
- memory2 + kGuardSize + kImage4Size,
- kImage4OatSize));
+
+ reservation = MemMap::MapAnonymous("reserve",
+ kImage4Size + kImage4OatSize + kGuardSize * 2,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap guard = reservation.TakeReservedMemory(kGuardSize);
+ ASSERT_TRUE(guard.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ guard.Reset(); // Release the guard memory.
+ image_reservation = reservation.TakeReservedMemory(kImage4Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size,
+ kImage4OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space4 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_EQ(reservation.Size(), kGuardSize);
+ reservation.Reset(); // Release the guard memory.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
@@ -346,12 +368,28 @@
// Layout: [guard page][image][oat][guard page]
constexpr size_t kImage5Size = kImageBytes + kPageSize;
constexpr size_t kImage5OatSize = kPageSize;
- uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
- std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
- kImage5Size,
- memory3 + kGuardSize + kImage5Size,
- kImage5OatSize));
+ reservation = MemMap::MapAnonymous("reserve",
+ kImage5Size + kImage5OatSize + kGuardSize * 2,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ guard = reservation.TakeReservedMemory(kGuardSize);
+ ASSERT_TRUE(guard.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ guard.Reset(); // Release the guard memory.
+ image_reservation = reservation.TakeReservedMemory(kImage5Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size,
+ kImage5OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space5 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_EQ(reservation.Size(), kGuardSize);
+ reservation.Reset(); // Release the guard memory.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 399f9ff..9e5cb9c 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,9 @@
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"mark sweep sweep array free buffer",
- /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a31cbe7..467b22c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -505,11 +505,11 @@
// Create bump pointer spaces instead of a backup space.
main_mem_map_2.Reset();
bump_pointer_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
} else if (main_mem_map_2.IsValid()) {
@@ -529,8 +529,7 @@
CHECK(!non_moving_space_->CanMoveObjects());
// Allocate the large object space.
if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
- large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
- capacity_);
+ large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
} else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
@@ -696,7 +695,9 @@
request_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb=*/ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
out_error_str);
if (map.IsValid() || request_begin == nullptr) {
return map;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index f6db070..fa10150 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -39,6 +39,8 @@
16 * KB,
PROT_READ,
/*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 497a0c2..609ccee 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -24,15 +24,13 @@
namespace gc {
namespace space {
-BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
- uint8_t* requested_begin) {
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 59d4d27..383bf7a 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -46,7 +46,7 @@
// Create a bump pointer space with the requested sizes. The requested base address is not
// guaranteed to be granted, if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
- static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+ static BumpPointerSpace* Create(const std::string& name, size_t capacity);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
// Allocate num_bytes, returns null if the space is full.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 73582a0..7955ff9 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -108,8 +108,10 @@
}
}
-DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, uint8_t* requested_begin,
+DlMallocSpace* DlMallocSpace::Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -117,8 +119,7 @@
LOG(INFO) << "DlMallocSpace::Create entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
- << " capacity=" << PrettySize(capacity)
- << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+ << " capacity=" << PrettySize(capacity);
}
// Memory we promise to dlmalloc before it asks for morecore.
@@ -126,8 +127,7 @@
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = kPageSize;
- MemMap mem_map =
- CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index c63ff71..e91602f 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -46,8 +46,11 @@
// base address is not guaranteed to be granted, if it is required,
// the caller should call Begin on the returned space to confirm the
// request was granted.
- static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin, bool can_move_objects);
+ static DlMallocSpace* Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool can_move_objects);
// Virtual to allow MemoryToolMallocSpace to intercept.
mirror::Object* AllocWithGrowth(Thread* self,
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index f9b41da..92b56bd 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -22,14 +22,16 @@
namespace gc {
namespace space {
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return DlMallocSpace::Create(
+ name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index 5758e0c..550d1bb 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -22,14 +22,16 @@
namespace gc {
namespace space {
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return DlMallocSpace::Create(
+ name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9e67957..875efe2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -622,9 +622,9 @@
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg) {
TimingLogger::ScopedTiming timing("MapImageFile", logger);
- uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+ uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
return MemMap::MapFileAtAddress(address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
@@ -649,11 +649,9 @@
// Reserve output and decompress into it.
MemMap map = MemMap::MapAnonymous(image_location,
- address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
- /*reuse=*/ false,
image_reservation,
error_msg);
if (map.IsValid()) {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a7f82f6..1658dba 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,9 @@
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
- /* addr= */ nullptr,
num_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -346,14 +345,13 @@
return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}
-FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +370,9 @@
std::string error_msg;
allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- /* addr= */ nullptr,
alloc_info_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 47167fa..a4d6a24 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -184,7 +184,7 @@
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
- static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
+ static FreeListSpace* Create(const std::string& name, size_t capacity);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index d55ccd6..62bc26e 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -42,7 +42,7 @@
if (i == 0) {
los = space::LargeObjectMapSpace::Create("large object space");
} else {
- los = space::FreeListSpace::Create("large object space", nullptr, capacity);
+ los = space::FreeListSpace::Create("large object space", capacity);
}
// Make sure the bitmap is not empty and actually covers at least how much we expect.
@@ -157,7 +157,7 @@
if (los_type == 0) {
los = space::LargeObjectMapSpace::Create("large object space");
} else {
- los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
+ los = space::FreeListSpace::Create("large object space", 128 * MB);
}
Thread* self = Thread::Current();
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 189aeb5..b5e6b62 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -82,8 +82,7 @@
size_t starting_size,
size_t* initial_size,
size_t* growth_limit,
- size_t* capacity,
- uint8_t* requested_begin) {
+ size_t* capacity) {
// Sanity check arguments
if (starting_size > *initial_size) {
*initial_size = starting_size;
@@ -107,10 +106,9 @@
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
*capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6bf2d71..5dd8136 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -157,8 +157,7 @@
size_t starting_size,
size_t* initial_size,
size_t* growth_limit,
- size_t* capacity,
- uint8_t* requested_begin);
+ size_t* capacity);
// When true the low memory mode argument specifies that the heap wishes the created allocator to
// be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 31bbfb8..2774e26 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,9 @@
requested_begin,
capacity + kRegionSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
if (mem_map.IsValid() || requested_begin == nullptr) {
break;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 10ff1c1..36fd864 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -133,17 +133,19 @@
delete rosalloc_;
}
-RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, uint8_t* requested_begin,
- bool low_memory_mode, bool can_move_objects) {
+RosAllocSpace* RosAllocSpace::Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
+ bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
VLOG(startup) << "RosAllocSpace::Create entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
- << " capacity=" << PrettySize(capacity)
- << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+ << " capacity=" << PrettySize(capacity);
}
// Memory we promise to rosalloc before it asks for morecore.
@@ -151,8 +153,7 @@
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = Heap::kDefaultStartingSize;
- MemMap mem_map =
- CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 5162a06..9e95c16 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -38,8 +38,11 @@
// base address is not guaranteed to be granted, if it is required,
// the caller should call Begin on the returned space to confirm the
// request was granted.
- static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
+ static RosAllocSpace* Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
bool can_move_objects);
static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
const std::string& name,
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index b50859b..f0b3231 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -22,15 +22,20 @@
namespace gc {
namespace space {
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return RosAllocSpace::Create(name,
+ initial_size,
+ growth_limit,
+ capacity,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+ /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index 5e7ced6..d7e7e90 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -22,15 +22,19 @@
namespace gc {
namespace space {
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return RosAllocSpace::Create(name, initial_size,
+ growth_limit,
+ capacity,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+ /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/space_create_test.cc b/runtime/gc/space/space_create_test.cc
index ca5f306..d3db679 100644
--- a/runtime/gc/space/space_create_test.cc
+++ b/runtime/gc/space/space_create_test.cc
@@ -34,25 +34,22 @@
MallocSpace* CreateSpace(const std::string& name,
size_t initial_size,
size_t growth_limit,
- size_t capacity,
- uint8_t* requested_begin) {
+ size_t capacity) {
const MallocSpaceType type = GetParam();
if (type == kMallocSpaceDlMalloc) {
return DlMallocSpace::Create(name,
initial_size,
growth_limit,
capacity,
- requested_begin,
- false);
+ /*can_move_objects=*/ false);
}
DCHECK_EQ(static_cast<uint32_t>(type), static_cast<uint32_t>(kMallocSpaceRosAlloc));
return RosAllocSpace::Create(name,
initial_size,
growth_limit,
capacity,
- requested_begin,
Runtime::Current()->GetHeap()->IsLowMemoryMode(),
- false);
+ /*can_move_objects=*/ false);
}
};
@@ -62,25 +59,25 @@
{
// Init < max == growth
- std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+ std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Init == max == growth
- space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB));
EXPECT_TRUE(space != nullptr);
// Init > max == growth
- space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB));
EXPECT_TRUE(space == nullptr);
// Growth == init < max
- space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Growth < init < max
- space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB));
EXPECT_TRUE(space == nullptr);
// Init < growth < max
- space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Init < max < growth
- space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB));
EXPECT_TRUE(space == nullptr);
}
}
@@ -91,7 +88,7 @@
// the GC works with the ZygoteSpace.
TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
size_t dummy;
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -225,7 +222,7 @@
TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
size_t dummy = 0;
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -301,7 +298,7 @@
}
TEST_P(SpaceCreateTest, AllocAndFreeListTestBody) {
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 5aac217..1b111e3 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -123,8 +123,10 @@
return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
}
- typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin);
+ typedef MallocSpace* (*CreateSpaceFn)(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity);
void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
int round, size_t growth_limit);
@@ -323,7 +325,7 @@
size_t initial_size = 4 * MB;
size_t growth_limit = 8 * MB;
size_t capacity = 16 * MB;
- MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+ MallocSpace* space(create_space("test", initial_size, growth_limit, capacity));
ASSERT_TRUE(space != nullptr);
// Basic sanity
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 6db4790..361dccb 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -80,10 +80,9 @@
const size_t table_bytes = max_count * sizeof(IrtEntry);
table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
- /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
error_msg);
if (!table_mem_map_.IsValid() && error_msg->empty()) {
*error_msg = "Unable to map memory for indirect ref table";
@@ -223,10 +222,9 @@
const size_t table_bytes = new_size * sizeof(IrtEntry);
MemMap new_map = MemMap::MapAnonymous("indirect ref table",
- /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
error_msg);
if (!new_map.IsValid()) {
return false;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 8239602..082b311 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -304,12 +304,9 @@
base_flags = MAP_PRIVATE | MAP_ANON;
data_pages = MemMap::MapAnonymous(
"data-code-cache",
- /* addr= */ nullptr,
data_capacity + exec_capacity,
kProtRW,
/* low_4gb= */ true,
- /* reuse= */ false,
- /* reservation= */ nullptr,
&error_str);
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index f9f87d8..203d200 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -175,10 +175,9 @@
std::string error_message;
size_t length = static_cast<size_t>(end - start);
MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
- /* addr= */ nullptr,
length,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_message);
if (!dex_mem_map.IsValid()) {
ScopedObjectAccess soa(env);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a152692..c312126 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1161,8 +1161,10 @@
reinterpret_cast<uint8_t*>(kSentinelAddr),
kPageSize,
PROT_NONE,
- /* low_4gb= */ true,
- /* error_msg= */ nullptr);
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
+ /*error_msg=*/ nullptr);
if (!protected_fault_page_.IsValid()) {
LOG(WARNING) << "Could not reserve sentinel fault page";
} else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 20b3327..f2e5012 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -191,10 +191,9 @@
TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
std::string error_msg;
MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
- /* addr= */ nullptr,
128 * kPageSize, // Just some small stack.
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(stack.IsValid()) << error_msg;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index a245f65..8723c99 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -47,10 +47,9 @@
stack_size += kPageSize;
std::string error_msg;
stack_ = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
stack_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(stack_.IsValid()) << error_msg;
CHECK_ALIGNED(stack_.Begin(), kPageSize);