-rw-r--r--  compiler/common_compiler_test.cc              1
-rw-r--r--  dex2oat/linker/image_writer.cc                1
-rw-r--r--  libartbase/base/mem_map.h                     8
-rw-r--r--  libartbase/base/mem_map_test.cc              27
-rw-r--r--  libartbase/base/zip_archive.cc                1
-rw-r--r--  openjdkjvmti/ti_class_definition.cc           2
-rw-r--r--  openjdkjvmti/ti_redefine.cc                   1
-rw-r--r--  runtime/base/mem_map_arena_pool.cc            1
-rw-r--r--  runtime/dexopt_test.cc                        1
-rw-r--r--  runtime/elf_file.cc                           1
-rw-r--r--  runtime/gc/accounting/atomic_stack.h          1
-rw-r--r--  runtime/gc/accounting/bitmap.cc               1
-rw-r--r--  runtime/gc/accounting/card_table.cc           1
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h    1
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc         1
-rw-r--r--  runtime/gc/allocator/rosalloc.cc              1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc    1
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc    3
-rw-r--r--  runtime/gc/collector/mark_sweep.cc            1
-rw-r--r--  runtime/gc/heap.cc                            2
-rw-r--r--  runtime/gc/heap_test.cc                       1
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc        1
-rw-r--r--  runtime/gc/space/image_space.cc               1
-rw-r--r--  runtime/gc/space/large_object_space.cc        3
-rw-r--r--  runtime/gc/space/malloc_space.cc              1
-rw-r--r--  runtime/gc/space/region_space.cc              1
-rw-r--r--  runtime/indirect_reference_table.cc           2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc       1
-rw-r--r--  runtime/runtime.cc                            1
-rw-r--r--  runtime/runtime_callbacks_test.cc             3
-rw-r--r--  runtime/thread_pool.cc                        1
31 files changed, 9 insertions, 64 deletions
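
Every file below makes the same mechanical change: call sites of MemMap::MapAnonymous that passed an explicit /* reuse */ false drop that argument and pick up a new forwarding overload added to libartbase/base/mem_map.h. A before/after sketch of a typical call site (names abbreviated; not a line from this patch):

  std::string error_msg;
  // Before: every caller spelled out a flag it never varied.
  MemMap old_style = MemMap::MapAnonymous(name, addr, size, prot, low_4gb,
                                          /* reuse */ false, &error_msg);
  // After: the five-parameter overload supplies reuse == false itself.
  MemMap new_style = MemMap::MapAnonymous(name, addr, size, prot, low_4gb,
                                          &error_msg);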
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 2f017662e2..d603d9673c 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -328,7 +328,6 @@ void CommonCompilerTest::ReserveImageSpace() {
(size_t)120 * 1024 * 1024, // 120MB
PROT_NONE,
false /* no need for 4gb flag with fixed mmap */,
- false /* not reusing existing reservation */,
&error_msg);
CHECK(image_reservation_.IsValid()) << error_msg;
}
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 27e797446e..67ded3278e 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -735,7 +735,6 @@ bool ImageWriter::AllocMemory() {
length,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
if (UNLIKELY(!image_info.image_.IsValid())) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 525fade9c1..cd7d502796 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -131,6 +131,14 @@ class MemMap {
bool reuse,
std::string* error_msg,
bool use_ashmem = true);
+ static MemMap MapAnonymous(const char* name,
+ uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ std::string* error_msg) {
+ return MapAnonymous(name, addr, byte_count, prot, low_4gb, /* reuse */ false, error_msg);
+ }
// Create placeholder for a region allocated by direct call to mmap.
// This is useful when we do not have control over the code calling mmap,
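
The overload added above forwards to the original six-parameter MapAnonymous with reuse fixed to false, which is what lets the call sites in the rest of this patch shed an argument with no change in behavior. A minimal sketch of a caller, assuming an ART checkout (AllocateScratch is a hypothetical helper, not part of this change):

  #include <sys/mman.h>  // PROT_READ, PROT_WRITE

  #include <cstddef>
  #include <string>

  #include "base/mem_map.h"  // libartbase; declares art::MemMap

  // Reserves a fresh anonymous read-write mapping. There is no longer a
  // /* reuse */ false argument to thread through; the header forwards it.
  art::MemMap AllocateScratch(size_t byte_count, std::string* error_msg) {
    return art::MemMap::MapAnonymous("scratch-buffer",
                                     /* addr */ nullptr,
                                     byte_count,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
                                     error_msg);
  }

Forwarding through an overload, rather than giving reuse a default argument, looks deliberate: reuse sits in the middle of the parameter list, and C++ offers no way to skip a middle argument at a call site, so a default on reuse alone would never let callers omit it while still passing error_msg (and the existing trailing default, use_ashmem = true, stays intact).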
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index b2f5c728e4..396f12b421 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -57,7 +57,6 @@ class MemMapTest : public CommonArtTest {
size,
PROT_READ,
low_4gb,
- /* reuse */ false,
&error_msg);
CHECK(map.IsValid());
return map.Begin();
@@ -73,7 +72,6 @@ class MemMapTest : public CommonArtTest {
2 * page_size,
PROT_READ | PROT_WRITE,
low_4gb,
- /* reuse */ false,
&error_msg);
// Check its state and write to it.
ASSERT_TRUE(m0.IsValid());
@@ -171,7 +169,6 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) {
kPageSize,
PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
@@ -179,7 +176,6 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) {
kPageSize,
PROT_WRITE | PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
void* source_addr = source.Begin();
@@ -212,7 +208,6 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
// source.
PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
@@ -220,7 +215,6 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
3 * kPageSize,
PROT_WRITE | PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -256,7 +250,6 @@ TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
3 * kPageSize,
PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
@@ -264,7 +257,6 @@ TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
kPageSize,
PROT_WRITE | PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -298,7 +290,6 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
// the way we we move source.
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
@@ -309,7 +300,6 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
2 * kPageSize,
PROT_WRITE | PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
@@ -346,7 +336,6 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
0,
PROT_READ,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
@@ -357,7 +346,6 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -371,7 +359,6 @@ TEST_F(MemMapTest, MapAnonymousFailNullError) {
0x20000,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
nullptr);
ASSERT_FALSE(map.IsValid());
}
@@ -385,7 +372,6 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
0,
PROT_READ,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
@@ -396,7 +382,6 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -435,7 +420,6 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -446,7 +430,6 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -457,7 +440,6 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_FALSE(map2.IsValid()) << error_msg;
ASSERT_TRUE(!error_msg.empty());
@@ -494,7 +476,6 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
size,
PROT_READ | PROT_WRITE,
/*low_4gb*/ true,
- /* reuse */ false,
&error_msg);
if (map.IsValid()) {
break;
@@ -516,7 +497,6 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
2 * kPageSize, // brings it over the top.
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -532,7 +512,6 @@ TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -546,7 +525,6 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
0x20000000,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -586,7 +564,6 @@ TEST_F(MemMapTest, CheckNoGaps) {
kPageSize * kNumPages,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -602,7 +579,6 @@ TEST_F(MemMapTest, CheckNoGaps) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -611,7 +587,6 @@ TEST_F(MemMapTest, CheckNoGaps) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -620,7 +595,6 @@ TEST_F(MemMapTest, CheckNoGaps) {
kPageSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -653,7 +627,6 @@ TEST_F(MemMapTest, AlignBy) {
14 * page_size,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
ASSERT_TRUE(m0.IsValid());
uint8_t* base0 = m0.Begin();
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index 3c68ca1de8..a841bae8ea 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -79,7 +79,6 @@ MemMap ZipEntry::ExtractToMemMap(const char* zip_filename,
GetUncompressedLength(),
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
error_msg);
if (!map.IsValid()) {
DCHECK(!error_msg->empty());
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 030ad98d02..895e73450e 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -250,7 +250,6 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
dequick_size,
PROT_NONE,
/*low_4gb*/ false,
- /*reuse*/ false,
&error);
mmap_name += "-TEMP";
temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
@@ -258,7 +257,6 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
dequick_size,
PROT_READ | PROT_WRITE,
/*low_4gb*/ false,
- /*reuse*/ false,
&error);
if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
// Need to save the initial dexfile so we don't need to search for it in the fault-handler.
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 6cba48a0c6..8707e272c6 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -309,7 +309,6 @@ art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location,
data.size(),
PROT_READ|PROT_WRITE,
/*low_4gb*/ false,
- /*reuse*/ false,
error_msg);
if (LIKELY(map.IsValid())) {
memcpy(map.Begin(), data.data(), data.size());
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 0f472e2604..a9fbafe7ab 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -62,7 +62,6 @@ MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
size,
PROT_READ | PROT_WRITE,
low_4gb,
- /* reuse */ false,
&error_msg);
CHECK(map.IsValid()) << error_msg;
return map;
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index b0eef00551..9e3159d5e7 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -254,7 +254,6 @@ void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
end - start,
PROT_NONE,
/* low_4gb*/ false,
- /* reuse */ false,
&error_msg));
ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 4ae736299b..d45a689fd6 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1168,7 +1168,6 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
loaded_size,
PROT_NONE,
low_4gb,
- /* reuse */ false,
error_msg);
if (!reserve.IsValid()) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 2a71dec4d5..10af10d1a6 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -257,7 +257,6 @@ class AtomicStack {
capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index e157e5e8c4..bb2beaa94c 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -53,7 +53,6 @@ MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
bitmap_size,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 89645e0083..7cddec6242 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -69,7 +69,6 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
capacity + 256,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index d8b1bb2d88..8bdf6da6fe 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -44,7 +44,6 @@ class ReadBarrierTable {
capacity,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index f87a67e0de..2946486dfb 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -89,7 +89,6 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
bitmap_size,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 1639a82718..0dbafde2a5 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -96,7 +96,6 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 53fd1f42cf..fdd0b62d3e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -139,7 +139,6 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 558a4a7fd0..145bd0208d 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -88,7 +88,6 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
image_size,
PROT_READ | PROT_WRITE,
/*low_4gb*/true,
- /*reuse*/false,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << error_str;
@@ -102,7 +101,6 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
oat_size,
PROT_READ | PROT_WRITE,
/*low_4gb*/true,
- /*reuse*/false,
&error_str);
if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
@@ -146,7 +144,6 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
size,
PROT_READ | PROT_WRITE,
/*low_4gb*/ true,
- /*reuse*/ false,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << "Failed to allocate memory region " << error_str;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 334c7a02e0..997d3b6f87 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -109,7 +109,6 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1578db5613..7913354433 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -409,7 +409,6 @@ Heap::Heap(size_t initial_size,
capacity_,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_str);
}
CHECK(main_mem_map_1.IsValid()) << error_str;
@@ -669,7 +668,6 @@ MemMap Heap::MapAnonymousPreferredAddress(const char* name,
capacity,
PROT_READ | PROT_WRITE,
/* low_4gb*/ true,
- /* reuse */ false,
out_error_str);
if (map.IsValid() || request_begin == nullptr) {
return map;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index d35ae38f34..8720a3e014 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -38,7 +38,6 @@ class HeapTest : public CommonRuntimeTest {
16 * KB,
PROT_READ,
/*low_4gb*/ true,
- /*reuse*/ false,
&error_msg);
ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 2712ec2a35..42453f581a 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -33,7 +33,6 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
capacity,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 2a4803ab14..71786279e2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -818,7 +818,6 @@ class ImageSpace::Loader {
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
/*low_4gb*/ true,
- /*reuse*/ false,
error_msg);
if (map.IsValid()) {
const size_t stored_size = image_header.GetDataSize();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index ada59b30f4..76ea9fda29 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -140,7 +140,6 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
num_bytes,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -354,7 +353,6 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested
size,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -378,7 +376,6 @@ FreeListSpace::FreeListSpace(const std::string& name,
alloc_info_size,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 91e0ce8102..445560ad8d 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -110,7 +110,6 @@ MemMap MallocSpace::CreateMemMap(const std::string& name,
*capacity,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index a2e2e95540..f74fa86467 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -59,7 +59,6 @@ MemMap RegionSpace::CreateMemMap(const std::string& name,
capacity + kRegionSize,
PROT_READ | PROT_WRITE,
/* low_4gb */ true,
- /* reuse */ false,
&error_msg);
if (mem_map.IsValid() || requested_begin == nullptr) {
break;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 098db9f743..8ab4a9b47e 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -83,7 +83,6 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
table_bytes,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
error_msg);
if (!table_mem_map_.IsValid() && error_msg->empty()) {
*error_msg = "Unable to map memory for indirect ref table";
@@ -227,7 +226,6 @@ bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
table_bytes,
PROT_READ | PROT_WRITE,
/* is_low_4gb */ false,
- /* reuse */ false,
error_msg);
if (!new_map.IsValid()) {
return false;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index d49ebd1e80..71fabd0250 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -177,7 +177,6 @@ static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
length,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_message);
if (!dex_mem_map.IsValid()) {
ScopedObjectAccess soa(env);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9248bb928c..30d45871ff 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1167,7 +1167,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
kPageSize,
PROT_NONE,
/* low_4g */ true,
- /* reuse */ false,
/* error_msg */ nullptr);
if (!protected_fault_page_.IsValid()) {
LOG(WARNING) << "Could not reserve sentinel fault page";
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 4c4dcd893c..ed0472f414 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -194,8 +194,7 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttac
/* addr */ nullptr,
128 * kPageSize, // Just some small stack.
PROT_READ | PROT_WRITE,
- false,
- false,
+ /* low_4gb */ false,
&error_msg);
ASSERT_TRUE(stack.IsValid()) << error_msg;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 2a69bc6c10..28fc59c814 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -51,7 +51,6 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
stack_size,
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
- /* reuse */ false,
&error_msg);
CHECK(stack_.IsValid()) << error_msg;
CHECK_ALIGNED(stack_.Begin(), kPageSize);