Refactor MemMap::MapAnonymous().

Remove the address argument from the shortcut overload and
introduce one more shortcut overload. This makes it easier
to find all the uses where we pass a non-null address hint.

Remove the `requested_begin` parameter from factory functions
and helpers where we were always passing null. Rewrite some
tests to use the reservation API instead of raw address hints.
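
For reference, the call shapes after this change (a sketch inferred
from the call sites in this diff; `byte_count`, `reservation` and
`requested_begin` below are placeholders, see the MemMap declarations
for the authoritative signatures):

    std::string error_msg;
    // Shortcut with no address hint.
    MemMap map1 = MemMap::MapAnonymous(
        "name", byte_count, PROT_READ | PROT_WRITE, /*low_4gb=*/ false, &error_msg);
    // New shortcut carving the mapping out of an existing reservation.
    MemMap map2 = MemMap::MapAnonymous(
        "name", byte_count, PROT_READ | PROT_WRITE, /*low_4gb=*/ true,
        /*reservation=*/ &reservation, &error_msg);
    // Full overload for the few places that really pass an address hint.
    MemMap map3 = MemMap::MapAnonymous(
        "name", requested_begin, byte_count, PROT_READ | PROT_WRITE, /*low_4gb=*/ true,
        /*reuse=*/ false, /*reservation=*/ nullptr, &error_msg);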

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 118408378
Change-Id: Ibbbb96667e7cc11cf7fea119892463d8dbc9a8b5
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 313b2b4..9431f80 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,9 @@
   void Init() {
     std::string error_msg;
     mem_map_ = MemMap::MapAnonymous(name_.c_str(),
-                                    /* addr= */ nullptr,
                                     capacity_ * sizeof(begin_[0]),
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb= */ false,
+                                    /*low_4gb=*/ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
     uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 80c4c76..8a15af2 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,9 @@
       RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        /* addr= */ nullptr,
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ false,
+                                        /*low_4gb=*/ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 9a5bde8..fdf1615 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,9 @@
   /* Allocate an extra 256 bytes to allow fixed low-byte of base */
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous("card table",
-                                        /* addr= */ nullptr,
                                         capacity + 256,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ false,
+                                        /*low_4gb=*/ false,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index a617789..b39628b 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -185,7 +185,7 @@
   ResetClass();
   // Create another space that we can put references in.
   std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create(
-      "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false));
+      "other space", 128 * KB, 4 * MB, 4 * MB, /*can_move_objects=*/ false));
   ASSERT_TRUE(other_space.get() != nullptr);
   {
     ScopedThreadSuspension sts(self, kSuspended);
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index b369a66..7eca792 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,9 @@
               static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
     std::string error_msg;
     mem_map_ = MemMap::MapAnonymous("read barrier table",
-                                    /* addr= */ nullptr,
                                     capacity,
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb= */ false,
+                                    /*low_4gb=*/ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
         << "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 76d5d9d..dc223db 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,9 @@
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        /* addr= */ nullptr,
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ false,
+                                        /*low_4gb=*/ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 4e2cf2b..b90a95d 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -92,10 +92,9 @@
   size_t max_num_of_pages = max_capacity_ / kPageSize;
   std::string error_msg;
   page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
-                                           /* addr= */ nullptr,
                                            RoundUp(max_num_of_pages, kPageSize),
                                            PROT_READ | PROT_WRITE,
-                                           /* low_4gb= */ false,
+                                           /*low_4gb=*/ false,
                                            &error_msg);
   CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
   page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 2ae4676..d728e7d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,9 @@
     std::string error_msg;
     sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
         "concurrent copying sweep array free buffer",
-        /* addr= */ nullptr,
         RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
         PROT_READ | PROT_WRITE,
-        /* low_4gb= */ false,
+        /*low_4gb=*/ false,
         &error_msg);
     CHECK(sweep_array_free_buffer_mem_map_.IsValid())
         << "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 0e5fac1..c2a67bf 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -78,18 +78,20 @@
   }
 
   // Create an image space, the oat file is optional.
-  DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
-                                    size_t image_size,
-                                    uint8_t* oat_begin,
-                                    size_t oat_size) {
+  DummyImageSpace* CreateImageSpace(size_t image_size,
+                                    size_t oat_size,
+                                    MemMap* image_reservation,
+                                    MemMap* oat_reservation) {
+    DCHECK(image_reservation != nullptr);
+    DCHECK(oat_reservation != nullptr);
     std::string error_str;
-    MemMap map = MemMap::MapAnonymous("DummyImageSpace",
-                                      image_begin,
-                                      image_size,
-                                      PROT_READ | PROT_WRITE,
-                                      /*low_4gb=*/true,
-                                      &error_str);
-    if (!map.IsValid()) {
+    MemMap image_map = MemMap::MapAnonymous("DummyImageSpace",
+                                            image_size,
+                                            PROT_READ | PROT_WRITE,
+                                            /*low_4gb=*/ true,
+                                            /*reservation=*/ image_reservation,
+                                            &error_str);
+    if (!image_map.IsValid()) {
       LOG(ERROR) << error_str;
       return nullptr;
     }
@@ -97,10 +99,10 @@
     std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
     live_bitmaps_.pop_back();
     MemMap oat_map = MemMap::MapAnonymous("OatMap",
-                                          oat_begin,
                                           oat_size,
                                           PROT_READ | PROT_WRITE,
-                                          /*low_4gb=*/true,
+                                          /*low_4gb=*/ true,
+                                          /*reservation=*/ oat_reservation,
                                           &error_str);
     if (!oat_map.IsValid()) {
       LOG(ERROR) << error_str;
@@ -109,17 +111,17 @@
     std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
     // Create image header.
     ImageSection sections[ImageHeader::kSectionCount];
-    new (map.Begin()) ImageHeader(
-        /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
-        /*image_size=*/map.Size(),
+    new (image_map.Begin()) ImageHeader(
+        /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
+        /*image_size=*/ image_map.Size(),
         sections,
-        /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
-        /*oat_checksum=*/0u,
+        /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
+        /*oat_checksum=*/ 0u,
         // The oat file data in the header is always right after the image space.
-        /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
-        /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
-        /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
-        /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+        /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+        /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+        /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+        /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
         /*boot_image_begin=*/0u,
         /*boot_image_size=*/0u,
         /*boot_oat_begin=*/0u,
@@ -127,29 +129,12 @@
         /*pointer_size=*/sizeof(void*),
         ImageHeader::kStorageModeUncompressed,
         /*data_size=*/0u);
-    return new DummyImageSpace(std::move(map),
+    return new DummyImageSpace(std::move(image_map),
                                std::move(live_bitmap),
                                std::move(oat_file),
                                std::move(oat_map));
   }
 
-  // Does not reserve the memory, the caller needs to be sure no other threads will map at the
-  // returned address.
-  static uint8_t* GetContinuousMemoryRegion(size_t size) {
-    std::string error_str;
-    MemMap map = MemMap::MapAnonymous("reserve",
-                                      /* addr= */ nullptr,
-                                      size,
-                                      PROT_READ | PROT_WRITE,
-                                      /*low_4gb=*/ true,
-                                      &error_str);
-    if (!map.IsValid()) {
-      LOG(ERROR) << "Failed to allocate memory region " << error_str;
-      return nullptr;
-    }
-    return map.Begin();
-  }
-
  private:
   // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
   // them to randomly get placed somewhere where we want an image space.
@@ -206,13 +191,25 @@
   constexpr size_t kImageOatSize = 321 * kPageSize;
   constexpr size_t kOtherSpaceSize = 100 * kPageSize;
 
-  uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
+  std::string error_str;
+  MemMap reservation = MemMap::MapAnonymous("reserve",
+                                            kImageSize + kImageOatSize + kOtherSpaceSize,
+                                            PROT_READ | PROT_WRITE,
+                                            /*low_4gb=*/ true,
+                                            &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
-  std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
-                                                                kImageSize,
-                                                                memory + kImageSize,
-                                                                kImageOatSize));
+  std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize,
+                                                                kImageOatSize,
+                                                                &image_reservation,
+                                                                &reservation));
   ASSERT_TRUE(image_space != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+
   const ImageHeader& image_header = image_space->GetImageHeader();
   DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
 
@@ -257,36 +254,44 @@
   constexpr size_t kImage3OatSize = kPageSize;
   constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
   constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
-  uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
-  uint8_t* space1_begin = memory;
-  memory += kImage1Size;
-  uint8_t* space2_begin = memory;
-  memory += kImage2Size;
-  uint8_t* space1_oat_begin = memory;
-  memory += kImage1OatSize;
-  uint8_t* space2_oat_begin = memory;
-  memory += kImage2OatSize;
-  uint8_t* space3_begin = memory;
+  std::string error_str;
+  MemMap reservation = MemMap::MapAnonymous("reserve",
+                                            kMemorySize,
+                                            PROT_READ | PROT_WRITE,
+                                            /*low_4gb=*/ true,
+                                            &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
-  std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
-                                                           kImage1Size,
-                                                           space1_oat_begin,
-                                                           kImage1OatSize));
+  std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size,
+                                                           kImage1OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space1 != nullptr);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
-
-  std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
-                                                           kImage2Size,
-                                                           space2_oat_begin,
-                                                           kImage2OatSize));
+  std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size,
+                                                           kImage2OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space2 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
 
   // Finally put a 3rd image space.
-  std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
-                                                           kImage3Size,
-                                                           space3_begin + kImage3Size,
-                                                           kImage3OatSize));
+  image_reservation = reservation.TakeReservedMemory(kImage3Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size,
+                                                           kImage3OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space3 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_FALSE(reservation.IsValid());
 
   // Check that we do not include the oat if there is no space after.
   ImmuneSpaces spaces;
@@ -323,12 +328,29 @@
   constexpr size_t kGuardSize = kPageSize;
   constexpr size_t kImage4Size = kImageBytes - kPageSize;
   constexpr size_t kImage4OatSize = kPageSize;
-  uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
-  std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
-                                                           kImage4Size,
-                                                           memory2 + kGuardSize + kImage4Size,
-                                                           kImage4OatSize));
+
+  reservation = MemMap::MapAnonymous("reserve",
+                                     kImage4Size + kImage4OatSize + kGuardSize * 2,
+                                     PROT_READ | PROT_WRITE,
+                                     /*low_4gb=*/ true,
+                                     &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  MemMap guard = reservation.TakeReservedMemory(kGuardSize);
+  ASSERT_TRUE(guard.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  guard.Reset();  // Release the guard memory.
+  image_reservation = reservation.TakeReservedMemory(kImage4Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size,
+                                                           kImage4OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space4 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  ASSERT_EQ(reservation.Size(), kGuardSize);
+  reservation.Reset();  // Release the guard memory.
   {
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
@@ -346,12 +368,28 @@
   // Layout:  [guard page][image][oat][guard page]
   constexpr size_t kImage5Size = kImageBytes + kPageSize;
   constexpr size_t kImage5OatSize = kPageSize;
-  uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
-  std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
-                                                           kImage5Size,
-                                                           memory3 + kGuardSize + kImage5Size,
-                                                           kImage5OatSize));
+  reservation = MemMap::MapAnonymous("reserve",
+                                     kImage5Size + kImage5OatSize + kGuardSize * 2,
+                                     PROT_READ | PROT_WRITE,
+                                     /*low_4gb=*/ true,
+                                     &error_str);
+  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+  guard = reservation.TakeReservedMemory(kGuardSize);
+  ASSERT_TRUE(guard.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  guard.Reset();  // Release the guard memory.
+  image_reservation = reservation.TakeReservedMemory(kImage5Size);
+  ASSERT_TRUE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size,
+                                                           kImage5OatSize,
+                                                           &image_reservation,
+                                                           &reservation));
   ASSERT_TRUE(space5 != nullptr);
+  ASSERT_FALSE(image_reservation.IsValid());
+  ASSERT_TRUE(reservation.IsValid());
+  ASSERT_EQ(reservation.Size(), kGuardSize);
+  reservation.Reset();  // Release the guard memory.
   {
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 399f9ff..9e5cb9c 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,9 @@
   std::string error_msg;
   sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
       "mark sweep sweep array free buffer",
-      /* addr= */ nullptr,
       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
       PROT_READ | PROT_WRITE,
-      /* low_4gb= */ false,
+      /*low_4gb=*/ false,
       &error_msg);
   CHECK(sweep_array_free_buffer_mem_map_.IsValid())
       << "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a31cbe7..467b22c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -505,11 +505,11 @@
       // Create bump pointer spaces instead of a backup space.
       main_mem_map_2.Reset();
       bump_pointer_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
       CHECK(bump_pointer_space_ != nullptr);
       AddSpace(bump_pointer_space_);
       temp_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
       CHECK(temp_space_ != nullptr);
       AddSpace(temp_space_);
     } else if (main_mem_map_2.IsValid()) {
@@ -529,8 +529,7 @@
   CHECK(!non_moving_space_->CanMoveObjects());
   // Allocate the large object space.
   if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
-    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
-                                                       capacity_);
+    large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
   } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
     large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
@@ -696,7 +695,9 @@
                                       request_begin,
                                       capacity,
                                       PROT_READ | PROT_WRITE,
-                                      /* low_4gb=*/ true,
+                                      /*low_4gb=*/ true,
+                                      /*reuse=*/ false,
+                                      /*reservation=*/ nullptr,
                                       out_error_str);
     if (map.IsValid() || request_begin == nullptr) {
       return map;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index a133a10..606228c 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -38,6 +38,8 @@
                                      16 * KB,
                                      PROT_READ,
                                      /*low_4gb=*/ true,
+                                     /*reuse=*/ false,
+                                     /*reservation=*/ nullptr,
                                      &error_msg);
     ASSERT_TRUE(reserved_.IsValid()) << error_msg;
     CommonRuntimeTest::SetUp();
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 497a0c2..609ccee 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -24,15 +24,13 @@
 namespace gc {
 namespace space {
 
-BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
-                                           uint8_t* requested_begin) {
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        requested_begin,
                                         capacity,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 59d4d27..383bf7a 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -46,7 +46,7 @@
   // Create a bump pointer space with the requested sizes. The requested base address is not
   // guaranteed to be granted, if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
-  static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+  static BumpPointerSpace* Create(const std::string& name, size_t capacity);
   static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
 
   // Allocate num_bytes, returns null if the space is full.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 73582a0..7955ff9 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -108,8 +108,10 @@
   }
 }
 
-DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
+DlMallocSpace* DlMallocSpace::Create(const std::string& name,
+                                     size_t initial_size,
+                                     size_t growth_limit,
+                                     size_t capacity,
                                      bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -117,8 +119,7 @@
     LOG(INFO) << "DlMallocSpace::Create entering " << name
         << " initial_size=" << PrettySize(initial_size)
         << " growth_limit=" << PrettySize(growth_limit)
-        << " capacity=" << PrettySize(capacity)
-        << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+        << " capacity=" << PrettySize(capacity);
   }
 
   // Memory we promise to dlmalloc before it asks for morecore.
@@ -126,8 +127,7 @@
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = kPageSize;
-  MemMap mem_map =
-      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index c63ff71..e91602f 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -46,8 +46,11 @@
   // base address is not guaranteed to be granted, if it is required,
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
-  static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, uint8_t* requested_begin, bool can_move_objects);
+  static DlMallocSpace* Create(const std::string& name,
+                               size_t initial_size,
+                               size_t growth_limit,
+                               size_t capacity,
+                               bool can_move_objects);
 
   // Virtual to allow MemoryToolMallocSpace to intercept.
   mirror::Object* AllocWithGrowth(Thread* self,
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index f9b41da..92b56bd 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -22,14 +22,16 @@
 namespace gc {
 namespace space {
 
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return DlMallocSpace::Create(
+      name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index 5758e0c..550d1bb 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -22,14 +22,16 @@
 namespace gc {
 namespace space {
 
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return DlMallocSpace::Create(
+      name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9e67957..875efe2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -622,9 +622,9 @@
                               /*inout*/MemMap* image_reservation,
                               /*out*/std::string* error_msg) {
     TimingLogger::ScopedTiming timing("MapImageFile", logger);
-    uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
     const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
     if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+      uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
       return MemMap::MapFileAtAddress(address,
                                       image_header.GetImageSize(),
                                       PROT_READ | PROT_WRITE,
@@ -649,11 +649,9 @@
 
     // Reserve output and decompress into it.
     MemMap map = MemMap::MapAnonymous(image_location,
-                                      address,
                                       image_header.GetImageSize(),
                                       PROT_READ | PROT_WRITE,
                                       /*low_4gb=*/ true,
-                                      /*reuse=*/ false,
                                       image_reservation,
                                       error_msg);
     if (map.IsValid()) {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a7f82f6..1658dba 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,9 @@
                                            size_t* bytes_tl_bulk_allocated) {
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
-                                        /* addr= */ nullptr,
                                         num_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -346,14 +345,13 @@
   return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
 }
 
-FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        requested_begin,
                                         size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
   return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +370,9 @@
   std::string error_msg;
   allocation_info_map_ =
       MemMap::MapAnonymous("large object free list space allocation info map",
-                           /* addr= */ nullptr,
                            alloc_info_size,
                            PROT_READ | PROT_WRITE,
-                           /* low_4gb= */ false,
+                           /*low_4gb=*/ false,
                            &error_msg);
   CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
   allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 47167fa..a4d6a24 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -184,7 +184,7 @@
   static constexpr size_t kAlignment = kPageSize;
 
   virtual ~FreeListSpace();
-  static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
+  static FreeListSpace* Create(const std::string& name, size_t capacity);
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index d55ccd6..62bc26e 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -42,7 +42,7 @@
     if (i == 0) {
       los = space::LargeObjectMapSpace::Create("large object space");
     } else {
-      los = space::FreeListSpace::Create("large object space", nullptr, capacity);
+      los = space::FreeListSpace::Create("large object space", capacity);
     }
 
     // Make sure the bitmap is not empty and actually covers at least how much we expect.
@@ -157,7 +157,7 @@
     if (los_type == 0) {
       los = space::LargeObjectMapSpace::Create("large object space");
     } else {
-      los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
+      los = space::FreeListSpace::Create("large object space", 128 * MB);
     }
 
     Thread* self = Thread::Current();
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 189aeb5..b5e6b62 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -82,8 +82,7 @@
                                  size_t starting_size,
                                  size_t* initial_size,
                                  size_t* growth_limit,
-                                 size_t* capacity,
-                                 uint8_t* requested_begin) {
+                                 size_t* capacity) {
   // Sanity check arguments
   if (starting_size > *initial_size) {
     *initial_size = starting_size;
@@ -107,10 +106,9 @@
 
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        requested_begin,
                                         *capacity,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb= */ true,
+                                        /*low_4gb=*/ true,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6bf2d71..5dd8136 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -157,8 +157,7 @@
                              size_t starting_size,
                              size_t* initial_size,
                              size_t* growth_limit,
-                             size_t* capacity,
-                             uint8_t* requested_begin);
+                             size_t* capacity);
 
   // When true the low memory mode argument specifies that the heap wishes the created allocator to
   // be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 31bbfb8..2774e26 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,9 @@
                                    requested_begin,
                                    capacity + kRegionSize,
                                    PROT_READ | PROT_WRITE,
-                                   /* low_4gb= */ true,
+                                   /*low_4gb=*/ true,
+                                   /*reuse=*/ false,
+                                   /*reservation=*/ nullptr,
                                    &error_msg);
     if (mem_map.IsValid() || requested_begin == nullptr) {
       break;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 10ff1c1..36fd864 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -133,17 +133,19 @@
   delete rosalloc_;
 }
 
-RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
-                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
-                                     bool low_memory_mode, bool can_move_objects) {
+RosAllocSpace* RosAllocSpace::Create(const std::string& name,
+                                     size_t initial_size,
+                                     size_t growth_limit,
+                                     size_t capacity,
+                                     bool low_memory_mode,
+                                     bool can_move_objects) {
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     start_time = NanoTime();
     VLOG(startup) << "RosAllocSpace::Create entering " << name
                   << " initial_size=" << PrettySize(initial_size)
                   << " growth_limit=" << PrettySize(growth_limit)
-                  << " capacity=" << PrettySize(capacity)
-                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+                  << " capacity=" << PrettySize(capacity);
   }
 
   // Memory we promise to rosalloc before it asks for morecore.
@@ -151,8 +153,7 @@
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = Heap::kDefaultStartingSize;
-  MemMap mem_map =
-      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 5162a06..9e95c16 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -38,8 +38,11 @@
   // base address is not guaranteed to be granted, if it is required,
   // the caller should call Begin on the returned space to confirm the
   // request was granted.
-  static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                               size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
+  static RosAllocSpace* Create(const std::string& name,
+                               size_t initial_size,
+                               size_t growth_limit,
+                               size_t capacity,
+                               bool low_memory_mode,
                                bool can_move_objects);
   static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
                                          const std::string& name,
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index b50859b..f0b3231 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -22,15 +22,20 @@
 namespace gc {
 namespace space {
 
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
-                               Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return RosAllocSpace::Create(name,
+                               initial_size,
+                               growth_limit,
+                               capacity,
+                               Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+                               /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index 5e7ced6..d7e7e90 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -22,15 +22,20 @@
 namespace gc {
 namespace space {
 
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
-                                 size_t capacity, uint8_t* requested_begin) {
-  return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
-                               Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+                                 size_t initial_size,
+                                 size_t growth_limit,
+                                 size_t capacity) {
+  return RosAllocSpace::Create(name,
+                               initial_size,
+                               growth_limit,
+                               capacity,
+                               Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+                               /*can_move_objects=*/ false);
 }
 
 TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace)
 
-
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/space_create_test.cc b/runtime/gc/space/space_create_test.cc
index ca5f306..d3db679 100644
--- a/runtime/gc/space/space_create_test.cc
+++ b/runtime/gc/space/space_create_test.cc
@@ -34,25 +34,22 @@
   MallocSpace* CreateSpace(const std::string& name,
                            size_t initial_size,
                            size_t growth_limit,
-                           size_t capacity,
-                           uint8_t* requested_begin) {
+                           size_t capacity) {
     const MallocSpaceType type = GetParam();
     if (type == kMallocSpaceDlMalloc) {
       return DlMallocSpace::Create(name,
                                    initial_size,
                                    growth_limit,
                                    capacity,
-                                   requested_begin,
-                                   false);
+                                   /*can_move_objects=*/ false);
     }
     DCHECK_EQ(static_cast<uint32_t>(type), static_cast<uint32_t>(kMallocSpaceRosAlloc));
     return RosAllocSpace::Create(name,
                                  initial_size,
                                  growth_limit,
                                  capacity,
-                                 requested_begin,
                                  Runtime::Current()->GetHeap()->IsLowMemoryMode(),
-                                 false);
+                                 /*can_move_objects=*/ false);
   }
 };
 
@@ -62,25 +59,25 @@
 
   {
     // Init < max == growth
-    std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+    std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB));
     EXPECT_TRUE(space != nullptr);
     // Init == max == growth
-    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB));
     EXPECT_TRUE(space != nullptr);
     // Init > max == growth
-    space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+    space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB));
     EXPECT_TRUE(space == nullptr);
     // Growth == init < max
-    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+    space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB));
     EXPECT_TRUE(space != nullptr);
     // Growth < init < max
-    space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+    space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB));
     EXPECT_TRUE(space == nullptr);
     // Init < growth < max
-    space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+    space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB));
     EXPECT_TRUE(space != nullptr);
     // Init < max < growth
-    space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+    space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB));
     EXPECT_TRUE(space == nullptr);
   }
 }
@@ -91,7 +88,7 @@
 // the GC works with the ZygoteSpace.
 TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
   size_t dummy;
-  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
   ASSERT_TRUE(space != nullptr);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -225,7 +222,7 @@
 
 TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
   size_t dummy = 0;
-  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
   ASSERT_TRUE(space != nullptr);
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
@@ -301,7 +298,7 @@
 }
 
 TEST_P(SpaceCreateTest, AllocAndFreeListTestBody) {
-  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
   ASSERT_TRUE(space != nullptr);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 5aac217..1b111e3 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -123,8 +123,10 @@
     return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
   }
 
-  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
-                                        size_t capacity, uint8_t* requested_begin);
+  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name,
+                                        size_t initial_size,
+                                        size_t growth_limit,
+                                        size_t capacity);
 
   void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                            int round, size_t growth_limit);
@@ -323,7 +325,7 @@
   size_t initial_size = 4 * MB;
   size_t growth_limit = 8 * MB;
   size_t capacity = 16 * MB;
-  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity));
   ASSERT_TRUE(space != nullptr);
 
   // Basic sanity