Remove unnecessary indirection from MemMap.

Avoid passing around plain MemMap pointers by making MemMap
moveable and returning MemMap objects by value. Previously a
valid zero-size MemMap was allowed; this is now forbidden.
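
For illustration only (not part of this change), a typical call
site now looks like the sketch below, using the MapAnonymous()
signature from the hunks in this change. AllocateScratch is a
hypothetical helper; CHECK() is ART's usual assertion macro:

  #include <string>
  #include <sys/mman.h>      // PROT_READ, PROT_WRITE
  #include "base/mem_map.h"  // art::MemMap

  // Maps are returned by value; validity is checked with
  // IsValid() instead of comparing a pointer against nullptr.
  MemMap AllocateScratch(size_t size) {
    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("scratch",
                                      /* addr */ nullptr,
                                      size,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
                                      /* reuse */ false,
                                      &error_msg);
    CHECK(map.IsValid()) << error_msg;  // Zero-size maps are invalid.
    return map;  // Moved out by value; no std::unique_ptr<MemMap>.
  }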

MemMap::RemapAtEnd() no longer calls munmap() explicitly;
mmap() with MAP_FIXED atomically replaces any existing
mappings in the overlapped region.
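
A sketch of the mmap() property this relies on (illustration
only, not the actual RemapAtEnd() code; RemapTail is a
hypothetical helper and old_end - tail_size is assumed to be
page-aligned):

  #include <stdint.h>
  #include <sys/mman.h>

  // MAP_FIXED atomically replaces any existing pages in
  // [addr, addr + length), so no explicit munmap() of the old
  // tail mapping is needed first.
  void* RemapTail(uint8_t* old_end, size_t tail_size) {
    return mmap(old_end - tail_size,
                tail_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                /* fd */ -1,
                /* offset */ 0);
  }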

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: m test-art-target-gtest
Test: testrunner.py --target --optimizing
Change-Id: I12bd453c26a396edc20eb141bfd4dad20923f170
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 702f0e4..0f472e2 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -38,22 +38,34 @@
   void Release() OVERRIDE;
 
  private:
-  std::unique_ptr<MemMap> map_;
+  static MemMap Allocate(size_t size, bool low_4gb, const char* name);
+
+  MemMap map_;
 };
 
-MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name)
+    : map_(Allocate(size, low_4gb, name)) {
+  memory_ = map_.Begin();
+  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
+                "Arena should not need stronger alignment than kPageSize.");
+  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
+  size_ = map_.Size();
+}
+
+MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
   // Round up to a full page as that's the smallest unit of allocation for mmap()
   // and we want to be able to use all memory that we actually allocate.
   size = RoundUp(size, kPageSize);
   std::string error_msg;
-  map_.reset(MemMap::MapAnonymous(
-      name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
-  CHECK(map_.get() != nullptr) << error_msg;
-  memory_ = map_->Begin();
-  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
-                "Arena should not need stronger alignment than kPageSize.");
-  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
-  size_ = map_->Size();
+  MemMap map = MemMap::MapAnonymous(name,
+                                    /* addr */ nullptr,
+                                    size,
+                                    PROT_READ | PROT_WRITE,
+                                    low_4gb,
+                                    /* reuse */ false,
+                                    &error_msg);
+  CHECK(map.IsValid()) << error_msg;
+  return map;
 }
 
 MemMapArena::~MemMapArena() {
@@ -62,7 +74,7 @@
 
 void MemMapArena::Release() {
   if (bytes_allocated_ > 0) {
-    map_->MadviseDontNeedAndZero();
+    map_.MadviseDontNeedAndZero();
     bytes_allocated_ = 0;
   }
 }
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index f8388f3..b0eef00 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -249,14 +249,17 @@
 void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
   if (start < end) {
     std::string error_msg;
-    image_reservation_.push_back(std::unique_ptr<MemMap>(
-        MemMap::MapAnonymous("image reservation",
-            reinterpret_cast<uint8_t*>(start), end - start,
-            PROT_NONE, false, false, &error_msg)));
-    ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
+    image_reservation_.push_back(MemMap::MapAnonymous("image reservation",
+                                                      reinterpret_cast<uint8_t*>(start),
+                                                      end - start,
+                                                      PROT_NONE,
+                                                      /* low_4gb */ false,
+                                                      /* reuse */ false,
+                                                      &error_msg));
+    ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
     LOG(INFO) << "Reserved space for image " <<
-      reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
-      reinterpret_cast<void*>(image_reservation_.back()->End());
+      reinterpret_cast<void*>(image_reservation_.back().Begin()) << "-" <<
+      reinterpret_cast<void*>(image_reservation_.back().End());
   }
 }
 
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 6e8dc09..3203ee5 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -91,7 +91,7 @@
   // before the image is loaded.
   void UnreserveImageSpace();
 
-  std::vector<std::unique_ptr<MemMap>> image_reservation_;
+  std::vector<MemMap> image_reservation_;
 };
 
 }  // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 026b5da..4ae7362 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -283,7 +283,6 @@
 
 template <typename ElfTypes>
 ElfFileImpl<ElfTypes>::~ElfFileImpl() {
-  STLDeleteElements(&segments_);
   delete symtab_symbol_table_;
   delete dynsym_symbol_table_;
 }
@@ -418,17 +417,17 @@
 }
 
 template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap* map, std::string* error_msg) {
-  if (map == nullptr) {
+bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap&& map, std::string* error_msg) {
+  if (!map.IsValid()) {
     // MemMap::Open should have already set an error.
     DCHECK(!error_msg->empty());
     return false;
   }
-  map_.reset(map);
-  CHECK(map_.get() != nullptr) << file->GetPath();
-  CHECK(map_->Begin() != nullptr) << file->GetPath();
+  map_ = std::move(map);
+  CHECK(map_.IsValid()) << file->GetPath();
+  CHECK(map_.Begin() != nullptr) << file->GetPath();
 
-  header_ = reinterpret_cast<Elf_Ehdr*>(map_->Begin());
+  header_ = reinterpret_cast<Elf_Ehdr*>(map_.Begin());
   if ((ELFMAG0 != header_->e_ident[EI_MAG0])
       || (ELFMAG1 != header_->e_ident[EI_MAG1])
       || (ELFMAG2 != header_->e_ident[EI_MAG2])
@@ -1164,14 +1163,14 @@
         DCHECK(!error_msg->empty());
         return false;
       }
-      std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
-                                                           reserve_base_override,
-                                                           loaded_size,
-                                                           PROT_NONE,
-                                                           low_4gb,
-                                                           false,
-                                                           error_msg));
-      if (reserve.get() == nullptr) {
+      MemMap reserve = MemMap::MapAnonymous(reservation_name.c_str(),
+                                            reserve_base_override,
+                                            loaded_size,
+                                            PROT_NONE,
+                                            low_4gb,
+                                            /* reuse */ false,
+                                            error_msg);
+      if (!reserve.IsValid()) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
                                   reservation_name.c_str(), error_msg->c_str());
         return false;
@@ -1179,14 +1178,14 @@
       reserved = true;
 
       // Base address is the difference of actual mapped location and the p_vaddr
-      base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve->Begin())
+      base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve.Begin())
                        - reinterpret_cast<uintptr_t>(reserve_base));
       // By adding the p_vaddr of a section/symbol to base_address_ we will always get the
       // dynamic memory address of where that object is actually mapped
       //
       // TODO: base_address_ needs to be calculated in ::Open, otherwise
       // FindDynamicSymbolAddress returns the wrong values until Load is called.
-      segments_.push_back(reserve.release());
+      segments_.push_back(std::move(reserve));
     }
     // empty segment, nothing to map
     if (program_header->p_memsz == 0) {
@@ -1234,7 +1233,7 @@
       return false;
     }
     if (program_header->p_filesz != 0u) {
-      std::unique_ptr<MemMap> segment(
+      MemMap segment =
           MemMap::MapFileAtAddress(p_vaddr,
                                    program_header->p_filesz,
                                    prot,
@@ -1244,40 +1243,42 @@
                                    /*low4_gb*/false,
                                    /*reuse*/true,  // implies MAP_FIXED
                                    file->GetPath().c_str(),
-                                   error_msg));
-      if (segment.get() == nullptr) {
+                                   error_msg);
+      if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
                                   i, file->GetPath().c_str(), error_msg->c_str());
         return false;
       }
-      if (segment->Begin() != p_vaddr) {
+      if (segment.Begin() != p_vaddr) {
         *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
                                   "instead mapped to %p",
-                                  i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+                                  i, file->GetPath().c_str(), p_vaddr, segment.Begin());
         return false;
       }
-      segments_.push_back(segment.release());
+      segments_.push_back(std::move(segment));
     }
     if (program_header->p_filesz < program_header->p_memsz) {
       std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
                                       static_cast<uint64_t>(i), file->GetPath().c_str());
-      std::unique_ptr<MemMap> segment(
-          MemMap::MapAnonymous(name.c_str(),
-                               p_vaddr + program_header->p_filesz,
-                               program_header->p_memsz - program_header->p_filesz,
-                               prot, false, true /* reuse */, error_msg));
-      if (segment == nullptr) {
+      MemMap segment = MemMap::MapAnonymous(name.c_str(),
+                                            p_vaddr + program_header->p_filesz,
+                                            program_header->p_memsz - program_header->p_filesz,
+                                            prot,
+                                            /* low_4gb */ false,
+                                            /* reuse */ true,
+                                            error_msg);
+      if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
                                   i, file->GetPath().c_str(), error_msg->c_str());
         return false;
       }
-      if (segment->Begin() != p_vaddr) {
+      if (segment.Begin() != p_vaddr) {
         *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
                                   "at expected address %p, instead mapped to %p",
-                                  i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+                                  i, file->GetPath().c_str(), p_vaddr, segment.Begin());
         return false;
       }
-      segments_.push_back(segment.release());
+      segments_.push_back(std::move(segment));
     }
   }
 
@@ -1343,9 +1344,8 @@
 
 template <typename ElfTypes>
 bool ElfFileImpl<ElfTypes>::ValidPointer(const uint8_t* start) const {
-  for (size_t i = 0; i < segments_.size(); ++i) {
-    const MemMap* segment = segments_[i];
-    if (segment->Begin() <= start && start < segment->End()) {
+  for (const MemMap& segment : segments_) {
+    if (segment.Begin() <= start && start < segment.End()) {
       return true;
     }
   }
@@ -1712,18 +1712,18 @@
                               file->GetPath().c_str());
     return nullptr;
   }
-  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
-                                              PROT_READ,
-                                              MAP_PRIVATE,
-                                              file->Fd(),
-                                              0,
-                                              low_4gb,
-                                              file->GetPath().c_str(),
-                                              error_msg));
-  if (map == nullptr || map->Size() != EI_NIDENT) {
+  MemMap map = MemMap::MapFile(EI_NIDENT,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               file->Fd(),
+                               0,
+                               low_4gb,
+                               file->GetPath().c_str(),
+                               error_msg);
+  if (!map.IsValid() || map.Size() != EI_NIDENT) {
     return nullptr;
   }
-  uint8_t* header = map->Begin();
+  uint8_t* header = map.Begin();
   if (header[EI_CLASS] == ELFCLASS64) {
     ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
                                                        writable,
@@ -1763,18 +1763,18 @@
                               file->GetPath().c_str());
     return nullptr;
   }
-  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
-                                              PROT_READ,
-                                              MAP_PRIVATE,
-                                              file->Fd(),
-                                              0,
-                                              low_4gb,
-                                              file->GetPath().c_str(),
-                                              error_msg));
-  if (map == nullptr || map->Size() != EI_NIDENT) {
+  MemMap map = MemMap::MapFile(EI_NIDENT,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               file->Fd(),
+                               /* start */ 0,
+                               low_4gb,
+                               file->GetPath().c_str(),
+                               error_msg);
+  if (!map.IsValid() || map.Size() != EI_NIDENT) {
     return nullptr;
   }
-  uint8_t* header = map->Begin();
+  uint8_t* header = map.Begin();
   if (header[EI_CLASS] == ELFCLASS64) {
     ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
                                                        mmap_prot,
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index a5808e2..58c38a4 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -62,15 +62,15 @@
   }
 
   uint8_t* Begin() const {
-    return map_->Begin();
+    return map_.Begin();
   }
 
   uint8_t* End() const {
-    return map_->End();
+    return map_.End();
   }
 
   size_t Size() const {
-    return map_->Size();
+    return map_.Size();
   }
 
   Elf_Ehdr& GetHeader() const;
@@ -135,7 +135,7 @@
 
   bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg);
 
-  bool SetMap(File* file, MemMap* map, std::string* error_msg);
+  bool SetMap(File* file, MemMap&& map, std::string* error_msg);
 
   uint8_t* GetProgramHeadersStart() const;
   uint8_t* GetSectionHeadersStart() const;
@@ -193,9 +193,9 @@
 
   // ELF header mapping. If program_header_only_ is false, will
   // actually point to the entire elf file.
-  std::unique_ptr<MemMap> map_;
+  MemMap map_;
   Elf_Ehdr* header_;
-  std::vector<MemMap*> segments_;
+  std::vector<MemMap> segments_;
 
   // Pointer to start of first PT_LOAD program segment after Load()
   // when program_header_only_ is true.
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index e30fef4..2a71dec 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -72,12 +72,12 @@
   ~AtomicStack() {}
 
   void Reset() {
-    DCHECK(mem_map_.get() != nullptr);
+    DCHECK(mem_map_.IsValid());
     DCHECK(begin_ != nullptr);
     front_index_.store(0, std::memory_order_relaxed);
     back_index_.store(0, std::memory_order_relaxed);
     debug_is_sorted_ = true;
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
 
   // Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
@@ -252,10 +252,15 @@
   // Size in number of elements.
   void Init() {
     std::string error_msg;
-    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
-                                        PROT_READ | PROT_WRITE, false, false, &error_msg));
-    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
-    uint8_t* addr = mem_map_->Begin();
+    mem_map_ = MemMap::MapAnonymous(name_.c_str(),
+                                    /* addr */ nullptr,
+                                    capacity_ * sizeof(begin_[0]),
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+    CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
+    uint8_t* addr = mem_map_.Begin();
     CHECK(addr != nullptr);
     debug_is_sorted_ = true;
     begin_ = reinterpret_cast<StackReference<T>*>(addr);
@@ -265,7 +270,7 @@
   // Name of the mark stack.
   std::string name_;
   // Memory mapping of the atomic stack.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
   // Back index (index after the last element pushed).
   AtomicInteger back_index_;
   // Front index, used for implementing PopFront.
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index d45a0cc..e157e5e 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -27,47 +27,51 @@
 namespace gc {
 namespace accounting {
 
-Bitmap* Bitmap::CreateFromMemMap(MemMap* mem_map, size_t num_bits) {
-  CHECK(mem_map != nullptr);
-  return new Bitmap(mem_map, num_bits);
+Bitmap* Bitmap::CreateFromMemMap(MemMap&& mem_map, size_t num_bits) {
+  CHECK(mem_map.IsValid());
+  return new Bitmap(std::move(mem_map), num_bits);
 }
 
-Bitmap::Bitmap(MemMap* mem_map, size_t bitmap_size)
-    : mem_map_(mem_map), bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map->Begin())),
+Bitmap::Bitmap(MemMap&& mem_map, size_t bitmap_size)
+    : mem_map_(std::move(mem_map)),
+      bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map_.Begin())),
       bitmap_size_(bitmap_size) {
   CHECK(bitmap_begin_ != nullptr);
   CHECK_NE(bitmap_size, 0U);
 }
 
 Bitmap::~Bitmap() {
-  // Destroys MemMap via std::unique_ptr<>.
+  // Destroys member MemMap.
 }
 
-MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
+MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
   const size_t bitmap_size = RoundUp(
       RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
-                                                       PROT_READ | PROT_WRITE, false, false,
-                                                       &error_msg));
-  if (UNLIKELY(mem_map.get() == nullptr)) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        /* addr */ nullptr,
+                                        bitmap_size,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
-    return nullptr;
   }
-  return mem_map.release();
+  return mem_map;
 }
 
 Bitmap* Bitmap::Create(const std::string& name, size_t num_bits) {
-  auto* const mem_map = AllocateMemMap(name, num_bits);
-  if (mem_map == nullptr) {
+  MemMap mem_map = AllocateMemMap(name, num_bits);
+  if (UNLIKELY(!mem_map.IsValid())) {
     return nullptr;
   }
-  return CreateFromMemMap(mem_map, num_bits);
+  return CreateFromMemMap(std::move(mem_map), num_bits);
 }
 
 void Bitmap::Clear() {
   if (bitmap_begin_ != nullptr) {
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
 }
 
@@ -83,14 +87,15 @@
   CHECK_ALIGNED(cover_begin, kAlignment);
   CHECK_ALIGNED(cover_end, kAlignment);
   const size_t num_bits = (cover_end - cover_begin) / kAlignment;
-  auto* const mem_map = Bitmap::AllocateMemMap(name, num_bits);
-  return CreateFromMemMap(mem_map, cover_begin, num_bits);
+  MemMap mem_map = Bitmap::AllocateMemMap(name, num_bits);
+  CHECK(mem_map.IsValid());
+  return CreateFromMemMap(std::move(mem_map), cover_begin, num_bits);
 }
 
 template<size_t kAlignment>
 MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::CreateFromMemMap(
-    MemMap* mem_map, uintptr_t begin, size_t num_bits) {
-  return new MemoryRangeBitmap(mem_map, begin, num_bits);
+    MemMap&& mem_map, uintptr_t begin, size_t num_bits) {
+  return new MemoryRangeBitmap(std::move(mem_map), begin, num_bits);
 }
 
 template class MemoryRangeBitmap<CardTable::kCardSize>;
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index 2d83a8a..ffef566 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -24,12 +24,11 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 
 namespace art {
 
-class MemMap;
-
 namespace gc {
 namespace accounting {
 
@@ -42,7 +41,7 @@
   // Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
   // mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
   // Objects are kAlignement-aligned.
-  static Bitmap* CreateFromMemMap(MemMap* mem_map, size_t num_bits);
+  static Bitmap* CreateFromMemMap(MemMap&& mem_map, size_t num_bits);
 
   // offset is the difference from base to a index.
   static ALWAYS_INLINE constexpr size_t BitIndexToWordIndex(uintptr_t offset) {
@@ -101,17 +100,17 @@
  protected:
   static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
 
-  Bitmap(MemMap* mem_map, size_t bitmap_size);
+  Bitmap(MemMap&& mem_map, size_t bitmap_size);
   ~Bitmap();
 
   // Allocate the mem-map for a bitmap based on how many bits are required.
-  static MemMap* AllocateMemMap(const std::string& name, size_t num_bits);
+  static MemMap AllocateMemMap(const std::string& name, size_t num_bits);
 
   template<bool kSetBit>
   ALWAYS_INLINE bool ModifyBit(uintptr_t bit_index);
 
   // Backing storage for bitmap.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 
   // This bitmap itself, word sized for efficiency in scanning.
   uintptr_t* const bitmap_begin_;
@@ -127,10 +126,10 @@
 template<size_t kAlignment>
 class MemoryRangeBitmap : public Bitmap {
  public:
-  static MemoryRangeBitmap* Create(const std::string& name, uintptr_t cover_begin,
-                                   uintptr_t cover_end);
-  static MemoryRangeBitmap* CreateFromMemMap(MemMap* mem_map, uintptr_t cover_begin,
-                                             size_t num_bits);
+  static MemoryRangeBitmap* Create(
+      const std::string& name, uintptr_t cover_begin, uintptr_t cover_end);
+  static MemoryRangeBitmap* CreateFromMemMap(
+      MemMap&& mem_map, uintptr_t cover_begin, size_t num_bits);
 
   // Beginning of the memory range that the bitmap covers.
   ALWAYS_INLINE uintptr_t CoverBegin() const {
@@ -177,9 +176,10 @@
   }
 
  private:
-  MemoryRangeBitmap(MemMap* mem_map, uintptr_t begin, size_t num_bits)
-     : Bitmap(mem_map, num_bits), cover_begin_(begin), cover_end_(begin + kAlignment * num_bits) {
-  }
+  MemoryRangeBitmap(MemMap&& mem_map, uintptr_t begin, size_t num_bits)
+      : Bitmap(std::move(mem_map), num_bits),
+        cover_begin_(begin),
+        cover_end_(begin + kAlignment * num_bits) {}
 
   uintptr_t const cover_begin_;
   uintptr_t const cover_end_;
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 357a498..1e7d76c 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -213,8 +213,8 @@
 inline void* CardTable::AddrFromCard(const uint8_t *card_addr) const {
   DCHECK(IsValidCard(card_addr))
     << " card_addr: " << reinterpret_cast<const void*>(card_addr)
-    << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
-    << " end: " << reinterpret_cast<void*>(mem_map_->End());
+    << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+    << " end: " << reinterpret_cast<void*>(mem_map_.End());
   uintptr_t offset = card_addr - biased_begin_;
   return reinterpret_cast<void*>(offset << kCardShift);
 }
@@ -228,16 +228,16 @@
 }
 
 inline bool CardTable::IsValidCard(const uint8_t* card_addr) const {
-  uint8_t* begin = mem_map_->Begin() + offset_;
-  uint8_t* end = mem_map_->End();
+  uint8_t* begin = mem_map_.Begin() + offset_;
+  uint8_t* end = mem_map_.End();
   return card_addr >= begin && card_addr < end;
 }
 
 inline void CardTable::CheckCardValid(uint8_t* card) const {
   DCHECK(IsValidCard(card))
       << " card_addr: " << reinterpret_cast<const void*>(card)
-      << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
-      << " end: " << reinterpret_cast<void*>(mem_map_->End());
+      << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+      << " end: " << reinterpret_cast<void*>(mem_map_.End());
 }
 
 }  // namespace accounting
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 22104a3..89645e0 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -64,15 +64,19 @@
   size_t capacity = heap_capacity / kCardSize;
   /* Allocate an extra 256 bytes to allow fixed low-byte of base */
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(
-      MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
-                           false, false, &error_msg));
-  CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
+  MemMap mem_map = MemMap::MapAnonymous("card table",
+                                        /* addr */ nullptr,
+                                        capacity + 256,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        &error_msg);
+  CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
   // don't clear the card table to avoid unnecessary pages being allocated
   static_assert(kCardClean == 0, "kCardClean must be 0");
 
-  uint8_t* cardtable_begin = mem_map->Begin();
+  uint8_t* cardtable_begin = mem_map.Begin();
   CHECK(cardtable_begin != nullptr);
 
   // We allocated up to a bytes worth of extra space to allow `biased_begin`'s byte value to equal
@@ -87,11 +91,11 @@
     biased_begin += offset;
   }
   CHECK_EQ(reinterpret_cast<uintptr_t>(biased_begin) & 0xff, kCardDirty);
-  return new CardTable(mem_map.release(), biased_begin, offset);
+  return new CardTable(std::move(mem_map), biased_begin, offset);
 }
 
-CardTable::CardTable(MemMap* mem_map, uint8_t* biased_begin, size_t offset)
-    : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
+CardTable::CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset)
+    : mem_map_(std::move(mem_map)), biased_begin_(biased_begin), offset_(offset) {
 }
 
 CardTable::~CardTable() {
@@ -100,7 +104,7 @@
 
 void CardTable::ClearCardTable() {
   static_assert(kCardClean == 0, "kCardClean must be 0");
-  mem_map_->MadviseDontNeedAndZero();
+  mem_map_.MadviseDontNeedAndZero();
 }
 
 void CardTable::ClearCardRange(uint8_t* start, uint8_t* end) {
@@ -118,8 +122,8 @@
 
 void CardTable::CheckAddrIsInCardTable(const uint8_t* addr) const {
   uint8_t* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
-  uint8_t* begin = mem_map_->Begin() + offset_;
-  uint8_t* end = mem_map_->End();
+  uint8_t* begin = mem_map_.Begin() + offset_;
+  uint8_t* end = mem_map_.End();
   CHECK(AddrIsInCardTable(addr))
       << "Card table " << this
       << " begin: " << reinterpret_cast<void*>(begin)
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index b8520b7..47e2430 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -20,12 +20,11 @@
 #include <memory>
 
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 
 namespace art {
 
-class MemMap;
-
 namespace mirror {
 class Object;
 }  // namespace mirror
@@ -133,7 +132,7 @@
   bool AddrIsInCardTable(const void* addr) const;
 
  private:
-  CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
+  CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset);
 
   // Returns true iff the card table address is within the bounds of the card table.
   bool IsValidCard(const uint8_t* card_addr) const ALWAYS_INLINE;
@@ -144,7 +143,7 @@
   void VerifyCardTable();
 
   // Mmapped pages for the card table
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
   // Value used to compute card table addresses from object addresses, see GetBiasedBegin
   uint8_t* const biased_begin_;
   // Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 4b5a8c6..d8b1bb2 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -39,11 +39,15 @@
     DCHECK_EQ(kHeapCapacity / kRegionSize,
               static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
     std::string error_msg;
-    MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
-                                           PROT_READ | PROT_WRITE, false, false, &error_msg);
-    CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
+    mem_map_ = MemMap::MapAnonymous("read barrier table",
+                                    /* addr */ nullptr,
+                                    capacity,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+    CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
         << "couldn't allocate read barrier table: " << error_msg;
-    mem_map_.reset(mem_map);
   }
   void ClearForSpace(space::ContinuousSpace* space) {
     uint8_t* entry_start = EntryFromAddr(space->Begin());
@@ -66,14 +70,14 @@
     return entry_value == kSetEntryValue;
   }
   void ClearAll() {
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
   void SetAll() {
-    memset(mem_map_->Begin(), kSetEntryValue, mem_map_->Size());
+    memset(mem_map_.Begin(), kSetEntryValue, mem_map_.Size());
   }
   bool IsAllCleared() const {
-    for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_->Begin());
-         p < reinterpret_cast<uint32_t*>(mem_map_->End()); ++p) {
+    for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_.Begin());
+         p < reinterpret_cast<uint32_t*>(mem_map_.End()); ++p) {
       if (*p != 0) {
         return false;
       }
@@ -90,7 +94,7 @@
 
   uint8_t* EntryFromAddr(const void* heap_addr) const {
     DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
-    uint8_t* entry_addr = mem_map_->Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
+    uint8_t* entry_addr = mem_map_.Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
     DCHECK(IsValidEntry(entry_addr)) << "heap_addr: " << heap_addr
                                      << " entry_addr: " << reinterpret_cast<void*>(entry_addr);
     return entry_addr;
@@ -106,12 +110,12 @@
   }
 
   bool IsValidEntry(const uint8_t* entry_addr) const {
-    uint8_t* begin = mem_map_->Begin();
-    uint8_t* end = mem_map_->End();
+    uint8_t* begin = mem_map_.Begin();
+    uint8_t* end = mem_map_.End();
     return entry_addr >= begin && entry_addr < end;
   }
 
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 };
 
 }  // namespace accounting
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index ced62cd..f87a67e 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -49,21 +49,22 @@
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
-    const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
-  CHECK(mem_map != nullptr);
-  uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map->Begin());
+    const std::string& name, MemMap&& mem_map, uint8_t* heap_begin, size_t heap_capacity) {
+  CHECK(mem_map.IsValid());
+  uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map.Begin());
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
-  return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin, heap_capacity);
+  return new SpaceBitmap(
+      name, std::move(mem_map), bitmap_begin, bitmap_size, heap_begin, heap_capacity);
 }
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name,
-                                     MemMap* mem_map,
+                                     MemMap&& mem_map,
                                      uintptr_t* bitmap_begin,
                                      size_t bitmap_size,
                                      const void* heap_begin,
                                      size_t heap_capacity)
-    : mem_map_(mem_map),
+    : mem_map_(std::move(mem_map)),
       bitmap_begin_(reinterpret_cast<Atomic<uintptr_t>*>(bitmap_begin)),
       bitmap_size_(bitmap_size),
       heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -83,14 +84,18 @@
   // (we represent one word as an `intptr_t`).
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
-                                                       PROT_READ | PROT_WRITE, false, false,
-                                                       &error_msg));
-  if (UNLIKELY(mem_map.get() == nullptr)) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        /* addr */ nullptr,
+                                        bitmap_size,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
     return nullptr;
   }
-  return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
+  return CreateFromMemMap(name, std::move(mem_map), heap_begin, heap_capacity);
 }
 
 template<size_t kAlignment>
@@ -114,7 +119,7 @@
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::Clear() {
   if (bitmap_begin_ != nullptr) {
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
 }
 
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 1237f6e..6a3faef 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -24,6 +24,7 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 
 namespace art {
@@ -32,7 +33,6 @@
 class Class;
 class Object;
 }  // namespace mirror
-class MemMap;
 
 namespace gc {
 namespace accounting {
@@ -50,8 +50,10 @@
   // Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
   // mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
   // Objects are kAlignement-aligned.
-  static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
-                                       uint8_t* heap_begin, size_t heap_capacity);
+  static SpaceBitmap* CreateFromMemMap(const std::string& name,
+                                       MemMap&& mem_map,
+                                       uint8_t* heap_begin,
+                                       size_t heap_capacity);
 
   ~SpaceBitmap();
 
@@ -215,7 +217,7 @@
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
   // however, we document that this is expected on heap_end_
   SpaceBitmap(const std::string& name,
-              MemMap* mem_map,
+              MemMap&& mem_map,
               uintptr_t* bitmap_begin,
               size_t bitmap_size,
               const void* heap_begin,
@@ -227,7 +229,7 @@
   bool Modify(const mirror::Object* obj);
 
   // Backing storage for bitmap.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 
   // This bitmap itself, word sized for efficiency in scanning.
   Atomic<uintptr_t>* const bitmap_begin_;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a4095d8..1639a82 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -91,11 +91,15 @@
   size_t num_of_pages = footprint_ / kPageSize;
   size_t max_num_of_pages = max_capacity_ / kPageSize;
   std::string error_msg;
-  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
-                                               RoundUp(max_num_of_pages, kPageSize),
-                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
-  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
-  page_map_ = page_map_mem_map_->Begin();
+  page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
+                                           /* addr */ nullptr,
+                                           RoundUp(max_num_of_pages, kPageSize),
+                                           PROT_READ | PROT_WRITE,
+                                           /* low_4gb */ false,
+                                           /* reuse */ false,
+                                           &error_msg);
+  CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
+  page_map_ = page_map_mem_map_.Begin();
   page_map_size_ = num_of_pages;
   max_page_map_size_ = max_num_of_pages;
   free_page_run_size_map_.resize(num_of_pages);
@@ -1364,8 +1368,8 @@
     // Zero out the tail of the page map.
     uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
     uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
-    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
-    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
+    DCHECK_LE(madvise_begin, page_map_mem_map_.End());
+    size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
     if (madvise_size > 0) {
       DCHECK_ALIGNED(madvise_begin, kPageSize);
       DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 30213d5..0562167 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -31,13 +31,12 @@
 #include "base/allocator.h"
 #include "base/bit_utils.h"
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "thread.h"
 
 namespace art {
 
-class MemMap;
-
 namespace gc {
 namespace allocator {
 
@@ -746,7 +745,7 @@
   volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
   size_t page_map_size_;
   size_t max_page_map_size_;
-  std::unique_ptr<MemMap> page_map_mem_map_;
+  MemMap page_map_mem_map_;
 
   // The table that indicates the size of free page runs. These sizes
   // are stored here to avoid storing in the free page header and
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 9767807..558a4a7 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -40,22 +40,22 @@
 
 class DummyImageSpace : public space::ImageSpace {
  public:
-  DummyImageSpace(MemMap* map,
+  DummyImageSpace(MemMap&& map,
                   accounting::ContinuousSpaceBitmap* live_bitmap,
                   std::unique_ptr<DummyOatFile>&& oat_file,
-                  std::unique_ptr<MemMap>&& oat_map)
+                  MemMap&& oat_map)
       : ImageSpace("DummyImageSpace",
                    /*image_location*/"",
-                   map,
+                   std::move(map),
                    live_bitmap,
-                   map->End()),
+                   map.End()),
         oat_map_(std::move(oat_map)) {
     oat_file_ = std::move(oat_file);
     oat_file_non_owned_ = oat_file_.get();
   }
 
  private:
-  std::unique_ptr<MemMap> oat_map_;
+  MemMap oat_map_;
 };
 
 class ImmuneSpacesTest : public CommonRuntimeTest {
@@ -83,39 +83,39 @@
                                     uint8_t* oat_begin,
                                     size_t oat_size) {
     std::string error_str;
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
-                                                     image_begin,
-                                                     image_size,
-                                                     PROT_READ | PROT_WRITE,
-                                                     /*low_4gb*/true,
-                                                     /*reuse*/false,
-                                                     &error_str));
-    if (map == nullptr) {
+    MemMap map = MemMap::MapAnonymous("DummyImageSpace",
+                                      image_begin,
+                                      image_size,
+                                      PROT_READ | PROT_WRITE,
+                                      /*low_4gb*/true,
+                                      /*reuse*/false,
+                                      &error_str);
+    if (!map.IsValid()) {
       LOG(ERROR) << error_str;
       return nullptr;
     }
     CHECK(!live_bitmaps_.empty());
     std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
     live_bitmaps_.pop_back();
-    std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
-                                                         oat_begin,
-                                                         oat_size,
-                                                         PROT_READ | PROT_WRITE,
-                                                         /*low_4gb*/true,
-                                                         /*reuse*/false,
-                                                         &error_str));
-    if (oat_map == nullptr) {
+    MemMap oat_map = MemMap::MapAnonymous("OatMap",
+                                          oat_begin,
+                                          oat_size,
+                                          PROT_READ | PROT_WRITE,
+                                          /*low_4gb*/true,
+                                          /*reuse*/false,
+                                          &error_str);
+    if (!oat_map.IsValid()) {
       LOG(ERROR) << error_str;
       return nullptr;
     }
-    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
+    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
     // Create image header.
     ImageSection sections[ImageHeader::kSectionCount];
-    new (map->Begin()) ImageHeader(
-        /*image_begin*/PointerToLowMemUInt32(map->Begin()),
-        /*image_size*/map->Size(),
+    new (map.Begin()) ImageHeader(
+        /*image_begin*/PointerToLowMemUInt32(map.Begin()),
+        /*image_size*/map.Size(),
         sections,
-        /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
+        /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
         /*oat_checksum*/0u,
         // The oat file data in the header is always right after the image space.
         /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
@@ -131,7 +131,7 @@
         /*is_pic*/false,
         ImageHeader::kStorageModeUncompressed,
         /*storage_size*/0u);
-    return new DummyImageSpace(map.release(),
+    return new DummyImageSpace(std::move(map),
                                live_bitmap.release(),
                                std::move(oat_file),
                                std::move(oat_map));
@@ -141,18 +141,18 @@
   // returned address.
   static uint8_t* GetContinuousMemoryRegion(size_t size) {
     std::string error_str;
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
-                                                     nullptr,
-                                                     size,
-                                                     PROT_READ | PROT_WRITE,
-                                                     /*low_4gb*/true,
-                                                     /*reuse*/false,
-                                                     &error_str));
-    if (map == nullptr) {
+    MemMap map = MemMap::MapAnonymous("reserve",
+                                      /* addr */ nullptr,
+                                      size,
+                                      PROT_READ | PROT_WRITE,
+                                      /*low_4gb*/ true,
+                                      /*reuse*/ false,
+                                      &error_str);
+    if (!map.IsValid()) {
       LOG(ERROR) << "Failed to allocate memory region " << error_str;
       return nullptr;
     }
-    return map->Begin();
+    return map.Begin();
   }
 
  private:
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2335964..334c7a0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -103,12 +103,16 @@
       is_concurrent_(is_concurrent),
       live_stack_freeze_size_(0) {
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous(
-      "mark sweep sweep array free buffer", nullptr,
+  sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
+      "mark sweep sweep array free buffer",
+      /* addr */ nullptr,
       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
-      PROT_READ | PROT_WRITE, false, false, &error_msg);
-  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
-  sweep_array_free_buffer_mem_map_.reset(mem_map);
+      PROT_READ | PROT_WRITE,
+      /* low_4gb */ false,
+      /* reuse */ false,
+      &error_msg);
+  CHECK(sweep_array_free_buffer_mem_map_.IsValid())
+      << "Couldn't allocate sweep array free buffer: " << error_msg;
 }
 
 void MarkSweep::InitializePhase() {
@@ -1207,7 +1211,7 @@
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   Thread* self = Thread::Current();
   mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
-      sweep_array_free_buffer_mem_map_->BaseBegin());
+      sweep_array_free_buffer_mem_map_.BaseBegin());
   size_t chunk_free_pos = 0;
   ObjectBytePair freed;
   ObjectBytePair freed_los;
@@ -1300,7 +1304,7 @@
     t2.NewTiming("ResetStack");
     allocations->Reset();
   }
-  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
+  sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
 }
 
 void MarkSweep::Sweep(bool swap_bitmaps) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 5e0fe06..70e4432 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -351,7 +351,7 @@
   // Verification.
   size_t live_stack_freeze_size_;
 
-  std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
+  MemMap sweep_array_free_buffer_mem_map_;
 
  private:
   class CardScanTask;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 58becb1..a1a1a5c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -358,8 +358,8 @@
   if (foreground_collector_type_ == kCollectorTypeGSS) {
     separate_non_moving_space = false;
   }
-  std::unique_ptr<MemMap> main_mem_map_1;
-  std::unique_ptr<MemMap> main_mem_map_2;
+  MemMap main_mem_map_1;
+  MemMap main_mem_map_2;
 
   // Gross hack to make dex2oat deterministic.
   if (foreground_collector_type_ == kCollectorTypeMS &&
@@ -374,7 +374,7 @@
     request_begin += non_moving_space_capacity;
   }
   std::string error_str;
-  std::unique_ptr<MemMap> non_moving_space_mem_map;
+  MemMap non_moving_space_mem_map;
   if (separate_non_moving_space) {
     ScopedTrace trace2("Create separate non moving space");
     // If we are the zygote, the non moving space becomes the zygote space when we run
@@ -383,11 +383,9 @@
     const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
     // Reserve the non moving mem map before the other two since it needs to be at a specific
     // address.
-    non_moving_space_mem_map.reset(MapAnonymousPreferredAddress(space_name,
-                                                                requested_alloc_space_begin,
-                                                                non_moving_space_capacity,
-                                                                &error_str));
-    CHECK(non_moving_space_mem_map != nullptr) << error_str;
+    non_moving_space_mem_map = MapAnonymousPreferredAddress(
+        space_name, requested_alloc_space_begin, non_moving_space_capacity, &error_str);
+    CHECK(non_moving_space_mem_map.IsValid()) << error_str;
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
     request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
   }
@@ -395,27 +393,29 @@
   if (foreground_collector_type_ != kCollectorTypeCC) {
     ScopedTrace trace2("Create main mem map");
     if (separate_non_moving_space || !is_zygote) {
-      main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
-                                                        request_begin,
-                                                        capacity_,
-                                                        &error_str));
+      main_mem_map_1 = MapAnonymousPreferredAddress(
+          kMemMapSpaceName[0], request_begin, capacity_, &error_str);
     } else {
       // If no separate non-moving space and we are the zygote, the main space must come right
       // after the image space to avoid a gap. This is required since we want the zygote space to
       // be adjacent to the image space.
-      main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
-                                                PROT_READ | PROT_WRITE, true, false,
-                                                &error_str));
+      main_mem_map_1 = MemMap::MapAnonymous(kMemMapSpaceName[0],
+                                            request_begin,
+                                            capacity_,
+                                            PROT_READ | PROT_WRITE,
+                                            /* low_4gb */ true,
+                                            /* reuse */ false,
+                                            &error_str);
     }
-    CHECK(main_mem_map_1.get() != nullptr) << error_str;
+    CHECK(main_mem_map_1.IsValid()) << error_str;
   }
   if (support_homogeneous_space_compaction ||
       background_collector_type_ == kCollectorTypeSS ||
       foreground_collector_type_ == kCollectorTypeSS) {
     ScopedTrace trace2("Create main mem map 2");
-    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
-                                                      capacity_, &error_str));
-    CHECK(main_mem_map_2.get() != nullptr) << error_str;
+    main_mem_map_2 = MapAnonymousPreferredAddress(
+        kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
+    CHECK(main_mem_map_2.IsValid()) << error_str;
   }
 
   // Create the non moving space first so that bitmaps don't take up the address range.
@@ -423,10 +423,14 @@
     ScopedTrace trace2("Add non moving space");
     // Non moving space is always dlmalloc since we currently don't have support for multiple
     // active rosalloc spaces.
-    const size_t size = non_moving_space_mem_map->Size();
-    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
-        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
-        initial_size, size, size, false);
+    const size_t size = non_moving_space_mem_map.Size();
+    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
+                                                               "zygote / non moving space",
+                                                               kDefaultStartingSize,
+                                                               initial_size,
+                                                               size,
+                                                               size,
+                                                               /* can_move_objects */ false);
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
         << requested_alloc_space_begin;
@@ -436,11 +440,10 @@
   if (foreground_collector_type_ == kCollectorTypeCC) {
     CHECK(separate_non_moving_space);
     // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
-    MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
-                                                                    capacity_ * 2,
-                                                                    request_begin);
-    CHECK(region_space_mem_map != nullptr) << "No region space mem map";
-    region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
+    MemMap region_space_mem_map =
+        space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
+    CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
+    region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
     AddSpace(region_space_);
   } else if (IsMovingGc(foreground_collector_type_) &&
       foreground_collector_type_ != kCollectorTypeGSS) {
@@ -448,16 +451,16 @@
     // We only to create the bump pointer if the foreground collector is a compacting GC.
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
     bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
-                                                                    main_mem_map_1.release());
+                                                                    std::move(main_mem_map_1));
     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
     AddSpace(bump_pointer_space_);
     temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
-                                                            main_mem_map_2.release());
+                                                            std::move(main_mem_map_2));
     CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
     AddSpace(temp_space_);
     CHECK(separate_non_moving_space);
   } else {
-    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
+    CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
     CHECK(main_space_ != nullptr);
     AddSpace(main_space_);
     if (!separate_non_moving_space) {
@@ -467,19 +470,23 @@
     if (foreground_collector_type_ == kCollectorTypeGSS) {
       CHECK_EQ(foreground_collector_type_, background_collector_type_);
       // Create bump pointer spaces instead of a backup space.
-      main_mem_map_2.release();
-      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
-                                                            kGSSBumpPointerSpaceCapacity, nullptr);
+      main_mem_map_2.Reset();
+      bump_pointer_space_ = space::BumpPointerSpace::Create(
+          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
       CHECK(bump_pointer_space_ != nullptr);
       AddSpace(bump_pointer_space_);
-      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
-                                                    kGSSBumpPointerSpaceCapacity, nullptr);
+      temp_space_ = space::BumpPointerSpace::Create(
+          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
       CHECK(temp_space_ != nullptr);
       AddSpace(temp_space_);
-    } else if (main_mem_map_2.get() != nullptr) {
+    } else if (main_mem_map_2.IsValid()) {
       const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
-      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
-                                                           growth_limit_, capacity_, name, true));
+      main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
+                                                           initial_size,
+                                                           growth_limit_,
+                                                           capacity_,
+                                                           name,
+                                                           /* can_move_objects */ true));
       CHECK(main_space_backup_.get() != nullptr);
       // Add the space so it's accounted for in the heap_begin and heap_end.
       AddSpace(main_space_backup_.get());
@@ -613,7 +620,7 @@
         first_space = space;
       }
     }
-    bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
+    bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
     if (!no_gap) {
       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
       MemMap::DumpMaps(LOG_STREAM(ERROR), true);
@@ -632,14 +639,19 @@
   }
 }
 
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
-                                           uint8_t* request_begin,
-                                           size_t capacity,
-                                           std::string* out_error_str) {
+MemMap Heap::MapAnonymousPreferredAddress(const char* name,
+                                          uint8_t* request_begin,
+                                          size_t capacity,
+                                          std::string* out_error_str) {
   while (true) {
-    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
-                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
-    if (map != nullptr || request_begin == nullptr) {
+    MemMap map = MemMap::MapAnonymous(name,
+                                      request_begin,
+                                      capacity,
+                                      PROT_READ | PROT_WRITE,
+                                      /* low_4gb */ true,
+                                      /* reuse */ false,
+                                      out_error_str);
+    if (map.IsValid() || request_begin == nullptr) {
       return map;
     }
     // Retry a second time with no specified request begin.
@@ -651,7 +663,7 @@
   return foreground_collector_type_ == type || background_collector_type_ == type;
 }
 
-space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
+space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                       size_t initial_size,
                                                       size_t growth_limit,
                                                       size_t capacity,
@@ -660,12 +672,21 @@
   space::MallocSpace* malloc_space = nullptr;
   if (kUseRosAlloc) {
     // Create rosalloc space.
-    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
-                                                          initial_size, growth_limit, capacity,
-                                                          low_memory_mode_, can_move_objects);
+    malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
+                                                          name,
+                                                          kDefaultStartingSize,
+                                                          initial_size,
+                                                          growth_limit,
+                                                          capacity,
+                                                          low_memory_mode_,
+                                                          can_move_objects);
   } else {
-    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
-                                                          initial_size, growth_limit, capacity,
+    malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
+                                                          name,
+                                                          kDefaultStartingSize,
+                                                          initial_size,
+                                                          growth_limit,
+                                                          capacity,
                                                           can_move_objects);
   }
   if (collector::SemiSpace::kUseRememberedSet) {
@@ -679,7 +700,9 @@
   return malloc_space;
 }
 
-void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
+void Heap::CreateMainMallocSpace(MemMap&& mem_map,
+                                 size_t initial_size,
+                                 size_t growth_limit,
                                  size_t capacity) {
   // Is background compaction enabled?
   bool can_move_objects = IsMovingGc(background_collector_type_) !=
@@ -698,7 +721,10 @@
     RemoveRememberedSet(main_space_);
   }
   const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
-  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
+  main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
+                                            initial_size,
+                                            growth_limit,
+                                            capacity,
+                                            name,
                                             can_move_objects);
   SetSpaceAsDefault(main_space_);
   VLOG(heap) << "Created main space " << main_space_;
@@ -2012,17 +2038,17 @@
         if (!IsMovingGc(collector_type_)) {
           // Create the bump pointer space from the backup space.
           CHECK(main_space_backup_ != nullptr);
-          std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
+          MemMap mem_map = main_space_backup_->ReleaseMemMap();
           // We are transitioning from non moving GC -> moving GC, since we copied from the bump
           // pointer space last transition it will be protected.
-          CHECK(mem_map != nullptr);
-          mem_map->Protect(PROT_READ | PROT_WRITE);
+          CHECK(mem_map.IsValid());
+          mem_map.Protect(PROT_READ | PROT_WRITE);
           bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
-                                                                          mem_map.release());
+                                                                          std::move(mem_map));
           AddSpace(bump_pointer_space_);
           collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
           // Use the now empty main space mem map for the bump pointer temp space.
-          mem_map.reset(main_space_->ReleaseMemMap());
+          mem_map = main_space_->ReleaseMemMap();
           // Unset the pointers just in case.
           if (dlmalloc_space_ == main_space_) {
             dlmalloc_space_ = nullptr;
@@ -2038,7 +2064,7 @@
           RemoveRememberedSet(main_space_backup_.get());
           main_space_backup_.reset(nullptr);  // Deletes the space.
           temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
-                                                                  mem_map.release());
+                                                                  std::move(mem_map));
           AddSpace(temp_space_);
         }
         break;
@@ -2048,37 +2074,35 @@
       case kCollectorTypeCMS: {
         if (IsMovingGc(collector_type_)) {
           CHECK(temp_space_ != nullptr);
-          std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+          MemMap mem_map = temp_space_->ReleaseMemMap();
           RemoveSpace(temp_space_);
           temp_space_ = nullptr;
-          mem_map->Protect(PROT_READ | PROT_WRITE);
-          CreateMainMallocSpace(mem_map.get(),
+          mem_map.Protect(PROT_READ | PROT_WRITE);
+          CreateMainMallocSpace(std::move(mem_map),
                                 kDefaultInitialSize,
-                                std::min(mem_map->Size(), growth_limit_),
-                                mem_map->Size());
-          mem_map.release();
+                                std::min(mem_map.Size(), growth_limit_),
+                                mem_map.Size());
           // Compact to the main space from the bump pointer space, don't need to swap semispaces.
           AddSpace(main_space_);
           collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
-          mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+          mem_map = bump_pointer_space_->ReleaseMemMap();
           RemoveSpace(bump_pointer_space_);
           bump_pointer_space_ = nullptr;
           const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
           // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
           if (kIsDebugBuild && kUseRosAlloc) {
-            mem_map->Protect(PROT_READ | PROT_WRITE);
+            mem_map.Protect(PROT_READ | PROT_WRITE);
           }
           main_space_backup_.reset(CreateMallocSpaceFromMemMap(
-              mem_map.get(),
+              std::move(mem_map),
               kDefaultInitialSize,
-              std::min(mem_map->Size(), growth_limit_),
-              mem_map->Size(),
+              std::min(mem_map.Size(), growth_limit_),
+              mem_map.Size(),
               name,
               true));
           if (kIsDebugBuild && kUseRosAlloc) {
-            mem_map->Protect(PROT_NONE);
+            main_space_backup_->GetMemMap()->Protect(PROT_NONE);
           }
-          mem_map.release();
         }
         break;
       }
@@ -2323,11 +2347,13 @@
     if (reset_main_space) {
       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
-      MemMap* mem_map = main_space_->ReleaseMemMap();
+      MemMap mem_map = main_space_->ReleaseMemMap();
       RemoveSpace(main_space_);
       space::Space* old_main_space = main_space_;
-      CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
-                            mem_map->Size());
+      CreateMainMallocSpace(std::move(mem_map),
+                            kDefaultInitialSize,
+                            std::min(mem_map.Size(), growth_limit_),
+                            mem_map.Size());
       delete old_main_space;
       AddSpace(main_space_);
     } else {
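
Note: several call sites above pass std::move(mem_map) and then still read
mem_map.Size() in the same argument list (the kCollectorTypeCMS transition, for
example). This is well defined because the callees take MemMap&&: binding an
rvalue reference moves nothing, all arguments are evaluated before the callee
body runs, and the map is only actually moved from inside the callee. A minimal
standalone sketch of the pattern, with illustrative (non-ART) names:

    #include <cstddef>
    #include <iostream>
    #include <utility>

    // Toy stand-in for a move-only mapping type.
    class Map {
     public:
      Map(void* begin, std::size_t size) : begin_(begin), size_(size) {}
      Map(Map&& other) noexcept : begin_(other.begin_), size_(other.size_) {
        other.begin_ = nullptr;
        other.size_ = 0;
      }
      void* Begin() const { return begin_; }
      std::size_t Size() const { return size_; }

     private:
      void* begin_;
      std::size_t size_;
    };

    // Rvalue-reference parameter: binding it moves nothing. The transfer
    // happens here, inside the callee, after every caller argument has
    // already been evaluated.
    void Consume(Map&& map, std::size_t limit) {
      Map owned(std::move(map));
      std::cout << owned.Size() << " <= " << limit << "\n";
    }

    int main() {
      int dummy = 0;
      Map map(&dummy, 4096);
      // Not a use-after-move: map.Size() is evaluated before Consume's body
      // runs, and std::move(map) by itself does not modify map.
      Consume(std::move(map), map.Size());
      return 0;
    }

Had Consume taken Map by value instead, the move would happen while the
argument list is being evaluated, in unspecified order relative to the
map.Size() read, so the by-value spelling really would be a bug here.
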
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5c34c56..0dcf4f5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -835,8 +835,10 @@
   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
 
   // Create a mem map with a preferred base address.
-  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
-                                              size_t capacity, std::string* out_error_str);
+  static MemMap MapAnonymousPreferredAddress(const char* name,
+                                             uint8_t* request_begin,
+                                             size_t capacity,
+                                             std::string* out_error_str);
 
   bool SupportHSpaceCompaction() const {
     // Returns true if we can do hspace compaction
@@ -979,13 +981,13 @@
   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
 
   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
-  void CreateMainMallocSpace(MemMap* mem_map,
+  void CreateMainMallocSpace(MemMap&& mem_map,
                              size_t initial_size,
                              size_t growth_limit,
                              size_t capacity);
 
   // Create a malloc space based on a mem map. Does not set the space as default.
-  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
+  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                   size_t initial_size,
                                                   size_t growth_limit,
                                                   size_t capacity,
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index c6b2120..d35ae38 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -33,19 +33,19 @@
     MemMap::Init();
     std::string error_msg;
     // Reserve the preferred address to force the heap to use another one for testing.
-    reserved_.reset(MemMap::MapAnonymous("ReserveMap",
-                                         gc::Heap::kPreferredAllocSpaceBegin,
-                                         16 * KB,
-                                         PROT_READ,
-                                         /*low_4gb*/ true,
-                                         /*reuse*/ false,
-                                         &error_msg));
-    ASSERT_TRUE(reserved_ != nullptr) << error_msg;
+    reserved_ = MemMap::MapAnonymous("ReserveMap",
+                                     gc::Heap::kPreferredAllocSpaceBegin,
+                                     16 * KB,
+                                     PROT_READ,
+                                     /*low_4gb*/ true,
+                                     /*reuse*/ false,
+                                     &error_msg);
+    ASSERT_TRUE(reserved_.IsValid()) << error_msg;
     CommonRuntimeTest::SetUp();
   }
 
  private:
-  std::unique_ptr<MemMap> reserved_;
+  MemMap reserved_;
 };
 
 TEST_F(HeapTest, ClearGrowthLimit) {
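
Note: the test now holds the reservation by value and asserts
reserved_.IsValid(); the member also default-constructs into the invalid
state, which is why it needs no initializer. A rough sketch of the value-state
idiom this relies on, assuming MemMap follows the usual move-only design
(simplified; the real class carries more state and unmaps in its destructor):

    #include <cstddef>
    #include <cstdint>

    class MemMapSketch {
     public:
      // Default-constructed maps are invalid; Invalid() just spells it out.
      MemMapSketch() = default;
      static MemMapSketch Invalid() { return MemMapSketch(); }

      MemMapSketch(MemMapSketch&& other) noexcept
          : begin_(other.begin_), size_(other.size_) {
        // The moved-from map becomes invalid instead of dangling.
        other.begin_ = nullptr;
        other.size_ = 0;
      }
      MemMapSketch& operator=(MemMapSketch&& other) noexcept {
        if (this != &other) {
          // A real implementation would first unmap any range it still holds.
          begin_ = other.begin_;
          size_ = other.size_;
          other.begin_ = nullptr;
          other.size_ = 0;
        }
        return *this;
      }

      bool IsValid() const { return begin_ != nullptr; }
      uint8_t* Begin() const { return begin_; }
      std::size_t Size() const { return size_; }

     private:
      uint8_t* begin_ = nullptr;
      std::size_t size_ = 0;
    };
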
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index e95da01..2712ec2 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -28,23 +28,31 @@
                                            uint8_t* requested_begin) {
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
-                                                       PROT_READ | PROT_WRITE, true, false,
-                                                       &error_msg));
-  if (mem_map.get() == nullptr) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        requested_begin,
+                                        capacity,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
     return nullptr;
   }
-  return new BumpPointerSpace(name, mem_map.release());
+  return new BumpPointerSpace(name, std::move(mem_map));
 }
 
-BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
-  return new BumpPointerSpace(name, mem_map);
+BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
+  return new BumpPointerSpace(name, std::move(mem_map));
 }
 
 BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
-    : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
+    : ContinuousMemMapAllocSpace(name,
+                                 MemMap::Invalid(),
+                                 begin,
+                                 begin,
+                                 limit,
                                  kGcRetentionPolicyAlwaysCollect),
       growth_end_(limit),
       objects_allocated_(0), bytes_allocated_(0),
@@ -53,10 +61,14 @@
       num_blocks_(0) {
 }
 
-BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
-    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
+BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
+    : ContinuousMemMapAllocSpace(name,
+                                 std::move(mem_map),
+                                 mem_map.Begin(),
+                                 mem_map.Begin(),
+                                 mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
-      growth_end_(mem_map->End()),
+      growth_end_(mem_map_.End()),
       objects_allocated_(0), bytes_allocated_(0),
       block_lock_("Block lock", kBumpPointerSpaceBlockLock),
       main_block_size_(0),
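
Note: in the constructor above, growth_end_ is initialized from the member
mem_map_ rather than from the mem_map parameter, because the parameter has
already been moved into the ContinuousMemMapAllocSpace base by the time the
derived members are initialized (base subobjects are always initialized
first). The mem_map.Begin()/End() reads in the base initializer itself are
fine for the same reason as the heap.cc call sites: arguments are evaluated
before the base constructor moves anything. A reduced sketch with
illustrative names:

    #include <utility>

    struct Map {
      char* end = nullptr;
      char* End() const { return end; }
    };

    struct Base {
      explicit Base(Map&& map) : map_(std::move(map)) {}
      Map map_;
    };

    struct Derived : Base {
      // Base runs first, so map_ already owns the mapping by the time
      // growth_end_ is initialized; reading the parameter `map` here would
      // read a moved-from object instead.
      explicit Derived(Map&& map)
          : Base(std::move(map)),
            growth_end_(map_.End()) {}
      char* growth_end_;
    };

    int main() {
      char buffer[16];
      Derived d(Map{buffer + sizeof(buffer)});
      return d.growth_end_ == buffer + 16 ? 0 : 1;
    }
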
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 5ba13ca..9b31558 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -47,7 +47,7 @@
   // guaranteed to be granted; if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
   static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
-  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
+  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
 
   // Allocate num_bytes, returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -166,7 +166,7 @@
   static constexpr size_t kAlignment = 8;
 
  protected:
-  BumpPointerSpace(const std::string& name, MemMap* mem_map);
+  BumpPointerSpace(const std::string& name, MemMap&& mem_map);
 
   // Allocate a raw block of bytes.
   uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 025c3f0..36d2161 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -38,41 +38,73 @@
 
 static constexpr bool kPrefetchDuringDlMallocFreeList = true;
 
-DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
-                             void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
-                             size_t growth_limit, bool can_move_objects, size_t starting_size)
-    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+DlMallocSpace::DlMallocSpace(MemMap&& mem_map,
+                             size_t initial_size,
+                             const std::string& name,
+                             void* mspace,
+                             uint8_t* begin,
+                             uint8_t* end,
+                             uint8_t* limit,
+                             size_t growth_limit,
+                             bool can_move_objects,
+                             size_t starting_size)
+    : MallocSpace(name,
+                  std::move(mem_map),
+                  begin,
+                  end,
+                  limit,
+                  growth_limit,
+                  /* create_bitmaps */ true,
+                  can_move_objects,
                   starting_size, initial_size),
       mspace_(mspace) {
   CHECK(mspace != nullptr);
 }
 
-DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                               size_t starting_size, size_t initial_size,
-                                               size_t growth_limit, size_t capacity,
+DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map,
+                                               const std::string& name,
+                                               size_t starting_size,
+                                               size_t initial_size,
+                                               size_t growth_limit,
+                                               size_t capacity,
                                                bool can_move_objects) {
-  DCHECK(mem_map != nullptr);
-  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
+  DCHECK(mem_map.IsValid());
+  void* mspace = CreateMspace(mem_map.Begin(), starting_size, initial_size);
   if (mspace == nullptr) {
     LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
     return nullptr;
   }
 
   // Protect memory beyond the starting size. morecore will add r/w permissions when necessary
-  uint8_t* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map.Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
   }
 
   // Everything is set, so record it in the immutable structure and leave
-  uint8_t* begin = mem_map->Begin();
+  uint8_t* begin = mem_map.Begin();
   if (Runtime::Current()->IsRunningOnMemoryTool()) {
     return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
-        mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
-        can_move_objects, starting_size);
+        std::move(mem_map),
+        initial_size,
+        name,
+        mspace,
+        begin,
+        end,
+        begin + capacity,
+        growth_limit,
+        can_move_objects,
+        starting_size);
   } else {
-    return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
-                             growth_limit, can_move_objects, starting_size);
+    return new DlMallocSpace(std::move(mem_map),
+                             initial_size,
+                             name,
+                             mspace,
+                             begin,
+                             end,
+                             begin + capacity,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size);
   }
 }
 
@@ -94,15 +126,20 @@
   // will ask for this memory from sys_alloc, which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = kPageSize;
-  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
-                                 requested_begin);
-  if (mem_map == nullptr) {
+  MemMap mem_map =
+      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
     return nullptr;
   }
-  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
-                                          growth_limit, capacity, can_move_objects);
+  DlMallocSpace* space = CreateFromMemMap(std::move(mem_map),
+                                          name,
+                                          starting_size,
+                                          initial_size,
+                                          growth_limit,
+                                          capacity,
+                                          can_move_objects);
   // We start out with only the initial size possibly containing objects.
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
@@ -152,17 +189,37 @@
   return result;
 }
 
-MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
-                                           void* allocator, uint8_t* begin, uint8_t* end,
-                                           uint8_t* limit, size_t growth_limit,
+MallocSpace* DlMallocSpace::CreateInstance(MemMap&& mem_map,
+                                           const std::string& name,
+                                           void* allocator,
+                                           uint8_t* begin,
+                                           uint8_t* end,
+                                           uint8_t* limit,
+                                           size_t growth_limit,
                                            bool can_move_objects) {
   if (Runtime::Current()->IsRunningOnMemoryTool()) {
     return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
-        mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
-        can_move_objects, starting_size_);
+        std::move(mem_map),
+        initial_size_,
+        name,
+        allocator,
+        begin,
+        end,
+        limit,
+        growth_limit,
+        can_move_objects,
+        starting_size_);
   } else {
-    return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
-                             growth_limit, can_move_objects, starting_size_);
+    return new DlMallocSpace(std::move(mem_map),
+                             initial_size_,
+                             name,
+                             allocator,
+                             begin,
+                             end,
+                             limit,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size_);
   }
 }
 
@@ -283,7 +340,7 @@
   live_bitmap_->Clear();
   mark_bitmap_->Clear();
   SetEnd(Begin() + starting_size_);
-  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
+  mspace_ = CreateMspace(mem_map_.Begin(), starting_size_, initial_size_);
   SetFootprintLimit(footprint_limit);
 }
 
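
Note: CreateFromMemMap above maps the full capacity up front but mprotects
everything past starting_size to PROT_NONE, and the allocator re-enables pages
as its footprint grows. A standalone sketch of that reserve-then-commit
pattern, assuming POSIX mmap/mprotect (sizes and names are illustrative):

    #include <sys/mman.h>

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t kPage = 4096;
      const std::size_t capacity = 16 * kPage;
      const std::size_t starting_size = kPage;

      // Reserve the whole capacity...
      void* base = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) {
        return 1;
      }
      // ...but make everything past the starting size inaccessible.
      char* end = static_cast<char*>(base) + starting_size;
      mprotect(end, capacity - starting_size, PROT_NONE);

      // Later, morecore-style growth re-enables the next chunk.
      mprotect(end, kPage, PROT_READ | PROT_WRITE);

      std::printf("committed %zu of %zu bytes\n",
                  static_cast<std::size_t>(starting_size + kPage), capacity);
      munmap(base, capacity);
      return 0;
    }
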
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 4c7fcfd..66537d5 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -34,9 +34,12 @@
 class DlMallocSpace : public MallocSpace {
  public:
   // Create a DlMallocSpace from an existing mem_map.
-  static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                         size_t starting_size, size_t initial_size,
-                                         size_t growth_limit, size_t capacity,
+  static DlMallocSpace* CreateFromMemMap(MemMap&& mem_map,
+                                         const std::string& name,
+                                         size_t starting_size,
+                                         size_t initial_size,
+                                         size_t growth_limit,
+                                         size_t capacity,
                                          bool can_move_objects);
 
   // Create a DlMallocSpace with the requested sizes. The requested
@@ -118,9 +121,14 @@
   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
   void SetFootprintLimit(size_t limit) OVERRIDE;
 
-  MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
-                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
-                              bool can_move_objects);
+  MallocSpace* CreateInstance(MemMap&& mem_map,
+                              const std::string& name,
+                              void* allocator,
+                              uint8_t* begin,
+                              uint8_t* end,
+                              uint8_t* limit,
+                              size_t growth_limit,
+                              bool can_move_objects) OVERRIDE;
 
   uint64_t GetBytesAllocated() OVERRIDE;
   uint64_t GetObjectsAllocated() OVERRIDE;
@@ -139,9 +147,16 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
-  DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
-                uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
-                bool can_move_objects, size_t starting_size);
+  DlMallocSpace(MemMap&& mem_map,
+                size_t initial_size,
+                const std::string& name,
+                void* mspace,
+                uint8_t* begin,
+                uint8_t* end,
+                uint8_t* limit,
+                size_t growth_limit,
+                bool can_move_objects,
+                size_t starting_size);
 
  private:
   mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
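
Note: the OVERRIDE added to CreateInstance() is doing real work in this
change: with the base signature moving from MemMap* to MemMap&&, the
annotation makes a subclass that still declares the old pointer form fail to
compile instead of silently introducing an unrelated virtual. A reduced
sketch using the override keyword behind ART's OVERRIDE macro (names here are
illustrative):

    #include <utility>

    struct Map {};

    struct MallocSpaceSketch {
      virtual MallocSpaceSketch* CreateInstance(Map&& map) = 0;
      virtual ~MallocSpaceSketch() = default;
    };

    struct DlMallocSpaceSketch : MallocSpaceSketch {
      // 'override' turns a stale signature, e.g. one still taking Map*,
      // into a compile error instead of an accidental new virtual:
      //   MallocSpaceSketch* CreateInstance(Map* map) override;  // rejected
      MallocSpaceSketch* CreateInstance(Map&& map) override {
        Map owned(std::move(map));  // the real code moves the map into the space
        (void)owned;
        return nullptr;  // a real factory would construct the new space here
      }
    };
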
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 826f382..ae4b9da 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -62,12 +62,12 @@
 
 ImageSpace::ImageSpace(const std::string& image_filename,
                        const char* image_location,
-                       MemMap* mem_map,
+                       MemMap&& mem_map,
                        accounting::ContinuousSpaceBitmap* live_bitmap,
                        uint8_t* end)
     : MemMapSpace(image_filename,
-                  mem_map,
-                  mem_map->Begin(),
+                  std::move(mem_map),
+                  mem_map.Begin(),
                   end,
                   end,
                   kGcRetentionPolicyNeverCollect),
@@ -636,53 +636,53 @@
       return nullptr;
     }
 
-    std::unique_ptr<MemMap> map;
+    MemMap map;
 
     // GetImageBegin is the preferred address to map the image. If we manage to map the
     // image at the image begin, the amount of fixup work required is minimized.
     // If it is PIC, we will retry with error_msg for the failure case. Pass a null error_msg to
     // avoid reading proc maps for a mapping failure and slowing everything down.
-    map.reset(LoadImageFile(image_filename,
-                            image_location,
-                            *image_header,
-                            image_header->GetImageBegin(),
-                            file->Fd(),
-                            logger,
-                            image_header->IsPic() ? nullptr : error_msg));
+    map = LoadImageFile(image_filename,
+                        image_location,
+                        *image_header,
+                        image_header->GetImageBegin(),
+                        file->Fd(),
+                        logger,
+                        image_header->IsPic() ? nullptr : error_msg);
     // If the header specifies PIC mode, we can also map at a random low_4gb address since we can
     // relocate in-place.
-    if (map == nullptr && image_header->IsPic()) {
-      map.reset(LoadImageFile(image_filename,
-                              image_location,
-                              *image_header,
-                              /* address */ nullptr,
-                              file->Fd(),
-                              logger,
-                              error_msg));
+    if (!map.IsValid() && image_header->IsPic()) {
+      map = LoadImageFile(image_filename,
+                          image_location,
+                          *image_header,
+                          /* address */ nullptr,
+                          file->Fd(),
+                          logger,
+                          error_msg);
     }
     // Were we able to load something and continue?
-    if (map == nullptr) {
+    if (!map.IsValid()) {
       DCHECK(!error_msg->empty());
       return nullptr;
     }
-    DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader)));
+    DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
 
-    std::unique_ptr<MemMap> image_bitmap_map(MemMap::MapFileAtAddress(nullptr,
-                                                                      bitmap_section.Size(),
-                                                                      PROT_READ, MAP_PRIVATE,
-                                                                      file->Fd(),
-                                                                      image_bitmap_offset,
-                                                                      /*low_4gb*/false,
-                                                                      /*reuse*/false,
-                                                                      image_filename,
-                                                                      error_msg));
-    if (image_bitmap_map == nullptr) {
+    MemMap image_bitmap_map = MemMap::MapFileAtAddress(nullptr,
+                                                       bitmap_section.Size(),
+                                                       PROT_READ,
+                                                       MAP_PRIVATE,
+                                                       file->Fd(),
+                                                       image_bitmap_offset,
+                                                       /*low_4gb*/false,
+                                                       /*reuse*/false,
+                                                       image_filename,
+                                                       error_msg);
+    if (!image_bitmap_map.IsValid()) {
       *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
       return nullptr;
     }
     // Loaded the map; use the image header from the file now in case we patch it with
     // RelocateInPlace.
-    image_header = reinterpret_cast<ImageHeader*>(map->Begin());
+    image_header = reinterpret_cast<ImageHeader*>(map.Begin());
     const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst);
     std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
                                          image_filename,
@@ -690,15 +690,15 @@
     // Bitmap only needs to cover until the end of the mirror objects section.
     const ImageSection& image_objects = image_header->GetObjectsSection();
     // We only want the mirror object, not the ArtFields and ArtMethods.
-    uint8_t* const image_end = map->Begin() + image_objects.End();
+    uint8_t* const image_end = map.Begin() + image_objects.End();
     std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
     {
       TimingLogger::ScopedTiming timing("CreateImageBitmap", &logger);
       bitmap.reset(
           accounting::ContinuousSpaceBitmap::CreateFromMemMap(
               bitmap_name,
-              image_bitmap_map.release(),
-              reinterpret_cast<uint8_t*>(map->Begin()),
+              std::move(image_bitmap_map),
+              reinterpret_cast<uint8_t*>(map.Begin()),
               // Make sure the bitmap is aligned to card size instead of just bitmap word size.
               RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
       if (bitmap == nullptr) {
@@ -709,7 +709,7 @@
     {
       TimingLogger::ScopedTiming timing("RelocateImage", &logger);
       if (!RelocateInPlace(*image_header,
-                           map->Begin(),
+                           map.Begin(),
                            bitmap.get(),
                            oat_file,
                            error_msg)) {
@@ -719,7 +719,7 @@
     // We only want the mirror object, not the ArtFields and ArtMethods.
     std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                      image_location,
-                                                     map.release(),
+                                                     std::move(map),
                                                      bitmap.release(),
                                                      image_end));
 
@@ -807,13 +807,13 @@
   }
 
  private:
-  static MemMap* LoadImageFile(const char* image_filename,
-                               const char* image_location,
-                               const ImageHeader& image_header,
-                               uint8_t* address,
-                               int fd,
-                               TimingLogger& logger,
-                               std::string* error_msg) {
+  static MemMap LoadImageFile(const char* image_filename,
+                              const char* image_location,
+                              const ImageHeader& image_header,
+                              uint8_t* address,
+                              int fd,
+                              TimingLogger& logger,
+                              std::string* error_msg) {
     TimingLogger::ScopedTiming timing("MapImageFile", &logger);
     const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
     if (storage_mode == ImageHeader::kStorageModeUncompressed) {
@@ -835,45 +835,45 @@
         *error_msg = StringPrintf("Invalid storage mode in image header %d",
                                   static_cast<int>(storage_mode));
       }
-      return nullptr;
+      return MemMap::Invalid();
     }
 
     // Reserve output and decompress into it.
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous(image_location,
-                                                     address,
-                                                     image_header.GetImageSize(),
-                                                     PROT_READ | PROT_WRITE,
-                                                     /*low_4gb*/true,
-                                                     /*reuse*/false,
-                                                     error_msg));
-    if (map != nullptr) {
+    MemMap map = MemMap::MapAnonymous(image_location,
+                                      address,
+                                      image_header.GetImageSize(),
+                                      PROT_READ | PROT_WRITE,
+                                      /*low_4gb*/ true,
+                                      /*reuse*/ false,
+                                      error_msg);
+    if (map.IsValid()) {
       const size_t stored_size = image_header.GetDataSize();
       const size_t decompress_offset = sizeof(ImageHeader);  // Skip the header.
-      std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
-                                                       PROT_READ,
-                                                       MAP_PRIVATE,
-                                                       fd,
-                                                       /*offset*/0,
-                                                       /*low_4gb*/false,
-                                                       image_filename,
-                                                       error_msg));
-      if (temp_map == nullptr) {
+      MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
+                                        PROT_READ,
+                                        MAP_PRIVATE,
+                                        fd,
+                                        /*offset*/0,
+                                        /*low_4gb*/false,
+                                        image_filename,
+                                        error_msg);
+      if (!temp_map.IsValid()) {
         DCHECK(error_msg == nullptr || !error_msg->empty());
-        return nullptr;
+        return MemMap::Invalid();
       }
-      memcpy(map->Begin(), &image_header, sizeof(ImageHeader));
+      memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
       const uint64_t start = NanoTime();
       // LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
       TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
       const size_t decompressed_size = LZ4_decompress_safe(
-          reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
-          reinterpret_cast<char*>(map->Begin()) + decompress_offset,
+          reinterpret_cast<char*>(temp_map.Begin()) + sizeof(ImageHeader),
+          reinterpret_cast<char*>(map.Begin()) + decompress_offset,
           stored_size,
-          map->Size() - decompress_offset);
+          map.Size() - decompress_offset);
       const uint64_t time = NanoTime() - start;
       // Add 1 ns to prevent a possible divide by 0.
       VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
-                  << PrettySize(static_cast<uint64_t>(map->Size()) * MsToNs(1000) / (time + 1))
+                  << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
                   << "/s)";
       if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
         if (error_msg != nullptr) {
@@ -882,11 +882,11 @@
               decompressed_size + sizeof(ImageHeader),
               image_header.GetImageSize());
         }
-        return nullptr;
+        return MemMap::Invalid();
       }
     }
 
-    return map.release();
+    return map;
   }
 
   class FixupVisitor : public ValueObject {
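
Note: LoadImageFile now reports every failure path as MemMap::Invalid() and
hands the mapping back by value, so callers branch on IsValid() throughout. A
self-contained sketch of a factory in that style, assuming POSIX mmap; the
MapOrInvalid name and the MapSketch type are illustrative:

    #include <sys/mman.h>

    #include <cerrno>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <string>

    class MapSketch {
     public:
      static MapSketch Invalid() { return MapSketch(nullptr, 0); }
      MapSketch(uint8_t* begin, std::size_t size) : begin_(begin), size_(size) {}
      MapSketch(MapSketch&& other) noexcept
          : begin_(other.begin_), size_(other.size_) {
        other.begin_ = nullptr;
        other.size_ = 0;
      }
      ~MapSketch() {
        if (begin_ != nullptr) {
          munmap(begin_, size_);
        }
      }
      bool IsValid() const { return begin_ != nullptr; }

     private:
      uint8_t* begin_;
      std::size_t size_;
    };

    // Failure is a value, not a null pointer: the caller checks IsValid().
    MapSketch MapOrInvalid(std::size_t size, std::string* error_msg) {
      void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED) {
        *error_msg = strerror(errno);
        return MapSketch::Invalid();
      }
      return MapSketch(static_cast<uint8_t*>(mem), size);
    }
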
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 3383d6b..89038e5 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -182,7 +182,7 @@
 
   ImageSpace(const std::string& name,
              const char* image_location,
-             MemMap* mem_map,
+             MemMap&& mem_map,
              accounting::ContinuousSpaceBitmap* live_bitmap,
              uint8_t* end);
 
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a24ca32..ada59b3 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -48,10 +48,6 @@
     // Historical note: We were deleting large objects to keep Valgrind happy if there were
     // any large objects such as Dex cache arrays which aren't freed since they are held live
     // by the class linker.
-    MutexLock mu(Thread::Current(), lock_);
-    for (auto& m : large_objects_) {
-      delete m.second.mem_map;
-    }
   }
 
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -139,16 +135,21 @@
                                            size_t* bytes_allocated, size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated) {
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
-                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
-  if (UNLIKELY(mem_map == nullptr)) {
+  MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
+                                        /* addr */ nullptr,
+                                        num_bytes,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (UNLIKELY(!mem_map.IsValid())) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
     return nullptr;
   }
-  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
+  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map.Begin());
+  const size_t allocation_size = mem_map.BaseSize();
   MutexLock mu(self, lock_);
-  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
-  const size_t allocation_size = mem_map->BaseSize();
+  large_objects_.Put(obj, LargeObject {std::move(mem_map), false /* not zygote */});
   DCHECK(bytes_allocated != nullptr);
 
   if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
@@ -191,13 +192,11 @@
     Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
     LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
   }
-  MemMap* mem_map = it->second.mem_map;
-  const size_t map_size = mem_map->BaseSize();
+  const size_t map_size = it->second.mem_map.BaseSize();
   DCHECK_GE(num_bytes_allocated_, map_size);
   size_t allocation_size = map_size;
   num_bytes_allocated_ -= allocation_size;
   --num_objects_allocated_;
-  delete mem_map;
   large_objects_.erase(it);
   return allocation_size;
 }
@@ -206,7 +205,7 @@
   MutexLock mu(Thread::Current(), lock_);
   auto it = large_objects_.find(obj);
   CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
-  size_t alloc_size = it->second.mem_map->BaseSize();
+  size_t alloc_size = it->second.mem_map.BaseSize();
   if (usable_size != nullptr) {
     *usable_size = alloc_size;
   }
@@ -227,7 +226,7 @@
 void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
   MutexLock mu(Thread::Current(), lock_);
   for (auto& pair : large_objects_) {
-    MemMap* mem_map = pair.second.mem_map;
+    MemMap* mem_map = &pair.second.mem_map;
     callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
     callback(nullptr, nullptr, 0, arg);
   }
@@ -326,7 +325,7 @@
 
 size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
   DCHECK_GE(info, allocation_info_);
-  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
+  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_.End()));
   return info - allocation_info_;
 }
 
@@ -350,28 +349,39 @@
 FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
-                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
-  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
-  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        requested_begin,
+                                        size,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
+  return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
 }
 
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
+FreeListSpace::FreeListSpace(const std::string& name,
+                             MemMap&& mem_map,
+                             uint8_t* begin,
+                             uint8_t* end)
     : LargeObjectSpace(name, begin, end),
-      mem_map_(mem_map),
+      mem_map_(std::move(mem_map)),
       lock_("free list space lock", kAllocSpaceLock) {
   const size_t space_capacity = end - begin;
   free_end_ = space_capacity;
   CHECK_ALIGNED(space_capacity, kAlignment);
   const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
   std::string error_msg;
-  allocation_info_map_.reset(
+  allocation_info_map_ =
       MemMap::MapAnonymous("large object free list space allocation info map",
-                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
-                           false, false, &error_msg));
-  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
-      << error_msg;
-  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
+                           /* addr */ nullptr,
+                           alloc_info_size,
+                           PROT_READ | PROT_WRITE,
+                           /* low_4gb */ false,
+                           /* reuse */ false,
+                           &error_msg);
+  CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
+  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
 }
 
 FreeListSpace::~FreeListSpace() {}
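
Note: holding the MemMap by value inside LargeObject is what lets the
destructor's delete loop and the explicit delete in Free() disappear: erasing
an entry from large_objects_ destroys the map and releases the mapping. A
small self-contained analogue (the Region type is hypothetical):

    #include <iostream>
    #include <map>
    #include <utility>

    // Hypothetical RAII mapping: releases its range on destruction.
    class Region {
     public:
      Region() = default;
      explicit Region(int id) : id_(id) {}
      Region(Region&& other) noexcept : id_(other.id_) { other.id_ = 0; }
      Region& operator=(Region&& other) noexcept {
        id_ = other.id_;
        other.id_ = 0;
        return *this;
      }
      ~Region() {
        if (id_ != 0) {
          std::cout << "unmapping region " << id_ << "\n";  // munmap() here
        }
      }

     private:
      int id_ = 0;
    };

    int main() {
      std::map<int, Region> regions;
      regions.emplace(1, Region(1));
      // No manual delete: erase() destroys the Region, releasing the mapping.
      regions.erase(1);
      return 0;
    }
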
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index f37d814..b69bd91 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -148,7 +148,7 @@
 
  protected:
   struct LargeObject {
-    MemMap* mem_map;
+    MemMap mem_map;
     bool is_zygote;
   };
   explicit LargeObjectMapSpace(const std::string& name);
@@ -182,7 +182,7 @@
   std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
 
  protected:
-  FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
+  FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
   size_t GetSlotIndexForAddress(uintptr_t address) const {
     DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
     return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
@@ -210,9 +210,9 @@
 
   // There is no footer for any allocations at the end of the space, so we keep track of how much
   // free space there is at the end manually.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
   // Side table for allocation info, one per page.
-  std::unique_ptr<MemMap> allocation_info_map_;
+  MemMap allocation_info_map_;
   AllocationInfo* allocation_info_;
 
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 6936fdc..91e0ce8 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -40,19 +40,26 @@
 
 size_t MallocSpace::bitmap_index_ = 0;
 
-MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
-                         uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
-                         bool create_bitmaps, bool can_move_objects, size_t starting_size,
+MallocSpace::MallocSpace(const std::string& name,
+                         MemMap&& mem_map,
+                         uint8_t* begin,
+                         uint8_t* end,
+                         uint8_t* limit,
+                         size_t growth_limit,
+                         bool create_bitmaps,
+                         bool can_move_objects,
+                         size_t starting_size,
                          size_t initial_size)
-    : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
+    : ContinuousMemMapAllocSpace(
+        name, std::move(mem_map), begin, end, limit, kGcRetentionPolicyAlwaysCollect),
       recent_free_pos_(0), lock_("allocation space lock", kAllocSpaceLock),
       growth_limit_(growth_limit), can_move_objects_(can_move_objects),
       starting_size_(starting_size), initial_size_(initial_size) {
   if (create_bitmaps) {
     size_t bitmap_index = bitmap_index_++;
     static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
-    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
-    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.Begin()), kGcCardSize);
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.End()), kGcCardSize);
     live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
         Begin(), NonGrowthLimitCapacity()));
@@ -70,8 +77,12 @@
   }
 }
 
-MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                                  size_t* growth_limit, size_t* capacity, uint8_t* requested_begin) {
+MemMap MallocSpace::CreateMemMap(const std::string& name,
+                                 size_t starting_size,
+                                 size_t* initial_size,
+                                 size_t* growth_limit,
+                                 size_t* capacity,
+                                 uint8_t* requested_begin) {
   // Sanity check arguments
   if (starting_size > *initial_size) {
     *initial_size = starting_size;
@@ -80,13 +91,13 @@
     LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
         << PrettySize(*initial_size) << ") is larger than its capacity ("
         << PrettySize(*growth_limit) << ")";
-    return nullptr;
+    return MemMap::Invalid();
   }
   if (*growth_limit > *capacity) {
     LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
         << PrettySize(*growth_limit) << ") is larger than the capacity ("
         << PrettySize(*capacity) << ")";
-    return nullptr;
+    return MemMap::Invalid();
   }
 
   // Page align growth limit and capacity which will be used to manage mmapped storage
@@ -94,9 +105,14 @@
   *capacity = RoundUp(*capacity, kPageSize);
 
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
-                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
-  if (mem_map == nullptr) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        requested_begin,
+                                        *capacity,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
                << PrettySize(*capacity) << ": " << error_msg;
   }
@@ -194,18 +210,24 @@
   VLOG(heap) << "Capacity " << PrettySize(capacity);
   // Remap the tail.
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(), alloc_space_name,
-                                                          PROT_READ | PROT_WRITE, &error_msg));
-  CHECK(mem_map.get() != nullptr) << error_msg;
-  void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
-                                    low_memory_mode);
+  MemMap mem_map = GetMemMap()->RemapAtEnd(
+      End(), alloc_space_name, PROT_READ | PROT_WRITE, &error_msg);
+  CHECK(mem_map.IsValid()) << error_msg;
+  void* allocator =
+      CreateAllocator(End(), starting_size_, initial_size_, capacity, low_memory_mode);
   // Protect memory beyond the initial size.
-  uint8_t* end = mem_map->Begin() + starting_size_;
+  uint8_t* end = mem_map.Begin() + starting_size_;
   if (capacity > initial_size_) {
     CheckedCall(mprotect, alloc_space_name, end, capacity - initial_size_, PROT_NONE);
   }
-  *out_malloc_space = CreateInstance(mem_map.release(), alloc_space_name, allocator, End(), end,
-                                     limit_, growth_limit, CanMoveObjects());
+  *out_malloc_space = CreateInstance(std::move(mem_map),
+                                     alloc_space_name,
+                                     allocator,
+                                     End(),
+                                     end,
+                                     limit_,
+                                     growth_limit,
+                                     CanMoveObjects());
   SetLimit(End());
   live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
   CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
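
RemapAtEnd() now hands the tail back as a by-value MemMap; internally it can rely on the Linux mmap() guarantee that a MAP_FIXED mapping discards the overlapped pages of any existing mapping, so no separate munmap() is needed. A hedged sketch of a split, where `head` and `middle` are placeholder names:

    // `head` keeps [Begin(), middle); the returned map owns [middle, End()).
    std::string error_msg;
    MemMap tail = head.RemapAtEnd(middle, "tail", PROT_READ | PROT_WRITE, &error_msg);
    CHECK(tail.IsValid()) << error_msg;
    DCHECK_EQ(tail.Begin(), middle);
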
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index c1f4841..e4a6f15 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -113,9 +113,14 @@
 
   void SetGrowthLimit(size_t growth_limit);
 
-  virtual MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
-                                      uint8_t* begin, uint8_t* end, uint8_t* limit,
-                                      size_t growth_limit, bool can_move_objects) = 0;
+  virtual MallocSpace* CreateInstance(MemMap&& mem_map,
+                                      const std::string& name,
+                                      void* allocator,
+                                      uint8_t* begin,
+                                      uint8_t* end,
+                                      uint8_t* limit,
+                                      size_t growth_limit,
+                                      bool can_move_objects) = 0;
 
  // Splits this space into a zygote space and a new malloc space holding our unused memory. When true,
   // the low memory mode argument specifies that the heap wishes the created space to be more
@@ -137,12 +142,23 @@
   }
 
  protected:
-  MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
-              uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
-              size_t starting_size, size_t initial_size);
+  MallocSpace(const std::string& name,
+              MemMap&& mem_map,
+              uint8_t* begin,
+              uint8_t* end,
+              uint8_t* limit,
+              size_t growth_limit,
+              bool create_bitmaps,
+              bool can_move_objects,
+              size_t starting_size,
+              size_t initial_size);
 
-  static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                              size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);
+  static MemMap CreateMemMap(const std::string& name,
+                             size_t starting_size,
+                             size_t* initial_size,
+                             size_t* growth_limit,
+                             size_t* capacity,
+                             uint8_t* requested_begin);
 
  // When true, the low memory mode argument specifies that the heap wishes the created allocator to
   // be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index c022171..f1c1cb8 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -267,8 +267,8 @@
                       kMemoryToolRedZoneBytes,
                       kAdjustForRedzoneInAllocSize,
                       kUseObjSizeForUsable>::MemoryToolMallocSpace(
-                          MemMap* mem_map, size_t initial_size, Params... params)
-                          : S(mem_map, initial_size, params...) {
+                          MemMap&& mem_map, size_t initial_size, Params... params)
+                          : S(std::move(mem_map), initial_size, params...) {
   // Don't want to change the memory tool states of the mem map here as the allocator is already
   // initialized at this point and that may interfere with what the allocator does internally. Note
   // that the tail beyond the initial size is mprotected.
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index e53f009..32bd204 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -53,7 +53,7 @@
   size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
 
   template <typename... Params>
-  MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
+  MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
   virtual ~MemoryToolMallocSpace() {}
 
  private:
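
The variadic constructor above has to repeat std::move(mem_map) because a named rvalue reference is itself an lvalue; without the cast, the forwarding call would try to copy a move-only type and fail to compile. A self-contained sketch of the pattern with a stand-in move-only type (names here are illustrative, not ART code):

    #include <utility>

    struct Map {  // Stand-in for MemMap: moveable, not copyable.
      Map() = default;
      Map(Map&&) = default;
      Map(const Map&) = delete;
    };

    struct Base {
      explicit Base(Map&& m) : map_(std::move(m)) {}  // The actual move happens here.
      Map map_;
    };

    template <typename... Params>
    struct Wrapper : Base {
      explicit Wrapper(Map&& m, Params... params) : Base(std::move(m)) {}
    };

    int main() {
      Map m;
      Wrapper<> w(std::move(m));  // Each forwarding layer re-applies std::move.
    }
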
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 6d494fa..85e6919 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -45,60 +45,65 @@
 // Whether we check a region's live bytes count against the region bitmap.
 static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
 
-MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
-                                  uint8_t* requested_begin) {
+MemMap RegionSpace::CreateMemMap(const std::string& name,
+                                 size_t capacity,
+                                 uint8_t* requested_begin) {
   CHECK_ALIGNED(capacity, kRegionSize);
   std::string error_msg;
  // Ask for an additional kRegionSize of capacity so that we can align the map to kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
-  std::unique_ptr<MemMap> mem_map;
+  MemMap mem_map;
   while (true) {
-    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
-                                       requested_begin,
-                                       capacity + kRegionSize,
-                                       PROT_READ | PROT_WRITE,
-                                       true,
-                                       false,
-                                       &error_msg));
-    if (mem_map.get() != nullptr || requested_begin == nullptr) {
+    mem_map = MemMap::MapAnonymous(name.c_str(),
+                                   requested_begin,
+                                   capacity + kRegionSize,
+                                   PROT_READ | PROT_WRITE,
+                                   /* low_4gb */ true,
+                                   /* reuse */ false,
+                                   &error_msg);
+    if (mem_map.IsValid() || requested_begin == nullptr) {
       break;
     }
    // Retry without a specified begin address.
     requested_begin = nullptr;
   }
-  if (mem_map.get() == nullptr) {
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
     MemMap::DumpMaps(LOG_STREAM(ERROR));
-    return nullptr;
+    return MemMap::Invalid();
   }
-  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
-  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
-  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
-  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
+  CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
+  CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
+  CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
+  if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink it by
     // kRegionSize at the end.
-    mem_map->SetSize(capacity);
+    mem_map.SetSize(capacity);
   } else {
    // Got an unaligned map. Align both ends.
-    mem_map->AlignBy(kRegionSize);
+    mem_map.AlignBy(kRegionSize);
   }
-  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
-  CHECK_ALIGNED(mem_map->End(), kRegionSize);
-  CHECK_EQ(mem_map->Size(), capacity);
-  return mem_map.release();
+  CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
+  CHECK_ALIGNED(mem_map.End(), kRegionSize);
+  CHECK_EQ(mem_map.Size(), capacity);
+  return mem_map;
 }
 
-RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
-  return new RegionSpace(name, mem_map);
+RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) {
+  return new RegionSpace(name, std::move(mem_map));
 }
 
-RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
-    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map)
+    : ContinuousMemMapAllocSpace(name,
+                                 std::move(mem_map),
+                                 mem_map.Begin(),
+                                 mem_map.End(),
+                                 mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
       region_lock_("Region lock", kRegionSpaceRegionLock),
       time_(1U),
-      num_regions_(mem_map->Size() / kRegionSize),
+      num_regions_(mem_map_.Size() / kRegionSize),
       num_non_free_regions_(0U),
       num_evac_regions_(0U),
       max_peak_num_non_free_regions_(0U),
@@ -106,11 +111,11 @@
       current_region_(&full_region_),
       evac_region_(nullptr),
       cyclic_alloc_region_index_(0U) {
-  CHECK_ALIGNED(mem_map->Size(), kRegionSize);
-  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
+  CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
+  CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
   DCHECK_GT(num_regions_, 0U);
   regions_.reset(new Region[num_regions_]);
-  uint8_t* region_addr = mem_map->Begin();
+  uint8_t* region_addr = mem_map_.Begin();
   for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
     regions_[i].Init(i, region_addr, region_addr + kRegionSize);
   }
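
CreateMemMap() deliberately over-allocates by one kRegionSize so that a region-aligned range of the full capacity always fits inside the map: SetSize() trims the surplus tail when the base happens to be aligned, and AlignBy() shaves both ends otherwise. Note also that passing std::move(mem_map) alongside mem_map.Begin() in the constructor's initializer list is safe: binding an rvalue reference moves nothing, and the map is only moved inside MemMapSpace's member initializer, after the Begin()/End() arguments have been evaluated; the body then correctly switches to the already-initialized base member mem_map_. A sketch of the alignment arithmetic (illustrative only):

    // For any base address B, [B, B + capacity + kRegionSize) contains the
    // aligned range [RoundUp(B, kRegionSize), RoundUp(B, kRegionSize) + capacity).
    uintptr_t base = reinterpret_cast<uintptr_t>(mem_map.Begin());
    uintptr_t aligned = RoundUp(base, kRegionSize);
    DCHECK_LE(aligned + capacity, base + capacity + kRegionSize);
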
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index ef2e137..beedfd2 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -50,8 +50,8 @@
   // Create a region space mem map with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
-  static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
-  static RegionSpace* Create(const std::string& name, MemMap* mem_map);
+  static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
+  static RegionSpace* Create(const std::string& name, MemMap&& mem_map);
 
   // Allocate `num_bytes`, returns null if the space is full.
   mirror::Object* Alloc(Thread* self,
@@ -301,7 +301,7 @@
   }
 
  private:
-  RegionSpace(const std::string& name, MemMap* mem_map);
+  RegionSpace(const std::string& name, MemMap&& mem_map);
 
   template<bool kToSpaceOnly, typename Visitor>
   ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index b0402e4..10ff1c1 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -44,48 +44,88 @@
 // TODO: Fix
 // template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
 
-RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
-                             art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
-                             uint8_t* limit, size_t growth_limit, bool can_move_objects,
-                             size_t starting_size, bool low_memory_mode)
-    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+RosAllocSpace::RosAllocSpace(MemMap&& mem_map,
+                             size_t initial_size,
+                             const std::string& name,
+                             art::gc::allocator::RosAlloc* rosalloc,
+                             uint8_t* begin,
+                             uint8_t* end,
+                             uint8_t* limit,
+                             size_t growth_limit,
+                             bool can_move_objects,
+                             size_t starting_size,
+                             bool low_memory_mode)
+    : MallocSpace(name,
+                  std::move(mem_map),
+                  begin,
+                  end,
+                  limit,
+                  growth_limit,
+                  /* create_bitmaps */ true,
+                  can_move_objects,
                   starting_size, initial_size),
       rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
   CHECK(rosalloc != nullptr);
 }
 
-RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                               size_t starting_size, size_t initial_size,
-                                               size_t growth_limit, size_t capacity,
-                                               bool low_memory_mode, bool can_move_objects) {
-  DCHECK(mem_map != nullptr);
+RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap&& mem_map,
+                                               const std::string& name,
+                                               size_t starting_size,
+                                               size_t initial_size,
+                                               size_t growth_limit,
+                                               size_t capacity,
+                                               bool low_memory_mode,
+                                               bool can_move_objects) {
+  DCHECK(mem_map.IsValid());
 
   bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();
 
-  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
-                                                 capacity, low_memory_mode, running_on_memory_tool);
+  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map.Begin(),
+                                                 starting_size,
+                                                 initial_size,
+                                                 capacity,
+                                                 low_memory_mode,
+                                                 running_on_memory_tool);
   if (rosalloc == nullptr) {
     LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
     return nullptr;
   }
 
  // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
-  uint8_t* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map.Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
   }
 
   // Everything is set so record in immutable structure and leave
-  uint8_t* begin = mem_map->Begin();
+  uint8_t* begin = mem_map.Begin();
  // TODO: Fix RosAllocSpace to support ASan. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
   if (running_on_memory_tool) {
     return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
-        mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
-        can_move_objects, starting_size, low_memory_mode);
+        std::move(mem_map),
+        initial_size,
+        name,
+        rosalloc,
+        begin,
+        end,
+        begin + capacity,
+        growth_limit,
+        can_move_objects,
+        starting_size,
+        low_memory_mode);
   } else {
-    return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
-                             growth_limit, can_move_objects, starting_size, low_memory_mode);
+    return new RosAllocSpace(std::move(mem_map),
+                             initial_size,
+                             name,
+                             rosalloc,
+                             begin,
+                             end,
+                             begin + capacity,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size,
+                             low_memory_mode);
   }
 }
 
@@ -111,16 +151,21 @@
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = Heap::kDefaultStartingSize;
-  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
-                                 requested_begin);
-  if (mem_map == nullptr) {
+  MemMap mem_map =
+      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
     return nullptr;
   }
 
-  RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
-                                          growth_limit, capacity, low_memory_mode,
+  RosAllocSpace* space = CreateFromMemMap(std::move(mem_map),
+                                          name,
+                                          starting_size,
+                                          initial_size,
+                                          growth_limit,
+                                          capacity,
+                                          low_memory_mode,
                                           can_move_objects);
   // We start out with only the initial size possibly containing objects.
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -175,18 +220,39 @@
   return result;
 }
 
-MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
-                                           void* allocator, uint8_t* begin, uint8_t* end,
-                                           uint8_t* limit, size_t growth_limit,
+MallocSpace* RosAllocSpace::CreateInstance(MemMap&& mem_map,
+                                           const std::string& name,
+                                           void* allocator,
+                                           uint8_t* begin,
+                                           uint8_t* end,
+                                           uint8_t* limit,
+                                           size_t growth_limit,
                                            bool can_move_objects) {
   if (Runtime::Current()->IsRunningOnMemoryTool()) {
     return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
-        mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
-        limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+        std::move(mem_map),
+        initial_size_,
+        name,
+        reinterpret_cast<allocator::RosAlloc*>(allocator),
+        begin,
+        end,
+        limit,
+        growth_limit,
+        can_move_objects,
+        starting_size_,
+        low_memory_mode_);
   } else {
-    return new RosAllocSpace(mem_map, initial_size_, name,
-                             reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
-                             growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+    return new RosAllocSpace(std::move(mem_map),
+                             initial_size_,
+                             name,
+                             reinterpret_cast<allocator::RosAlloc*>(allocator),
+                             begin,
+                             end,
+                             limit,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size_,
+                             low_memory_mode_);
   }
 }
 
@@ -364,8 +430,11 @@
   mark_bitmap_->Clear();
   SetEnd(begin_ + starting_size_);
   delete rosalloc_;
-  rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
-                             NonGrowthLimitCapacity(), low_memory_mode_,
+  rosalloc_ = CreateRosAlloc(mem_map_.Begin(),
+                             starting_size_,
+                             initial_size_,
+                             NonGrowthLimitCapacity(),
+                             low_memory_mode_,
                              Runtime::Current()->IsRunningOnMemoryTool());
   SetFootprintLimit(footprint_limit);
 }
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 4c17233..c630826 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -41,10 +41,14 @@
   static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
                                bool can_move_objects);
-  static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                         size_t starting_size, size_t initial_size,
-                                         size_t growth_limit, size_t capacity,
-                                         bool low_memory_mode, bool can_move_objects);
+  static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
+                                         const std::string& name,
+                                         size_t starting_size,
+                                         size_t initial_size,
+                                         size_t growth_limit,
+                                         size_t capacity,
+                                         bool low_memory_mode,
+                                         bool can_move_objects);
 
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
@@ -111,8 +115,13 @@
 
   void Clear() OVERRIDE;
 
-  MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
-                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
+  MallocSpace* CreateInstance(MemMap&& mem_map,
+                              const std::string& name,
+                              void* allocator,
+                              uint8_t* begin,
+                              uint8_t* end,
+                              uint8_t* limit,
+                              size_t growth_limit,
                               bool can_move_objects) OVERRIDE;
 
   uint64_t GetBytesAllocated() OVERRIDE;
@@ -147,9 +156,16 @@
   void DumpStats(std::ostream& os);
 
  protected:
-  RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
-                allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, uint8_t* limit,
-                size_t growth_limit, bool can_move_objects, size_t starting_size,
+  RosAllocSpace(MemMap&& mem_map,
+                size_t initial_size,
+                const std::string& name,
+                allocator::RosAlloc* rosalloc,
+                uint8_t* begin,
+                uint8_t* end,
+                uint8_t* limit,
+                size_t growth_limit,
+                bool can_move_objects,
+                size_t starting_size,
                 bool low_memory_mode);
 
  private:
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4f43d9f..4e173a8 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -377,30 +377,30 @@
   }
 
   MemMap* GetMemMap() {
-    return mem_map_.get();
+    return &mem_map_;
   }
 
   const MemMap* GetMemMap() const {
-    return mem_map_.get();
+    return &mem_map_;
   }
 
-  MemMap* ReleaseMemMap() {
-    return mem_map_.release();
+  MemMap ReleaseMemMap() {
+    return std::move(mem_map_);
   }
 
  protected:
   MemMapSpace(const std::string& name,
-              MemMap* mem_map,
+              MemMap&& mem_map,
               uint8_t* begin,
               uint8_t* end,
               uint8_t* limit,
               GcRetentionPolicy gc_retention_policy)
       : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
-        mem_map_(mem_map) {
+        mem_map_(std::move(mem_map)) {
   }
 
   // Underlying storage of the space
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
@@ -451,9 +451,13 @@
   std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
   std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
 
-  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
-                             uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
-      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
+  ContinuousMemMapAllocSpace(const std::string& name,
+                             MemMap&& mem_map,
+                             uint8_t* begin,
+                             uint8_t* end,
+                             uint8_t* limit,
+                             GcRetentionPolicy gc_retention_policy)
+      : MemMapSpace(name, std::move(mem_map), begin, end, limit, gc_retention_policy) {
   }
 
  private:
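
ReleaseMemMap() now transfers ownership by moving the member out instead of unique_ptr::release(); the space is left holding a moved-from map, which under this patch's invariants should read as invalid rather than dangle. A caller-side sketch (assumption: a moved-from MemMap reports !IsValid()):

    MemMap released = space->ReleaseMemMap();  // `space` is a placeholder MemMapSpace*.
    DCHECK(released.IsValid());
    DCHECK(!space->GetMemMap()->IsValid());  // Assumption: moved-from maps become invalid.
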
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 8c73ef9..ed85b06 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -41,7 +41,8 @@
   size_t* const objects_allocated_;
 };
 
-ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
+ZygoteSpace* ZygoteSpace::Create(const std::string& name,
+                                 MemMap&& mem_map,
                                  accounting::ContinuousSpaceBitmap* live_bitmap,
                                  accounting::ContinuousSpaceBitmap* mark_bitmap) {
   DCHECK(live_bitmap != nullptr);
@@ -49,9 +50,9 @@
   size_t objects_allocated = 0;
   CountObjectsAllocated visitor(&objects_allocated);
   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map->Begin()),
-                                reinterpret_cast<uintptr_t>(mem_map->End()), visitor);
-  ZygoteSpace* zygote_space = new ZygoteSpace(name, mem_map, objects_allocated);
+  live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map.Begin()),
+                                reinterpret_cast<uintptr_t>(mem_map.End()), visitor);
+  ZygoteSpace* zygote_space = new ZygoteSpace(name, std::move(mem_map), objects_allocated);
   CHECK(zygote_space->live_bitmap_.get() == nullptr);
   CHECK(zygote_space->mark_bitmap_.get() == nullptr);
   zygote_space->live_bitmap_.reset(live_bitmap);
@@ -64,8 +65,12 @@
   UNREACHABLE();
 }
 
-ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
-    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ZygoteSpace::ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated)
+    : ContinuousMemMapAllocSpace(name,
+                                 std::move(mem_map),
+                                 mem_map.Begin(),
+                                 mem_map.End(),
+                                 mem_map.End(),
                                  kGcRetentionPolicyFullCollect),
       objects_allocated_(objects_allocated) {
 }
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 6fe21d9..200c79f 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -30,7 +30,8 @@
 class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
  public:
   // Returns the remaining storage in the out_map field.
-  static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
+  static ZygoteSpace* Create(const std::string& name,
+                             MemMap&& mem_map,
                              accounting::ContinuousSpaceBitmap* live_bitmap,
                              accounting::ContinuousSpaceBitmap* mark_bitmap)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -85,7 +86,7 @@
   }
 
  private:
-  ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated);
+  ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
   AtomicInteger objects_allocated_;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 950a54d..098db9f 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -78,14 +78,19 @@
   CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
 
   const size_t table_bytes = max_count * sizeof(IrtEntry);
-  table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
-                                            PROT_READ | PROT_WRITE, false, false, error_msg));
-  if (table_mem_map_.get() == nullptr && error_msg->empty()) {
+  table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
+                                        /* addr */ nullptr,
+                                        table_bytes,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        error_msg);
+  if (!table_mem_map_.IsValid() && error_msg->empty()) {
     *error_msg = "Unable to map memory for indirect ref table";
   }
 
-  if (table_mem_map_.get() != nullptr) {
-    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+  if (table_mem_map_.IsValid()) {
+    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
   } else {
     table_ = nullptr;
   }
@@ -125,7 +130,7 @@
 }
 
 bool IndirectReferenceTable::IsValid() const {
-  return table_mem_map_.get() != nullptr;
+  return table_mem_map_.IsValid();
 }
 
 // Holes:
@@ -217,20 +222,20 @@
   // Note: the above check also ensures that there is no overflow below.
 
   const size_t table_bytes = new_size * sizeof(IrtEntry);
-  std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
-                                                       nullptr,
-                                                       table_bytes,
-                                                       PROT_READ | PROT_WRITE,
-                                                       false,
-                                                       false,
-                                                       error_msg));
-  if (new_map == nullptr) {
+  MemMap new_map = MemMap::MapAnonymous("indirect ref table",
+                                        /* addr */ nullptr,
+                                        table_bytes,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        error_msg);
+  if (!new_map.IsValid()) {
     return false;
   }
 
-  memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
+  memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
   table_mem_map_ = std::move(new_map);
-  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
   max_entries_ = new_size;
 
   return true;
@@ -444,7 +449,7 @@
   ScopedTrace trace(__PRETTY_FUNCTION__);
   const size_t top_index = Capacity();
   auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
-  uint8_t* release_end = table_mem_map_->End();
+  uint8_t* release_end = table_mem_map_.End();
   madvise(release_start, release_end - release_start, MADV_DONTNEED);
 }
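
Resize() grows the table with the value-type idiom: map fresh storage, copy the live entries, then move-assign over the old member; the move assignment is what retires the previous mapping (assumption: MemMap's move assignment releases the overwritten target). Condensed from the hunk above:

    MemMap new_map = MemMap::MapAnonymous("indirect ref table",
                                          /* addr */ nullptr,
                                          new_size * sizeof(IrtEntry),
                                          PROT_READ | PROT_WRITE,
                                          /* low_4gb */ false,
                                          /* reuse */ false,
                                          error_msg);
    if (!new_map.IsValid()) {
      return false;
    }
    memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
    table_mem_map_ = std::move(new_map);  // The old mapping is released here.
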
 
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index d2093f2..8c63c00 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -27,6 +27,7 @@
 
 #include "base/bit_utils.h"
 #include "base/macros.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
@@ -41,8 +42,6 @@
 class Object;
 }  // namespace mirror
 
-class MemMap;
-
 // Maintain a table of indirect references.  Used for local/global JNI references.
 //
 // The table contains object references, where the strong (local/global) references are part of the
@@ -398,7 +397,7 @@
   IRTSegmentState segment_state_;
 
   // Mem map where we store the indirect refs.
-  std::unique_ptr<MemMap> table_mem_map_;
+  MemMap table_mem_map_;
   // bottom of the stack. Do not directly access the object references
   // in this as they are roots. Use Get() that has a read barrier.
   IrtEntry* table_;
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 74aa787..d4b51af 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -517,24 +517,23 @@
   result->SetZ(class_name == nullptr);
 }
 
-static std::unique_ptr<MemMap> FindAndExtractEntry(const std::string& jar_file,
-                                                   const char* entry_name,
-                                                   size_t* size,
-                                                   std::string* error_msg) {
+static MemMap FindAndExtractEntry(const std::string& jar_file,
+                                  const char* entry_name,
+                                  size_t* size,
+                                  std::string* error_msg) {
   CHECK(size != nullptr);
 
   std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(jar_file.c_str(), error_msg));
   if (zip_archive == nullptr) {
-    return nullptr;
+    return MemMap::Invalid();
   }
   std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(entry_name, error_msg));
   if (zip_entry == nullptr) {
-    return nullptr;
+    return MemMap::Invalid();
   }
-  std::unique_ptr<MemMap> tmp_map(
-      zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg));
-  if (tmp_map == nullptr) {
-    return nullptr;
+  MemMap tmp_map = zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg);
+  if (!tmp_map.IsValid()) {
+    return MemMap::Invalid();
   }
 
   // OK, from here everything seems fine.
@@ -577,18 +576,18 @@
     return;
   }
 
-  std::unique_ptr<MemMap> mem_map;
+  MemMap mem_map;
   size_t map_size;
   std::string last_error_msg;  // Only store the last message (we could concatenate).
 
   for (const std::string& jar_file : split) {
     mem_map = FindAndExtractEntry(jar_file, resource_cstr, &map_size, &last_error_msg);
-    if (mem_map != nullptr) {
+    if (mem_map.IsValid()) {
       break;
     }
   }
 
-  if (mem_map == nullptr) {
+  if (!mem_map.IsValid()) {
     // Didn't find it. There's a good chance this will be the same at runtime, but still
     // conservatively abort the transaction here.
     AbortTransactionOrFail(self,
@@ -607,9 +606,9 @@
     return;
   }
   // Copy in content.
-  memcpy(h_array->GetData(), mem_map->Begin(), map_size);
+  memcpy(h_array->GetData(), mem_map.Begin(), map_size);
    // Be proactive about releasing the memory.
-  mem_map.reset();
+  mem_map.Reset();
 
   // Create a ByteArrayInputStream.
   Handle<mirror::Class> h_class(hs.NewHandle(
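
For a value-type MemMap, Reset() plays the role that unique_ptr::reset() played before: it releases the mapping eagerly and leaves the object in the !IsValid() state. That matters here because the extracted resource may be large and the map is dead weight once its bytes are copied out. A two-line sketch:

    memcpy(dest, mem_map.Begin(), map_size);  // `dest` is a placeholder buffer.
    mem_map.Reset();                          // Unmap now; mem_map.IsValid() is false afterwards.
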
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index a8692a0..d9c7900 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -205,15 +205,16 @@
   // We could do PC-relative addressing to avoid this problem, but that
   // would require reserving code and data area before submitting, which
   // means more windows for the code memory to be RWX.
-  std::unique_ptr<MemMap> data_map(MemMap::MapAnonymous(
-      "data-code-cache", nullptr,
+  MemMap data_map = MemMap::MapAnonymous(
+      "data-code-cache",
+      /* addr */ nullptr,
       max_capacity,
       kProtData,
       /* low_4gb */ true,
       /* reuse */ false,
       &error_str,
-      use_ashmem));
-  if (data_map == nullptr) {
+      use_ashmem);
+  if (!data_map.IsValid()) {
     std::ostringstream oss;
     oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
     *error_msg = oss.str();
@@ -229,26 +230,23 @@
   size_t data_size = max_capacity / 2;
   size_t code_size = max_capacity - data_size;
   DCHECK_EQ(code_size + data_size, max_capacity);
-  uint8_t* divider = data_map->Begin() + data_size;
+  uint8_t* divider = data_map.Begin() + data_size;
 
-  MemMap* code_map = data_map->RemapAtEnd(
-      divider,
-      "jit-code-cache",
-      memmap_flags_prot_code | PROT_WRITE,
-      &error_str, use_ashmem);
-  if (code_map == nullptr) {
+  MemMap code_map = data_map.RemapAtEnd(
+      divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str, use_ashmem);
+  if (!code_map.IsValid()) {
     std::ostringstream oss;
     oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
     *error_msg = oss.str();
     return nullptr;
   }
-  DCHECK_EQ(code_map->Begin(), divider);
+  DCHECK_EQ(code_map.Begin(), divider);
   data_size = initial_capacity / 2;
   code_size = initial_capacity - data_size;
   DCHECK_EQ(code_size + data_size, initial_capacity);
   return new JitCodeCache(
-      code_map,
-      data_map.release(),
+      std::move(code_map),
+      std::move(data_map),
       code_size,
       data_size,
       max_capacity,
@@ -256,8 +254,8 @@
       memmap_flags_prot_code);
 }
 
-JitCodeCache::JitCodeCache(MemMap* code_map,
-                           MemMap* data_map,
+JitCodeCache::JitCodeCache(MemMap&& code_map,
+                           MemMap&& data_map,
                            size_t initial_code_capacity,
                            size_t initial_data_capacity,
                            size_t max_capacity,
@@ -266,8 +264,8 @@
     : lock_("Jit code cache", kJitCodeCacheLock),
       lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
-      code_map_(code_map),
-      data_map_(data_map),
+      code_map_(std::move(code_map)),
+      data_map_(std::move(data_map)),
       max_capacity_(max_capacity),
       current_capacity_(initial_code_capacity + initial_data_capacity),
       code_end_(initial_code_capacity),
@@ -287,8 +285,8 @@
       memmap_flags_prot_code_(memmap_flags_prot_code) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
-  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
-  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
+  code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
+  data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
 
   if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
     PLOG(FATAL) << "create_mspace_with_base failed";
@@ -298,13 +296,13 @@
 
   CheckedCall(mprotect,
               "mprotect jit code cache",
-              code_map_->Begin(),
-              code_map_->Size(),
+              code_map_.Begin(),
+              code_map_.Size(),
               memmap_flags_prot_code_);
   CheckedCall(mprotect,
               "mprotect jit data cache",
-              data_map_->Begin(),
-              data_map_->Size(),
+              data_map_.Begin(),
+              data_map_.Size(),
               kProtData);
 
   VLOG(jit) << "Created jit code cache: initial data size="
@@ -316,7 +314,7 @@
 JitCodeCache::~JitCodeCache() {}
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return code_map_->Begin() <= ptr && ptr < code_map_->End();
+  return code_map_.Begin() <= ptr && ptr < code_map_.End();
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -387,8 +385,8 @@
     CheckedCall(
         mprotect,
         "make code writable",
-        code_cache_->code_map_->Begin(),
-        code_cache_->code_map_->Size(),
+        code_cache_->code_map_.Begin(),
+        code_cache_->code_map_.Size(),
         code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
   }
 
@@ -397,8 +395,8 @@
     CheckedCall(
         mprotect,
         "make code protected",
-        code_cache_->code_map_->Begin(),
-        code_cache_->code_map_->Size(),
+        code_cache_->code_map_.Begin(),
+        code_cache_->code_map_.Size(),
         code_cache_->memmap_flags_prot_code_);
   }
 
@@ -1237,8 +1235,8 @@
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(code_map_->Begin()),
-          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(code_map_.Begin()),
+          reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1610,12 +1608,12 @@
   if (code_mspace_ == mspace) {
     size_t result = code_end_;
     code_end_ += increment;
-    return reinterpret_cast<void*>(result + code_map_->Begin());
+    return reinterpret_cast<void*>(result + code_map_.Begin());
   } else {
     DCHECK_EQ(data_mspace_, mspace);
     size_t result = data_end_;
     data_end_ += increment;
-    return reinterpret_cast<void*>(result + data_map_->Begin());
+    return reinterpret_cast<void*>(result + data_map_.Begin());
   }
 }
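
The code cache still carves both halves out of one anonymous reservation: data_map covers the low half, and RemapAtEnd() converts the high half into an independently protected code_map at `divider`. Since a MAP_FIXED mapping discards the overlapped part of the old mapping in one step, the address range never passes through an unmapped state where another thread could claim it. A sketch of the resulting layout, using names from this hunk:

    uint8_t* divider = data_map.Begin() + data_size;  // The low half stays data.
    MemMap code_map = data_map.RemapAtEnd(
        divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str, use_ashmem);
    // After the call (sketch):
    //   data_map: [old Begin(), divider)  kProtData
    //   code_map: [divider, old End())    memmap_flags_prot_code | PROT_WRITE, tightened later
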
 
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 632b45b..a4a0f8f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -28,6 +28,7 @@
 #include "base/atomic.h"
 #include "base/histogram.h"
 #include "base/macros.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "base/safe_map.h"
 
@@ -39,7 +40,6 @@
 class InlineCache;
 class IsMarkedVisitor;
 class JitJniStubTestHelper;
-class MemMap;
 class OatQuickMethodHeader;
 struct ProfileMethodInfo;
 class ProfilingInfo;
@@ -279,8 +279,8 @@
 
  private:
   // Take ownership of maps.
-  JitCodeCache(MemMap* code_map,
-               MemMap* data_map,
+  JitCodeCache(MemMap&& code_map,
+               MemMap&& data_map,
                size_t initial_code_capacity,
                size_t initial_data_capacity,
                size_t max_capacity,
@@ -396,9 +396,9 @@
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(lock_);
   // Mem map which holds code.
-  std::unique_ptr<MemMap> code_map_;
+  MemMap code_map_;
   // Mem map which holds data (stack maps and profiling info).
-  std::unique_ptr<MemMap> data_map_;
+  MemMap data_map_;
   // The opaque mspace for allocating code.
   void* code_mspace_ GUARDED_BY(lock_);
   // The opaque mspace for allocating data.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index b598df3..d49ebd1 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -163,33 +163,34 @@
   void operator=(const NullableScopedUtfChars&);
 };
 
-static std::unique_ptr<MemMap> AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
+static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
   if (end <= start) {
     ScopedObjectAccess soa(env);
     ThrowWrappedIOException("Bad range");
-    return nullptr;
+    return MemMap::Invalid();
   }
 
   std::string error_message;
   size_t length = static_cast<size_t>(end - start);
-  std::unique_ptr<MemMap> dex_mem_map(MemMap::MapAnonymous("DEX data",
-                                                           nullptr,
-                                                           length,
-                                                           PROT_READ | PROT_WRITE,
-                                                           /* low_4gb */ false,
-                                                           /* reuse */ false,
-                                                           &error_message));
-  if (dex_mem_map == nullptr) {
+  MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
+                                            /* addr */ nullptr,
+                                            length,
+                                            PROT_READ | PROT_WRITE,
+                                            /* low_4gb */ false,
+                                            /* reuse */ false,
+                                            &error_message);
+  if (!dex_mem_map.IsValid()) {
     ScopedObjectAccess soa(env);
     ThrowWrappedIOException("%s", error_message.c_str());
+    return MemMap::Invalid();
   }
   return dex_mem_map;
 }
 
-static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem_map) {
+static const DexFile* CreateDexFile(JNIEnv* env, MemMap&& dex_mem_map) {
   std::string location = StringPrintf("Anonymous-DexFile@%p-%p",
-                                      dex_mem_map->Begin(),
-                                      dex_mem_map->End());
+                                      dex_mem_map.Begin(),
+                                      dex_mem_map.End());
   std::string error_message;
   const ArtDexFileLoader dex_file_loader;
   std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
@@ -213,7 +214,7 @@
   return dex_file.release();
 }
 
-static jobject CreateSingleDexFileCookie(JNIEnv* env, std::unique_ptr<MemMap> data) {
+static jobject CreateSingleDexFileCookie(JNIEnv* env, MemMap&& data) {
   std::unique_ptr<const DexFile> dex_file(CreateDexFile(env, std::move(data)));
   if (dex_file.get() == nullptr) {
     DCHECK(env->ExceptionCheck());
@@ -236,14 +237,14 @@
     return nullptr;
   }
 
-  std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
-  if (dex_mem_map == nullptr) {
+  MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+  if (!dex_mem_map.IsValid()) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
 
   size_t length = static_cast<size_t>(end - start);
-  memcpy(dex_mem_map->Begin(), base_address, length);
+  memcpy(dex_mem_map.Begin(), base_address, length);
   return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
 }
 
@@ -252,13 +253,13 @@
                                              jbyteArray buffer,
                                              jint start,
                                              jint end) {
-  std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
-  if (dex_mem_map == nullptr) {
+  MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+  if (!dex_mem_map.IsValid()) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
 
-  auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
+  auto destination = reinterpret_cast<jbyte*>(dex_mem_map.Begin());
   env->GetByteArrayRegion(buffer, start, end - start, destination);
   return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
 }
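
Both entry points share the same move chain: AllocateDexMemoryMap() either returns a valid map or MemMap::Invalid() with an exception pending, the caller fills the map, and CreateSingleDexFileCookie() consumes it by value. A condensed sketch using the helpers from this file:

    MemMap map = AllocateDexMemoryMap(env, start, end);
    if (!map.IsValid()) {
      return nullptr;  // The helper already threw.
    }
    memcpy(map.Begin(), source, static_cast<size_t>(end - start));  // `source` is a placeholder.
    return CreateSingleDexFileCookie(env, std::move(map));  // `map` is consumed here.
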
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 58e16ed..c7daef8 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -956,7 +956,7 @@
   void* dlopen_handle_;  // TODO: Unique_ptr with custom deleter.
 
   // Dummy memory map objects corresponding to the regions mapped by dlopen.
-  std::vector<std::unique_ptr<MemMap>> dlopen_mmaps_;
+  std::vector<MemMap> dlopen_mmaps_;
 
   // The number of shared objects the linker told us about before loading. Used to
   // (optimistically) optimize the PreSetup stage (see comment there).
@@ -1122,8 +1122,8 @@
             uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
                 info->dlpi_phdr[i].p_vaddr);
             size_t memsz = info->dlpi_phdr[i].p_memsz;
-            MemMap* mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
-            context->dlopen_mmaps_->push_back(std::unique_ptr<MemMap>(mmap));
+            MemMap mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
+            context->dlopen_mmaps_->push_back(std::move(mmap));
           }
         }
         return 1;  // Stop iteration and return 1 from dl_iterate_phdr.
@@ -1131,7 +1131,7 @@
       return 0;  // Continue iteration and return 0 from dl_iterate_phdr when finished.
     }
     const uint8_t* const begin_;
-    std::vector<std::unique_ptr<MemMap>>* const dlopen_mmaps_;
+    std::vector<MemMap>* const dlopen_mmaps_;
     const size_t shared_objects_before;
     size_t shared_objects_seen;
   };
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index facebda..9248bb9 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -425,7 +425,7 @@
   low_4gb_arena_pool_.reset();
   arena_pool_.reset();
   jit_arena_pool_.reset();
-  protected_fault_page_.reset();
+  protected_fault_page_.Reset();
   MemMap::Shutdown();
 
   // TODO: acquire a static mutex on Runtime to avoid racing.
@@ -1162,18 +1162,18 @@
   {
     constexpr uintptr_t kSentinelAddr =
         RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
-    protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
-                                                     reinterpret_cast<uint8_t*>(kSentinelAddr),
-                                                     kPageSize,
-                                                     PROT_NONE,
-                                                     /* low_4g */ true,
-                                                     /* reuse */ false,
-                                                     /* error_msg */ nullptr));
-    if (protected_fault_page_ == nullptr) {
+    protected_fault_page_ = MemMap::MapAnonymous("Sentinel fault page",
+                                                 reinterpret_cast<uint8_t*>(kSentinelAddr),
+                                                 kPageSize,
+                                                 PROT_NONE,
+                                                 /* low_4gb */ true,
+                                                 /* reuse */ false,
+                                                 /* error_msg */ nullptr);
+    if (!protected_fault_page_.IsValid()) {
       LOG(WARNING) << "Could not reserve sentinel fault page";
-    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
+    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
       LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
-      protected_fault_page_.reset();
+      protected_fault_page_.Reset();
     }
   }
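
MapAnonymous() treats a non-null `addr` as a hint unless `reuse` is set, so the code above has to verify placement and Reset() the map when the kernel put it elsewhere; a sentinel page at the wrong address would be worse than none at all. A condensed sketch of the request-and-verify pattern:

    MemMap page = MemMap::MapAnonymous("Sentinel fault page",
                                       reinterpret_cast<uint8_t*>(kSentinelAddr),
                                       kPageSize,
                                       PROT_NONE,
                                       /* low_4gb */ true,
                                       /* reuse */ false,
                                       /* error_msg */ nullptr);
    if (page.IsValid() && reinterpret_cast<uintptr_t>(page.Begin()) != kSentinelAddr) {
      page.Reset();  // Wrong placement; drop the mapping rather than keep a stray page.
    }
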
 
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a98e8a8..f98d7b9 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -29,6 +29,7 @@
 
 #include "arch/instruction_set.h"
 #include "base/macros.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file_types.h"
@@ -86,7 +87,6 @@
 class IsMarkedVisitor;
 class JavaVMExt;
 class LinearAlloc;
-class MemMap;
 class MonitorList;
 class MonitorPool;
 class NullPointerHandler;
@@ -1090,7 +1090,7 @@
   std::atomic<uint32_t> deoptimization_counts_[
       static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
 
-  std::unique_ptr<MemMap> protected_fault_page_;
+  MemMap protected_fault_page_;
 
   uint32_t verifier_logging_threshold_ms_;
 
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 794ac19..4c4dcd8 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -190,19 +190,19 @@
 
 TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
   std::string error_msg;
-  std::unique_ptr<MemMap> stack(MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
-                                                     nullptr,
-                                                     128 * kPageSize,  // Just some small stack.
-                                                     PROT_READ | PROT_WRITE,
-                                                     false,
-                                                     false,
-                                                     &error_msg));
-  ASSERT_FALSE(stack == nullptr) << error_msg;
+  MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
+                                      /* addr */ nullptr,
+                                      128 * kPageSize,  // Just some small stack.
+                                      PROT_READ | PROT_WRITE,
+                                      /* low_4gb */ false,
+                                      /* reuse */ false,
+                                      &error_msg);
+  ASSERT_TRUE(stack.IsValid()) << error_msg;
 
   const char* reason = "ThreadLifecycleCallback test thread";
   pthread_attr_t attr;
   CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
-  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack->Begin(), stack->Size()), reason);
+  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack.Begin(), stack.Size()), reason);
   pthread_t pthread;
   CHECK_PTHREAD_CALL(pthread_create,
                      (&pthread,
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 26ca190..2a69bc6 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -46,19 +46,24 @@
   // Add an inaccessible page to catch stack overflow.
   stack_size += kPageSize;
   std::string error_msg;
-  stack_.reset(MemMap::MapAnonymous(name.c_str(), nullptr, stack_size, PROT_READ | PROT_WRITE,
-                                    false, false, &error_msg));
-  CHECK(stack_.get() != nullptr) << error_msg;
-  CHECK_ALIGNED(stack_->Begin(), kPageSize);
+  stack_ = MemMap::MapAnonymous(name.c_str(),
+                                /* addr */ nullptr,
+                                stack_size,
+                                PROT_READ | PROT_WRITE,
+                                /* low_4gb */ false,
+                                /* reuse */ false,
+                                &error_msg);
+  CHECK(stack_.IsValid()) << error_msg;
+  CHECK_ALIGNED(stack_.Begin(), kPageSize);
   CheckedCall(mprotect,
               "mprotect bottom page of thread pool worker stack",
-              stack_->Begin(),
+              stack_.Begin(),
               kPageSize,
               PROT_NONE);
   const char* reason = "new thread pool worker thread";
   pthread_attr_t attr;
   CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
-  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_->Begin(), stack_->Size()), reason);
+  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_.Begin(), stack_.Size()), reason);
   CHECK_PTHREAD_CALL(pthread_create, (&pthread_, &attr, &Callback, this), reason);
   CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
 }
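
The worker stack allocates one extra page and protects it PROT_NONE at the low end, so a stack overflow faults deterministically instead of silently corrupting adjacent memory. The same technique in self-contained POSIX form (standalone sketch, not ART code):

    #include <pthread.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstddef>

    static void* Worker(void*) { return nullptr; }

    int main() {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      const size_t stack_size = 128 * page + page;  // Usable stack plus one guard page.
      void* stack = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(stack != MAP_FAILED);
      // Stacks grow down, so the lowest page becomes the overflow tripwire.
      int rc = mprotect(stack, page, PROT_NONE);
      assert(rc == 0);
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setstack(&attr, stack, stack_size);
      pthread_t t;
      pthread_create(&t, &attr, Worker, nullptr);
      pthread_join(t, nullptr);
      pthread_attr_destroy(&attr);
      munmap(stack, stack_size);
      return 0;
    }
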
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 2784953..98a1193 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -53,8 +53,8 @@
   static const size_t kDefaultStackSize = 1 * MB;
 
   size_t GetStackSize() const {
-    DCHECK(stack_.get() != nullptr);
-    return stack_->Size();
+    DCHECK(stack_.IsValid());
+    return stack_.Size();
   }
 
   virtual ~ThreadPoolWorker();
@@ -71,7 +71,7 @@
 
   ThreadPool* const thread_pool_;
   const std::string name_;
-  std::unique_ptr<MemMap> stack_;
+  MemMap stack_;
   pthread_t pthread_;
   Thread* thread_;
 
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 32aa86d..ad34584 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -144,7 +144,7 @@
     mmap_reuse = false;
   }
   CHECK(!mmap_reuse || mmap_addr != nullptr);
-  std::unique_ptr<MemMap> mmap(MemMap::MapFileAtAddress(
+  MemMap mmap = MemMap::MapFileAtAddress(
       mmap_addr,
       vdex_length,
       (writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
@@ -154,13 +154,13 @@
       low_4gb,
       mmap_reuse,
       vdex_filename.c_str(),
-      error_msg));
-  if (mmap == nullptr) {
+      error_msg);
+  if (!mmap.IsValid()) {
     *error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
     return nullptr;
   }
 
-  std::unique_ptr<VdexFile> vdex(new VdexFile(mmap.release()));
+  std::unique_ptr<VdexFile> vdex(new VdexFile(std::move(mmap)));
   if (!vdex->IsValid()) {
     *error_msg = "Vdex file is not valid";
     return nullptr;
@@ -175,7 +175,7 @@
                     /* decompile_return_instruction */ false);
     // Update the quickening info size to pretend there isn't any.
     size_t offset = vdex->GetDexSectionHeaderOffset();
-    reinterpret_cast<DexSectionHeader*>(vdex->mmap_->Begin() + offset)->quickening_info_size_ = 0;
+    reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
   }
 
   *error_msg = "Success";
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 866a57e..a39ec31 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -153,7 +153,7 @@
   typedef uint32_t VdexChecksum;
   using QuickeningTableOffsetType = uint32_t;
 
-  explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
+  explicit VdexFile(MemMap&& mmap) : mmap_(std::move(mmap)) {}
 
   // Returns nullptr if the vdex file cannot be opened or is not valid.
  // The mmap_* parameters can be left empty (nullptr/0/false) to allocate at a random address.
@@ -215,9 +215,9 @@
                          error_msg);
   }
 
-  const uint8_t* Begin() const { return mmap_->Begin(); }
-  const uint8_t* End() const { return mmap_->End(); }
-  size_t Size() const { return mmap_->Size(); }
+  const uint8_t* Begin() const { return mmap_.Begin(); }
+  const uint8_t* End() const { return mmap_.End(); }
+  size_t Size() const { return mmap_.Size(); }
 
   const VerifierDepsHeader& GetVerifierDepsHeader() const {
     return *reinterpret_cast<const VerifierDepsHeader*>(Begin());
@@ -260,7 +260,7 @@
   }
 
   bool IsValid() const {
-    return mmap_->Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
+    return mmap_.Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
   }
 
   // This method is for iterating over the dex files in the vdex. If `cursor` is null,
@@ -328,7 +328,7 @@
     return DexBegin() + GetDexSectionHeader().GetDexSize();
   }
 
-  std::unique_ptr<MemMap> mmap_;
+  MemMap mmap_;
 
   DISALLOW_COPY_AND_ASSIGN(VdexFile);
 };