Merge "Fix a double unmap issue in MemMap::UnMapAtEnd()." into dalvik-dev
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 8c13d79..9ebc16a 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -287,8 +287,6 @@
   size_t size = RoundUp(Size(), kPageSize);
   // Trim the heap so that we minimize the size of the Zygote space.
   Trim();
-  // Trim our mem-map to free unused pages.
-  GetMemMap()->UnMapAtEnd(end_);
   // TODO: Not hardcode these in?
   const size_t starting_size = kPageSize;
   const size_t initial_size = 2 * MB;
@@ -308,9 +306,10 @@
   VLOG(heap) << "Size " << GetMemMap()->Size();
   VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
   VLOG(heap) << "Capacity " << PrettySize(capacity);
+  // Remap the tail.
   std::string error_msg;
-  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity,
-                                                 PROT_READ | PROT_WRITE, &error_msg));
+  UniquePtr<MemMap> mem_map(GetMemMap()->RemapAtEnd(end_, alloc_space_name,
+                                                    PROT_READ | PROT_WRITE, &error_msg));
   CHECK(mem_map.get() != nullptr) << error_msg;
   void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
   // Protect memory beyond the initial size.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 00316f7..70d3457 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -170,12 +170,75 @@
   }
 };
 
-void MemMap::UnMapAtEnd(byte* new_end) {
+MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+                           std::string* error_msg) {
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
-  size_t unmap_size = End() - new_end;
-  munmap(new_end, unmap_size);
-  size_ -= unmap_size;
+  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  DCHECK(IsAligned<kPageSize>(begin_));
+  DCHECK(IsAligned<kPageSize>(base_begin_));
+  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
+  DCHECK(IsAligned<kPageSize>(new_end));
+  byte* old_end = begin_ + size_;
+  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
+  byte* new_base_end = new_end;
+  DCHECK_LE(new_base_end, old_base_end);
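+  // If there is no tail to remap, return an empty placeholder map.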
+  if (new_base_end == old_base_end) {
+    return new MemMap(tail_name, NULL, 0, NULL, 0, tail_prot);
+  }
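+  // Shrink this mapping; the tail is remapped below.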
+  size_ = new_end - reinterpret_cast<byte*>(begin_);
+  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
+  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  size_t tail_size = old_end - new_end;
+  byte* tail_base_begin = new_base_end;
+  size_t tail_base_size = old_base_end - new_base_end;
+  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
+  DCHECK(IsAligned<kPageSize>(tail_base_size));
+
+#ifdef USE_ASHMEM
+  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
+  // prefixed "dalvik-".
+  std::string debug_friendly_name("dalvik-");
+  debug_friendly_name += tail_name;
+  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
+  int flags = MAP_PRIVATE;
+  if (fd.get() == -1) {
+    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
+                              tail_name, strerror(errno));
+    return nullptr;
+  }
+#else
+  ScopedFd fd(-1);
+  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#endif
+
+  // Unmap/map the tail region.
+  int result = munmap(tail_base_begin, tail_base_size);
+  if (result == -1) {
+    std::string maps;
+    ReadFileToString("/proc/self/maps", &maps);
+    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
+                              tail_base_begin, tail_base_size, name_.c_str(),
+                              maps.c_str());
+    return nullptr;
+  }
+  // Don't cause memory allocation between the munmap and the mmap
+  // calls. Otherwise, libc (or something else) might take this memory
+  // region. Note this isn't perfect, as there's no way to prevent
+  // other threads from trying to take this memory region here.
+  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
+                                              flags, fd.get(), 0));
+  if (actual == MAP_FAILED) {
+    std::string maps;
+    ReadFileToString("/proc/self/maps", &maps);
+    *error_msg = StringPrintf("anonymous mmap(%p, %zd, %x, %x, %d, 0) failed\n%s",
+                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
+                              maps.c_str());
+    return nullptr;
+  }
+  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
 }
 
 bool MemMap::Protect(int prot) {
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 919463c..2c65833 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -84,8 +84,10 @@
     return Begin() <= addr && addr < End();
   }
 
-  // Trim by unmapping pages at the end of the map.
-  void UnMapAtEnd(byte* new_end);
+  // Unmap the pages at the end of this map and remap them to create another
+  // memory map that is returned; on failure, returns nullptr and sets *error_msg.
+  MemMap* RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+                     std::string* error_msg);
 
  private:
   MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
@@ -96,8 +97,10 @@
   size_t size_;  // Length of data.
 
   void* const base_begin_;  // Page-aligned base address.
-  const size_t base_size_;  // Length of mapping.
+  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (e.g. for the Zygote space).
   int prot_;  // Protection of the map.
+
+  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
 
 }  // namespace art
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 09de320..cf2c9d0 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -21,7 +21,15 @@
 
 namespace art {
 
-class MemMapTest : public testing::Test {};
+class MemMapTest : public testing::Test {
+ public:
+  byte* BaseBegin(MemMap* mem_map) {
+    return reinterpret_cast<byte*>(mem_map->base_begin_);
+  }
+  size_t BaseSize(MemMap* mem_map) {
+    return mem_map->base_size_;
+  }
+};
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
   std::string error_msg;
@@ -34,4 +42,57 @@
   ASSERT_TRUE(error_msg.empty());
 }
 
+TEST_F(MemMapTest, RemapAtEnd) {
+  std::string error_msg;
+  // Cast the page size to size_t.
+  const size_t page_size = static_cast<size_t>(kPageSize);
+  // Map a two-page memory region.
+  MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+                                    NULL,
+                                    2 * page_size,
+                                    PROT_READ | PROT_WRITE,
+                                    &error_msg);
+  // Check its state and write to it.
+  byte* base0 = m0->Begin();
+  ASSERT_TRUE(base0 != NULL) << error_msg;
+  size_t size0 = m0->Size();
+  EXPECT_EQ(m0->Size(), 2 * page_size);
+  EXPECT_EQ(BaseBegin(m0), base0);
+  EXPECT_EQ(BaseSize(m0), size0);
+  memset(base0, 42, 2 * page_size);
+  // Remap the latter half into a second MemMap.
+  MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
+                              "MemMapTest_RemapAtEndTest_map1",
+                              PROT_READ | PROT_WRITE,
+                              &error_msg);
+  // Check the states of the two maps.
+  EXPECT_EQ(m0->Begin(), base0) << error_msg;
+  EXPECT_EQ(m0->Size(), page_size);
+  EXPECT_EQ(BaseBegin(m0), base0);
+  EXPECT_EQ(BaseSize(m0), page_size);
+  byte* base1 = m1->Begin();
+  size_t size1 = m1->Size();
+  EXPECT_EQ(base1, base0 + page_size);
+  EXPECT_EQ(size1, page_size);
+  EXPECT_EQ(BaseBegin(m1), base1);
+  EXPECT_EQ(BaseSize(m1), size1);
+  // Write to the second region.
+  memset(base1, 43, page_size);
+  // Check the contents of the two regions.
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base0[i], 42);
+  }
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base1[i], 43);
+  }
+  // Unmap the first region.
+  delete m0;
+  // Make sure the second region is still accessible after the first
+  // region is unmapped.
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base1[i], 43);
+  }
+  delete m1;
+}
+
 }  // namespace art