Add helpers to allocate JIT memory in zygote.

These helpers will seal the fd and prevent any new writable
mappings from being made.

Use memfd when available, falling back to ashmem if not. Since ashmem
is scheduled for removal, implement the ashmem path through palette.

Bug: 119800099
Test: jit_memory_region_test.cc
Change-Id: Id32b6c52a2ec681295ea7eca5b77ab342c78b469
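
Illustrative sketch (not part of the patch): a standalone program showing the
memfd sealing mechanism these helpers rely on. It assumes a Linux kernel >= 5.1
(for F_SEAL_FUTURE_WRITE) and a libc that declares memfd_create() in
<sys/mman.h>; the region name "jit-zygote-cache-demo" is made up for the demo.
An existing writable mapping keeps working after the seal is applied, while new
writable mappings of the fd are rejected.

  // Standalone demo, not ART code.
  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  #ifndef F_SEAL_FUTURE_WRITE
  #define F_SEAL_FUTURE_WRITE 0x0010  // Not defined by older uapi headers (Linux >= 5.1).
  #endif

  int main() {
    const size_t size = 4096;
    int fd = memfd_create("jit-zygote-cache-demo", MFD_ALLOW_SEALING);
    if (fd == -1) { perror("memfd_create"); return 1; }
    if (ftruncate(fd, size) != 0) { perror("ftruncate"); return 1; }

    // The zygote creates its writable mapping before sealing; it stays writable
    // afterwards because F_SEAL_FUTURE_WRITE only affects future mappings.
    char* writable = static_cast<char*>(
        mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
    if (writable == MAP_FAILED) { perror("mmap"); return 1; }

    if (fcntl(fd, F_ADD_SEALS,
              F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL | F_SEAL_FUTURE_WRITE) == -1) {
      perror("fcntl(F_ADD_SEALS)");
      return 1;
    }

    writable[0] = 42;  // Still allowed: the mapping existed before the seal.

    // New writable mappings of the sealed fd are rejected (EPERM)...
    void* denied = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    printf("writable mmap after seal: %s\n",
           denied == MAP_FAILED ? strerror(errno) : "unexpectedly succeeded");

    // ... but read-only mappings, e.g. for forked children, still work.
    void* readonly = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
    printf("read-only mmap after seal: %s\n",
           readonly == MAP_FAILED ? strerror(errno) : "ok");
    return 0;
  }
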
diff --git a/libartbase/base/memfd.h b/libartbase/base/memfd.h
index 329e3ee..b5945fb 100644
--- a/libartbase/base/memfd.h
+++ b/libartbase/base/memfd.h
@@ -17,6 +17,10 @@
 #ifndef ART_LIBARTBASE_BASE_MEMFD_H_
 #define ART_LIBARTBASE_BASE_MEMFD_H_
 
+#if defined(__BIONIC__)
+#include <linux/memfd.h>  // To access memfd flags.
+#endif
+
 namespace art {
 
 // Call memfd(2) if available on platform and return result. This call also makes a kernel version
diff --git a/libartpalette/apex/palette.cc b/libartpalette/apex/palette.cc
index 3570798..041fe7a 100644
--- a/libartpalette/apex/palette.cc
+++ b/libartpalette/apex/palette.cc
@@ -151,4 +151,16 @@
   return m(name, value);
 }
 
+enum PaletteStatus PaletteAshmemCreateRegion(const char* name, size_t size, int* fd) {
+  PaletteAshmemCreateRegionMethod m =
+      PaletteLoader::Instance().GetPaletteAshmemCreateRegionMethod();
+  return m(name, size, fd);
+}
+
+enum PaletteStatus PaletteAshmemSetProtRegion(int fd, int prot) {
+  PaletteAshmemSetProtRegionMethod m =
+      PaletteLoader::Instance().GetPaletteAshmemSetProtRegionMethod();
+  return m(fd, prot);
+}
+
 }  // extern "C"
diff --git a/libartpalette/include/palette/palette_method_list.h b/libartpalette/include/palette/palette_method_list.h
index 2738b57..1140399 100644
--- a/libartpalette/include/palette/palette_method_list.h
+++ b/libartpalette/include/palette/palette_method_list.h
@@ -29,6 +29,8 @@
   M(PaletteTraceEnabled, /*out*/int32_t* enabled)                           \
   M(PaletteTraceBegin, const char* name)                                    \
   M(PaletteTraceEnd)                                                        \
-  M(PaletteTraceIntegerValue, const char* name, int32_t value)
+  M(PaletteTraceIntegerValue, const char* name, int32_t value)              \
+  M(PaletteAshmemCreateRegion, const char* name, size_t size, int* fd)      \
+  M(PaletteAshmemSetProtRegion, int fd, int prot)
 
 #endif  // ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_METHOD_LIST_H_
diff --git a/libartpalette/libartpalette.map.txt b/libartpalette/libartpalette.map.txt
index e589986..d2c90d5 100644
--- a/libartpalette/libartpalette.map.txt
+++ b/libartpalette/libartpalette.map.txt
@@ -25,6 +25,8 @@
     PaletteTraceBegin;
     PaletteTraceEnd;
     PaletteTraceIntegerValue;
+    PaletteAshmemCreateRegion;
+    PaletteAshmemSetProtRegion;
 
   local:
     *;
diff --git a/libartpalette/system/palette_android.cc b/libartpalette/system/palette_android.cc
index 0c9db9d..bb9841a 100644
--- a/libartpalette/system/palette_android.cc
+++ b/libartpalette/system/palette_android.cc
@@ -19,6 +19,10 @@
 #include "palette/palette.h"
 
 #include <errno.h>
+#include <fcntl.h>
+#include <linux/ashmem.h>
+#include <string.h>
+#include <sys/ioctl.h>
 #include <sys/resource.h>
 #include <sys/time.h>
 #include <unistd.h>
@@ -171,3 +172,41 @@
   ATRACE_INT(name, value);
   return PaletteStatus::kOkay;
 }
+
+enum PaletteStatus PaletteAshmemCreateRegion(const char* name, size_t size, int* fd) {
+  // We implement our own ashmem region creation, as the libcutils
+  // implementation does a binder call, and our only use of ashmem in ART is
+  // in the zygote, which cannot communicate with binder.
+  *fd = TEMP_FAILURE_RETRY(open("/dev/ashmem", O_RDWR | O_CLOEXEC));
+  if (*fd == -1) {
+    return PaletteStatus::kCheckErrno;
+  }
+
+  if (TEMP_FAILURE_RETRY(ioctl(*fd, ASHMEM_SET_SIZE, size)) < 0) {
+    goto error;
+  }
+
+  if (name != nullptr) {
+    char buf[ASHMEM_NAME_LEN] = {0};
+    strlcpy(buf, name, sizeof(buf));
+    if (TEMP_FAILURE_RETRY(ioctl(*fd, ASHMEM_SET_NAME, buf)) < 0) {
+      goto error;
+    }
+  }
+
+  return PaletteStatus::kOkay;
+
+error:
+  // Save errno before closing.
+  int save_errno = errno;
+  close(*fd);
+  errno = save_errno;
+  return PaletteStatus::kCheckErrno;
+}
+
+enum PaletteStatus PaletteAshmemSetProtRegion(int fd, int prot) {
+  if (TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_SET_PROT_MASK, prot)) < 0) {
+    return PaletteStatus::kCheckErrno;
+  }
+  return PaletteStatus::kOkay;
+}
diff --git a/libartpalette/system/palette_fake.cc b/libartpalette/system/palette_fake.cc
index 4cc00d0..dc0ee76 100644
--- a/libartpalette/system/palette_fake.cc
+++ b/libartpalette/system/palette_fake.cc
@@ -75,3 +75,15 @@
                                             int32_t value ATTRIBUTE_UNUSED) {
   return PaletteStatus::kOkay;
 }
+
+enum PaletteStatus PaletteAshmemCreateRegion(const char* name ATTRIBUTE_UNUSED,
+                                             size_t size ATTRIBUTE_UNUSED,
+                                             int* fd) {
+  *fd = -1;
+  return PaletteStatus::kNotSupported;
+}
+
+enum PaletteStatus PaletteAshmemSetProtRegion(int fd ATTRIBUTE_UNUSED,
+                                              int prot ATTRIBUTE_UNUSED) {
+  return PaletteStatus::kNotSupported;
+}
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 701d8c3..6b691fd 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -625,6 +625,7 @@
         "interpreter/safe_math_test.cc",
         "interpreter/unstarted_runtime_test.cc",
         "jdwp/jdwp_options_test.cc",
+        "jit/jit_memory_region_test.cc",
         "jit/profiling_info_test.cc",
         "jni/java_vm_ext_test.cc",
         "jni/jni_internal_test.cc",
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index ac02bd8..3c08ff6 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -16,6 +16,9 @@
 
 #include "jit_memory_region.h"
 
+#include <fcntl.h>
+#include <unistd.h>
+
 #include <android-base/unique_fd.h>
 #include "base/bit_utils.h"  // For RoundDown, RoundUp
 #include "base/globals.h"
@@ -25,6 +28,7 @@
 #include "gc/allocator/dlmalloc.h"
 #include "jit/jit_scoped_code_cache_write.h"
 #include "oat_quick_method_header.h"
+#include "palette/palette.h"
 
 using android::base::unique_fd;
 
@@ -323,5 +327,102 @@
   mspace_free(data_mspace_, data);
 }
 
+#if defined(__BIONIC__)
+
+static bool IsSealFutureWriteSupportedInternal() {
+  unique_fd fd(art::memfd_create("test_android_memfd", MFD_ALLOW_SEALING));
+  if (fd == -1) {
+    LOG(INFO) << "memfd_create failed: " << strerror(errno) << ", no memfd support.";
+    return false;
+  }
+
+  if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) == -1) {
+    LOG(INFO) << "fcntl(F_ADD_SEALS) failed: " << strerror(errno) << ", no memfd support.";
+    return false;
+  }
+
+  LOG(INFO) << "Using memfd for future sealing";
+  return true;
+}
+
+static bool IsSealFutureWriteSupported() {
+  static bool is_seal_future_write_supported = IsSealFutureWriteSupportedInternal();
+  return is_seal_future_write_supported;
+}
+
+int JitMemoryRegion::CreateZygoteMemory(size_t capacity, std::string* error_msg) {
+  // Check if kernel support exists, otherwise fall back to ashmem.
+  static const char* kRegionName = "/jit-zygote-cache";
+  if (IsSealFutureWriteSupported()) {
+    int fd = art::memfd_create(kRegionName, MFD_ALLOW_SEALING);
+    if (fd == -1) {
+      std::ostringstream oss;
+      oss << "Failed to create zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      return -1;
+    }
+
+    if (ftruncate(fd, capacity) != 0) {
+      std::ostringstream oss;
+      oss << "Failed to create zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      close(fd);
+      return -1;
+    }
+
+    return fd;
+  }
+
+  LOG(INFO) << "Falling back to ashmem implementation for JIT zygote mapping";
+
+  int fd;
+  PaletteStatus status = PaletteAshmemCreateRegion(kRegionName, capacity, &fd);
+  if (status != PaletteStatus::kOkay) {
+    CHECK_EQ(status, PaletteStatus::kCheckErrno);
+    std::ostringstream oss;
+    oss << "Failed to create zygote mapping: " << strerror(errno);
+    *error_msg = oss.str();
+    return -1;
+  }
+  return fd;
+}
+
+bool JitMemoryRegion::ProtectZygoteMemory(int fd, std::string* error_msg) {
+  if (IsSealFutureWriteSupported()) {
+    if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL | F_SEAL_FUTURE_WRITE)
+            == -1) {
+      std::ostringstream oss;
+      oss << "Failed to protect zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      return false;
+    }
+  } else {
+    PaletteStatus status = PaletteAshmemSetProtRegion(fd, PROT_READ);
+    if (status != PaletteStatus::kOkay) {
+      CHECK_EQ(status, PaletteStatus::kCheckErrno);
+      std::ostringstream oss;
+      oss << "Failed to protect zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      return false;
+    }
+  }
+  return true;
+}
+
+#else
+
+// When running on a non-bionic configuration, this is not supported.
+int JitMemoryRegion::CreateZygoteMemory(size_t capacity ATTRIBUTE_UNUSED,
+                                        std::string* error_msg ATTRIBUTE_UNUSED) {
+  return -1;
+}
+
+bool JitMemoryRegion::ProtectZygoteMemory(int fd ATTRIBUTE_UNUSED,
+                                          std::string* error_msg ATTRIBUTE_UNUSED) {
+  return true;
+}
+
+#endif
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index 5886587..b4a0b75 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -26,6 +26,8 @@
 namespace art {
 namespace jit {
 
+class TestZygoteMemory;
+
 // Alignment in bytes that will suit all architectures for JIT code cache allocations.  The
 // allocated block is used for method header followed by generated code. Allocations should be
 // aligned to avoid sharing cache lines between different allocations. The alignment should be
@@ -131,6 +133,9 @@
     return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
   }
 
+  static int CreateZygoteMemory(size_t capacity, std::string* error_msg);
+  static bool ProtectZygoteMemory(int fd, std::string* error_msg);
+
   // The initial capacity in bytes this code region starts with.
   size_t initial_capacity_ GUARDED_BY(Locks::jit_lock_);
 
@@ -167,6 +172,8 @@
 
   // The opaque mspace for allocating code.
   void* exec_mspace_ GUARDED_BY(Locks::jit_lock_);
+
+  friend class TestZygoteMemory;
 };
 
 }  // namespace jit
diff --git a/runtime/jit/jit_memory_region_test.cc b/runtime/jit/jit_memory_region_test.cc
new file mode 100644
index 0000000..25255bb
--- /dev/null
+++ b/runtime/jit/jit_memory_region_test.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit/jit_memory_region.h"
+
+#include <android-base/unique_fd.h>
+#include <gtest/gtest.h>
+#include <sys/mman.h>
+
+#include "base/globals.h"
+
+namespace art {
+namespace jit {
+
+class TestZygoteMemory : public testing::Test {
+ public:
+  void BasicTest() {
+#if defined(__BIONIC__)
+    std::string error_msg;
+    size_t size = kPageSize;
+    android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+    CHECK_NE(fd.get(), -1);
+
+    // Create a writable mapping.
+    int32_t* addr = reinterpret_cast<int32_t*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+    CHECK(addr != nullptr);
+    CHECK_NE(addr, MAP_FAILED);
+
+    // Test that we can write into the mapping.
+    addr[0] = 42;
+    CHECK_EQ(addr[0], 42);
+
+    // Protect the memory.
+    bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
+    CHECK(res);
+
+    // Test that we can still write into the mapping.
+    addr[0] = 2;
+    CHECK_EQ(addr[0], 2);
+
+    // Test that we cannot create another writable mapping.
+    int32_t* addr2 = reinterpret_cast<int32_t*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+    CHECK_EQ(addr2, MAP_FAILED);
+
+    // With the existing mapping, we can toggle read/write.
+    CHECK_EQ(mprotect(addr, size, PROT_READ), 0) << strerror(errno);
+    CHECK_EQ(mprotect(addr, size, PROT_READ | PROT_WRITE), 0) << strerror(errno);
+
+    // Test mremap with old_size = 0. From the man pages:
+    //    If the value of old_size is zero, and old_address refers to a shareable mapping
+    //    (see mmap(2) MAP_SHARED), then mremap() will create a new mapping of the same pages.
+    addr2 = reinterpret_cast<int32_t*>(mremap(addr, 0, kPageSize, MREMAP_MAYMOVE));
+    CHECK_NE(addr2, MAP_FAILED);
+
+    // Test that we can write into the remapped mapping.
+    addr2[0] = 3;
+    CHECK_EQ(addr2[0], 3);
+
+    addr2 = reinterpret_cast<int32_t*>(mremap(addr, kPageSize, 2 * kPageSize, MREMAP_MAYMOVE));
+    CHECK_NE(addr2, MAP_FAILED);
+
+    // Test that we can write into the remapped mapping.
+    addr2[0] = 4;
+    CHECK_EQ(addr2[0], 4);
+#endif
+  }
+};
+
+TEST_F(TestZygoteMemory, BasicTest) {
+  BasicTest();
+}
+
+}  // namespace jit
+}  // namespace art
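
For reference, a minimal sketch of the intended call sequence in the zygote,
mirroring the test above. It is illustrative only: the capacity, logging and
the SetUpZygoteJitMemory() wrapper are made up, and it assumes the two helpers
are reachable from the call site.

  // Sketch only (bionic path); not part of the patch.
  #include <sys/mman.h>

  #include <android-base/logging.h>
  #include <android-base/unique_fd.h>

  #include "base/globals.h"
  #include "jit/jit_memory_region.h"

  namespace art {
  namespace jit {

  static void SetUpZygoteJitMemory() {
    std::string error_msg;
    const size_t capacity = 64 * kPageSize;  // Illustrative capacity.
    android::base::unique_fd fd(
        JitMemoryRegion::CreateZygoteMemory(capacity, &error_msg));
    if (fd.get() == -1) {
      LOG(ERROR) << "Zygote JIT memory: " << error_msg;
      return;
    }

    // The zygote maps its writable view first; that mapping stays usable
    // after sealing.
    void* writable =
        mmap(nullptr, capacity, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
    CHECK_NE(writable, MAP_FAILED);

    // From here on, no new writable mapping of fd can be created, so forked
    // children can only map the region read-only (or read-execute).
    if (!JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg)) {
      LOG(ERROR) << "Zygote JIT memory: " << error_msg;
    }
  }

  }  // namespace jit
  }  // namespace art
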