JIT: Only toggle write permissions in single view.

The dual view already protects against inadvertent / malicious writes.

Test: test.py
Change-Id: I26d0d16355dac955ce8ef2856db33241b3f353ff
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 09980c8..7104cda 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -199,7 +199,7 @@
       // is never executable.
       std::string name = exec_cache_name + "-rw";
       non_exec_pages = MemMap::MapFile(exec_capacity,
-                                       kProtR,
+                                       kIsDebugBuild ? kProtR : kProtRW,
                                        base_flags,
                                        mem_fd,
                                        /* start= */ data_capacity,
@@ -285,13 +285,12 @@
   if (code_heap != nullptr) {
     // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
     // heap, will take and initialize pages in create_mspace_with_base().
-    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
-    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
+    {
+      ScopedCodeCacheWrite scc(*this);
+      exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
+    }
     CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
     SetFootprintLimit(current_capacity_);
-    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
-    // perform the update and there are no other times write access is required.
-    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
   } else {
     exec_mspace_ = nullptr;
     SetFootprintLimit(current_capacity_);
diff --git a/runtime/jit/jit_scoped_code_cache_write.h b/runtime/jit/jit_scoped_code_cache_write.h
index ea99bdf..e2adebf 100644
--- a/runtime/jit/jit_scoped_code_cache_write.h
+++ b/runtime/jit/jit_scoped_code_cache_write.h
@@ -38,20 +38,24 @@
   explicit ScopedCodeCacheWrite(const JitMemoryRegion& region)
       : ScopedTrace("ScopedCodeCacheWrite"),
         region_(region) {
-    ScopedTrace trace("mprotect all");
-    const MemMap* const updatable_pages = region.GetUpdatableCodeMapping();
-    if (updatable_pages != nullptr) {
-      int prot = region.HasDualCodeMapping() ? kProtRW : kProtRWX;
-      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    if (kIsDebugBuild || !region.HasDualCodeMapping()) {
+      ScopedTrace trace("mprotect all");
+      const MemMap* const updatable_pages = region.GetUpdatableCodeMapping();
+      if (updatable_pages != nullptr) {
+        int prot = region.HasDualCodeMapping() ? kProtRW : kProtRWX;
+        CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+      }
     }
   }
 
   ~ScopedCodeCacheWrite() {
-    ScopedTrace trace("mprotect code");
-    const MemMap* const updatable_pages = region_.GetUpdatableCodeMapping();
-    if (updatable_pages != nullptr) {
-      int prot = region_.HasDualCodeMapping() ? kProtR : kProtRX;
-      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    if (kIsDebugBuild || !region_.HasDualCodeMapping()) {
+      ScopedTrace trace("mprotect code");
+      const MemMap* const updatable_pages = region_.GetUpdatableCodeMapping();
+      if (updatable_pages != nullptr) {
+        int prot = region_.HasDualCodeMapping() ? kProtR : kProtRX;
+        CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+      }
     }
   }