Move memory-related regions of the JIT cache into their own class.

- Move the JIT lock into the global locks list.
- The JIT cache now has two regions: one inherited from the zygote and one of
  its own (a sketch of the region interface follows below).
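
For context, here is a rough sketch of the JitMemoryRegion interface as
implied by the call sites in this change. This is a hypothetical outline
only: the method names come from the diff, but the exact signatures,
annotations, and any additional members in jit/jit_memory_region.h may
differ.

  // Hypothetical sketch only: names are taken from call sites in this diff,
  // not from the actual jit/jit_memory_region.h, so signatures, annotations
  // and additional members may differ.
  #include <cstddef>
  #include <cstdint>
  #include <string>

  #include "base/mem_map.h"  // art::MemMap
  #include "base/mutex.h"    // Locks::jit_lock_, REQUIRES()

  namespace art {
  namespace jit {

  // Owns the data/exec mappings and mspaces that used to live directly in
  // JitCodeCache. Accesses are guarded by the new global Locks::jit_lock_.
  class JitMemoryRegion {
   public:
    void InitializeState(size_t initial_capacity, size_t max_capacity)
        REQUIRES(Locks::jit_lock_);
    bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
        REQUIRES(Locks::jit_lock_);
    void InitializeSpaces() REQUIRES(Locks::jit_lock_);

    // Allocation and accounting, formerly JitCodeCache::AllocateCode() etc.
    uint8_t* AllocateCode(size_t size) REQUIRES(Locks::jit_lock_);
    void FreeCode(uint8_t* code) REQUIRES(Locks::jit_lock_);
    uint8_t* AllocateData(size_t size) REQUIRES(Locks::jit_lock_);
    void FreeData(uint8_t* data) REQUIRES(Locks::jit_lock_);
    void* MoreCore(const void* mspace, intptr_t increment);
    bool IncreaseCodeCacheCapacity() REQUIRES(Locks::jit_lock_);

    // Dual-view helpers, formerly TranslateAddress()/GetUpdatableCodeMapping().
    bool HasDualCodeMapping() const;
    // Called with different pointer types in the diff, so presumably a template.
    template <typename T> T* GetExecutableAddress(T* src_ptr);

    // Queries used by JitCodeCache and ScopedCodeCacheWrite.
    bool IsInExecSpace(const void* ptr) const;
    bool IsInDataSpace(const void* ptr) const;
    bool OwnsSpace(const void* mspace) const;
    size_t GetUsedMemoryForCode() const;
    size_t GetUsedMemoryForData() const;
    size_t GetCurrentCapacity() const;
    size_t GetMaxCapacity() const;
    const MemMap* GetExecPages() const;
  };

  }  // namespace jit
  }  // namespace art

ScopedCodeCacheWrite, which previously took the JitCodeCache, now takes a
region (see the new jit/jit_scoped_code_cache_write.h include in the diff).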

Bug: 119800099
Test: boot, test.py
Change-Id: I833ba2b5a3d3c10fa01286cdf52603fa174a2cc7
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 4a41624..701d8c3 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -115,6 +115,7 @@
         "jit/debugger_interface.cc",
         "jit/jit.cc",
         "jit/jit_code_cache.cc",
+        "jit/jit_memory_region.cc",
         "jit/profiling_info.cc",
         "jit/profile_saver.cc",
         "jni/check_jni.cc",
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
index 4349be0..1cec44c 100644
--- a/runtime/base/locks.cc
+++ b/runtime/base/locks.cc
@@ -63,6 +63,7 @@
 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
 Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
 Mutex* Locks::cha_lock_ = nullptr;
+Mutex* Locks::jit_lock_ = nullptr;
 Mutex* Locks::subtype_check_lock_ = nullptr;
 Mutex* Locks::thread_list_lock_ = nullptr;
 ConditionVariable* Locks::thread_exit_cond_ = nullptr;
@@ -147,6 +148,7 @@
     DCHECK(mutator_lock_ != nullptr);
     DCHECK(profiler_lock_ != nullptr);
     DCHECK(cha_lock_ != nullptr);
+    DCHECK(jit_lock_ != nullptr);
     DCHECK(subtype_check_lock_ != nullptr);
     DCHECK(thread_list_lock_ != nullptr);
     DCHECK(thread_suspend_count_lock_ != nullptr);
@@ -303,6 +305,10 @@
     DCHECK(custom_tls_lock_ == nullptr);
     custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kJitCodeCacheLock);
+    DCHECK(jit_lock_ == nullptr);
+    jit_lock_ = new Mutex("Jit code cache", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
     DCHECK(cha_lock_ == nullptr);
     cha_lock_ = new Mutex("CHA lock", current_lock_level);
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index b15fd32..77e304d 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -328,8 +328,11 @@
   // GetThreadLocalStorage.
   static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
 
+  // Guards access to any JIT data structure.
+  static Mutex* jit_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
   // Guards Class Hierarchy Analysis (CHA).
-  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+  static Mutex* cha_lock_ ACQUIRED_AFTER(jit_lock_);
 
   // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
   // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 8a15af2..e7ef03e 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -21,7 +21,7 @@
 #include "base/bit_utils.h"
 #include "base/mem_map.h"
 #include "card_table.h"
-#include "jit/jit_code_cache.h"
+#include "jit/jit_memory_region.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2505111..f430d58 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,7 +19,6 @@
 #include <sstream>
 
 #include <android-base/logging.h>
-#include <android-base/unique_fd.h>
 
 #include "arch/context.h"
 #include "art_method-inl.h"
@@ -47,6 +46,7 @@
 #include "intern_table.h"
 #include "jit/jit.h"
 #include "jit/profiling_info.h"
+#include "jit/jit_scoped_code_cache_write.h"
 #include "linear_alloc.h"
 #include "oat_file-inl.h"
 #include "oat_quick_method_header.h"
@@ -57,37 +57,12 @@
 #include "thread-current-inl.h"
 #include "thread_list.h"
 
-using android::base::unique_fd;
-
 namespace art {
 namespace jit {
 
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
 
-// Data cache will be half of the capacity
-// Code cache will be the other half of the capacity.
-// TODO: Make this variable?
-static constexpr size_t kCodeAndDataCapacityDivider = 2;
-
-static constexpr int kProtR = PROT_READ;
-static constexpr int kProtRW = PROT_READ | PROT_WRITE;
-static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
-static constexpr int kProtRX = PROT_READ | PROT_EXEC;
-
-namespace {
-
-// Translate an address belonging to one memory map into an address in a second. This is useful
-// when there are two virtual memory ranges for the same physical memory range.
-template <typename T>
-T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
-  CHECK(src.HasAddress(src_ptr));
-  uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
-  return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
-}
-
-}  // namespace
-
 class JitCodeCache::JniStubKey {
  public:
   explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -189,171 +164,6 @@
   std::vector<ArtMethod*> methods_;
 };
 
-bool JitCodeCache::InitializeMappings(bool rwx_memory_allowed,
-                                      bool is_zygote,
-                                      std::string* error_msg) {
-  ScopedTrace trace(__PRETTY_FUNCTION__);
-
-  const size_t capacity = max_capacity_;
-  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
-  const size_t exec_capacity = capacity - data_capacity;
-
-  // File descriptor enabling dual-view mapping of code section.
-  unique_fd mem_fd;
-
-  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
-  // for it.
-  if (!is_zygote) {
-    // Bionic supports memfd_create, but the call may fail on older kernels.
-    mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
-    if (mem_fd.get() < 0) {
-      std::ostringstream oss;
-      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
-      if (!rwx_memory_allowed) {
-        // Without using RWX page permissions, the JIT can not fallback to single mapping as it
-        // requires tranitioning the code pages to RWX for updates.
-        *error_msg = oss.str();
-        return false;
-      }
-      VLOG(jit) << oss.str();
-    }
-  }
-
-  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
-    std::ostringstream oss;
-    oss << "Failed to initialize memory file: " << strerror(errno);
-    *error_msg = oss.str();
-    return false;
-  }
-
-  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
-  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
-
-  std::string error_str;
-  // Map name specific for android_os_Debug.cpp accounting.
-  // Map in low 4gb to simplify accessing root tables for x86_64.
-  // We could do PC-relative addressing to avoid this problem, but that
-  // would require reserving code and data area before submitting, which
-  // means more windows for the code memory to be RWX.
-  int base_flags;
-  MemMap data_pages;
-  if (mem_fd.get() >= 0) {
-    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
-    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
-    // enable dual mapping - we'll create a second mapping using the descriptor below. The
-    // mappings will look like:
-    //
-    //       VA                  PA
-    //
-    //       +---------------+
-    //       | non exec code |\
-    //       +---------------+ \
-    //       :               :\ \
-    //       +---------------+.\.+---------------+
-    //       |  exec code    |  \|     code      |
-    //       +---------------+...+---------------+
-    //       |      data     |   |     data      |
-    //       +---------------+...+---------------+
-    //
-    // In this configuration code updates are written to the non-executable view of the code
-    // cache, and the executable view of the code cache has fixed RX memory protections.
-    //
-    // This memory needs to be mapped shared as the code portions will have two mappings.
-    base_flags = MAP_SHARED;
-    data_pages = MemMap::MapFile(
-        data_capacity + exec_capacity,
-        kProtRW,
-        base_flags,
-        mem_fd,
-        /* start= */ 0,
-        /* low_4gb= */ true,
-        data_cache_name.c_str(),
-        &error_str);
-  } else {
-    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
-    // for data and JIT code pages. The mappings will look like:
-    //
-    //       VA                  PA
-    //
-    //       +---------------+...+---------------+
-    //       |  exec code    |   |     code      |
-    //       +---------------+...+---------------+
-    //       |      data     |   |     data      |
-    //       +---------------+...+---------------+
-    //
-    // In this configuration code updates are written to the executable view of the code cache,
-    // and the executable view of the code cache transitions RX to RWX for the update and then
-    // back to RX after the update.
-    base_flags = MAP_PRIVATE | MAP_ANON;
-    data_pages = MemMap::MapAnonymous(
-        data_cache_name.c_str(),
-        data_capacity + exec_capacity,
-        kProtRW,
-        /* low_4gb= */ true,
-        &error_str);
-  }
-
-  if (!data_pages.IsValid()) {
-    std::ostringstream oss;
-    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
-    *error_msg = oss.str();
-    return false;
-  }
-
-  MemMap exec_pages;
-  MemMap non_exec_pages;
-  if (exec_capacity > 0) {
-    uint8_t* const divider = data_pages.Begin() + data_capacity;
-    // Set initial permission for executable view to catch any SELinux permission problems early
-    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
-    // executable as there is no code in the cache yet.
-    exec_pages = data_pages.RemapAtEnd(divider,
-                                       exec_cache_name.c_str(),
-                                       kProtRX,
-                                       base_flags | MAP_FIXED,
-                                       mem_fd.get(),
-                                       (mem_fd.get() >= 0) ? data_capacity : 0,
-                                       &error_str);
-    if (!exec_pages.IsValid()) {
-      std::ostringstream oss;
-      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
-      *error_msg = oss.str();
-      return false;
-    }
-
-    if (mem_fd.get() >= 0) {
-      // For dual view, create the secondary view of code memory used for updating code. This view
-      // is never executable.
-      std::string name = exec_cache_name + "-rw";
-      non_exec_pages = MemMap::MapFile(exec_capacity,
-                                       kProtR,
-                                       base_flags,
-                                       mem_fd,
-                                       /* start= */ data_capacity,
-                                       /* low_4GB= */ false,
-                                       name.c_str(),
-                                       &error_str);
-      if (!non_exec_pages.IsValid()) {
-        static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
-        if (rwx_memory_allowed) {
-          // Log and continue as single view JIT (requires RWX memory).
-          VLOG(jit) << kFailedNxView;
-        } else {
-          *error_msg = kFailedNxView;
-          return false;
-        }
-      }
-    }
-  } else {
-    // Profiling only. No memory for code required.
-  }
-
-  data_pages_ = std::move(data_pages);
-  exec_pages_ = std::move(exec_pages);
-  non_exec_pages_ = std::move(non_exec_pages);
-  return true;
-}
-
 JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
                                    bool rwx_memory_allowed,
                                    bool is_zygote,
@@ -385,19 +195,20 @@
 
   std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
 
-  MutexLock mu(Thread::Current(), jit_code_cache->lock_);
-  jit_code_cache->InitializeState(initial_capacity, max_capacity);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  jit_code_cache->private_region_.InitializeState(initial_capacity, max_capacity);
 
   // Zygote should never collect code to share the memory with the children.
   if (is_zygote) {
     jit_code_cache->garbage_collect_code_ = false;
   }
 
-  if (!jit_code_cache->InitializeMappings(rwx_memory_allowed, is_zygote, error_msg)) {
+  if (!jit_code_cache->private_region_.InitializeMappings(
+        rwx_memory_allowed, is_zygote, error_msg)) {
     return nullptr;
   }
 
-  jit_code_cache->InitializeSpaces();
+  jit_code_cache->private_region_.InitializeSpaces();
 
   VLOG(jit) << "Created jit code cache: initial capacity="
             << PrettySize(initial_capacity)
@@ -408,82 +219,24 @@
 }
 
 JitCodeCache::JitCodeCache()
-    : lock_("Jit code cache", kJitCodeCacheLock),
-      lock_cond_("Jit code cache condition variable", lock_),
+    : is_weak_access_enabled_(true),
+      inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
+      lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
       collection_in_progress_(false),
       last_collection_increased_code_cache_(false),
       garbage_collect_code_(true),
-      used_memory_for_data_(0),
-      used_memory_for_code_(0),
       number_of_compilations_(0),
       number_of_osr_compilations_(0),
       number_of_collections_(0),
       histogram_stack_map_memory_use_("Memory used for stack maps", 16),
       histogram_code_memory_use_("Memory used for compiled code", 16),
-      histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
-      is_weak_access_enabled_(true),
-      inline_cache_cond_("Jit inline cache condition variable", lock_),
-      zygote_data_pages_(),
-      zygote_exec_pages_(),
-      zygote_data_mspace_(nullptr),
-      zygote_exec_mspace_(nullptr) {
-}
-
-void JitCodeCache::InitializeState(size_t initial_capacity, size_t max_capacity) {
-  CHECK_GE(max_capacity, initial_capacity);
-  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
-  // Align both capacities to page size, as that's the unit mspaces use.
-  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
-  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
-
-  used_memory_for_data_ = 0;
-  used_memory_for_code_ = 0;
-  number_of_compilations_ = 0;
-  number_of_osr_compilations_ = 0;
-  number_of_collections_ = 0;
-
-  data_pages_ = MemMap();
-  exec_pages_ = MemMap();
-  non_exec_pages_ = MemMap();
-  initial_capacity_ = initial_capacity;
-  max_capacity_ = max_capacity;
-  current_capacity_ = initial_capacity,
-  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
-  exec_end_ = initial_capacity - data_end_;
-}
-
-void JitCodeCache::InitializeSpaces() {
-  // Initialize the data heap
-  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
-  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
-
-  // Initialize the code heap
-  MemMap* code_heap = nullptr;
-  if (non_exec_pages_.IsValid()) {
-    code_heap = &non_exec_pages_;
-  } else if (exec_pages_.IsValid()) {
-    code_heap = &exec_pages_;
-  }
-  if (code_heap != nullptr) {
-    // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
-    // heap, will take and initialize pages in create_mspace_with_base().
-    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
-    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
-    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
-    SetFootprintLimit(initial_capacity_);
-    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
-    // perform the update and there are no other times write access is required.
-    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
-  } else {
-    exec_mspace_ = nullptr;
-    SetFootprintLimit(initial_capacity_);
-  }
+      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
 }
 
 JitCodeCache::~JitCodeCache() {}
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return exec_pages_.HasAddress(ptr) || zygote_exec_pages_.HasAddress(ptr);
+  return private_region_.IsInExecSpace(ptr) || shared_region_.IsInExecSpace(ptr);
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -498,7 +251,7 @@
 }
 
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
     auto it = jni_stubs_map_.find(JniStubKey(method));
     if (it != jni_stubs_map_.end() &&
@@ -518,7 +271,7 @@
 
 const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
   DCHECK(method->IsNative());
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   auto it = jni_stubs_map_.find(JniStubKey(method));
   if (it != jni_stubs_map_.end()) {
     JniStubData& data = it->second;
@@ -568,34 +321,6 @@
   return nullptr;
 }
 
-class ScopedCodeCacheWrite : ScopedTrace {
- public:
-  explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache)
-      : ScopedTrace("ScopedCodeCacheWrite"),
-        code_cache_(code_cache) {
-    ScopedTrace trace("mprotect all");
-    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
-    if (updatable_pages != nullptr) {
-      int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
-      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
-    }
-  }
-
-  ~ScopedCodeCacheWrite() {
-    ScopedTrace trace("mprotect code");
-    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
-    if (updatable_pages != nullptr) {
-      int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
-      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
-    }
-  }
-
- private:
-  const JitCodeCache* const code_cache_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
-};
-
 uint8_t* JitCodeCache::CommitCode(Thread* self,
                                   ArtMethod* method,
                                   uint8_t* stack_map,
@@ -741,7 +466,7 @@
 }
 
 void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   for (const auto& entry : method_code_map_) {
     uint32_t number_of_roots = 0;
     uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
@@ -790,15 +515,10 @@
   // It does nothing if we are not using native debugger.
   RemoveNativeDebugInfoForJit(Thread::Current(), code_ptr);
   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
-    FreeData(GetRootTable(code_ptr));
+    private_region_.FreeData(GetRootTable(code_ptr));
   }  // else this is a JNI stub without any data.
 
-  uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
-  if (HasDualCodeMapping()) {
-    code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
-  }
-
-  FreeCode(code_allocation);
+  private_region_.FreeCode(reinterpret_cast<uint8_t*>(allocation));
 }
 
 void JitCodeCache::FreeAllMethodHeaders(
@@ -807,14 +527,14 @@
   // first since once we do FreeCode() below, the memory can be reused
   // so it's possible for the same method_header to start representing
   // different compile code.
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   {
     MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
     Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
         ->RemoveDependentsWithMethodHeaders(method_headers);
   }
 
-  ScopedCodeCacheWrite scc(this);
+  ScopedCodeCacheWrite scc(private_region_);
   for (const OatQuickMethodHeader* method_header : method_headers) {
     FreeCodeAndData(method_header->GetCode());
   }
@@ -828,12 +548,12 @@
   // the CHA dependency map just once with an unordered_set.
   std::unordered_set<OatQuickMethodHeader*> method_headers;
   {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     // We do not check if a code cache GC is in progress, as this method comes
     // with the classlinker_classes_lock_ held, and suspending ourselves could
     // lead to a deadlock.
     {
-      ScopedCodeCacheWrite scc(this);
+      ScopedCodeCacheWrite scc(private_region_);
       for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
         it->second.RemoveMethodsIn(alloc);
         if (it->second.GetMethods().empty()) {
@@ -866,7 +586,7 @@
       ProfilingInfo* info = *it;
       if (alloc.ContainsUnsafe(info->GetMethod())) {
         info->GetMethod()->SetProfilingInfo(nullptr);
-        FreeData(reinterpret_cast<uint8_t*>(info));
+        private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
         it = profiling_infos_.erase(it);
       } else {
         ++it;
@@ -887,7 +607,7 @@
     return;
   }
   ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   while (!IsWeakAccessEnabled(self)) {
     inline_cache_cond_.Wait(self);
   }
@@ -895,7 +615,7 @@
 
 void JitCodeCache::BroadcastForInlineCacheAccess() {
   Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   inline_cache_cond_.Broadcast(self);
 }
 
@@ -940,23 +660,13 @@
 
 void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
   while (collection_in_progress_) {
-    lock_.Unlock(self);
+    Locks::jit_lock_->Unlock(self);
     {
       ScopedThreadSuspension sts(self, kSuspended);
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       WaitForPotentialCollectionToComplete(self);
     }
-    lock_.Lock(self);
-  }
-}
-
-const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
-  if (HasDualCodeMapping()) {
-    return &non_exec_pages_;
-  } else if (HasCodeMapping()) {
-    return &exec_pages_;
-  } else {
-    return nullptr;
+    Locks::jit_lock_->Lock(self);
   }
 }
 
@@ -983,12 +693,12 @@
   OatQuickMethodHeader* method_header = nullptr;
   uint8_t* code_ptr = nullptr;
 
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
   // finish.
   WaitForPotentialCollectionToCompleteRunnable(self);
   {
-    ScopedCodeCacheWrite scc(this);
+    ScopedCodeCacheWrite scc(private_region_);
 
     size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
     // Ensure the header ends up at expected instruction alignment.
@@ -997,7 +707,7 @@
 
     // AllocateCode allocates memory in non-executable region for alignment header and code. The
     // header size may include alignment padding.
-    uint8_t* nox_memory = AllocateCode(total_size);
+    uint8_t* nox_memory = private_region_.AllocateCode(total_size);
     if (nox_memory == nullptr) {
       return nullptr;
     }
@@ -1008,9 +718,7 @@
     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
 
     // From here code_ptr points to executable code.
-    if (HasDualCodeMapping()) {
-      code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
-    }
+    code_ptr = private_region_.GetExecutableAddress(code_ptr);
 
     new (method_header) OatQuickMethodHeader(
         (stack_map != nullptr) ? code_ptr - stack_map : 0u,
@@ -1022,9 +730,7 @@
     }
 
     // Update method_header pointer to executable code region.
-    if (HasDualCodeMapping()) {
-      method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
-    }
+    method_header = private_region_.GetExecutableAddress(method_header);
 
     // Both instruction and data caches need flushing to the point of unification where both share
     // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
@@ -1041,7 +747,7 @@
     // For reference, this behavior is caused by this commit:
     // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
     //
-    if (HasDualCodeMapping()) {
+    if (private_region_.HasDualCodeMapping()) {
       // Flush the data cache lines associated with the non-executable copy of the code just added.
       FlushDataCache(nox_memory, nox_memory + total_size);
     }
@@ -1162,7 +868,7 @@
 }
 
 size_t JitCodeCache::CodeCacheSize() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return CodeCacheSizeLocked();
 }
 
@@ -1170,7 +876,7 @@
   // This function is used only for testing and only with non-native methods.
   CHECK(!method->IsNative());
 
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
 
   bool osr = osr_code_map_.find(method) != osr_code_map_.end();
   bool in_cache = RemoveMethodLocked(method, release_memory);
@@ -1200,7 +906,7 @@
   }
 
   bool in_cache = false;
-  ScopedCodeCacheWrite ccw(this);
+  ScopedCodeCacheWrite ccw(private_region_);
   if (UNLIKELY(method->IsNative())) {
     auto it = jni_stubs_map_.find(JniStubKey(method));
     if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
@@ -1240,7 +946,7 @@
 // any cached information it has on the method. All threads must be suspended before calling this
 // method. The compiled code for the method (if there is any) must not be in any threads call stack.
 void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   RemoveMethodLocked(method, /* release_memory= */ true);
 }
 
@@ -1251,7 +957,7 @@
 // shouldn't be used since it is no longer logically in the jit code cache.
 // TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
 void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   if (old_method->IsNative()) {
     // Update methods in jni_stubs_map_.
     for (auto& entry : jni_stubs_map_) {
@@ -1288,7 +994,7 @@
 }
 
 void JitCodeCache::ClearEntryPointsInZygoteExecSpace() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   // Iterate over profiling infos to know which methods may have been JITted. Note that
   // to be JITted, a method must have a profiling info.
   for (ProfilingInfo* info : profiling_infos_) {
@@ -1306,24 +1012,24 @@
 }
 
 size_t JitCodeCache::CodeCacheSizeLocked() {
-  return used_memory_for_code_;
+  return private_region_.GetUsedMemoryForCode();
 }
 
 size_t JitCodeCache::DataCacheSize() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return DataCacheSizeLocked();
 }
 
 size_t JitCodeCache::DataCacheSizeLocked() {
-  return used_memory_for_data_;
+  return private_region_.GetUsedMemoryForData();
 }
 
 void JitCodeCache::ClearData(Thread* self,
                              uint8_t* stack_map_data,
                              uint8_t* roots_data) {
   DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
-  MutexLock mu(self, lock_);
-  FreeData(reinterpret_cast<uint8_t*>(roots_data));
+  MutexLock mu(self, *Locks::jit_lock_);
+  private_region_.FreeData(reinterpret_cast<uint8_t*>(roots_data));
 }
 
 size_t JitCodeCache::ReserveData(Thread* self,
@@ -1338,21 +1044,21 @@
 
   {
     ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     WaitForPotentialCollectionToComplete(self);
-    result = AllocateData(size);
+    result = private_region_.AllocateData(size);
   }
 
   if (result == nullptr) {
     // Retry.
     GarbageCollectCache(self);
     ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     WaitForPotentialCollectionToComplete(self);
-    result = AllocateData(size);
+    result = private_region_.AllocateData(size);
   }
 
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   histogram_stack_map_memory_use_.AddValue(size);
   if (size > kStackMapSizeLogThreshold) {
     LOG(INFO) << "JIT allocated "
@@ -1429,40 +1135,6 @@
   lock_cond_.Broadcast(self);
 }
 
-void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
-  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
-  DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
-  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
-  mspace_set_footprint_limit(data_mspace_, data_space_footprint);
-  if (HasCodeMapping()) {
-    ScopedCodeCacheWrite scc(this);
-    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
-  }
-}
-
-bool JitCodeCache::IncreaseCodeCacheCapacity() {
-  if (current_capacity_ == max_capacity_) {
-    return false;
-  }
-
-  // Double the capacity if we're below 1MB, or increase it by 1MB if
-  // we're above.
-  if (current_capacity_ < 1 * MB) {
-    current_capacity_ *= 2;
-  } else {
-    current_capacity_ += 1 * MB;
-  }
-  if (current_capacity_ > max_capacity_) {
-    current_capacity_ = max_capacity_;
-  }
-
-  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
-
-  SetFootprintLimit(current_capacity_);
-
-  return true;
-}
-
 void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
   Barrier barrier(0);
   size_t threads_running_checkpoint = 0;
@@ -1477,10 +1149,10 @@
 }
 
 bool JitCodeCache::ShouldDoFullCollection() {
-  if (current_capacity_ == max_capacity_) {
+  if (private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity()) {
     // Always do a full collection when the code cache is full.
     return true;
-  } else if (current_capacity_ < kReservedCapacity) {
+  } else if (private_region_.GetCurrentCapacity() < kReservedCapacity) {
     // Always do partial collection when the code cache size is below the reserved
     // capacity.
     return false;
@@ -1498,9 +1170,9 @@
   // Wait for an existing collection, or let everyone know we are starting one.
   {
     ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     if (!garbage_collect_code_) {
-      IncreaseCodeCacheCapacity();
+      private_region_.IncreaseCodeCacheCapacity();
       return;
     } else if (WaitForPotentialCollectionToComplete(self)) {
       return;
@@ -1508,8 +1180,9 @@
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
-          reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
+          reinterpret_cast<uintptr_t>(
+              private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1520,7 +1193,7 @@
 
     bool do_full_collection = false;
     {
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       do_full_collection = ShouldDoFullCollection();
     }
 
@@ -1537,7 +1210,7 @@
               << ", data=" << PrettySize(DataCacheSize());
 
     {
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
 
       // Increase the code cache only when we do partial collections.
       // TODO: base this strategy on how full the code cache is?
@@ -1545,7 +1218,7 @@
         last_collection_increased_code_cache_ = false;
       } else {
         last_collection_increased_code_cache_ = true;
-        IncreaseCodeCacheCapacity();
+        private_region_.IncreaseCodeCacheCapacity();
       }
 
       bool next_collection_will_be_full = ShouldDoFullCollection();
@@ -1597,8 +1270,8 @@
   ScopedTrace trace(__FUNCTION__);
   std::unordered_set<OatQuickMethodHeader*> method_headers;
   {
-    MutexLock mu(self, lock_);
-    ScopedCodeCacheWrite scc(this);
+    MutexLock mu(self, *Locks::jit_lock_);
+    ScopedCodeCacheWrite scc(private_region_);
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
       JniStubData* data = &it->second;
@@ -1627,13 +1300,13 @@
 }
 
 bool JitCodeCache::GetGarbageCollectCode() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return garbage_collect_code_;
 }
 
 void JitCodeCache::SetGarbageCollectCode(bool value) {
   Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   if (garbage_collect_code_ != value) {
     if (garbage_collect_code_) {
       // When dynamically disabling the garbage collection, we neee
@@ -1652,7 +1325,7 @@
 void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
   ScopedTrace trace(__FUNCTION__);
   {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     if (collect_profiling_info) {
       // Clear the profiling info of methods that do not have compiled code as entrypoint.
       // Also remove the saved entry point from the ProfilingInfo objects.
@@ -1722,7 +1395,7 @@
   RemoveUnmarkedCode(self);
 
   if (collect_profiling_info) {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     // Free all profiling infos of methods not compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
       [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
@@ -1738,7 +1411,7 @@
           info->GetMethod()->SetProfilingInfo(info);
         } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
           // No need for this ProfilingInfo object anymore.
-          FreeData(reinterpret_cast<uint8_t*>(info));
+          private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
           return true;
         }
         return false;
@@ -1762,7 +1435,7 @@
     CHECK(method != nullptr);
   }
 
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   OatQuickMethodHeader* method_header = nullptr;
   ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
   if (method != nullptr && UNLIKELY(method->IsNative())) {
@@ -1811,7 +1484,7 @@
 }
 
 OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   auto it = osr_code_map_.find(method);
   if (it == osr_code_map_.end()) {
     return nullptr;
@@ -1829,19 +1502,19 @@
   if (!retry_allocation) {
     // If we are allocating for the interpreter, just try to lock, to avoid
     // lock contention with the JIT.
-    if (lock_.ExclusiveTryLock(self)) {
+    if (Locks::jit_lock_->ExclusiveTryLock(self)) {
       info = AddProfilingInfoInternal(self, method, entries);
-      lock_.ExclusiveUnlock(self);
+      Locks::jit_lock_->ExclusiveUnlock(self);
     }
   } else {
     {
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       info = AddProfilingInfoInternal(self, method, entries);
     }
 
     if (info == nullptr) {
       GarbageCollectCache(self);
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       info = AddProfilingInfoInternal(self, method, entries);
     }
   }
@@ -1861,7 +1534,7 @@
     return info;
   }
 
-  uint8_t* data = AllocateData(profile_info_size);
+  uint8_t* data = private_region_.AllocateData(profile_info_size);
   if (data == nullptr) {
     return nullptr;
   }
@@ -1877,28 +1550,15 @@
   return info;
 }
 
-// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
-// is already held.
-void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
-  if (mspace == exec_mspace_) {
-    DCHECK(exec_mspace_ != nullptr);
-    const MemMap* const code_pages = GetUpdatableCodeMapping();
-    void* result = code_pages->Begin() + exec_end_;
-    exec_end_ += increment;
-    return result;
-  } else {
-    DCHECK_EQ(data_mspace_, mspace);
-    void* result = data_pages_.Begin() + data_end_;
-    data_end_ += increment;
-    return result;
-  }
+void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
+  return private_region_.MoreCore(mspace, increment);
 }
 
 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                                       std::vector<ProfileMethodInfo>& methods) {
   Thread* self = Thread::Current();
   WaitUntilInlineCacheAccessible(self);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   ScopedTrace trace(__FUNCTION__);
   uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
   for (const ProfilingInfo* info : profiling_infos_) {
@@ -1979,7 +1639,7 @@
 }
 
 bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return osr_code_map_.find(method) != osr_code_map_.end();
 }
 
@@ -2002,7 +1662,7 @@
     }
   }
 
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
     return false;
   }
@@ -2063,7 +1723,7 @@
 }
 
 ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
   if (info != nullptr) {
     if (!info->IncrementInlineUse()) {
@@ -2075,7 +1735,7 @@
 }
 
 void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
   DCHECK(info != nullptr);
   info->DecrementInlineUse();
@@ -2083,7 +1743,7 @@
 
 void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
   DCHECK_EQ(Thread::Current(), self);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
     auto it = jni_stubs_map_.find(JniStubKey(method));
     DCHECK(it != jni_stubs_map_.end());
@@ -2124,7 +1784,7 @@
         method, GetQuickToInterpreterBridge());
     ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
   } else {
-    MutexLock mu(Thread::Current(), lock_);
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
     auto it = osr_code_map_.find(method);
     if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
       // Remove the OSR method, to avoid using it again.
@@ -2133,49 +1793,14 @@
   }
 }
 
-uint8_t* JitCodeCache::AllocateCode(size_t allocation_size) {
-  // Each allocation should be on its own set of cache lines. The allocation must be large enough
-  // for header, code, and any padding.
-  uint8_t* result = reinterpret_cast<uint8_t*>(
-      mspace_memalign(exec_mspace_, kJitCodeAlignment, allocation_size));
-  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
-  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
-  // Ensure the header ends up at expected instruction alignment.
-  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
-  used_memory_for_code_ += mspace_usable_size(result);
-  return result;
-}
-
-void JitCodeCache::FreeCode(uint8_t* code) {
-  if (IsInZygoteExecSpace(code)) {
-    // No need to free, this is shared memory.
-    return;
-  }
-  used_memory_for_code_ -= mspace_usable_size(code);
-  mspace_free(exec_mspace_, code);
-}
-
-uint8_t* JitCodeCache::AllocateData(size_t data_size) {
-  void* result = mspace_malloc(data_mspace_, data_size);
-  used_memory_for_data_ += mspace_usable_size(result);
-  return reinterpret_cast<uint8_t*>(result);
-}
-
-void JitCodeCache::FreeData(uint8_t* data) {
-  if (IsInZygoteDataSpace(data)) {
-    // No need to free, this is shared memory.
-    return;
-  }
-  used_memory_for_data_ -= mspace_usable_size(data);
-  mspace_free(data_mspace_, data);
-}
-
 void JitCodeCache::Dump(std::ostream& os) {
-  MutexLock mu(Thread::Current(), lock_);
-  os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
-     << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  os << "Current JIT code cache size: " << PrettySize(private_region_.GetUsedMemoryForCode())
+                                        << "\n"
+     << "Current JIT data cache size: " << PrettySize(private_region_.GetUsedMemoryForData())
+                                        << "\n"
      << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
-     << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
+     << "Current JIT capacity: " << PrettySize(private_region_.GetCurrentCapacity()) << "\n"
      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
      << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
@@ -2192,25 +1817,28 @@
     // Don't transition if this is for a child zygote.
     return;
   }
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
 
-  zygote_data_pages_ = std::move(data_pages_);
-  zygote_exec_pages_ = std::move(exec_pages_);
-  zygote_data_mspace_ = data_mspace_;
-  zygote_exec_mspace_ = exec_mspace_;
+  shared_region_ = std::move(private_region_);
+
+  // Reset all statistics to be specific to this process.
+  number_of_compilations_ = 0;
+  number_of_osr_compilations_ = 0;
+  number_of_collections_ = 0;
 
   size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
   size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
 
-  InitializeState(initial_capacity, max_capacity);
+  private_region_.InitializeState(initial_capacity, max_capacity);
 
   std::string error_msg;
-  if (!InitializeMappings(/* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
+  if (!private_region_.InitializeMappings(
+          /* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
     LOG(WARNING) << "Could not reset JIT state after zygote fork: " << error_msg;
     return;
   }
 
-  InitializeSpaces();
+  private_region_.InitializeSpaces();
 }
 
 }  // namespace jit
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 3078e2b..a56f6f0 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -31,6 +31,7 @@
 #include "base/mem_map.h"
 #include "base/mutex.h"
 #include "base/safe_map.h"
+#include "jit_memory_region.h"
 
 namespace art {
 
@@ -72,14 +73,6 @@
 namespace jit {
 
 class MarkCodeClosure;
-class ScopedCodeCacheWrite;
-
-// Alignment in bytes that will suit all architectures for JIT code cache allocations.  The
-// allocated block is used for method header followed by generated code. Allocations should be
-// aligned to avoid sharing cache lines between different allocations. The alignment should be
-// determined from the hardware, but this isn't readily exposed in userland plus some hardware
-// misreports.
-static constexpr int kJitCodeAlignment = 64;
 
 using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
 
@@ -103,11 +96,11 @@
 
   bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void NotifyMethodRedefined(ArtMethod* method)
       REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Notify to the code cache that the compiler wants to use the
   // profiling info of `method` to drive optimizations,
@@ -115,15 +108,15 @@
   // collected.
   ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void DoneCompilerUse(ArtMethod* method, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Allocate and write code and its metadata to the code cache.
   // `cha_single_implementation_list` needs to be registered via CHA (if it's
@@ -143,20 +136,20 @@
                       bool has_should_deoptimize_flag,
                       const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Return true if the code cache contains this pc.
   bool ContainsPc(const void* pc) const;
 
   // Returns true if either the method's entrypoint is JIT compiled code or it is the
   // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
-  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!lock_);
+  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   // Return true if the code cache contains this method.
-  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
+  bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
-  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);
+  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   // Allocate a region of data that contain `size` bytes, and potentially space
   // for storing `number_of_roots` roots. Returns null if there is no more room.
@@ -168,43 +161,43 @@
                      uint8_t** stack_map_data,
                      uint8_t** roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Clear data from the data portion of the code cache.
   void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given the 'pc', try to find the JIT compiled code associated with it.
   // Return null if 'pc' is not in the code cache. 'method' is passed for
   // sanity check.
   OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Removes method from the cache for testing purposes. The caller
   // must ensure that all threads are suspended and the method should
   // not be in any thread's stack.
   bool RemoveMethod(ArtMethod* method, bool release_memory)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES(Locks::mutator_lock_);
 
   // Remove all methods in our cache that were allocated by 'alloc'.
   void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
@@ -213,11 +206,11 @@
                                   ArtMethod* method,
                                   const std::vector<uint32_t>& entries,
                                   bool retry_allocation)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return mspace == data_mspace_ || mspace == exec_mspace_;
+    return private_region_.OwnsSpace(mspace);
   }
 
   void* MoreCore(const void* mspace, intptr_t increment);
@@ -225,36 +218,36 @@
   // Adds to `methods` all profiled methods which are part of any of the given dex locations.
   void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                           std::vector<ProfileMethodInfo>& methods)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void Dump(std::ostream& os) REQUIRES(!lock_);
+  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);
 
-  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);
+  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   void SweepRootTables(IsMarkedVisitor* visitor)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The GC needs to disallow the reading of inline caches when it processes them,
   // to avoid having a class being used while it is being deleted.
-  void AllowInlineCacheAccess() REQUIRES(!lock_);
-  void DisallowInlineCacheAccess() REQUIRES(!lock_);
-  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
+  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
+  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
 
   // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
   // 'new_method' since it is being made obsolete.
   void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
-      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
 
   // Dynamically change whether we want to garbage collect code.
-  void SetGarbageCollectCode(bool value) REQUIRES(!lock_);
+  void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);
 
-  bool GetGarbageCollectCode() REQUIRES(!lock_);
+  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);
 
   // Unsafe variant for debug checks.
   bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
@@ -264,13 +257,13 @@
   // If Jit-gc has been disabled (and instrumentation has been enabled) this will return the
   // jit-compiled entrypoint for this method.  Otherwise it will return null.
   const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Fetch the entrypoint that zygote may have saved for a method. The zygote saves an entrypoint
   // only for the case when the method's declaring class is not initialized.
   const void* GetZygoteSavedEntryPoint(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void PostForkChildAction(bool is_system_server, bool is_zygote);
@@ -278,18 +271,11 @@
   // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
   // This is used for removing non-debuggable JIT code at the point we realize the runtime
   // is debuggable.
-  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
 
  private:
   JitCodeCache();
 
-  void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);
-
-  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
-      REQUIRES(lock_);
-
-  void InitializeSpaces() REQUIRES(lock_);
-
   // Internal version of 'CommitCode' that will not retry if the
   // allocation fails. Return null if the allocation fails.
   uint8_t* CommitCodeInternal(Thread* self,
@@ -303,208 +289,163 @@
                               const std::vector<Handle<mirror::Object>>& roots,
                               bool has_should_deoptimize_flag,
                               const ArenaSet<ArtMethod*>& cha_single_implementation_list)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Adds the given roots to the roots_data. Only a member for annotalysis.
   void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
   // The non-mutator lock version should be used if possible. This method will release then
   // re-acquire the mutator lock.
   void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
-      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
   bool WaitForPotentialCollectionToComplete(Thread* self)
-      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);
 
   // Remove CHA dependents and underlying allocations for entries in `method_headers`.
   void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES(!Locks::cha_lock_);
 
   // Removes method from the cache. The caller must ensure that all threads
   // are suspended and the method should not be in any thread's stack.
   bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES(Locks::mutator_lock_);
 
   // Free code and data allocations for `code_ptr`.
-  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
+  void FreeCodeAndData(const void* code_ptr) REQUIRES(Locks::jit_lock_);
 
   // Number of bytes allocated in the code cache.
-  size_t CodeCacheSize() REQUIRES(!lock_);
+  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);
 
   // Number of bytes allocated in the data cache.
-  size_t DataCacheSize() REQUIRES(!lock_);
+  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);
 
   // Number of bytes allocated in the code cache.
-  size_t CodeCacheSizeLocked() REQUIRES(lock_);
+  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
   // Number of bytes allocated in the data cache.
-  size_t DataCacheSizeLocked() REQUIRES(lock_);
+  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
   // Notify all waiting threads that a collection is done.
-  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);
-
-  // Try to increase the current capacity of the code cache. Return whether we
-  // succeeded at doing so.
-  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);
-
-  // Set the footprint limit of the code cache.
-  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
+  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
 
   // Return whether we should do a full collection given the current state of the cache.
   bool ShouldDoFullCollection()
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DoCollection(Thread* self, bool collect_profiling_info)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void RemoveUnmarkedCode(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void MarkCompiledCodeOnThreadStacks(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   CodeCacheBitmap* GetLiveBitmap() const {
     return live_bitmap_.get();
   }
 
-  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
-  void FreeCode(uint8_t* code) REQUIRES(lock_);
-  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
-  void FreeData(uint8_t* data) REQUIRES(lock_);
-
-  bool HasDualCodeMapping() const {
-    return non_exec_pages_.IsValid();
-  }
-
-  bool HasCodeMapping() const {
-    return exec_pages_.IsValid();
-  }
-
-  const MemMap* GetUpdatableCodeMapping() const;
-
   bool IsInZygoteDataSpace(const void* ptr) const {
-    return zygote_data_pages_.HasAddress(ptr);
+    return shared_region_.IsInDataSpace(ptr);
   }
 
   bool IsInZygoteExecSpace(const void* ptr) const {
-    return zygote_exec_pages_.HasAddress(ptr);
+    return shared_region_.IsInExecSpace(ptr);
   }
 
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   class JniStubKey;
   class JniStubData;
 
-  // Lock for guarding allocations, collections, and the method_code_map_.
-  Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
-  // Condition to wait on during collection.
-  ConditionVariable lock_cond_ GUARDED_BY(lock_);
-  // Whether there is a code cache collection in progress.
-  bool collection_in_progress_ GUARDED_BY(lock_);
-  // Mem map which holds data (stack maps and profiling info).
-  MemMap data_pages_;
-  // Mem map which holds code and has executable permission.
-  MemMap exec_pages_;
-  // Mem map which holds code with non executable permission. Only valid for dual view JIT when
-  // this is the non-executable view of code used to write updates.
-  MemMap non_exec_pages_;
-  // The opaque mspace for allocating data.
-  void* data_mspace_ GUARDED_BY(lock_);
-  // The opaque mspace for allocating code.
-  void* exec_mspace_ GUARDED_BY(lock_);
-  // Bitmap for collecting code and data.
-  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
-  // Holds compiled code associated with the shorty for a JNI stub.
-  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
-  // Holds compiled code associated to the ArtMethod.
-  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
-  // Holds osr compiled code associated to the ArtMethod.
-  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
-  // ProfilingInfo objects we have allocated.
-  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
-
-  // The initial capacity in bytes this code cache starts with.
-  size_t initial_capacity_ GUARDED_BY(lock_);
-
-  // The maximum capacity in bytes this code cache can go to.
-  size_t max_capacity_ GUARDED_BY(lock_);
-
-  // The current capacity in bytes of the code cache.
-  size_t current_capacity_ GUARDED_BY(lock_);
-
-  // The current footprint in bytes of the data portion of the code cache.
-  size_t data_end_ GUARDED_BY(lock_);
-
-  // The current footprint in bytes of the code portion of the code cache.
-  size_t exec_end_ GUARDED_BY(lock_);
-
-  // Whether the last collection round increased the code cache.
-  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
-
-  // Whether we can do garbage collection. Not 'const' as tests may override this.
-  bool garbage_collect_code_ GUARDED_BY(lock_);
-
-  // The size in bytes of used memory for the data portion of the code cache.
-  size_t used_memory_for_data_ GUARDED_BY(lock_);
-
-  // The size in bytes of used memory for the code portion of the code cache.
-  size_t used_memory_for_code_ GUARDED_BY(lock_);
-
-  // Number of compilations done throughout the lifetime of the JIT.
-  size_t number_of_compilations_ GUARDED_BY(lock_);
-
-  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
-  size_t number_of_osr_compilations_ GUARDED_BY(lock_);
-
-  // Number of code cache collections done throughout the lifetime of the JIT.
-  size_t number_of_collections_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of stack map size statistics.
-  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of code size statistics.
-  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of profiling info statistics.
-  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
-
   // Whether the GC allows accessing weaks in inline caches. Note that this
   // is not used by the concurrent collector, which uses
   // Thread::SetWeakRefAccessEnabled instead.
   Atomic<bool> is_weak_access_enabled_;
 
   // Condition to wait on for accessing inline caches.
-  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);
 
-  // Mem map which holds zygote data (stack maps and profiling info).
-  MemMap zygote_data_pages_;
-  // Mem map which holds zygote code and has executable permission.
-  MemMap zygote_exec_pages_;
-  // The opaque mspace for allocating zygote data.
-  void* zygote_data_mspace_ GUARDED_BY(lock_);
-  // The opaque mspace for allocating zygote code.
-  void* zygote_exec_mspace_ GUARDED_BY(lock_);
+  // -------------- JIT memory regions ------------------------------------- //
+
+  // Shared region, inherited from the zygote.
+  JitMemoryRegion shared_region_;
+
+  // Process's own region.
+  JitMemoryRegion private_region_;
+
+  // -------------- Global JIT maps --------------------------------------- //
+
+  // Holds compiled code associated with the shorty for a JNI stub.
+  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // Holds compiled code associated with the ArtMethod.
+  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // Holds osr compiled code associated with the ArtMethod.
+  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // ProfilingInfo objects we have allocated.
+  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
+
+  // -------------- JIT GC related data structures ----------------------- //
+
+  // Condition to wait on during collection.
+  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);
+
+  // Whether there is a code cache collection in progress.
+  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
+
+  // Bitmap for collecting code and data.
+  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+
+  // Whether the last collection round increased the code cache.
+  bool last_collection_increased_code_cache_ GUARDED_BY(Locks::jit_lock_);
+
+  // Whether we can do garbage collection. Not 'const' as tests may override this.
+  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);
+
+  // ---------------- JIT statistics -------------------------------------- //
+
+  // Number of compilations done throughout the lifetime of the JIT.
+  size_t number_of_compilations_ GUARDED_BY(Locks::jit_lock_);
+
+  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
+  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);
+
+  // Number of code cache collections done throughout the lifetime of the JIT.
+  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histograms for keeping track of stack map size statistics.
+  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histograms for keeping track of code size statistics.
+  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histograms for keeping track of profiling info statistics.
+  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);
 
   friend class art::JitJniStubTestHelper;
   friend class ScopedCodeCacheWrite;
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
new file mode 100644
index 0000000..ac02bd8
--- /dev/null
+++ b/runtime/jit/jit_memory_region.cc
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_memory_region.h"
+
+#include <android-base/unique_fd.h>
+#include "base/bit_utils.h"  // For RoundDown, RoundUp
+#include "base/globals.h"
+#include "base/logging.h"  // For VLOG.
+#include "base/memfd.h"
+#include "base/systrace.h"
+#include "gc/allocator/dlmalloc.h"
+#include "jit/jit_scoped_code_cache_write.h"
+#include "oat_quick_method_header.h"
+
+using android::base::unique_fd;
+
+namespace art {
+namespace jit {
+
+// The data cache will be half of the capacity,
+// and the code cache will be the other half.
+// TODO: Make this variable?
+static constexpr size_t kCodeAndDataCapacityDivider = 2;
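+// For example, with a divider of 2, a 64 KiB capacity is split into 32 KiB of data pages and
+// 32 KiB of code pages.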
+
+bool JitMemoryRegion::InitializeMappings(bool rwx_memory_allowed,
+                                         bool is_zygote,
+                                         std::string* error_msg) {
+  ScopedTrace trace(__PRETTY_FUNCTION__);
+
+  const size_t capacity = max_capacity_;
+  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
+  const size_t exec_capacity = capacity - data_capacity;
+
+  // File descriptor enabling dual-view mapping of code section.
+  unique_fd mem_fd;
+
+  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
+  // for it.
+  if (!is_zygote) {
+    // Bionic supports memfd_create, but the call may fail on older kernels.
+    mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
+    if (mem_fd.get() < 0) {
+      std::ostringstream oss;
+      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
+      if (!rwx_memory_allowed) {
+        // Without RWX page permissions, the JIT cannot fall back to a single mapping, as that
+        // requires transitioning the code pages to RWX for updates.
+        *error_msg = oss.str();
+        return false;
+      }
+      VLOG(jit) << oss.str();
+    }
+  }
+
+  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
+    std::ostringstream oss;
+    oss << "Failed to initialize memory file: " << strerror(errno);
+    *error_msg = oss.str();
+    return false;
+  }
+
+  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
+  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
+
+  std::string error_str;
+  // Map name specific for android_os_Debug.cpp accounting.
+  // Map in low 4gb to simplify accessing root tables for x86_64.
+  // We could do PC-relative addressing to avoid this problem, but that
+  // would require reserving code and data area before submitting, which
+  // means more windows for the code memory to be RWX.
+  int base_flags;
+  MemMap data_pages;
+  if (mem_fd.get() >= 0) {
+    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
+    // enable dual mapping - we'll create a second mapping using the descriptor below. The
+    // mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+
+    //       | non exec code |\
+    //       +---------------+ \
+    //       :               :\ \
+    //       +---------------+.\.+---------------+
+    //       |  exec code    |  \|     code      |
+    //       +---------------+...+---------------+
+    //       |      data     |   |     data      |
+    //       +---------------+...+---------------+
+    //
+    // In this configuration code updates are written to the non-executable view of the code
+    // cache, and the executable view of the code cache has fixed RX memory protections.
+    //
+    // This memory needs to be mapped shared as the code portions will have two mappings.
+    base_flags = MAP_SHARED;
+    data_pages = MemMap::MapFile(
+        data_capacity + exec_capacity,
+        kProtRW,
+        base_flags,
+        mem_fd,
+        /* start= */ 0,
+        /* low_4gb= */ true,
+        data_cache_name.c_str(),
+        &error_str);
+  } else {
+    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and JIT code pages. The mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+...+---------------+
+    //       |  exec code    |   |     code      |
+    //       +---------------+...+---------------+
+    //       |      data     |   |     data      |
+    //       +---------------+...+---------------+
+    //
+    // In this configuration code updates are written to the executable view of the code cache,
+    // and the executable view of the code cache transitions RX to RWX for the update and then
+    // back to RX after the update.
+    base_flags = MAP_PRIVATE | MAP_ANON;
+    data_pages = MemMap::MapAnonymous(
+        data_cache_name.c_str(),
+        data_capacity + exec_capacity,
+        kProtRW,
+        /* low_4gb= */ true,
+        &error_str);
+  }
+
+  if (!data_pages.IsValid()) {
+    std::ostringstream oss;
+    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
+    *error_msg = oss.str();
+    return false;
+  }
+
+  MemMap exec_pages;
+  MemMap non_exec_pages;
+  if (exec_capacity > 0) {
+    uint8_t* const divider = data_pages.Begin() + data_capacity;
+    // Set initial permission for executable view to catch any SELinux permission problems early
+    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
+    // executable as there is no code in the cache yet.
+    exec_pages = data_pages.RemapAtEnd(divider,
+                                       exec_cache_name.c_str(),
+                                       kProtRX,
+                                       base_flags | MAP_FIXED,
+                                       mem_fd.get(),
+                                       (mem_fd.get() >= 0) ? data_capacity : 0,
+                                       &error_str);
+    if (!exec_pages.IsValid()) {
+      std::ostringstream oss;
+      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
+      *error_msg = oss.str();
+      return false;
+    }
+
+    if (mem_fd.get() >= 0) {
+      // For dual view, create the secondary view of code memory used for updating code. This view
+      // is never executable.
+      std::string name = exec_cache_name + "-rw";
+      non_exec_pages = MemMap::MapFile(exec_capacity,
+                                       kProtR,
+                                       base_flags,
+                                       mem_fd,
+                                       /* start= */ data_capacity,
+                                       /* low_4gb= */ false,
+                                       name.c_str(),
+                                       &error_str);
+      if (!non_exec_pages.IsValid()) {
+        static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
+        if (rwx_memory_allowed) {
+          // Log and continue as single view JIT (requires RWX memory).
+          VLOG(jit) << kFailedNxView;
+        } else {
+          *error_msg = kFailedNxView;
+          return false;
+        }
+      }
+    }
+  } else {
+    // Profiling only. No memory for code required.
+  }
+
+  data_pages_ = std::move(data_pages);
+  exec_pages_ = std::move(exec_pages);
+  non_exec_pages_ = std::move(non_exec_pages);
+  return true;
+}
+
+void JitMemoryRegion::InitializeState(size_t initial_capacity, size_t max_capacity) {
+  CHECK_GE(max_capacity, initial_capacity);
+  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
+  // Round both capacities down to a multiple of two pages, so that each half (data and code)
+  // stays page aligned; the page is the unit mspaces use.
+  initial_capacity_ = RoundDown(initial_capacity, 2 * kPageSize);
+  max_capacity_ = RoundDown(max_capacity, 2 * kPageSize);
+  current_capacity_ = initial_capacity;
+  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
+  exec_end_ = initial_capacity - data_end_;
+}
+
+void JitMemoryRegion::InitializeSpaces() {
+  // Initialize the data heap.
+  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
+  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
+
+  // Initialize the code heap.
+  MemMap* code_heap = nullptr;
+  if (non_exec_pages_.IsValid()) {
+    code_heap = &non_exec_pages_;
+  } else if (exec_pages_.IsValid()) {
+    code_heap = &exec_pages_;
+  }
+  if (code_heap != nullptr) {
+    // Make all pages reserved for the code heap writable. The mspace allocator, which manages
+    // the heap, will take and initialize pages in create_mspace_with_base().
+    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
+    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
+    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
+    SetFootprintLimit(initial_capacity_);
+    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
+    // perform the update and there are no other times write access is required.
+    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
+  } else {
+    exec_mspace_ = nullptr;
+    SetFootprintLimit(initial_capacity_);
+  }
+}
+
+void JitMemoryRegion::SetFootprintLimit(size_t new_footprint) {
+  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
+  DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
+  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
+  mspace_set_footprint_limit(data_mspace_, data_space_footprint);
+  if (HasCodeMapping()) {
+    ScopedCodeCacheWrite scc(*this);
+    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
+  }
+}
+
+bool JitMemoryRegion::IncreaseCodeCacheCapacity() {
+  if (current_capacity_ == max_capacity_) {
+    return false;
+  }
+
+  // Double the capacity if we're below 1MB, or increase it by 1MB if
+  // we're above.
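+  // For example, starting from 64 KiB: 64 KiB -> 128 KiB -> 256 KiB -> 512 KiB -> 1 MiB,
+  // then 2 MiB, 3 MiB, ... until max_capacity_ is reached.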
+  if (current_capacity_ < 1 * MB) {
+    current_capacity_ *= 2;
+  } else {
+    current_capacity_ += 1 * MB;
+  }
+  if (current_capacity_ > max_capacity_) {
+    current_capacity_ = max_capacity_;
+  }
+
+  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
+
+  SetFootprintLimit(current_capacity_);
+
+  return true;
+}
+
+// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
+// is already held.
+void* JitMemoryRegion::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
+  if (mspace == exec_mspace_) {
+    DCHECK(exec_mspace_ != nullptr);
+    const MemMap* const code_pages = GetUpdatableCodeMapping();
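+    // With the dual mapping, `code_pages` is the non-executable view, so the mspace grows
+    // through the view used for code updates.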
+    void* result = code_pages->Begin() + exec_end_;
+    exec_end_ += increment;
+    return result;
+  } else {
+    DCHECK_EQ(data_mspace_, mspace);
+    void* result = data_pages_.Begin() + data_end_;
+    data_end_ += increment;
+    return result;
+  }
+}
+
+uint8_t* JitMemoryRegion::AllocateCode(size_t code_size) {
+  // Each allocation should be on its own set of cache lines.
+  // `code_size` covers the OatQuickMethodHeader, the JIT generated machine code,
+  // and any alignment padding.
+  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+  DCHECK_GT(code_size, header_size);
+  uint8_t* result = reinterpret_cast<uint8_t*>(
+      mspace_memalign(exec_mspace_, kJitCodeAlignment, code_size));
+  // Ensure the header ends up at expected instruction alignment.
+  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
+  used_memory_for_code_ += mspace_usable_size(result);
+  return result;
+}
+
+void JitMemoryRegion::FreeCode(uint8_t* code) {
+  code = GetNonExecutableAddress(code);
+  used_memory_for_code_ -= mspace_usable_size(code);
+  mspace_free(exec_mspace_, code);
+}
+
+uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
+  void* result = mspace_malloc(data_mspace_, data_size);
+  used_memory_for_data_ += mspace_usable_size(result);
+  return reinterpret_cast<uint8_t*>(result);
+}
+
+void JitMemoryRegion::FreeData(uint8_t* data) {
+  used_memory_for_data_ -= mspace_usable_size(data);
+  mspace_free(data_mspace_, data);
+}
+
+}  // namespace jit
+}  // namespace art
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
new file mode 100644
index 0000000..5886587
--- /dev/null
+++ b/runtime/jit/jit_memory_region.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
+#define ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
+
+#include <string>
+
+#include "base/globals.h"
+#include "base/locks.h"
+#include "base/mem_map.h"
+
+namespace art {
+namespace jit {
+
+// Alignment in bytes that will suit all architectures for JIT code cache allocations.  The
+// allocated block is used for method header followed by generated code. Allocations should be
+// aligned to avoid sharing cache lines between different allocations. The alignment should be
+// determined from the hardware, but this isn't readily exposed in userland, and some hardware
+// misreports it.
+static constexpr int kJitCodeAlignment = 64;
+
+// Represents a memory region for the JIT, where code and data are stored. This class
+// provides allocation and deallocation primitives.
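+//
+// Typical initialization sequence, driven by the code cache under Locks::jit_lock_ (a sketch):
+//
+//   region.InitializeState(initial_capacity, max_capacity);
+//   std::string error_msg;
+//   if (region.InitializeMappings(rwx_memory_allowed, is_zygote, &error_msg)) {
+//     region.InitializeSpaces();
+//   }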
+class JitMemoryRegion {
+ public:
+  JitMemoryRegion()
+      : used_memory_for_code_(0),
+        used_memory_for_data_(0) {}
+
+  void InitializeState(size_t initial_capacity, size_t max_capacity)
+      REQUIRES(Locks::jit_lock_);
+
+  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
+      REQUIRES(Locks::jit_lock_);
+
+  void InitializeSpaces() REQUIRES(Locks::jit_lock_);
+
+  // Try to increase the current capacity of the code cache. Return whether we
+  // succeeded at doing so.
+  bool IncreaseCodeCacheCapacity() REQUIRES(Locks::jit_lock_);
+
+  // Set the footprint limit of the code cache.
+  void SetFootprintLimit(size_t new_footprint) REQUIRES(Locks::jit_lock_);
+  uint8_t* AllocateCode(size_t code_size) REQUIRES(Locks::jit_lock_);
+  void FreeCode(uint8_t* code) REQUIRES(Locks::jit_lock_);
+  uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
+  void FreeData(uint8_t* data) REQUIRES(Locks::jit_lock_);
+
+  bool HasDualCodeMapping() const {
+    return non_exec_pages_.IsValid();
+  }
+
+  bool HasCodeMapping() const {
+    return exec_pages_.IsValid();
+  }
+
+  bool IsInDataSpace(const void* ptr) const {
+    return data_pages_.HasAddress(ptr);
+  }
+
+  bool IsInExecSpace(const void* ptr) const {
+    return exec_pages_.HasAddress(ptr);
+  }
+
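+  // Returns the mapping used for writing code: the non-executable view when dual mapped,
+  // otherwise the executable view, or nullptr if there is no code mapping at all.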
+  const MemMap* GetUpdatableCodeMapping() const {
+    if (HasDualCodeMapping()) {
+      return &non_exec_pages_;
+    } else if (HasCodeMapping()) {
+      return &exec_pages_;
+    } else {
+      return nullptr;
+    }
+  }
+
+  const MemMap* GetExecPages() const {
+    return &exec_pages_;
+  }
+
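+  // Translate an address in the writable (non-executable) view of the code pages to the
+  // corresponding address in the executable view. A no-op without a dual mapping.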
+  template <typename T> T* GetExecutableAddress(T* src_ptr) {
+    return TranslateAddress(src_ptr, non_exec_pages_, exec_pages_);
+  }
+
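+  // Inverse of GetExecutableAddress: translate an executable-view address back to the
+  // writable view used for code updates.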
+  template <typename T> T* GetNonExecutableAddress(T* src_ptr) {
+    return TranslateAddress(src_ptr, exec_pages_, non_exec_pages_);
+  }
+
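+  // Called from mspace (dlmalloc) code when a heap needs to grow: returns the current end of
+  // the corresponding backing pages and advances it by `increment` bytes.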
+  void* MoreCore(const void* mspace, intptr_t increment);
+
+  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
+    return mspace == data_mspace_ || mspace == exec_mspace_;
+  }
+
+  size_t GetCurrentCapacity() const REQUIRES(Locks::jit_lock_) {
+    return current_capacity_;
+  }
+
+  size_t GetMaxCapacity() const REQUIRES(Locks::jit_lock_) {
+    return max_capacity_;
+  }
+
+  size_t GetUsedMemoryForCode() const REQUIRES(Locks::jit_lock_) {
+    return used_memory_for_code_;
+  }
+
+  size_t GetUsedMemoryForData() const REQUIRES(Locks::jit_lock_) {
+    return used_memory_for_data_;
+  }
+
+ private:
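+  // If there is a dual mapping, translate `src_ptr` from the `src` view to the corresponding
+  // address in the `dst` view; otherwise return `src_ptr` unchanged.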
+  template <typename T>
+  T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
+    if (!HasDualCodeMapping()) {
+      return src_ptr;
+    }
+    CHECK(src.HasAddress(src_ptr));
+    uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
+    return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
+  }
+
+  // The initial capacity in bytes this code region starts with.
+  size_t initial_capacity_ GUARDED_BY(Locks::jit_lock_);
+
+  // The maximum capacity in bytes this region can go to.
+  size_t max_capacity_ GUARDED_BY(Locks::jit_lock_);
+
+  // The current capacity in bytes of the region.
+  size_t current_capacity_ GUARDED_BY(Locks::jit_lock_);
+
+  // The current footprint in bytes of the data portion of the region.
+  size_t data_end_ GUARDED_BY(Locks::jit_lock_);
+
+  // The current footprint in bytes of the code portion of the region.
+  size_t exec_end_ GUARDED_BY(Locks::jit_lock_);
+
+  // The size in bytes of used memory for the code portion of the region.
+  size_t used_memory_for_code_ GUARDED_BY(Locks::jit_lock_);
+
+  // The size in bytes of used memory for the data portion of the region.
+  size_t used_memory_for_data_ GUARDED_BY(Locks::jit_lock_);
+
+  // Mem map which holds data (stack maps and profiling info).
+  MemMap data_pages_;
+
+  // Mem map which holds code and has executable permission.
+  MemMap exec_pages_;
+
+  // Mem map which holds code with non executable permission. Only valid for dual view JIT when
+  // this is the non-executable view of code used to write updates.
+  MemMap non_exec_pages_;
+
+  // The opaque mspace for allocating data.
+  void* data_mspace_ GUARDED_BY(Locks::jit_lock_);
+
+  // The opaque mspace for allocating code.
+  void* exec_mspace_ GUARDED_BY(Locks::jit_lock_);
+};
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
diff --git a/runtime/jit/jit_scoped_code_cache_write.h b/runtime/jit/jit_scoped_code_cache_write.h
new file mode 100644
index 0000000..ea99bdf
--- /dev/null
+++ b/runtime/jit/jit_scoped_code_cache_write.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_SCOPED_CODE_CACHE_WRITE_H_
+#define ART_RUNTIME_JIT_JIT_SCOPED_CODE_CACHE_WRITE_H_
+
+#include <sys/mman.h>
+
+#include "base/systrace.h"
+#include "base/utils.h"  // For CheckedCall
+
+namespace art {
+namespace jit {
+
+class JitMemoryRegion;
+
+static constexpr int kProtR = PROT_READ;
+static constexpr int kProtRW = PROT_READ | PROT_WRITE;
+static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtRX = PROT_READ | PROT_EXEC;
+
+// Scoped helper that makes the updatable view of the JIT code cache writable for its lifetime:
+// R <-> RW with the dual mapping, RX <-> RWX with the single mapping.
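+//
+// Typical usage (a sketch; the real call sites live in JitMemoryRegion and JitCodeCache):
+//
+//   {
+//     ScopedCodeCacheWrite scc(region);
+//     // ... write code through region.GetUpdatableCodeMapping() ...
+//   }  // The destructor removes write access again (back to R or RX).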
+class ScopedCodeCacheWrite : ScopedTrace {
+ public:
+  explicit ScopedCodeCacheWrite(const JitMemoryRegion& region)
+      : ScopedTrace("ScopedCodeCacheWrite"),
+        region_(region) {
+    ScopedTrace trace("mprotect all");
+    const MemMap* const updatable_pages = region.GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = region.HasDualCodeMapping() ? kProtRW : kProtRWX;
+      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
+  }
+
+  ~ScopedCodeCacheWrite() {
+    ScopedTrace trace("mprotect code");
+    const MemMap* const updatable_pages = region_.GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = region_.HasDualCodeMapping() ? kProtR : kProtRX;
+      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
+  }
+
+ private:
+  const JitMemoryRegion& region_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
+};
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_RUNTIME_JIT_JIT_SCOPED_CODE_CACHE_WRITE_H_
diff --git a/test/667-jit-jni-stub/jit_jni_stub_test.cc b/test/667-jit-jni-stub/jit_jni_stub_test.cc
index 82e06fc..c21971f 100644
--- a/test/667-jit-jni-stub/jit_jni_stub_test.cc
+++ b/test/667-jit-jni-stub/jit_jni_stub_test.cc
@@ -31,7 +31,7 @@
   static bool isNextJitGcFull(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK(Runtime::Current()->GetJit() != nullptr);
     jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
-    MutexLock mu(self, cache->lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     return cache->ShouldDoFullCollection();
   }
 };