Move memory-related regions of the JIT cache into their own class.

- Moves the JIT lock into the global locks list (Locks::jit_lock_).
- The JIT cache now has two regions: one inherited from the zygote
  (shared_region_) and its own private region (private_region_); see
  the sketch below.
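
The rough shape of the split, inferred from the members this change
removes from JitCodeCache (the authoritative declaration is in
runtime/jit/jit_memory_region.h, added by this change; the private
members below are illustrative, not verbatim):

  // Owns the mem maps, mspaces, and capacity bookkeeping that
  // previously lived directly in JitCodeCache.
  class JitMemoryRegion {
   public:
    // Queries that JitCodeCache now forwards to its regions.
    bool OwnsSpace(const void* mspace) const;
    bool IsInDataSpace(const void* ptr) const;
    bool IsInExecSpace(const void* ptr) const;

    // Allocation and footprint management presumably move here as
    // well, matching the AllocateCode/AllocateData, SetFootprintLimit
    // and IncreaseCodeCacheCapacity members removed from JitCodeCache.

   private:
    MemMap data_pages_;      // Stack maps and profiling info.
    MemMap exec_pages_;      // Code, mapped executable.
    MemMap non_exec_pages_;  // Non-executable view of the code, for
                             // dual-view JIT.
    void* data_mspace_;      // Opaque mspace for allocating data.
    void* exec_mspace_;      // Opaque mspace for allocating code.
    size_t initial_capacity_;
    size_t max_capacity_;
    size_t current_capacity_;
    size_t data_end_;
    size_t exec_end_;
  };

JitCodeCache keeps one region inherited from the zygote
(shared_region_) and one of its own (private_region_), and answers
space queries by delegating: OwnsSpace() checks private_region_, while
IsInZygoteDataSpace()/IsInZygoteExecSpace() check shared_region_.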

Bug: 119800099
Test: boot, test.py
Change-Id: I833ba2b5a3d3c10fa01286cdf52603fa174a2cc7
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 3078e2b..a56f6f0 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -31,6 +31,7 @@
 #include "base/mem_map.h"
 #include "base/mutex.h"
 #include "base/safe_map.h"
+#include "jit_memory_region.h"
 
 namespace art {
 
@@ -72,14 +73,6 @@
 namespace jit {
 
 class MarkCodeClosure;
-class ScopedCodeCacheWrite;
-
-// Alignment in bytes that will suit all architectures for JIT code cache allocations.  The
-// allocated block is used for method header followed by generated code. Allocations should be
-// aligned to avoid sharing cache lines between different allocations. The alignment should be
-// determined from the hardware, but this isn't readily exposed in userland plus some hardware
-// misreports.
-static constexpr int kJitCodeAlignment = 64;
 
 using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
 
@@ -103,11 +96,11 @@
 
   bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void NotifyMethodRedefined(ArtMethod* method)
       REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Notify to the code cache that the compiler wants to use the
   // profiling info of `method` to drive optimizations,
@@ -115,15 +108,15 @@
   // collected.
   ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void DoneCompilerUse(ArtMethod* method, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Allocate and write code and its metadata to the code cache.
   // `cha_single_implementation_list` needs to be registered via CHA (if it's
@@ -143,20 +136,20 @@
                       bool has_should_deoptimize_flag,
                       const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Return true if the code cache contains this pc.
   bool ContainsPc(const void* pc) const;
 
   // Returns true if either the method's entrypoint is JIT compiled code or it is the
   // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
-  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!lock_);
+  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   // Return true if the code cache contains this method.
-  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
+  bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
-  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);
+  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   // Allocate a region of data that contain `size` bytes, and potentially space
   // for storing `number_of_roots` roots. Returns null if there is no more room.
@@ -168,43 +161,43 @@
                      uint8_t** stack_map_data,
                      uint8_t** roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Clear data from the data portion of the code cache.
   void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given the 'pc', try to find the JIT compiled code associated with it.
   // Return null if 'pc' is not in the code cache. 'method' is passed for
   // sanity check.
   OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Removes method from the cache for testing purposes. The caller
   // must ensure that all threads are suspended and the method should
   // not be in any thread's stack.
   bool RemoveMethod(ArtMethod* method, bool release_memory)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES(Locks::mutator_lock_);
 
   // Remove all methods in our cache that were allocated by 'alloc'.
   void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
@@ -213,11 +206,11 @@
                                   ArtMethod* method,
                                   const std::vector<uint32_t>& entries,
                                   bool retry_allocation)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return mspace == data_mspace_ || mspace == exec_mspace_;
+    return private_region_.OwnsSpace(mspace);
   }
 
   void* MoreCore(const void* mspace, intptr_t increment);
@@ -225,36 +218,36 @@
   // Adds to `methods` all profiled methods which are part of any of the given dex locations.
   void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                           std::vector<ProfileMethodInfo>& methods)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void Dump(std::ostream& os) REQUIRES(!lock_);
+  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);
 
-  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);
+  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   void SweepRootTables(IsMarkedVisitor* visitor)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The GC needs to disallow the reading of inline caches when it processes them,
   // to avoid having a class being used while it is being deleted.
-  void AllowInlineCacheAccess() REQUIRES(!lock_);
-  void DisallowInlineCacheAccess() REQUIRES(!lock_);
-  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
+  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
+  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
 
   // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
   // 'new_method' since it is being made obsolete.
   void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
-      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
 
   // Dynamically change whether we want to garbage collect code.
-  void SetGarbageCollectCode(bool value) REQUIRES(!lock_);
+  void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);
 
-  bool GetGarbageCollectCode() REQUIRES(!lock_);
+  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);
 
   // Unsafe variant for debug checks.
   bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
@@ -264,13 +257,13 @@
   // If Jit-gc has been disabled (and instrumentation has been enabled) this will return the
   // jit-compiled entrypoint for this method.  Otherwise it will return null.
   const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Fetch the entrypoint that zygote may have saved for a method. The zygote saves an entrypoint
   // only for the case when the method's declaring class is not initialized.
   const void* GetZygoteSavedEntryPoint(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void PostForkChildAction(bool is_system_server, bool is_zygote);
@@ -278,18 +271,11 @@
   // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
   // This is used for removing non-debuggable JIT code at the point we realize the runtime
   // is debuggable.
-  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
 
  private:
   JitCodeCache();
 
-  void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);
-
-  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
-      REQUIRES(lock_);
-
-  void InitializeSpaces() REQUIRES(lock_);
-
   // Internal version of 'CommitCode' that will not retry if the
   // allocation fails. Return null if the allocation fails.
   uint8_t* CommitCodeInternal(Thread* self,
@@ -303,208 +289,163 @@
                               const std::vector<Handle<mirror::Object>>& roots,
                               bool has_should_deoptimize_flag,
                               const ArenaSet<ArtMethod*>& cha_single_implementation_list)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Adds the given roots to the roots_data. Only a member for annotalysis.
   void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
   // The non-mutator lock version should be used if possible. This method will release then
   // re-acquire the mutator lock.
   void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
-      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
   bool WaitForPotentialCollectionToComplete(Thread* self)
-      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);
 
   // Remove CHA dependents and underlying allocations for entries in `method_headers`.
   void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES(!Locks::cha_lock_);
 
   // Removes method from the cache. The caller must ensure that all threads
   // are suspended and the method should not be in any thread's stack.
   bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES(Locks::mutator_lock_);
 
   // Free code and data allocations for `code_ptr`.
-  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
+  void FreeCodeAndData(const void* code_ptr) REQUIRES(Locks::jit_lock_);
 
   // Number of bytes allocated in the code cache.
-  size_t CodeCacheSize() REQUIRES(!lock_);
+  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);
 
   // Number of bytes allocated in the data cache.
-  size_t DataCacheSize() REQUIRES(!lock_);
+  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);
 
   // Number of bytes allocated in the code cache.
-  size_t CodeCacheSizeLocked() REQUIRES(lock_);
+  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
   // Number of bytes allocated in the data cache.
-  size_t DataCacheSizeLocked() REQUIRES(lock_);
+  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
   // Notify all waiting threads that a collection is done.
-  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);
-
-  // Try to increase the current capacity of the code cache. Return whether we
-  // succeeded at doing so.
-  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);
-
-  // Set the footprint limit of the code cache.
-  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
+  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
 
   // Return whether we should do a full collection given the current state of the cache.
   bool ShouldDoFullCollection()
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DoCollection(Thread* self, bool collect_profiling_info)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void RemoveUnmarkedCode(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void MarkCompiledCodeOnThreadStacks(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   CodeCacheBitmap* GetLiveBitmap() const {
     return live_bitmap_.get();
   }
 
-  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
-  void FreeCode(uint8_t* code) REQUIRES(lock_);
-  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
-  void FreeData(uint8_t* data) REQUIRES(lock_);
-
-  bool HasDualCodeMapping() const {
-    return non_exec_pages_.IsValid();
-  }
-
-  bool HasCodeMapping() const {
-    return exec_pages_.IsValid();
-  }
-
-  const MemMap* GetUpdatableCodeMapping() const;
-
   bool IsInZygoteDataSpace(const void* ptr) const {
-    return zygote_data_pages_.HasAddress(ptr);
+    return shared_region_.IsInDataSpace(ptr);
   }
 
   bool IsInZygoteExecSpace(const void* ptr) const {
-    return zygote_exec_pages_.HasAddress(ptr);
+    return shared_region_.IsInExecSpace(ptr);
   }
 
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   class JniStubKey;
   class JniStubData;
 
-  // Lock for guarding allocations, collections, and the method_code_map_.
-  Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
-  // Condition to wait on during collection.
-  ConditionVariable lock_cond_ GUARDED_BY(lock_);
-  // Whether there is a code cache collection in progress.
-  bool collection_in_progress_ GUARDED_BY(lock_);
-  // Mem map which holds data (stack maps and profiling info).
-  MemMap data_pages_;
-  // Mem map which holds code and has executable permission.
-  MemMap exec_pages_;
-  // Mem map which holds code with non executable permission. Only valid for dual view JIT when
-  // this is the non-executable view of code used to write updates.
-  MemMap non_exec_pages_;
-  // The opaque mspace for allocating data.
-  void* data_mspace_ GUARDED_BY(lock_);
-  // The opaque mspace for allocating code.
-  void* exec_mspace_ GUARDED_BY(lock_);
-  // Bitmap for collecting code and data.
-  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
-  // Holds compiled code associated with the shorty for a JNI stub.
-  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
-  // Holds compiled code associated to the ArtMethod.
-  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
-  // Holds osr compiled code associated to the ArtMethod.
-  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
-  // ProfilingInfo objects we have allocated.
-  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
-
-  // The initial capacity in bytes this code cache starts with.
-  size_t initial_capacity_ GUARDED_BY(lock_);
-
-  // The maximum capacity in bytes this code cache can go to.
-  size_t max_capacity_ GUARDED_BY(lock_);
-
-  // The current capacity in bytes of the code cache.
-  size_t current_capacity_ GUARDED_BY(lock_);
-
-  // The current footprint in bytes of the data portion of the code cache.
-  size_t data_end_ GUARDED_BY(lock_);
-
-  // The current footprint in bytes of the code portion of the code cache.
-  size_t exec_end_ GUARDED_BY(lock_);
-
-  // Whether the last collection round increased the code cache.
-  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
-
-  // Whether we can do garbage collection. Not 'const' as tests may override this.
-  bool garbage_collect_code_ GUARDED_BY(lock_);
-
-  // The size in bytes of used memory for the data portion of the code cache.
-  size_t used_memory_for_data_ GUARDED_BY(lock_);
-
-  // The size in bytes of used memory for the code portion of the code cache.
-  size_t used_memory_for_code_ GUARDED_BY(lock_);
-
-  // Number of compilations done throughout the lifetime of the JIT.
-  size_t number_of_compilations_ GUARDED_BY(lock_);
-
-  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
-  size_t number_of_osr_compilations_ GUARDED_BY(lock_);
-
-  // Number of code cache collections done throughout the lifetime of the JIT.
-  size_t number_of_collections_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of stack map size statistics.
-  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of code size statistics.
-  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of profiling info statistics.
-  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
-
   // Whether the GC allows accessing weaks in inline caches. Note that this
   // is not used by the concurrent collector, which uses
   // Thread::SetWeakRefAccessEnabled instead.
   Atomic<bool> is_weak_access_enabled_;
 
   // Condition to wait on for accessing inline caches.
-  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);
 
-  // Mem map which holds zygote data (stack maps and profiling info).
-  MemMap zygote_data_pages_;
-  // Mem map which holds zygote code and has executable permission.
-  MemMap zygote_exec_pages_;
-  // The opaque mspace for allocating zygote data.
-  void* zygote_data_mspace_ GUARDED_BY(lock_);
-  // The opaque mspace for allocating zygote code.
-  void* zygote_exec_mspace_ GUARDED_BY(lock_);
+  // -------------- JIT memory regions ------------------------------------- //
+
+  // Shared region, inherited from the zygote.
+  JitMemoryRegion shared_region_;
+
+  // Process's own region.
+  JitMemoryRegion private_region_;
+
+  // -------------- Global JIT maps --------------------------------------- //
+
+  // Holds compiled code associated with the shorty for a JNI stub.
+  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // Holds compiled code associated with the ArtMethod.
+  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // Holds OSR-compiled code associated with the ArtMethod.
+  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // ProfilingInfo objects we have allocated.
+  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
+
+  // -------------- JIT GC-related data structures ---------------------- //
+
+  // Condition to wait on during collection.
+  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);
+
+  // Whether there is a code cache collection in progress.
+  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
+
+  // Bitmap for collecting code and data.
+  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+
+  // Whether the last collection round increased the code cache.
+  bool last_collection_increased_code_cache_ GUARDED_BY(Locks::jit_lock_);
+
+  // Whether we can do garbage collection. Not 'const' as tests may override this.
+  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);
+
+  // ---------------- JIT statistics -------------------------------------- //
+
+  // Number of compilations done throughout the lifetime of the JIT.
+  size_t number_of_compilations_ GUARDED_BY(Locks::jit_lock_);
+
+  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
+  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);
+
+  // Number of code cache collections done throughout the lifetime of the JIT.
+  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histogram for keeping track of stack map size statistics.
+  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histogram for keeping track of code size statistics.
+  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histogram for keeping track of profiling info statistics.
+  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);
 
   friend class art::JitJniStubTestHelper;
   friend class ScopedCodeCacheWrite;