Fix JIT data dual mapping for apps.

We don't use dual data mapping for apps yet, but it would be nice to make it functional.

Mark pointers into the read-only data mapping as const, and fix the resulting compile errors.

Test: test.py -b --host --jit
Bug: 119800099
Change-Id: Ic1c45072f3c97f560e843f95fb87b95f754c6e03
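
For reference, below is a minimal standalone sketch (not part of this patch)
of the dual-mapping scheme that the const-correctness here supports: the data
pages are mapped twice from a single memfd, readers only ever receive
pointers into the read-only view, and a writable alias is recovered by
translating an address between the two views, as
JitMemoryRegion::GetWritableDataAddress does. The memfd name, kDataSize, and
the mmap setup are illustrative assumptions, not ART's actual configuration.

    // Sketch only: one fd, two views of the same pages (Linux, glibc >= 2.27).
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    constexpr size_t kDataSize = 4096;  // Hypothetical region size.

    int main() {
      int fd = memfd_create("jit-data-sketch", 0);
      if (fd < 0 || ftruncate(fd, kDataSize) != 0) return 1;
      // Read-only view for consumers, read-write view for the allocator.
      uint8_t* ro = static_cast<uint8_t*>(
          mmap(nullptr, kDataSize, PROT_READ, MAP_SHARED, fd, 0));
      uint8_t* rw = static_cast<uint8_t*>(
          mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
      if (ro == MAP_FAILED || rw == MAP_FAILED) return 1;

      // Callers see only const pointers into the read-only view.
      const uint8_t* data = ro + 128;

      // Translate the read-only address to its writable alias before writing,
      // mirroring GetWritableDataAddress()/TranslateAddress().
      uint8_t* writable = rw + (data - ro);
      std::memcpy(writable, "hello", 6);

      // The store is visible through the read-only alias.
      std::printf("%s\n", reinterpret_cast<const char*>(data));
      return 0;
    }

In the runtime the two views are the data_pages_ and writable_data_pages_
MemMaps of JitMemoryRegion; this change only narrows who is allowed to hold
a writable pointer into them.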
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index fd1d9a6..24ca0fc 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -174,9 +174,7 @@
   static JITDescriptor& Descriptor() { return __jit_debug_descriptor; }
   static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
   static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
-  static void Free(const void* ptr) {
-    Memory()->FreeData(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(ptr)));
-  }
+  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
   static void Free(void* ptr) = delete;
   template<class T> static T* Writable(const T* v) {
     return const_cast<T*>(Memory()->GetWritableDataAddress(v));
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index dc2bb7c..c0342ba 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -330,7 +330,7 @@
                                   size_t code_size,
                                   const uint8_t* stack_map,
                                   size_t stack_map_size,
-                                  uint8_t* roots_data,
+                                  const uint8_t* roots_data,
                                   const std::vector<Handle<mirror::Object>>& roots,
                                   bool osr,
                                   bool has_should_deoptimize_flag,
@@ -407,7 +407,7 @@
   }
 }
 
-static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
+static const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
   uint32_t roots = GetNumberOfRoots(data);
@@ -454,7 +454,10 @@
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   for (const auto& entry : method_code_map_) {
     uint32_t number_of_roots = 0;
-    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
+    const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
+    uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
+        ? private_region_.GetWritableDataAddress(root_table)
+        : shared_region_.GetWritableDataAddress(root_table);
     GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
     for (uint32_t i = 0; i < number_of_roots; ++i) {
       // This does not need a read barrier because this is called by GC.
@@ -581,7 +584,7 @@
       ProfilingInfo* info = *it;
       if (alloc.ContainsUnsafe(info->GetMethod())) {
         info->GetMethod()->SetProfilingInfo(nullptr);
-        private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
+        private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
         it = profiling_infos_.erase(it);
       } else {
         ++it;
@@ -672,7 +675,7 @@
                                           size_t code_size,
                                           const uint8_t* stack_map,
                                           size_t stack_map_size,
-                                          uint8_t* roots_data,
+                                          const uint8_t* roots_data,
                                           const std::vector<Handle<mirror::Object>>& roots,
                                           bool osr,
                                           bool has_should_deoptimize_flag,
@@ -687,7 +690,7 @@
   }
 
   size_t root_table_size = ComputeRootTableSize(roots.size());
-  uint8_t* stack_map_data = roots_data + root_table_size;
+  const uint8_t* stack_map_data = roots_data + root_table_size;
 
   MutexLock mu(self, *Locks::jit_lock_);
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
@@ -954,19 +957,19 @@
 
 void JitCodeCache::ClearData(Thread* self,
                              JitMemoryRegion* region,
-                             uint8_t* roots_data) {
+                             const uint8_t* roots_data) {
   MutexLock mu(self, *Locks::jit_lock_);
-  region->FreeData(reinterpret_cast<uint8_t*>(roots_data));
+  region->FreeData(roots_data);
 }
 
-uint8_t* JitCodeCache::ReserveData(Thread* self,
-                                   JitMemoryRegion* region,
-                                   size_t stack_map_size,
-                                   size_t number_of_roots,
-                                   ArtMethod* method) {
+const uint8_t* JitCodeCache::ReserveData(Thread* self,
+                                         JitMemoryRegion* region,
+                                         size_t stack_map_size,
+                                         size_t number_of_roots,
+                                         ArtMethod* method) {
   size_t table_size = ComputeRootTableSize(number_of_roots);
   size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
-  uint8_t* result = nullptr;
+  const uint8_t* result = nullptr;
 
   {
     ScopedThreadSuspension sts(self, kSuspended);
@@ -1318,7 +1321,7 @@
           info->GetMethod()->SetProfilingInfo(info);
         } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
           // No need for this ProfilingInfo object anymore.
-          private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
+          private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
           return true;
         }
         return false;
@@ -1448,11 +1451,12 @@
     return info;
   }
 
-  uint8_t* data = private_region_.AllocateData(profile_info_size);
+  const uint8_t* data = private_region_.AllocateData(profile_info_size);
   if (data == nullptr) {
     return nullptr;
   }
-  info = new (data) ProfilingInfo(method, entries);
+  uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
+  info = new (writable_data) ProfilingInfo(method, entries);
 
   // Make sure other threads see the data in the profiling info object before the
   // store in the ArtMethod's ProfilingInfo pointer.
@@ -1801,7 +1805,8 @@
   // Allocate for 40-80% occupancy. This keeps lookup times reasonable and
   // guarantees that lookups terminate.
   size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
-  Entry* data = reinterpret_cast<Entry*>(region_->AllocateData(capacity * sizeof(Entry)));
+  const Entry* data =
+      reinterpret_cast<const Entry*>(region_->AllocateData(capacity * sizeof(Entry)));
   if (data != nullptr) {
     region_->FillData(data, capacity, Entry { nullptr, nullptr });
     map_ = ArrayRef(data, capacity);
@@ -1869,7 +1874,7 @@
   // be added, we are guaranteed to find a free slot in the array, and
   // therefore that this loop terminates.
   while (true) {
-    Entry* entry = &map_[index];
+    const Entry* entry = &map_[index];
     if (entry->method == nullptr) {
       // Note that readers can read this memory concurrently, but that's OK as
       // we are writing pointers.
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 6aa5f31..64607b6 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -116,7 +116,7 @@
   };
 
   // The map allocated with `region_`.
-  ArrayRef<Entry> map_;
+  ArrayRef<const Entry> map_;
 
   // The region in which the map is allocated.
   JitMemoryRegion* const region_;
@@ -183,7 +183,7 @@
                       size_t code_size,
                       const uint8_t* stack_map,
                       size_t stack_map_size,
-                      uint8_t* roots_data,
+                      const uint8_t* roots_data,
                       const std::vector<Handle<mirror::Object>>& roots,
                       bool osr,
                       bool has_should_deoptimize_flag,
@@ -207,17 +207,17 @@
   // Allocate a region of data that will contain a stack map of size `stack_map_size` and
   // `number_of_roots` roots accessed by the JIT code.
   // Return a pointer to where roots will be stored.
-  uint8_t* ReserveData(Thread* self,
-                       JitMemoryRegion* region,
-                       size_t stack_map_size,
-                       size_t number_of_roots,
-                       ArtMethod* method)
+  const uint8_t* ReserveData(Thread* self,
+                             JitMemoryRegion* region,
+                             size_t stack_map_size,
+                             size_t number_of_roots,
+                             ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
   // Clear data from the data portion of the code cache.
   void ClearData(
-      Thread* self, JitMemoryRegion* region, uint8_t* roots_data)
+      Thread* self, JitMemoryRegion* region, const uint8_t* roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -351,7 +351,7 @@
                               size_t code_size,
                               const uint8_t* stack_map,
                               size_t stack_map_size,
-                              uint8_t* roots_data,
+                              const uint8_t* roots_data,
                               const std::vector<Handle<mirror::Object>>& roots,
                               bool osr,
                               bool has_should_deoptimize_flag,
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 39353ca..447bbf4 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -452,11 +452,11 @@
   reinterpret_cast<uint32_t*>(roots_data)[length] = length;
 }
 
-bool JitMemoryRegion::CommitData(uint8_t* roots_data,
+bool JitMemoryRegion::CommitData(const uint8_t* readonly_roots_data,
                                  const std::vector<Handle<mirror::Object>>& roots,
                                  const uint8_t* stack_map,
                                  size_t stack_map_size) {
-  roots_data = GetWritableDataAddress(roots_data);
+  uint8_t* roots_data = GetWritableDataAddress(readonly_roots_data);
   size_t root_table_size = ComputeRootTableSize(roots.size());
   uint8_t* stack_map_data = roots_data + root_table_size;
   FillRootTable(roots_data, roots);
@@ -476,16 +476,19 @@
   mspace_free(exec_mspace_, const_cast<uint8_t*>(code));
 }
 
-uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
+const uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
   void* result = mspace_malloc(data_mspace_, data_size);
   used_memory_for_data_ += mspace_usable_size(result);
   return reinterpret_cast<uint8_t*>(GetNonWritableDataAddress(result));
 }
 
-void JitMemoryRegion::FreeData(uint8_t* data) {
-  data = GetWritableDataAddress(data);
-  used_memory_for_data_ -= mspace_usable_size(data);
-  mspace_free(data_mspace_, data);
+void JitMemoryRegion::FreeData(const uint8_t* data) {
+  FreeWritableData(GetWritableDataAddress(data));
+}
+
+void JitMemoryRegion::FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) {
+  used_memory_for_data_ -= mspace_usable_size(writable_data);
+  mspace_free(data_mspace_, writable_data);
 }
 
 #if defined(__BIONIC__) && defined(ART_TARGET)
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index bc05cb6..2bb69a7 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -28,8 +28,6 @@
 
 namespace art {
 
-struct JitNativeInfo;
-
 namespace mirror {
 class Object;
 }
@@ -89,11 +87,13 @@
                               bool has_should_deoptimize_flag)
       REQUIRES(Locks::jit_lock_);
   void FreeCode(const uint8_t* code) REQUIRES(Locks::jit_lock_);
-  uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
-  void FreeData(uint8_t* data) REQUIRES(Locks::jit_lock_);
+  const uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
+  void FreeData(const uint8_t* data) REQUIRES(Locks::jit_lock_);
+  void FreeData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) = delete;
+  void FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_);
 
   // Emit roots and stack map into the memory pointed by `roots_data`.
-  bool CommitData(uint8_t* roots_data,
+  bool CommitData(const uint8_t* roots_data,
                   const std::vector<Handle<mirror::Object>>& roots,
                   const uint8_t* stack_map,
                   size_t stack_map_size)
@@ -114,14 +114,14 @@
   }
 
   template <typename T>
-  void FillData(T* address, size_t n, const T& t)  REQUIRES(Locks::jit_lock_) {
+  void FillData(const T* address, size_t n, const T& t)  REQUIRES(Locks::jit_lock_) {
     std::fill_n(GetWritableDataAddress(address), n, t);
   }
 
   // Generic helper for writing arbitrary data in the data portion of the
   // region.
   template <typename T>
-  void WriteData(T* address, const T& value) {
+  void WriteData(const T* address, const T& value) {
     *GetWritableDataAddress(address) = value;
   }
 
@@ -179,6 +179,13 @@
     return data_end_;
   }
 
+  template <typename T> T* GetWritableDataAddress(const T* src_ptr) {
+    if (!HasDualDataMapping()) {
+      return const_cast<T*>(src_ptr);
+    }
+    return const_cast<T*>(TranslateAddress(src_ptr, data_pages_, writable_data_pages_));
+  }
+
  private:
   template <typename T>
   T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
@@ -212,13 +219,6 @@
     return TranslateAddress(src_ptr, writable_data_pages_, data_pages_);
   }
 
-  template <typename T> T* GetWritableDataAddress(T* src_ptr) {
-    if (!HasDualDataMapping()) {
-      return src_ptr;
-    }
-    return TranslateAddress(src_ptr, data_pages_, writable_data_pages_);
-  }
-
   template <typename T> T* GetExecutableAddress(T* src_ptr) {
     if (!HasDualCodeMapping()) {
       return src_ptr;
@@ -279,7 +279,6 @@
 
   friend class ScopedCodeCacheWrite;  // For GetUpdatableCodeMapping
   friend class TestZygoteMemory;
-  friend struct art::JitNativeInfo;  // For GetWritableDataAddress.
 };
 
 }  // namespace jit