Revert "Revert "Make the JIT zygote memory shared.""

This reverts commit 2fef66b294417d447630f9d98de68227eef476d3.

Bug: 119800099
Bug: 136110523

Reason for revert: Fixed webview_zygote case.
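
With this change, the zygote allocates its JIT code cache in a shared
region that forked children can reuse, while other processes keep
compiling into a private region; a child zygote keeps no private
region at all. As a rough sketch (toy types and fields, not the ART
sources), the region selection behaves like the new GetCurrentRegion()
and CanAllocateProfilingInfo():

  struct JitMemoryRegion { bool valid = false; };

  struct JitCodeCache {
    JitMemoryRegion shared_region_;   // zygote code, reused by children
    JitMemoryRegion private_region_;  // per-process code
    bool is_zygote = false;

    JitMemoryRegion* GetCurrentRegion() {
      return is_zygote ? &shared_region_ : &private_region_;
    }

    // Profiling infos can reference GC objects, which the shared region
    // does not support, so they need a valid private region.
    bool CanAllocateProfilingInfo() {
      JitMemoryRegion* region = GetCurrentRegion();
      return region->valid && region != &shared_region_;
    }
  };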

Change-Id: Iaae8c999463d77b7b1e62b55458493bdbc97a104
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 1f734fe..201f3ce 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -267,15 +267,15 @@
     return false;
   }
 
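+  // In the zygote this is the shared region (whose code children can reuse);
+  // in all other processes it is the private region.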
+  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
+
   // If we get a request to compile a proxy method, we pass the actual Java method
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit)) {
+  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, region)) {
     return false;
   }
 
-  JitMemoryRegion* region = GetCodeCache()->GetPrivateRegion();
-
   VLOG(jit) << "Compiling method "
             << ArtMethod::PrettyMethod(method_to_compile)
             << " osr=" << std::boolalpha << osr;
@@ -838,7 +838,7 @@
         klass == GetClassRoot<mirror::VarHandle>()) {
       // MethodHandle and VarHandle invocation methods are required to throw an
       // UnsupportedOperationException if invoked reflectively. We achieve this by having native
-      // implementations that arise the exception. We need to disable JIT compilation of these JNI
+      // implementations that raise the exception. We need to disable JIT compilation of these JNI
       // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
       // stubs. Since these stubs have different stack representations we can then crash in stack
       // walking (b/78151261).
@@ -854,9 +854,11 @@
                              uint32_t new_count,
                              bool with_backedges) {
   if (thread_pool_ == nullptr) {
-    // Should only see this when shutting down, starting up, or in safe mode.
+    // Should only see this when shutting down, starting up, in safe mode, or
+    // in a child zygote.
     DCHECK(Runtime::Current()->IsShuttingDown(self) ||
            !Runtime::Current()->IsFinishedStarting() ||
+           Runtime::Current()->IsZygote() ||
            Runtime::Current()->IsSafeMode());
     return false;
   }
@@ -876,7 +878,9 @@
 
   if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
     // Note: Native methods have no "warm" state or profiling info.
-    if (!method->IsNative() && method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
+    if (!method->IsNative() &&
+        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) &&
+        code_cache_->CanAllocateProfilingInfo()) {
       bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
       if (success) {
         VLOG(jit) << "Start profiling " << method->PrettyMethod();
@@ -1022,14 +1026,9 @@
 }
 
 void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
-  if (is_zygote) {
-    // Remove potential tasks that have been inherited from the zygote. Child zygotes
-    // currently don't need the whole boot image compiled (ie webview_zygote).
+  if (is_zygote || Runtime::Current()->IsSafeMode()) {
+    // Remove potential tasks that have been inherited from the zygote.
     thread_pool_->RemoveAllTasks(Thread::Current());
-    // Don't transition if this is for a child zygote.
-    return;
-  }
-  if (Runtime::Current()->IsSafeMode()) {
     // Delete the thread pool, we are not going to JIT.
     thread_pool_.reset(nullptr);
     return;
@@ -1074,5 +1073,31 @@
   thread_pool_->CreateThreads();
 }
 
+bool Jit::CanEncodeMethod(ArtMethod* method ATTRIBUTE_UNUSED,
+                          bool is_for_shared_region ATTRIBUTE_UNUSED) const {
+  // TODO: For shared region, we should only encode a method of a class
+  // allocated before any fork.
+  return true;
+}
+
+bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
+  // TODO: For shared region, we should only encode a non-moving class allocated
+  // before any fork.
+  return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(cls);
+}
+
+bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
+  // TODO: For shared region, we should only encode a non-moving string allocated
+  // before any fork.
+  return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(string);
+}
+
+bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls,
+                               bool is_for_shared_region ATTRIBUTE_UNUSED) const {
+  // TODO: For shared region, we should assume initialized if the class is initialized
+  // before any fork.
+  return cls->IsInitialized();
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 5643277..e44e1c9 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -39,6 +39,7 @@
 class Object;
 class Class;
 class ClassLoader;
+class String;
 }   // namespace mirror
 
 namespace jit {
@@ -212,7 +213,8 @@
     return options_->GetPriorityThreadWeight();
   }
 
-  // Returns false if we only need to save profile information and not compile methods.
+  // Return whether we should do JIT compilation. Note this returns false
+  // if we only need to save profile information and not compile methods.
   bool UseJitCompilation() const {
     return options_->UseJitCompilation();
   }
@@ -322,6 +324,16 @@
   void RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                         jobject class_loader);
 
+  // Called by the compiler to know whether it can directly encode the
+  // method/class/string.
+  bool CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const;
+  bool CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   Jit(JitCodeCache* code_cache, JitOptions* options);
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 709882c..97b5b8d 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -206,8 +206,10 @@
   if (is_zygote) {
     // Zygote should never collect code, so that the memory can be shared with the children.
     jit_code_cache->garbage_collect_code_ = false;
+    jit_code_cache->shared_region_ = std::move(region);
+  } else {
+    jit_code_cache->private_region_ = std::move(region);
   }
-  jit_code_cache->private_region_ = std::move(region);
 
   VLOG(jit) << "Created jit code cache: initial capacity="
             << PrettySize(initial_capacity)
@@ -383,7 +385,8 @@
   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
 }
 
-static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
+                                bool is_shared_region)
     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!kIsDebugBuild) {
     return;
@@ -396,6 +399,10 @@
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
     }
+    // Ensure that we don't put movable objects in the shared region.
+    if (is_shared_region) {
+      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
+    }
   }
 }
 
@@ -664,7 +671,7 @@
   if (!method->IsNative()) {
     // We need to do this before grabbing the lock_ because it needs to be able to see the string
     // InternTable. Native methods do not have roots.
-    DCheckRootsAreValid(roots);
+    DCheckRootsAreValid(roots, IsSharedRegion(*region));
   }
 
   size_t root_table_size = ComputeRootTableSize(roots.size());
@@ -1401,6 +1408,7 @@
                                               bool retry_allocation)
     // No thread safety analysis as we are using TryLock/Unlock explicitly.
     NO_THREAD_SAFETY_ANALYSIS {
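+  // Profiling infos can reference GC objects, which the shared region does
+  // not support, so they are only allocated out of a valid private region.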
+  DCHECK(CanAllocateProfilingInfo());
   ProfilingInfo* info = nullptr;
   if (!retry_allocation) {
     // If we are allocating for the interpreter, just try to lock, to avoid
@@ -1454,7 +1462,9 @@
 }
 
 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
-  return private_region_.MoreCore(mspace, increment);
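+  // Each region runs its own mspace; route dlmalloc's request to whichever
+  // region owns the given mspace.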
+  return shared_region_.OwnsSpace(mspace)
+      ? shared_region_.MoreCore(mspace, increment)
+      : private_region_.MoreCore(mspace, increment);
 }
 
 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
@@ -1546,7 +1556,11 @@
   return osr_code_map_.find(method) != osr_code_map_.end();
 }
 
-bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit) {
+bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
+                                       Thread* self,
+                                       bool osr,
+                                       bool prejit,
+                                       JitMemoryRegion* region) {
   if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
     return false;
   }
@@ -1608,7 +1622,7 @@
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (info == nullptr) {
       // When prejitting, we don't allocate a profiling info.
-      if (!prejit) {
+      if (!prejit && !IsSharedRegion(*region)) {
         VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
         // Because the counter is not atomic, there are some rare cases where we may not hit the
         // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
@@ -1716,13 +1730,19 @@
 }
 
 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
-  if (is_zygote) {
-    // Don't transition if this is for a child zygote.
+  if (is_zygote || Runtime::Current()->IsSafeMode()) {
+    // Don't create a private region for a child zygote. Regions are usually mapped shared
+    // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
     return;
   }
-  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
 
-  shared_region_ = std::move(private_region_);
+  if (private_region_.IsValid()) {
+    // In case the zygote was running with its own private region (happens for
+    // unit tests), move the region to the shared one.
+    CHECK(!shared_region_.IsValid());
+    std::swap(shared_region_, private_region_);
+  }
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
 
   // Reset all statistics to be specific to this process.
   number_of_compilations_ = 0;
@@ -1741,5 +1761,9 @@
   }
 }
 
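+// The region new compilations go to: the shared region while in the zygote
+// (so children inherit the code), the private region otherwise.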
+JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
+  return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a4e2964..88b440b 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -96,7 +96,11 @@
                               std::string* error_msg);
   ~JitCodeCache();
 
-  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
+  bool NotifyCompilationOf(ArtMethod* method,
+                           Thread* self,
+                           bool osr,
+                           bool prejit,
+                           JitMemoryRegion* region)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -213,7 +217,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return private_region_.OwnsSpace(mspace);
+    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
   }
 
   void* MoreCore(const void* mspace, intptr_t increment);
@@ -276,7 +280,15 @@
   // is debuggable.
   void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
 
-  JitMemoryRegion* GetPrivateRegion() { return &private_region_; }
+  JitMemoryRegion* GetCurrentRegion();
+  bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }
+  bool CanAllocateProfilingInfo() {
+    // If we don't have a private region, we cannot allocate a profiling info.
+    // A shared region doesn't, in general, support GC objects, which a
+    // profiling info can reference.
+    JitMemoryRegion* region = GetCurrentRegion();
+    return region->IsValid() && !IsSharedRegion(*region);
+  }
 
  private:
   JitCodeCache();
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index a39e121..bcf90ce 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -64,9 +64,14 @@
   // File descriptor enabling dual-view mapping of code section.
   unique_fd mem_fd;
 
-  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
-  // for it.
-  if (!is_zygote) {
+  if (is_zygote) {
+    // Because we are not going to GC code generated by the zygote, just use the full capacity.
+    current_capacity_ = max_capacity;
+    mem_fd = unique_fd(CreateZygoteMemory(capacity, error_msg));
+    if (mem_fd.get() < 0) {
+      return false;
+    }
+  } else {
     // Bionic supports memfd_create, but the call may fail on older kernels.
     mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
     if (mem_fd.get() < 0) {
@@ -79,16 +84,14 @@
         return false;
       }
       VLOG(jit) << oss.str();
+    } else if (ftruncate(mem_fd, capacity) != 0) {
+      std::ostringstream oss;
+      oss << "Failed to initialize memory file: " << strerror(errno);
+      *error_msg = oss.str();
+      return false;
     }
   }
 
-  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
-    std::ostringstream oss;
-    oss << "Failed to initialize memory file: " << strerror(errno);
-    *error_msg = oss.str();
-    return false;
-  }
-
   std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
   std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
 
@@ -207,6 +210,13 @@
         }
       }
     }
+    if (is_zygote) {
+      // Now that we have created the writable and executable mappings, prevent creating any new
+      // ones.
+      if (!ProtectZygoteMemory(mem_fd.get(), error_msg)) {
+        return false;
+      }
+    }
   } else {
     // Profiling only. No memory for code required.
   }
@@ -234,15 +244,14 @@
     CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
     exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
     CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
-    SetFootprintLimit(initial_capacity_);
+    SetFootprintLimit(current_capacity_);
     // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
     // perform the update and there are no other times write access is required.
     CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
   } else {
     exec_mspace_ = nullptr;
-    SetFootprintLimit(initial_capacity_);
+    SetFootprintLimit(current_capacity_);
   }
-
   return true;
 }
 
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index f325480..af0ca83 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -96,6 +96,10 @@
       REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
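+  // Whether the region has been initialized with at least one mspace (data
+  // and/or exec), i.e. whether it can back allocations.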
+  bool IsValid() const NO_THREAD_SAFETY_ANALYSIS {
+    return exec_mspace_ != nullptr || data_mspace_ != nullptr;
+  }
+
   bool HasDualCodeMapping() const {
     return non_exec_pages_.IsValid();
   }