Revert "Make the JIT zygote memory shared."

This reverts commit 05f87217ddc9b4b9186710c0135b918f456c5aef.

Bug: 119800099
Bug: 136110523

Reason for revert: testWebview is flaking.

Change-Id: I96afa6bc9c56c4aaf5ed72ae370f6f69c096c559
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 1515575..9ef5ec3 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1814,8 +1814,7 @@
       callee_dead_reference_safe,
       graph_->IsDebuggable(),
       /* osr= */ false,
-      /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
-      /* start_instruction_id= */ caller_instruction_counter);
+      caller_instruction_counter);
   callee_graph->SetArtMethod(resolved_method);
 
   // When they are needed, allocate `inline_stats_` on the Arena instead
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f8f813e..5e7b575 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -29,7 +29,6 @@
 #include "driver/dex_compilation_unit.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
-#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "oat_file.h"
 #include "optimizing_compiler_stats.h"
@@ -1291,20 +1290,15 @@
   // Check if the class will be initialized at runtime.
   if (cls->IsInitialized()) {
     Runtime* runtime = Runtime::Current();
-    if (runtime->IsAotCompiler()) {
-      // Assume loaded only if klass is in the boot image. App classes cannot be assumed
-      // loaded because we don't even know what class loader will be used to load them.
-      if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
-        return true;
-      }
-    } else {
+    if (!runtime->IsAotCompiler()) {
       DCHECK(runtime->UseJitCompilation());
-      if (Runtime::Current()->GetJit()->CanAssumeInitialized(
-              cls.Get(),
-              graph_->IsCompilingForSharedJitCode())) {
-        // For JIT, the class cannot revert to an uninitialized state.
-        return true;
-      }
+      // For JIT, the class cannot revert to an uninitialized state.
+      return true;
+    }
+    // Assume loaded only if klass is in the boot image. App classes cannot be assumed
+    // loaded because we don't even know what class loader will be used to load them.
+    if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
+      return true;
     }
   }
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 759a8e6..8ac33a4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -320,7 +320,6 @@
          bool dead_reference_safe = false,
          bool debuggable = false,
          bool osr = false,
-         bool is_shared_jit_code = false,
          int start_instruction_id = 0)
       : allocator_(allocator),
         arena_stack_(arena_stack),
@@ -356,8 +355,7 @@
         art_method_(nullptr),
         inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
         osr_(osr),
-        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)),
-        is_shared_jit_code_(is_shared_jit_code) {
+        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
     blocks_.reserve(kDefaultNumberOfBlocks);
   }
 
@@ -587,10 +585,6 @@
 
   bool IsCompilingOsr() const { return osr_; }
 
-  bool IsCompilingForSharedJitCode() const {
-    return is_shared_jit_code_;
-  }
-
   ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
     return cha_single_implementation_list_;
   }
@@ -780,10 +774,6 @@
   // List of methods that are assumed to have single implementation.
   ArenaSet<ArtMethod*> cha_single_implementation_list_;
 
-  // Whether we are JIT compiling in the shared region area, putting
-  // restrictions on, for example, how literals are being generated.
-  bool is_shared_jit_code_;
-
   friend class SsaBuilder;           // For caching constants.
   friend class SsaLivenessAnalysis;  // For the linear order.
   friend class HInliner;             // For the reverse post order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6f3b9fe..2153ddd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -384,7 +384,6 @@
                             ArtMethod* method,
                             bool baseline,
                             bool osr,
-                            bool is_shared_jit_code,
                             VariableSizedHandleScope* handles) const;
 
   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -784,7 +783,6 @@
                                               ArtMethod* method,
                                               bool baseline,
                                               bool osr,
-                                              bool is_shared_jit_code,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
   const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -852,8 +850,7 @@
       kInvalidInvokeType,
       dead_reference_safe,
       compiler_options.GetDebuggable(),
-      /* osr= */ osr,
-      /* is_shared_jit_code= */ is_shared_jit_code);
+      /* osr= */ osr);
 
   if (method != nullptr) {
     graph->SetArtMethod(method);
@@ -1110,7 +1107,6 @@
                        method,
                        compiler_options.IsBaseline(),
                        /* osr= */ false,
-                       /* is_shared_jit_code= */ false,
                        &handles));
       }
     }
@@ -1372,7 +1368,6 @@
                    method,
                    baseline,
                    osr,
-                   /* is_shared_jit_code= */ code_cache->IsSharedRegion(*region),
                    &handles));
     if (codegen.get() == nullptr) {
       return false;
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 3e22edc..8637db1 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -19,7 +19,6 @@
 #include "art_method-inl.h"
 #include "base/casts.h"
 #include "base/enums.h"
-#include "base/logging.h"
 #include "class_linker.h"
 #include "code_generator.h"
 #include "driver/compiler_options.h"
@@ -27,7 +26,6 @@
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
 #include "handle_scope-inl.h"
-#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "mirror/string.h"
 #include "nodes.h"
@@ -100,17 +98,11 @@
     }
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else if (Runtime::Current()->UseJitCompilation()) {
-    if (Runtime::Current()->GetJit()->CanEncodeMethod(
-            callee,
-            codegen->GetGraph()->IsCompilingForSharedJitCode())) {
-      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
-      method_load_data = reinterpret_cast<uintptr_t>(callee);
-      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    } else {
-      // Do not sharpen.
-      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
-      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    }
+    // JIT or on-device AOT compilation referencing a boot image method.
+    // Use the method address directly.
+    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
+    method_load_data = reinterpret_cast<uintptr_t>(callee);
+    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else if (IsInBootImage(callee)) {
     // Use PC-relative access to the .data.bimg.rel.ro methods array.
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
@@ -183,16 +175,7 @@
         if (is_in_boot_image) {
           desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
         } else if (klass != nullptr) {
-          if (runtime->GetJit()->CanEncodeClass(
-                  klass.Get(),
-                  codegen->GetGraph()->IsCompilingForSharedJitCode())) {
-            desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
-          } else {
-            // Shared JIT code cannot encode a literal that the GC can move.
-            VLOG(jit) << "Unable to encode in shared region class literal: "
-                      << klass->PrettyClass();
-            desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
-          }
+          desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
         } else {
           // Class not loaded yet. This happens when the dex code requesting
           // this `HLoadClass` hasn't been executed in the interpreter.
@@ -348,18 +331,10 @@
       DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
       string = class_linker->LookupString(string_index, dex_cache.Get());
       if (string != nullptr) {
-        gc::Heap* heap = runtime->GetHeap();
-        if (heap->ObjectIsInBootImageSpace(string)) {
+        if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
           desired_load_kind = HLoadString::LoadKind::kJitBootImageAddress;
-        } else if (runtime->GetJit()->CanEncodeString(
-                  string,
-                  codegen->GetGraph()->IsCompilingForSharedJitCode())) {
-          desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
         } else {
-          // Shared JIT code cannot encode a literal that the GC can move.
-          VLOG(jit) << "Unable to encode in shared region string literal: "
-                    << string->ToModifiedUtf8();
-          desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
+          desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
         }
       } else {
         desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c5133b9..1f734fe 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -267,15 +267,15 @@
     return false;
   }
 
-  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
-
   // If we get a request to compile a proxy method, we pass the actual Java method
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, region)) {
+  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit)) {
     return false;
   }
 
+  JitMemoryRegion* region = GetCodeCache()->GetPrivateRegion();
+
   VLOG(jit) << "Compiling method "
             << ArtMethod::PrettyMethod(method_to_compile)
             << " osr=" << std::boolalpha << osr;
@@ -838,7 +838,7 @@
         klass == GetClassRoot<mirror::VarHandle>()) {
       // MethodHandle and VarHandle invocation methods are required to throw an
       // UnsupportedOperationException if invoked reflectively. We achieve this by having native
      // implementations that raise the exception. We need to disable JIT compilation of these JNI
       // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
       // stubs. Since these stubs have different stack representations we can then crash in stack
       // walking (b/78151261).
@@ -1074,35 +1074,5 @@
   thread_pool_->CreateThreads();
 }
 
-bool Jit::CanEncodeMethod(ArtMethod* method ATTRIBUTE_UNUSED,
-                          bool is_for_shared_region ATTRIBUTE_UNUSED) const {
-  // TODO: For shared region, we should only encode a method of a class
-  // allocated before any fork.
-  return true;
-}
-
-bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
-  // TODO: For shared region, we should only encode a non-moving class allocated
-  // before any fork.
-  return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(cls);
-}
-
-bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
-  // TODO: For shared region, we should only encode a non-moving string allocated
-  // before any fork.
-  return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(string);
-}
-
-bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls,
-                               bool is_for_shared_region ATTRIBUTE_UNUSED) const {
-  // TODO: For shared region, we should assume initialized if the class is initialized
-  // before any fork.
-  return cls->IsInitialized();
-}
-
-bool Jit::UseJitCompilation() {
-  return options_->UseJitCompilation() && GetCodeCache()->GetCurrentRegion()->IsValid();
-}
-
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 1474a30..5643277 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -39,7 +39,6 @@
 class Object;
 class Class;
 class ClassLoader;
-class String;
 }   // namespace mirror
 
 namespace jit {
@@ -213,9 +212,10 @@
     return options_->GetPriorityThreadWeight();
   }
 
-  // Return whether we should do JIT compilation. Note this will returns false
-  // if we only need to save profile information and not compile methods.
-  bool UseJitCompilation();
+  // Returns false if we only need to save profile information and not compile methods.
+  bool UseJitCompilation() const {
+    return options_->UseJitCompilation();
+  }
 
   bool GetSaveProfilingInfo() const {
     return options_->GetSaveProfilingInfo();
@@ -322,16 +322,6 @@
   void RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                         jobject class_loader);
 
-  // Called by the compiler to know whether it can directly encode the
-  // method/class/string.
-  bool CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const;
-  bool CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  bool CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  bool CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
  private:
   Jit(JitCodeCache* code_cache, JitOptions* options);
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d227cec..dd1dbea 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -206,10 +206,8 @@
   if (is_zygote) {
     // Zygote should never collect code to share the memory with the children.
     jit_code_cache->garbage_collect_code_ = false;
-    jit_code_cache->shared_region_ = std::move(region);
-  } else {
-    jit_code_cache->private_region_ = std::move(region);
   }
+  jit_code_cache->private_region_ = std::move(region);
 
   VLOG(jit) << "Created jit code cache: initial capacity="
             << PrettySize(initial_capacity)
@@ -385,8 +383,7 @@
   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
 }
 
-static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
-                                bool is_shared_region)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!kIsDebugBuild) {
     return;
@@ -399,10 +396,6 @@
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
     }
-    // Ensure that we don't put movable objects in the shared region.
-    if (is_shared_region) {
-      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
-    }
   }
 }
 
@@ -671,7 +664,7 @@
   if (!method->IsNative()) {
     // We need to do this before grabbing the lock_ because it needs to be able to see the string
     // InternTable. Native methods do not have roots.
-    DCheckRootsAreValid(roots, IsSharedRegion(*region));
+    DCheckRootsAreValid(roots);
   }
 
   size_t root_table_size = ComputeRootTableSize(roots.size());
@@ -1408,13 +1401,6 @@
                                               bool retry_allocation)
     // No thread safety analysis as we are using TryLock/Unlock explicitly.
     NO_THREAD_SAFETY_ANALYSIS {
-  // If we don't have a private region, we cannot allocate a profiling info.
-  // A shared region doesn't support in general GC objects, which a profiling info
-  // can reference.
-  if (!private_region_.IsValid()) {
-    return nullptr;
-  }
-
   ProfilingInfo* info = nullptr;
   if (!retry_allocation) {
     // If we are allocating for the interpreter, just try to lock, to avoid
@@ -1468,9 +1454,7 @@
 }
 
 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
-  return shared_region_.OwnsSpace(mspace)
-      ? shared_region_.MoreCore(mspace, increment)
-      : private_region_.MoreCore(mspace, increment);
+  return private_region_.MoreCore(mspace, increment);
 }
 
 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
@@ -1562,11 +1546,7 @@
   return osr_code_map_.find(method) != osr_code_map_.end();
 }
 
-bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
-                                       Thread* self,
-                                       bool osr,
-                                       bool prejit,
-                                       JitMemoryRegion* region) {
+bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit) {
   if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
     return false;
   }
@@ -1628,7 +1608,7 @@
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (info == nullptr) {
       // When prejitting, we don't allocate a profiling info.
-      if (!prejit && !IsSharedRegion(*region)) {
+      if (!prejit) {
         VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
         // Because the counter is not atomic, there are some rare cases where we may not hit the
         // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
@@ -1737,19 +1717,13 @@
 
 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
   if (is_zygote) {
-    // Don't create a private region for a child zygote. Regions are usually map shared
-    // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
+    // Don't transition if this is for a child zygote.
     return;
   }
-
-  if (private_region_.IsValid()) {
-    // In case the zygote was running with its own private region (happens for
-    // unit tests), move the region to the shared one.
-    CHECK(!shared_region_.IsValid());
-    std::swap(shared_region_, private_region_);
-  }
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
 
+  shared_region_ = std::move(private_region_);
+
   // Reset all statistics to be specific to this process.
   number_of_compilations_ = 0;
   number_of_osr_compilations_ = 0;
@@ -1767,9 +1741,5 @@
   }
 }
 
-JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
-  return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
-}
-
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a777ab7..a4e2964 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -96,11 +96,7 @@
                               std::string* error_msg);
   ~JitCodeCache();
 
-  bool NotifyCompilationOf(ArtMethod* method,
-                           Thread* self,
-                           bool osr,
-                           bool prejit,
-                           JitMemoryRegion* region)
+  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -217,7 +213,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
+    return private_region_.OwnsSpace(mspace);
   }
 
   void* MoreCore(const void* mspace, intptr_t increment);
@@ -280,8 +276,7 @@
   // is debuggable.
   void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
 
-  JitMemoryRegion* GetCurrentRegion();
-  bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }
+  JitMemoryRegion* GetPrivateRegion() { return &private_region_; }
 
  private:
   JitCodeCache();
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 3438c37..2db5245 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -64,14 +64,9 @@
   // File descriptor enabling dual-view mapping of code section.
   unique_fd mem_fd;
 
-  if (is_zygote) {
-    // Because we are not going to GC code generated by the zygote, just use all available.
-    current_capacity_ = max_capacity;
-    mem_fd = unique_fd(CreateZygoteMemory(capacity, error_msg));
-    if (mem_fd.get() < 0) {
-      return false;
-    }
-  } else {
+  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
+  // for it.
+  if (!is_zygote) {
     // Bionic supports memfd_create, but the call may fail on older kernels.
     mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
     if (mem_fd.get() < 0) {
@@ -84,14 +79,16 @@
         return false;
       }
       VLOG(jit) << oss.str();
-    } else if (ftruncate(mem_fd, capacity) != 0) {
-      std::ostringstream oss;
-      oss << "Failed to initialize memory file: " << strerror(errno);
-      *error_msg = oss.str();
-      return false;
     }
   }
 
+  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
+    std::ostringstream oss;
+    oss << "Failed to initialize memory file: " << strerror(errno);
+    *error_msg = oss.str();
+    return false;
+  }
+
   std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
   std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
 
@@ -210,13 +207,6 @@
         }
       }
     }
-    if (is_zygote) {
-      // Now that we have created the writable and executable mappings, prevent creating any new
-      // ones.
-      if (!ProtectZygoteMemory(mem_fd.get(), error_msg)) {
-        return false;
-      }
-    }
   } else {
     // Profiling only. No memory for code required.
   }
@@ -244,14 +234,15 @@
     CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
     exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
     CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
-    SetFootprintLimit(current_capacity_);
+    SetFootprintLimit(initial_capacity_);
     // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
     // perform the update and there are no other times write access is required.
     CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
   } else {
     exec_mspace_ = nullptr;
-    SetFootprintLimit(current_capacity_);
+    SetFootprintLimit(initial_capacity_);
   }
+
   return true;
 }
 
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index 2cf01bd..b5f808d 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -107,10 +107,6 @@
       REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool IsValid() const NO_THREAD_SAFETY_ANALYSIS {
-    return exec_mspace_ != nullptr || data_mspace_ != nullptr;
-  }
-
   bool HasDualCodeMapping() const {
     return non_exec_pages_.IsValid();
   }