Revert "Revert "Make the JIT zygote memory shared.""
This reverts commit 2fef66b294417d447630f9d98de68227eef476d3.
Bug: 119800099
Bug: 136110523
Reason for revert: Fixed webview_zygote case.
Change-Id: Iaae8c999463d77b7b1e62b55458493bdbc97a104
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9ef5ec3..1515575 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1814,7 +1814,8 @@
callee_dead_reference_safe,
graph_->IsDebuggable(),
/* osr= */ false,
- caller_instruction_counter);
+ /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
+ /* start_instruction_id= */ caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
// When they are needed, allocate `inline_stats_` on the Arena instead
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 5e7b575..f8f813e 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -29,6 +29,7 @@
#include "driver/dex_compilation_unit.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
+#include "jit/jit.h"
#include "mirror/dex_cache.h"
#include "oat_file.h"
#include "optimizing_compiler_stats.h"
@@ -1290,15 +1291,20 @@
// Check if the class will be initialized at runtime.
if (cls->IsInitialized()) {
Runtime* runtime = Runtime::Current();
- if (!runtime->IsAotCompiler()) {
+ if (runtime->IsAotCompiler()) {
+ // Assume loaded only if klass is in the boot image. App classes cannot be assumed
+ // loaded because we don't even know what class loader will be used to load them.
+ if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
+ return true;
+ }
+ } else {
DCHECK(runtime->UseJitCompilation());
- // For JIT, the class cannot revert to an uninitialized state.
- return true;
- }
- // Assume loaded only if klass is in the boot image. App classes cannot be assumed
- // loaded because we don't even know what class loader will be used to load them.
- if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
- return true;
+ if (Runtime::Current()->GetJit()->CanAssumeInitialized(
+ cls.Get(),
+ graph_->IsCompilingForSharedJitCode())) {
+ // For JIT, the class cannot revert to an uninitialized state.
+ return true;
+ }
}
}
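
The instruction_builder.cc hunk above inverts the old branch order: the AOT compiler may only trust boot-image classes, while the JIT now asks the runtime, passing along whether the code being compiled will land in the shared region, since an assumption made in the zygote must also hold in every child that later maps the code. A minimal sketch of the resulting decision, with illustrative names rather than the literal ART code:

    // Can the compiler assume `cls` is, and stays, initialized?
    bool CanAssumeClassInitialized(bool is_initialized,
                                   bool is_aot,
                                   bool is_in_boot_image,
                                   bool jit_allows_assumption) {
      if (!is_initialized) {
        return false;  // Not initialized now, so certainly not at runtime.
      }
      if (is_aot) {
        // App classes may be loaded by an unknown class loader; only
        // boot-image classes are safe to assume.
        return is_in_boot_image;
      }
      // JIT: Jit::CanAssumeInitialized() additionally rejects classes whose
      // initialization state may differ in processes forked later, which
      // matters when compiling into the shared region.
      return jit_allows_assumption;
    }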
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3736413..5111036 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -320,6 +320,7 @@
bool dead_reference_safe = false,
bool debuggable = false,
bool osr = false,
+ bool is_shared_jit_code = false,
int start_instruction_id = 0)
: allocator_(allocator),
arena_stack_(arena_stack),
@@ -355,7 +356,8 @@
art_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
osr_(osr),
- cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
+ cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)),
+ is_shared_jit_code_(is_shared_jit_code) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
@@ -585,6 +587,10 @@
bool IsCompilingOsr() const { return osr_; }
+ bool IsCompilingForSharedJitCode() const {
+ return is_shared_jit_code_;
+ }
+
ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
return cha_single_implementation_list_;
}
@@ -774,6 +780,10 @@
// List of methods that are assumed to have single implementation.
ArenaSet<ArtMethod*> cha_single_implementation_list_;
+ // Whether we are JIT compiling in the shared region, which restricts,
+ // for example, how literals are generated.
+ bool is_shared_jit_code_;
+
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
friend class HInliner; // For the reverse post order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2153ddd..6f3b9fe 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -384,6 +384,7 @@
ArtMethod* method,
bool baseline,
bool osr,
+ bool is_shared_jit_code,
VariableSizedHandleScope* handles) const;
CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -783,6 +784,7 @@
ArtMethod* method,
bool baseline,
bool osr,
+ bool is_shared_jit_code,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -850,7 +852,8 @@
kInvalidInvokeType,
dead_reference_safe,
compiler_options.GetDebuggable(),
- /* osr= */ osr);
+ /* osr= */ osr,
+ /* is_shared_jit_code= */ is_shared_jit_code);
if (method != nullptr) {
graph->SetArtMethod(method);
@@ -1107,6 +1110,7 @@
method,
compiler_options.IsBaseline(),
/* osr= */ false,
+ /* is_shared_jit_code= */ false,
&handles));
}
}
@@ -1368,6 +1372,7 @@
method,
baseline,
osr,
+ /* is_shared_jit_code= */ code_cache->IsSharedRegion(*region),
&handles));
if (codegen.get() == nullptr) {
return false;
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 8637db1..3e22edc 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "base/casts.h"
#include "base/enums.h"
+#include "base/logging.h"
#include "class_linker.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
@@ -26,6 +27,7 @@
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "handle_scope-inl.h"
+#include "jit/jit.h"
#include "mirror/dex_cache.h"
#include "mirror/string.h"
#include "nodes.h"
@@ -98,11 +100,17 @@
}
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
} else if (Runtime::Current()->UseJitCompilation()) {
- // JIT or on-device AOT compilation referencing a boot image method.
- // Use the method address directly.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
- method_load_data = reinterpret_cast<uintptr_t>(callee);
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ if (Runtime::Current()->GetJit()->CanEncodeMethod(
+ callee,
+ codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
+ method_load_data = reinterpret_cast<uintptr_t>(callee);
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ } else {
+ // Do not sharpen.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ }
} else if (IsInBootImage(callee)) {
// Use PC-relative access to the .data.bimg.rel.ro methods array.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
@@ -175,7 +183,16 @@
if (is_in_boot_image) {
desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
} else if (klass != nullptr) {
- desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+ if (runtime->GetJit()->CanEncodeClass(
+ klass.Get(),
+ codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+ desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+ } else {
+ // Shared JIT code cannot encode a literal that the GC can move.
+ VLOG(jit) << "Unable to encode in shared region class literal: "
+ << klass->PrettyClass();
+ desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
+ }
} else {
// Class not loaded yet. This happens when the dex code requesting
// this `HLoadClass` hasn't been executed in the interpreter.
@@ -331,10 +348,18 @@
DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
string = class_linker->LookupString(string_index, dex_cache.Get());
if (string != nullptr) {
- if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
+ gc::Heap* heap = runtime->GetHeap();
+ if (heap->ObjectIsInBootImageSpace(string)) {
desired_load_kind = HLoadString::LoadKind::kJitBootImageAddress;
- } else {
+ } else if (runtime->GetJit()->CanEncodeString(
+ string,
+ codegen->GetGraph()->IsCompilingForSharedJitCode())) {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
+ } else {
+ // Shared JIT code cannot encode a literal that the GC can move.
+ VLOG(jit) << "Unable to encode in shared region string literal: "
+ << string->ToModifiedUtf8();
+ desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
}
} else {
desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
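
All three sharpening changes above share one rationale: code in the shared region is mapped into every process forked from the zygote, and a moving GC relocates heap objects independently in each of them, so a raw object address embedded in that code could never be patched consistently. A hedged sketch of the gating pattern (illustrative types, not the ART API):

    enum class LoadKind { kDirectAddress, kRuntimeCall };

    // Decide how generated code should reference a heap object.
    LoadKind ChooseLoadKind(bool object_is_movable, bool for_shared_jit_code) {
      if (for_shared_jit_code && object_is_movable) {
        // Shared code outlives the compiling process; a movable object's
        // address cannot be fixed up in every forked child. Fall back to a
        // runtime call: slower, but always correct.
        return LoadKind::kRuntimeCall;
      }
      return LoadKind::kDirectAddress;
    }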
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 1f734fe..201f3ce 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -267,15 +267,15 @@
return false;
}
+ JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
+
// If we get a request to compile a proxy method, we pass the actual Java method
// of that proxy method, as the compiler does not expect a proxy method.
ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit)) {
+ if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, region)) {
return false;
}
- JitMemoryRegion* region = GetCodeCache()->GetPrivateRegion();
-
VLOG(jit) << "Compiling method "
<< ArtMethod::PrettyMethod(method_to_compile)
<< " osr=" << std::boolalpha << osr;
@@ -838,7 +838,7 @@
klass == GetClassRoot<mirror::VarHandle>()) {
// MethodHandle and VarHandle invocation methods are required to throw an
// UnsupportedOperationException if invoked reflectively. We achieve this by having native
- // implementations that arise the exception. We need to disable JIT compilation of these JNI
+ // implementations that raise the exception. We need to disable JIT compilation of these JNI
// methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
// stubs. Since these stubs have different stack representations we can then crash in stack
// walking (b/78151261).
@@ -854,9 +854,11 @@
uint32_t new_count,
bool with_backedges) {
if (thread_pool_ == nullptr) {
- // Should only see this when shutting down, starting up, or in safe mode.
+ // Should only see this when shutting down, starting up, in safe mode, or
+ // in a child zygote.
DCHECK(Runtime::Current()->IsShuttingDown(self) ||
!Runtime::Current()->IsFinishedStarting() ||
+ Runtime::Current()->IsZygote() ||
Runtime::Current()->IsSafeMode());
return false;
}
@@ -876,7 +878,9 @@
if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
// Note: Native methods have no "warm" state or profiling info.
- if (!method->IsNative() && method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
+ if (!method->IsNative() &&
+ (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) &&
+ code_cache_->CanAllocateProfilingInfo()) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
if (success) {
VLOG(jit) << "Start profiling " << method->PrettyMethod();
@@ -1022,14 +1026,9 @@
}
void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
- if (is_zygote) {
- // Remove potential tasks that have been inherited from the zygote. Child zygotes
- // currently don't need the whole boot image compiled (ie webview_zygote).
+ if (is_zygote || Runtime::Current()->IsSafeMode()) {
+ // Remove potential tasks that have been inherited from the zygote.
thread_pool_->RemoveAllTasks(Thread::Current());
- // Don't transition if this is for a child zygote.
- return;
- }
- if (Runtime::Current()->IsSafeMode()) {
// Delete the thread pool; we are not going to JIT.
thread_pool_.reset(nullptr);
return;
@@ -1074,5 +1073,31 @@
thread_pool_->CreateThreads();
}
+bool Jit::CanEncodeMethod(ArtMethod* method ATTRIBUTE_UNUSED,
+ bool is_for_shared_region ATTRIBUTE_UNUSED) const {
+ // TODO: For shared region, we should only encode a method of a class
+ // allocated before any fork.
+ return true;
+}
+
+bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
+ // TODO: For shared region, we should only encode a non-moving class allocated
+ // before any fork.
+ return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(cls);
+}
+
+bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
+ // TODO: For shared region, we should only encode a non-moving string allocated
+ // before any fork.
+ return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(string);
+}
+
+bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls,
+ bool is_for_shared_region ATTRIBUTE_UNUSED) const {
+ // TODO: For shared region, we should assume initialized if the class is initialized
+ // before any fork.
+ return cls->IsInitialized();
+}
+
} // namespace jit
} // namespace art
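
With these jit.cc changes, region selection follows the process role instead of always using the private region: the zygote compiles into the shared region so children inherit the code, while everything else uses a private region. A small sketch of the post-fork decision, assuming (as the diff's comments state) that child zygotes such as webview_zygote must not gain a private region of their own:

    enum class PostForkPlan { kKeepSharedMappingOnly, kUsePrivateRegion };

    // What a freshly forked process does with the JIT regions it inherited.
    PostForkPlan PlanAfterFork(bool is_child_zygote, bool is_safe_mode) {
      // A child zygote keeps only the inherited shared mapping; anything it
      // created privately would in turn be inherited by its own children.
      // Safe mode will not JIT at all.
      return (is_child_zygote || is_safe_mode) ? PostForkPlan::kKeepSharedMappingOnly
                                               : PostForkPlan::kUsePrivateRegion;
    }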
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 5643277..e44e1c9 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -39,6 +39,7 @@
class Object;
class Class;
class ClassLoader;
+class String;
} // namespace mirror
namespace jit {
@@ -212,7 +213,8 @@
return options_->GetPriorityThreadWeight();
}
- // Returns false if we only need to save profile information and not compile methods.
+ // Return whether we should do JIT compilation. Note this will return false
+ // if we only need to save profile information and not compile methods.
bool UseJitCompilation() const {
return options_->UseJitCompilation();
}
@@ -322,6 +324,16 @@
void RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
jobject class_loader);
+ // Called by the compiler to know whether it can directly encode the
+ // method/class/string.
+ bool CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const;
+ bool CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
Jit(JitCodeCache* code_cache, JitOptions* options);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 709882c..97b5b8d 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -206,8 +206,10 @@
if (is_zygote) {
// Zygote should never collect code to share the memory with the children.
jit_code_cache->garbage_collect_code_ = false;
+ jit_code_cache->shared_region_ = std::move(region);
+ } else {
+ jit_code_cache->private_region_ = std::move(region);
}
- jit_code_cache->private_region_ = std::move(region);
VLOG(jit) << "Created jit code cache: initial capacity="
<< PrettySize(initial_capacity)
@@ -383,7 +385,8 @@
return reinterpret_cast<const uint32_t*>(stack_map)[-1];
}
-static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
+ bool is_shared_region)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
if (!kIsDebugBuild) {
return;
@@ -396,6 +399,10 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
}
+ // Ensure that we don't put movable objects in the shared region.
+ if (is_shared_region) {
+ CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
+ }
}
}
@@ -664,7 +671,7 @@
if (!method->IsNative()) {
// We need to do this before grabbing the lock_ because it needs to be able to see the string
// InternTable. Native methods do not have roots.
- DCheckRootsAreValid(roots);
+ DCheckRootsAreValid(roots, IsSharedRegion(*region));
}
size_t root_table_size = ComputeRootTableSize(roots.size());
@@ -1401,6 +1408,7 @@
bool retry_allocation)
// No thread safety analysis as we are using TryLock/Unlock explicitly.
NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(CanAllocateProfilingInfo());
ProfilingInfo* info = nullptr;
if (!retry_allocation) {
// If we are allocating for the interpreter, just try to lock, to avoid
@@ -1454,7 +1462,9 @@
}
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
- return private_region_.MoreCore(mspace, increment);
+ return shared_region_.OwnsSpace(mspace)
+ ? shared_region_.MoreCore(mspace, increment)
+ : private_region_.MoreCore(mspace, increment);
}
void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
@@ -1546,7 +1556,11 @@
return osr_code_map_.find(method) != osr_code_map_.end();
}
-bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit) {
+bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
+ Thread* self,
+ bool osr,
+ bool prejit,
+ JitMemoryRegion* region) {
if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
return false;
}
@@ -1608,7 +1622,7 @@
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info == nullptr) {
// When prejitting, we don't allocate a profiling info.
- if (!prejit) {
+ if (!prejit && !IsSharedRegion(*region)) {
VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
// Because the counter is not atomic, there are some rare cases where we may not hit the
// threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
@@ -1716,13 +1730,19 @@
}
void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
- if (is_zygote) {
- // Don't transition if this is for a child zygote.
+ if (is_zygote || Runtime::Current()->IsSafeMode()) {
+ // Don't create a private region for a child zygote. Regions are usually mapped
+ // shared (to satisfy dual-view), and we don't want children of a child zygote
+ // to inherit it.
return;
}
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
- shared_region_ = std::move(private_region_);
+ if (private_region_.IsValid()) {
+ // In case the zygote was running with its own private region (happens for
+ // unit tests), move the region to the shared one.
+ CHECK(!shared_region_.IsValid());
+ std::swap(shared_region_, private_region_);
+ }
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
// Reset all statistics to be specific to this process.
number_of_compilations_ = 0;
@@ -1741,5 +1761,9 @@
}
}
+JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
+ return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
+}
+
} // namespace jit
} // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a4e2964..88b440b 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -96,7 +96,11 @@
std::string* error_msg);
~JitCodeCache();
- bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
+ bool NotifyCompilationOf(ArtMethod* method,
+ Thread* self,
+ bool osr,
+ bool prejit,
+ JitMemoryRegion* region)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -213,7 +217,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
- return private_region_.OwnsSpace(mspace);
+ return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
}
void* MoreCore(const void* mspace, intptr_t increment);
@@ -276,7 +280,15 @@
// is debuggable.
void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
- JitMemoryRegion* GetPrivateRegion() { return &private_region_; }
+ JitMemoryRegion* GetCurrentRegion();
+ bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }
+ bool CanAllocateProfilingInfo() {
+ // If we don't have a private region, we cannot allocate a profiling info.
+ // A shared region in general doesn't support GC objects, which a profiling
+ // info can reference.
+ JitMemoryRegion* region = GetCurrentRegion();
+ return region->IsValid() && !IsSharedRegion(*region);
+ }
private:
JitCodeCache();
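
CanAllocateProfilingInfo() above encodes a subtle constraint: a ProfilingInfo embeds inline caches, i.e. references to GC-managed objects that each process records and updates on its own, and memory shared across forked processes cannot hold such per-process mutable GC references. Distilled into a sketch (illustrative signature, not the ART one):

    // Profiling info is only possible with a valid, process-private region.
    bool CanAllocateProfilingInfo(bool region_is_valid, bool region_is_shared) {
      return region_is_valid && !region_is_shared;
    }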
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index a39e121..bcf90ce 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -64,9 +64,14 @@
// File descriptor enabling dual-view mapping of code section.
unique_fd mem_fd;
- // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
- // for it.
- if (!is_zygote) {
+ if (is_zygote) {
+ // Because we are not going to GC code generated by the zygote, just use all
+ // the available capacity.
+ current_capacity_ = max_capacity;
+ mem_fd = unique_fd(CreateZygoteMemory(capacity, error_msg));
+ if (mem_fd.get() < 0) {
+ return false;
+ }
+ } else {
// Bionic supports memfd_create, but the call may fail on older kernels.
mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
if (mem_fd.get() < 0) {
@@ -79,16 +84,14 @@
return false;
}
VLOG(jit) << oss.str();
+ } else if (ftruncate(mem_fd, capacity) != 0) {
+ std::ostringstream oss;
+ oss << "Failed to initialize memory file: " << strerror(errno);
+ *error_msg = oss.str();
+ return false;
}
}
- if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
- std::ostringstream oss;
- oss << "Failed to initialize memory file: " << strerror(errno);
- *error_msg = oss.str();
- return false;
- }
-
std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
@@ -207,6 +210,13 @@
}
}
}
+ if (is_zygote) {
+ // Now that we have created the writable and executable mappings, prevent creating any new
+ // ones.
+ if (!ProtectZygoteMemory(mem_fd.get(), error_msg)) {
+ return false;
+ }
+ }
} else {
// Profiling only. No memory for code required.
}
@@ -234,15 +244,14 @@
CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
- SetFootprintLimit(initial_capacity_);
+ SetFootprintLimit(current_capacity_);
// Protect pages containing heap metadata. Updates to the code heap toggle write permission to
// perform the update and there are no other times write access is required.
CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
} else {
exec_mspace_ = nullptr;
- SetFootprintLimit(initial_capacity_);
+ SetFootprintLimit(current_capacity_);
}
-
return true;
}
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index f325480..af0ca83 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -96,6 +96,10 @@
REQUIRES(Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsValid() const NO_THREAD_SAFETY_ANALYSIS {
+ return exec_mspace_ != nullptr || data_mspace_ != nullptr;
+ }
+
bool HasDualCodeMapping() const {
return non_exec_pages_.IsValid();
}
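
For reference, the dual-view mapping that jit_memory_region.cc builds on pairs one memfd with two views: a writable view for the compiler and an executable view for running code. A minimal standalone sketch using standard Linux APIs (the real CreateZygoteMemory/ProtectZygoteMemory pair additionally prevents new mappings being created from the descriptor once the views exist, per the comment in the hunk above; memfd_create may fail on older kernels, which the error path covers):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>

    // Minimal dual-view JIT mapping: writes through `writable` become visible
    // through `executable` because both views share the same memfd pages.
    bool CreateDualView(size_t capacity, void** writable, void** executable) {
      int fd = memfd_create("jit-cache", /* flags= */ 0);
      if (fd < 0 || ftruncate(fd, capacity) != 0) {
        return false;
      }
      *writable = mmap(nullptr, capacity, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      *executable = mmap(nullptr, capacity, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
      close(fd);  // The mappings keep the pages alive.
      return *writable != MAP_FAILED && *executable != MAP_FAILED;
    }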