 compiler/optimizing/inliner.cc             |  3
 compiler/optimizing/instruction_builder.cc | 22
 compiler/optimizing/nodes.h                | 12
 compiler/optimizing/optimizing_compiler.cc |  7
 compiler/optimizing/sharpening.cc          | 41
 runtime/jit/jit.cc                         | 38
 runtime/jit/jit.h                          | 18
 runtime/jit/jit_code_cache.cc              | 48
 runtime/jit/jit_code_cache.h               | 11
 runtime/jit/jit_memory_region.cc           | 35
 runtime/jit/jit_memory_region.h            |  4
 11 files changed, 187 insertions(+), 52 deletions(-)
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9ef5ec31d1..15155751a7 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1814,7 +1814,8 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
       callee_dead_reference_safe,
       graph_->IsDebuggable(),
       /* osr= */ false,
-      caller_instruction_counter);
+      /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
+      /* start_instruction_id= */ caller_instruction_counter);
   callee_graph->SetArtMethod(resolved_method);

   // When they are needed, allocate `inline_stats_` on the Arena instead
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 5e7b57523f..f8f813e3fd 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -29,6 +29,7 @@
 #include "driver/dex_compilation_unit.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
+#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "oat_file.h"
 #include "optimizing_compiler_stats.h"
@@ -1290,15 +1291,20 @@ bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
   // Check if the class will be initialized at runtime.
   if (cls->IsInitialized()) {
     Runtime* runtime = Runtime::Current();
-    if (!runtime->IsAotCompiler()) {
+    if (runtime->IsAotCompiler()) {
+      // Assume loaded only if klass is in the boot image. App classes cannot be assumed
+      // loaded because we don't even know what class loader will be used to load them.
+      if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
+        return true;
+      }
+    } else {
       DCHECK(runtime->UseJitCompilation());
-      // For JIT, the class cannot revert to an uninitialized state.
-      return true;
-    }
-    // Assume loaded only if klass is in the boot image. App classes cannot be assumed
-    // loaded because we don't even know what class loader will be used to load them.
-    if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
-      return true;
+      if (Runtime::Current()->GetJit()->CanAssumeInitialized(
+              cls.Get(),
+              graph_->IsCompilingForSharedJitCode())) {
+        // For JIT, the class cannot revert to an uninitialized state.
+        return true;
+      }
     }
   }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8ac33a4309..759a8e6638 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -320,6 +320,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
          bool dead_reference_safe = false,
          bool debuggable = false,
          bool osr = false,
+         bool is_shared_jit_code = false,
          int start_instruction_id = 0)
       : allocator_(allocator),
         arena_stack_(arena_stack),
@@ -355,7 +356,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
         art_method_(nullptr),
         inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
         osr_(osr),
-        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
+        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)),
+        is_shared_jit_code_(is_shared_jit_code) {
     blocks_.reserve(kDefaultNumberOfBlocks);
   }

@@ -585,6 +587,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
   bool IsCompilingOsr() const { return osr_; }

+  bool IsCompilingForSharedJitCode() const {
+    return is_shared_jit_code_;
+  }
+
   ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
     return cha_single_implementation_list_;
   }

@@ -774,6 +780,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
   // List of methods that are assumed to have single implementation.
   ArenaSet<ArtMethod*> cha_single_implementation_list_;

+  // Whether we are JIT compiling in the shared region area, putting
+  // restrictions on, for example, how literals are being generated.
+  bool is_shared_jit_code_;
+
   friend class SsaBuilder;           // For caching constants.
   friend class SsaLivenessAnalysis;  // For the linear order.
   friend class HInliner;             // For the reverse post order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2153ddd7b4..6f3b9feb9d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -384,6 +384,7 @@ class OptimizingCompiler final : public Compiler {
                             ArtMethod* method,
                             bool baseline,
                             bool osr,
+                            bool is_shared_jit_code,
                             VariableSizedHandleScope* handles) const;

   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -783,6 +784,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                               ArtMethod* method,
                                               bool baseline,
                                               bool osr,
+                                              bool is_shared_jit_code,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
   const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -850,7 +852,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
       kInvalidInvokeType,
       dead_reference_safe,
       compiler_options.GetDebuggable(),
-      /* osr= */ osr);
+      /* osr= */ osr,
+      /* is_shared_jit_code= */ is_shared_jit_code);

   if (method != nullptr) {
     graph->SetArtMethod(method);
@@ -1107,6 +1110,7 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                           method,
                           compiler_options.IsBaseline(),
                           /* osr= */ false,
+                          /* is_shared_jit_code= */ false,
                           &handles));
     }
   }
@@ -1368,6 +1372,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                       method,
                       baseline,
                       osr,
+                      /* is_shared_jit_code= */ code_cache->IsSharedRegion(*region),
                       &handles));
   if (codegen.get() == nullptr) {
     return false;
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 8637db13ad..3e22edc773 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -19,6 +19,7 @@
 #include "art_method-inl.h"
 #include "base/casts.h"
 #include "base/enums.h"
+#include "base/logging.h"
 #include "class_linker.h"
 #include "code_generator.h"
 #include "driver/compiler_options.h"
@@ -26,6 +27,7 @@
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
 #include "handle_scope-inl.h"
+#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "mirror/string.h"
 #include "nodes.h"
@@ -98,11 +100,17 @@ HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
     }
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else if (Runtime::Current()->UseJitCompilation()) {
-    // JIT or on-device AOT compilation referencing a boot image method.
-    // Use the method address directly.
-    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
-    method_load_data = reinterpret_cast<uintptr_t>(callee);
-    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+    if (Runtime::Current()->GetJit()->CanEncodeMethod(
+            callee,
+            codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
+      method_load_data = reinterpret_cast<uintptr_t>(callee);
+      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+    } else {
+      // Do not sharpen.
+      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
+      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+    }
   } else if (IsInBootImage(callee)) {
     // Use PC-relative access to the .data.bimg.rel.ro methods array.
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
@@ -175,7 +183,16 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
       if (is_in_boot_image) {
         desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
       } else if (klass != nullptr) {
-        desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+        if (runtime->GetJit()->CanEncodeClass(
+                klass.Get(),
+                codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+          desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+        } else {
+          // Shared JIT code cannot encode a literal that the GC can move.
+          VLOG(jit) << "Unable to encode in shared region class literal: "
+                    << klass->PrettyClass();
+          desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
+        }
       } else {
         // Class not loaded yet. This happens when the dex code requesting
         // this `HLoadClass` hasn't been executed in the interpreter.
@@ -331,10 +348,18 @@ void HSharpening::ProcessLoadString(
       DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
       string = class_linker->LookupString(string_index, dex_cache.Get());
       if (string != nullptr) {
-        if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
+        gc::Heap* heap = runtime->GetHeap();
+        if (heap->ObjectIsInBootImageSpace(string)) {
           desired_load_kind = HLoadString::LoadKind::kJitBootImageAddress;
-        } else {
+        } else if (runtime->GetJit()->CanEncodeString(
+                       string,
+                       codegen->GetGraph()->IsCompilingForSharedJitCode())) {
           desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
+        } else {
+          // Shared JIT code cannot encode a literal that the GC can move.
+          VLOG(jit) << "Unable to encode in shared region string literal: "
+                    << string->ToModifiedUtf8();
+          desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
         }
       } else {
         desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 1f734fead7..c5133b98c1 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -267,15 +267,15 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr
     return false;
   }

+  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
+
   // If we get a request to compile a proxy method, we pass the actual Java method
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit)) {
+  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, region)) {
     return false;
   }

-  JitMemoryRegion* region = GetCodeCache()->GetPrivateRegion();
-
   VLOG(jit) << "Compiling method "
             << ArtMethod::PrettyMethod(method_to_compile)
             << " osr=" << std::boolalpha << osr;
@@ -838,7 +838,7 @@ static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mut
       klass == GetClassRoot<mirror::VarHandle>()) {
     // MethodHandle and VarHandle invocation methods are required to throw an
     // UnsupportedOperationException if invoked reflectively. We achieve this by having native
-    // implementations that arise the exception. We need to disable JIT compilation of these JNI
+    // implementations that raise the exception. We need to disable JIT compilation of these JNI
     // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
     // stubs. Since these stubs have different stack representations we can then crash in stack
     // walking (b/78151261).
@@ -1074,5 +1074,35 @@ void Jit::PostZygoteFork() {
   thread_pool_->CreateThreads();
 }

+bool Jit::CanEncodeMethod(ArtMethod* method ATTRIBUTE_UNUSED,
+                          bool is_for_shared_region ATTRIBUTE_UNUSED) const {
+  // TODO: For shared region, we should only encode a method of a class
+  // allocated before any fork.
+  return true;
+}
+
+bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
+  // TODO: For shared region, we should only encode a non-moving class allocated
+  // before any fork.
+  return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(cls);
+}
+
+bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
+  // TODO: For shared region, we should only encode a non-moving string allocated
+  // before any fork.
+  return !is_for_shared_region || !Runtime::Current()->GetHeap()->IsMovableObject(string);
+}
+
+bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls,
+                               bool is_for_shared_region ATTRIBUTE_UNUSED) const {
+  // TODO: For shared region, we should assume initialized if the class is initialized
+  // before any fork.
+  return cls->IsInitialized();
+}
+
+bool Jit::UseJitCompilation() {
+  return options_->UseJitCompilation() && GetCodeCache()->GetCurrentRegion()->IsValid();
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 56432778e1..1474a30f6e 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -39,6 +39,7 @@ namespace mirror {
 class Object;
 class Class;
 class ClassLoader;
+class String;
 }  // namespace mirror

 namespace jit {
@@ -212,10 +213,9 @@ class Jit {
     return options_->GetPriorityThreadWeight();
   }

-  // Returns false if we only need to save profile information and not compile methods.
-  bool UseJitCompilation() const {
-    return options_->UseJitCompilation();
-  }
+  // Return whether we should do JIT compilation. Note this will returns false
+  // if we only need to save profile information and not compile methods.
+  bool UseJitCompilation();

   bool GetSaveProfilingInfo() const {
     return options_->GetSaveProfilingInfo();
@@ -322,6 +322,16 @@ class Jit {
   void RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                         jobject class_loader);

+  // Called by the compiler to know whether it can directly encode the
+  // method/class/string.
+  bool CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const;
+  bool CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   Jit(JitCodeCache* code_cache, JitOptions* options);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6ea2af0ef9..e3e405f3ec 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -206,8 +206,10 @@ JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
   if (is_zygote) {
     // Zygote should never collect code to share the memory with the children.
     jit_code_cache->garbage_collect_code_ = false;
+    jit_code_cache->shared_region_ = std::move(region);
+  } else {
+    jit_code_cache->private_region_ = std::move(region);
   }
-  jit_code_cache->private_region_ = std::move(region);

   VLOG(jit) << "Created jit code cache: initial capacity="
             << PrettySize(initial_capacity)
@@ -383,7 +385,8 @@ static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
 }

-static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
+                                bool is_shared_region)
     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!kIsDebugBuild) {
     return;
@@ -396,6 +399,10 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
     }
+    // Ensure that we don't put movable objects in the shared region.
+    if (is_shared_region) {
+      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
+    }
   }
 }
@@ -664,7 +671,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
   if (!method->IsNative()) {
     // We need to do this before grabbing the lock_ because it needs to be able to see the string
     // InternTable. Native methods do not have roots.
-    DCheckRootsAreValid(roots);
+    DCheckRootsAreValid(roots, IsSharedRegion(*region));
   }

   size_t root_table_size = ComputeRootTableSize(roots.size());
@@ -1396,6 +1403,13 @@ ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               bool retry_allocation)
     // No thread safety analysis as we are using TryLock/Unlock explicitly.
     NO_THREAD_SAFETY_ANALYSIS {
+  // If we don't have a private region, we cannot allocate a profiling info.
+  // A shared region doesn't support in general GC objects, which a profiling info
+  // can reference.
+  if (!private_region_.IsValid()) {
+    return nullptr;
+  }
+
   ProfilingInfo* info = nullptr;
   if (!retry_allocation) {
     // If we are allocating for the interpreter, just try to lock, to avoid
@@ -1449,7 +1463,9 @@ ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNU
 }

 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
-  return private_region_.MoreCore(mspace, increment);
+  return shared_region_.OwnsSpace(mspace)
+      ? shared_region_.MoreCore(mspace, increment)
+      : private_region_.MoreCore(mspace, increment);
 }

 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
@@ -1541,7 +1557,11 @@ bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
   return osr_code_map_.find(method) != osr_code_map_.end();
 }

-bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit) {
+bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
+                                       Thread* self,
+                                       bool osr,
+                                       bool prejit,
+                                       JitMemoryRegion* region) {
   if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
     return false;
   }
@@ -1603,7 +1623,7 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (info == nullptr) {
       // When prejitting, we don't allocate a profiling info.
-      if (!prejit) {
+      if (!prejit && !IsSharedRegion(*region)) {
         VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
         // Because the counter is not atomic, there are some rare cases where we may not hit the
         // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
@@ -1712,12 +1732,18 @@ void JitCodeCache::Dump(std::ostream& os) {

 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
   if (is_zygote) {
-    // Don't transition if this is for a child zygote.
+    // Don't create a private region for a child zygote. Regions are usually map shared
+    // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
     return;
   }
-  MutexLock mu(Thread::Current(), *Locks::jit_lock_);

-  shared_region_ = std::move(private_region_);
+  if (private_region_.IsValid()) {
+    // In case the zygote was running with its own private region (happens for
+    // unit tests), move the region to the shared one.
+    CHECK(!shared_region_.IsValid());
+    std::swap(shared_region_, private_region_);
+  }
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);

   // Reset all statistics to be specific to this process.
   number_of_compilations_ = 0;
@@ -1736,5 +1762,9 @@ void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
   }
 }

+JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
+  return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a4e2964499..a777ab740f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -96,7 +96,11 @@ class JitCodeCache {
                            std::string* error_msg);
   ~JitCodeCache();

-  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
+  bool NotifyCompilationOf(ArtMethod* method,
+                           Thread* self,
+                           bool osr,
+                           bool prejit,
+                           JitMemoryRegion* region)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
@@ -213,7 +217,7 @@ class JitCodeCache {
       REQUIRES_SHARED(Locks::mutator_lock_);

   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return private_region_.OwnsSpace(mspace);
+    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
   }

   void* MoreCore(const void* mspace, intptr_t increment);
@@ -276,7 +280,8 @@ class JitCodeCache {
   // is debuggable.
   void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

-  JitMemoryRegion* GetPrivateRegion() { return &private_region_; }
+  JitMemoryRegion* GetCurrentRegion();
+  bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }

  private:
   JitCodeCache();
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index b8b7827cb4..26aaa53c66 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -64,9 +64,14 @@ bool JitMemoryRegion::Initialize(size_t initial_capacity,
   // File descriptor enabling dual-view mapping of code section.
   unique_fd mem_fd;

-  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
-  // for it.
-  if (!is_zygote) {
+  if (is_zygote) {
+    // Because we are not going to GC code generated by the zygote, just use all available.
+    current_capacity_ = max_capacity;
+    mem_fd = unique_fd(CreateZygoteMemory(capacity, error_msg));
+    if (mem_fd.get() < 0) {
+      return false;
+    }
+  } else {
     // Bionic supports memfd_create, but the call may fail on older kernels.
     mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
     if (mem_fd.get() < 0) {
@@ -79,16 +84,14 @@ bool JitMemoryRegion::Initialize(size_t initial_capacity,
         return false;
       }
       VLOG(jit) << oss.str();
+    } else if (ftruncate(mem_fd, capacity) != 0) {
+      std::ostringstream oss;
+      oss << "Failed to initialize memory file: " << strerror(errno);
+      *error_msg = oss.str();
+      return false;
     }
   }

-  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
-    std::ostringstream oss;
-    oss << "Failed to initialize memory file: " << strerror(errno);
-    *error_msg = oss.str();
-    return false;
-  }
-
   std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
   std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
@@ -207,6 +210,13 @@
         }
       }
     }
+    if (is_zygote) {
+      // Now that we have created the writable and executable mappings, prevent creating any new
+      // ones.
+      if (!ProtectZygoteMemory(mem_fd.get(), error_msg)) {
+        return false;
+      }
+    }
   } else {
     // Profiling only. No memory for code required.
   }
@@ -234,15 +244,14 @@
     CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
     exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
     CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
-    SetFootprintLimit(initial_capacity_);
+    SetFootprintLimit(current_capacity_);
     // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
     // perform the update and there are no other times write access is required.
     CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
   } else {
     exec_mspace_ = nullptr;
-    SetFootprintLimit(initial_capacity_);
+    SetFootprintLimit(current_capacity_);
   }
-
   return true;
 }
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index 7aa640ca63..e32fc052f5 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -107,6 +107,10 @@ class JitMemoryRegion {
       REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);

+  bool IsValid() const NO_THREAD_SAFETY_ANALYSIS {
+    return exec_mspace_ != nullptr || data_mspace_ != nullptr;
+  }
+
   bool HasDualCodeMapping() const {
     return non_exec_pages_.IsValid();
   }
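
The restriction the patch enforces, restated as a minimal standalone sketch (an illustration only, not ART's actual API; Heap, Object, LoadKind and ChooseLoadKind below are stand-in names): code JIT-compiled into the zygote's shared region must not embed pointers to objects the GC can move, so sharpening falls back to a runtime call instead of a JIT table literal.

#include <iostream>

struct Object { bool in_non_moving_space = false; };

struct Heap {
  // Stand-in for gc::Heap::IsMovableObject().
  bool IsMovableObject(const Object& obj) const { return !obj.in_non_moving_space; }
};

enum class LoadKind { kJitTableAddress, kRuntimeCall };

// Mirrors the shape of Jit::CanEncodeClass()/CanEncodeString() above: a movable
// object may only be embedded as a literal when compiling for the private region.
bool CanEncodeLiteral(const Heap& heap, const Object& obj, bool is_for_shared_region) {
  return !is_for_shared_region || !heap.IsMovableObject(obj);
}

LoadKind ChooseLoadKind(const Heap& heap, const Object& obj, bool is_for_shared_region) {
  return CanEncodeLiteral(heap, obj, is_for_shared_region)
      ? LoadKind::kJitTableAddress  // safe to embed a table entry for the object
      : LoadKind::kRuntimeCall;     // resolve the object at runtime instead
}

int main() {
  Heap heap;
  Object movable;  // allocated in a moving space
  // Private-region compilation may embed the literal; shared-region code may not.
  std::cout << (ChooseLoadKind(heap, movable, /*is_for_shared_region=*/false) ==
                LoadKind::kJitTableAddress) << "\n";  // prints 1
  std::cout << (ChooseLoadKind(heap, movable, /*is_for_shared_region=*/true) ==
                LoadKind::kRuntimeCall) << "\n";      // prints 1
}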