 runtime/jit/jit.cc                          |  2
 runtime/jit/jit_code_cache.cc               | 78
 runtime/jit/jit_code_cache.h                | 10
 runtime/native/dalvik_system_ZygoteHooks.cc | 10
 runtime/runtime.cc                          | 17
 runtime/runtime.h                           |  6
6 files changed, 90 insertions(+), 33 deletions(-)
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index a757960e3e..57d3b9f6c7 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -183,10 +183,12 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
   if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
     return nullptr;
   }
+  bool code_cache_only_for_profile_data = !options->UseJitCompilation();
   jit->code_cache_.reset(JitCodeCache::Create(
       options->GetCodeCacheInitialCapacity(),
       options->GetCodeCacheMaxCapacity(),
       jit->generate_debug_info_,
+      code_cache_only_for_profile_data,
       error_msg));
   if (jit->GetCodeCache() == nullptr) {
     return nullptr;
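The hunk above derives the new flag from the JIT options: when JIT compilation is off but a code cache is still wanted for profiling data, the cache is created in profile-only mode. As a minimal standalone sketch of the protection choice this drives (the ternary expression appears verbatim in the jit_code_cache.cc hunk below; the helper function and main are hypothetical, not ART code):

    #include <sys/mman.h>
    #include <cassert>

    static constexpr int kProtCode = PROT_READ | PROT_EXEC;

    // Profile-only caches drop PROT_EXEC from the code section.
    static int CodeSectionProt(bool used_only_for_profile_data) {
      return used_only_for_profile_data ? (kProtCode & ~PROT_EXEC) : kProtCode;
    }

    int main() {
      assert(CodeSectionProt(/*used_only_for_profile_data=*/ true) == PROT_READ);
      assert(CodeSectionProt(false) == (PROT_READ | PROT_EXEC));
      return 0;
    }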
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f0f0df4f5e..1048c8093f 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -50,7 +50,6 @@ namespace art {
 namespace jit {
 
-static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
 static constexpr int kProtData = PROT_READ | PROT_WRITE;
 static constexpr int kProtCode = PROT_READ | PROT_EXEC;
@@ -161,6 +160,7 @@ class JitCodeCache::JniStubData {
 JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                    size_t max_capacity,
                                    bool generate_debug_info,
+                                   bool used_only_for_profile_data,
                                    std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
   CHECK_GE(max_capacity, initial_capacity);
@@ -184,6 +184,15 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
     return nullptr;
   }
 
+  // Decide how we should map the code and data sections.
+  // If we use the code cache just for profiling we do not need to map the code section as
+  // executable.
+  // NOTE 1: this is yet another workaround to bypass strict SELinux policies in order to be able
+  // to profile system server.
+  // NOTE 2: We could just not create the code section at all but we will need to
+  // special case too many cases.
+  int memmap_flags_prot_code = used_only_for_profile_data ? (kProtCode & ~PROT_EXEC) : kProtCode;
+
   std::string error_str;
   // Map name specific for android_os_Debug.cpp accounting.
   // Map in low 4gb to simplify accessing root tables for x86_64.
@@ -216,8 +225,11 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
   DCHECK_EQ(code_size + data_size, max_capacity);
   uint8_t* divider = data_map->Begin() + data_size;
 
-  MemMap* code_map =
-      data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
+  MemMap* code_map = data_map->RemapAtEnd(
+      divider,
+      "jit-code-cache",
+      memmap_flags_prot_code | PROT_WRITE,
+      &error_str,
+      use_ashmem);
   if (code_map == nullptr) {
     std::ostringstream oss;
     oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
@@ -229,7 +241,13 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
   code_size = initial_capacity - data_size;
   DCHECK_EQ(code_size + data_size, initial_capacity);
   return new JitCodeCache(
-      code_map, data_map.release(), code_size, data_size, max_capacity, garbage_collect_code);
+      code_map,
+      data_map.release(),
+      code_size,
+      data_size,
+      max_capacity,
+      garbage_collect_code,
+      memmap_flags_prot_code);
 }
 
 JitCodeCache::JitCodeCache(MemMap* code_map,
@@ -237,7 +255,8 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
                            size_t initial_code_capacity,
                            size_t initial_data_capacity,
                            size_t max_capacity,
-                           bool garbage_collect_code)
+                           bool garbage_collect_code,
+                           int memmap_flags_prot_code)
     : lock_("Jit code cache", kJitCodeCacheLock),
       lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
@@ -259,7 +278,8 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
       histogram_code_memory_use_("Memory used for compiled code", 16),
       histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
       is_weak_access_enabled_(true),
-      inline_cache_cond_("Jit inline cache condition variable", lock_) {
+      inline_cache_cond_("Jit inline cache condition variable", lock_),
+      memmap_flags_prot_code_(memmap_flags_prot_code) {
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
 
   code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -275,7 +295,7 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
               "mprotect jit code cache",
               code_map_->Begin(),
               code_map_->Size(),
-              kProtCode);
+              memmap_flags_prot_code_);
   CheckedCall(mprotect,
               "mprotect jit data cache",
               data_map_->Begin(),
@@ -328,28 +348,32 @@ const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
 
 class ScopedCodeCacheWrite : ScopedTrace {
  public:
-  explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
+  explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache,
+                                bool only_for_tlb_shootdown = false)
       : ScopedTrace("ScopedCodeCacheWrite"),
-        code_map_(code_map),
+        code_cache_(code_cache),
         only_for_tlb_shootdown_(only_for_tlb_shootdown) {
     ScopedTrace trace("mprotect all");
-    CheckedCall(mprotect,
-                "make code writable",
-                code_map_->Begin(),
-                only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
-                kProtAll);
+    CheckedCall(
+        mprotect,
+        "make code writable",
+        code_cache_->code_map_->Begin(),
+        only_for_tlb_shootdown_ ? kPageSize : code_cache_->code_map_->Size(),
+        code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
   }
+
   ~ScopedCodeCacheWrite() {
     ScopedTrace trace("mprotect code");
-    CheckedCall(mprotect,
-                "make code protected",
-                code_map_->Begin(),
-                only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
-                kProtCode);
+    CheckedCall(
+        mprotect,
+        "make code protected",
+        code_cache_->code_map_->Begin(),
+        only_for_tlb_shootdown_ ? kPageSize : code_cache_->code_map_->Size(),
+        code_cache_->memmap_flags_prot_code_);
   }
 
  private:
-  MemMap* const code_map_;
+  const JitCodeCache* const code_cache_;
 
   // If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
   // one page.
@@ -571,7 +595,7 @@ void JitCodeCache::FreeAllMethodHeaders(
   // so it's possible for the same method_header to start representing
   // different compile code.
   MutexLock mu(Thread::Current(), lock_);
-  ScopedCodeCacheWrite scc(code_map_.get());
+  ScopedCodeCacheWrite scc(this);
   for (const OatQuickMethodHeader* method_header : method_headers) {
     FreeCode(method_header->GetCode());
   }
@@ -590,7 +614,7 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
   // with the classlinker_classes_lock_ held, and suspending ourselves could
   // lead to a deadlock.
   {
-    ScopedCodeCacheWrite scc(code_map_.get());
+    ScopedCodeCacheWrite scc(this);
     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
       it->second.RemoveMethodsIn(alloc);
       if (it->second.GetMethods().empty()) {
@@ -729,7 +753,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
   MutexLock mu(self, lock_);
   WaitForPotentialCollectionToComplete(self);
   {
-    ScopedCodeCacheWrite scc(code_map_.get());
+    ScopedCodeCacheWrite scc(this);
     memory = AllocateCode(total_size);
     if (memory == nullptr) {
       return nullptr;
@@ -817,7 +841,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
   {
     // Flush data cache, as compiled code references literals in it.
     // We also need a TLB shootdown to act as memory barrier across cores.
-    ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+    ScopedCodeCacheWrite ccw(this, /* only_for_tlb_shootdown */ true);
     FlushDataCache(reinterpret_cast<char*>(roots_data),
                    reinterpret_cast<char*>(roots_data + data_size));
   }
@@ -895,7 +919,7 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
   }
 
   bool in_cache = false;
-  ScopedCodeCacheWrite ccw(code_map_.get());
+  ScopedCodeCacheWrite ccw(this);
   if (UNLIKELY(method->IsNative())) {
     auto it = jni_stubs_map_.find(JniStubKey(method));
     if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
@@ -1122,7 +1146,7 @@ void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
   DCHECK_EQ(per_space_footprint * 2, new_footprint);
   mspace_set_footprint_limit(data_mspace_, per_space_footprint);
   {
-    ScopedCodeCacheWrite scc(code_map_.get());
+    ScopedCodeCacheWrite scc(this);
     mspace_set_footprint_limit(code_mspace_, per_space_footprint);
   }
 }
@@ -1290,7 +1314,7 @@ void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
   std::unordered_set<OatQuickMethodHeader*> method_headers;
   {
     MutexLock mu(self, lock_);
-    ScopedCodeCacheWrite scc(code_map_.get());
+    ScopedCodeCacheWrite scc(this);
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
       JniStubData* data = &it->second;
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index dfa7ac0970..92deb97873 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -68,6 +68,7 @@ template<class T> class ObjectArray;
 namespace jit {
 
 class JitInstrumentationCache;
+class ScopedCodeCacheWrite;
 
 // Alignment in bits that will suit all architectures.
 static constexpr int kJitCodeAlignment = 16;
@@ -88,6 +89,7 @@ class JitCodeCache {
   static JitCodeCache* Create(size_t initial_capacity,
                               size_t max_capacity,
                               bool generate_debug_info,
+                              bool used_only_for_profile_data,
                               std::string* error_msg);
 
   ~JitCodeCache();
@@ -270,7 +272,8 @@ class JitCodeCache {
                size_t initial_code_capacity,
                size_t initial_data_capacity,
                size_t max_capacity,
-               bool garbage_collect_code);
+               bool garbage_collect_code,
+               int memmap_flags_prot_code);
 
   // Internal version of 'CommitCode' that will not retry if the
   // allocation fails. Return null if the allocation fails.
@@ -446,7 +449,12 @@ class JitCodeCache {
   // Condition to wait on for accessing inline caches.
   ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
 
+  // Mapping flags for the code section.
+  const int memmap_flags_prot_code_;
+
   friend class art::JitJniStubTestHelper;
+  friend class ScopedCodeCacheWrite;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 3dd3e43779..891cdfab1a 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -177,6 +177,7 @@ enum {
   DEBUG_GENERATE_MINI_DEBUG_INFO = 1 << 11,
   HIDDEN_API_ENFORCEMENT_POLICY_MASK = (1 << 12) | (1 << 13),
+  PROFILE_SYSTEM_SERVER = 1 << 14,
 
   // bits to shift (flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) by to get a value
   // corresponding to hiddenapi::EnforcementPolicy
@@ -308,6 +309,9 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
       (runtime_flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) >> API_ENFORCEMENT_POLICY_SHIFT);
   runtime_flags &= ~HIDDEN_API_ENFORCEMENT_POLICY_MASK;
 
+  bool profile_system_server = (runtime_flags & PROFILE_SYSTEM_SERVER) == PROFILE_SYSTEM_SERVER;
+  runtime_flags &= ~PROFILE_SYSTEM_SERVER;
+
   if (runtime_flags != 0) {
     LOG(ERROR) << StringPrintf("Unknown bits set in runtime_flags: %#x", runtime_flags);
   }
@@ -391,7 +395,11 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
         env, is_system_server, action, isa_string.c_str());
   } else {
     Runtime::Current()->InitNonZygoteOrPostFork(
-        env, is_system_server, Runtime::NativeBridgeAction::kUnload, nullptr);
+        env,
+        is_system_server,
+        Runtime::NativeBridgeAction::kUnload,
+        /*isa*/ nullptr,
+        profile_system_server);
   }
 }
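nativePostForkChild decodes runtime_flags by consuming one known bit at a time and clearing it, so the trailing "unknown bits" check stays meaningful. A self-contained sketch of that idiom (ConsumeFlag is a hypothetical helper; the bit value is taken from the enum in the hunk above):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t PROFILE_SYSTEM_SERVER = 1u << 14;

    // Report whether |bit| is set in |*flags|, and clear it.
    bool ConsumeFlag(uint32_t* flags, uint32_t bit) {
      bool set = (*flags & bit) == bit;
      *flags &= ~bit;
      return set;
    }

    int main() {
      uint32_t runtime_flags = PROFILE_SYSTEM_SERVER;
      bool profile_system_server = ConsumeFlag(&runtime_flags, PROFILE_SYSTEM_SERVER);
      if (runtime_flags != 0) {
        std::fprintf(stderr, "Unknown bits set in runtime_flags: %#x\n", runtime_flags);
      }
      return profile_system_server ? 0 : 1;
    }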
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 891bb8a114..4062d17268 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -873,7 +873,11 @@ void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
 }
 
 void Runtime::InitNonZygoteOrPostFork(
-    JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) {
+    JNIEnv* env,
+    bool is_system_server,
+    NativeBridgeAction action,
+    const char* isa,
+    bool profile_system_server) {
   is_zygote_ = false;
 
   if (is_native_bridge_loaded_) {
@@ -896,8 +900,15 @@ void Runtime::InitNonZygoteOrPostFork(
   heap_->ResetGcPerformanceInfo();
 
   // We may want to collect profiling samples for system server, but we never want to JIT there.
-  if ((!is_system_server || !jit_options_->UseJitCompilation()) &&
-      !safe_mode_ &&
+  if (is_system_server) {
+    jit_options_->SetUseJitCompilation(false);
+    jit_options_->SetSaveProfilingInfo(profile_system_server);
+    if (profile_system_server) {
+      jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
+      VLOG(profiler) << "Enabling system server profiles";
+    }
+  }
+  if (!safe_mode_ &&
       (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
       jit_ == nullptr) {
     // Note that when running ART standalone (not zygote, nor zygote fork),
diff --git a/runtime/runtime.h b/runtime/runtime.h
index c8aa4e5bb1..c0ff6b3444 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -446,7 +446,11 @@ class Runtime {
   void PreZygoteFork();
 
   void InitNonZygoteOrPostFork(
-      JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);
+      JNIEnv* env,
+      bool is_system_server,
+      NativeBridgeAction action,
+      const char* isa,
+      bool profile_system_server = false);
 
   const instrumentation::Instrumentation* GetInstrumentation() const {
     return &instrumentation_;
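The runtime.cc hunk splits the old combined condition into two steps: system server always has JIT compilation turned off, profile saving is enabled only when the zygote passed PROFILE_SYSTEM_SERVER, and the JIT (acting as a profiling agent) is then still created whenever compilation or profile saving is wanted. A condensed sketch of that control flow, assuming a stand-in options struct rather than ART's real JitOptions, and omitting the jit_ == nullptr guard:

    struct Options {
      bool use_jit_compilation = true;
      bool save_profiling_info = false;
    };

    // Mirrors the post-fork logic: system server never JITs, but may profile.
    bool ShouldCreateJit(bool is_system_server, bool profile_system_server,
                         bool safe_mode, Options* opts) {
      if (is_system_server) {
        opts->use_jit_compilation = false;
        opts->save_profiling_info = profile_system_server;
      }
      return !safe_mode &&
             (opts->use_jit_compilation || opts->save_profiling_info);
    }

The default argument profile_system_server = false in runtime.h keeps all existing callers of InitNonZygoteOrPostFork source-compatible; only the zygote post-fork path passes the flag explicitly.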