Enable JIT in zygote.
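
The zygote now creates a JIT thread pool and may compile methods during
boot. JIT worker threads are deleted before every fork and recreated
afterwards, so no JIT thread exists across fork(). The code cache keeps
track of the data and exec mappings inherited from the zygote: code in
those mappings is shared with every child, so it is never marked,
collected, or freed by a child's code cache GC. In safe mode, the child
deletes the thread pool instead of restarting it.

A minimal sketch of the intended sequence around a zygote fork
(illustrative only; the actual call sites live in the runtime's zygote
hooks and are not part of this change):

  Jit* jit = Runtime::Current()->GetJit();
  jit->PreZygoteFork();        // Join and delete the JIT worker threads.
  pid_t pid = fork();
  if (pid == 0) {
    // Child: drop tasks inherited from the zygote and restart the pool
    // (or delete it in safe mode). Child zygotes skip the transition.
    jit->PostForkChildAction(/* is_zygote= */ false);
  } else {
    jit->PostZygoteFork();     // Zygote side: recreate the worker threads.
  }
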
Bug: 119800099
Test: boot test
Change-Id: I92dc59adeaf1202a984d363b1420ef26e53ebe84
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4a3ef07..e43d771 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -291,22 +291,6 @@
return success;
}
-void Jit::CreateThreadPool() {
- if (Runtime::Current()->IsSafeMode()) {
- // Never create the pool in safe mode.
- return;
- }
- // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
- // is not null when we instrument.
-
- // We need peers as we may report the JIT thread, e.g., in the debugger.
- constexpr bool kJitPoolNeedsPeers = true;
- thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
-
- thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
- Start();
-}
-
void Jit::DeleteThreadPool() {
Thread* self = Thread::Current();
DCHECK(Runtime::Current()->IsShuttingDown(self));
@@ -562,10 +546,10 @@
class JitCompileTask final : public Task {
public:
- enum TaskKind {
+ enum class TaskKind {
kAllocateProfile,
kCompile,
- kCompileOsr
+ kCompileOsr,
};
JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
@@ -582,14 +566,20 @@
void Run(Thread* self) override {
ScopedObjectAccess soa(self);
- if (kind_ == kCompile) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false);
- } else if (kind_ == kCompileOsr) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true);
- } else {
- DCHECK(kind_ == kAllocateProfile);
- if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
- VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+ switch (kind_) {
+ case TaskKind::kCompile:
+ case TaskKind::kCompileOsr: {
+ Runtime::Current()->GetJit()->CompileMethod(
+ method_,
+ self,
+ /* osr= */ (kind_ == TaskKind::kCompileOsr));
+ break;
+ }
+ case TaskKind::kAllocateProfile: {
+ if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
+ VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+ }
+ break;
}
}
ProfileSaver::NotifyJitActivity();
@@ -607,6 +597,18 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
+void Jit::CreateThreadPool() {
+ // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
+ // is not null when we instrument.
+
+ // We need peers as we may report the JIT thread, e.g., in the debugger.
+ constexpr bool kJitPoolNeedsPeers = true;
+ thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
+ thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
+ Start();
+}
+
static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsClassInitializer() || !method->IsCompilable()) {
// We do not want to compile such methods.
@@ -630,11 +632,10 @@
void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
if (thread_pool_ == nullptr) {
- // Should only see this when shutting down, starting up, or in zygote, which doesn't
- // have a thread pool.
+ // Should only see this when shutting down, starting up, or in safe mode.
DCHECK(Runtime::Current()->IsShuttingDown(self) ||
!Runtime::Current()->IsFinishedStarting() ||
- Runtime::Current()->IsZygote());
+ Runtime::Current()->IsSafeMode());
return;
}
if (IgnoreSamplesForMethod(method)) {
@@ -675,7 +676,8 @@
if (!success) {
// We failed allocating. Instead of doing the collection on the Java thread, we push
// an allocation to a compiler thread, that will do the collection.
- thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
+ thread_pool_->AddTask(
+ self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
}
}
// Avoid jumping more than one state at a time.
@@ -685,7 +687,7 @@
if ((new_count >= HotMethodThreshold()) &&
!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
DCHECK(thread_pool_ != nullptr);
- thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
+ thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
}
// Avoid jumping more than one state at a time.
new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
@@ -697,7 +699,8 @@
DCHECK(!method->IsNative()); // No back edges reported for native methods.
if ((new_count >= OSRMethodThreshold()) && !code_cache_->IsOsrCompiled(method)) {
DCHECK(thread_pool_ != nullptr);
- thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
+ thread_pool_->AddTask(
+ self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
}
}
}
@@ -730,7 +733,7 @@
// The compiler requires a ProfilingInfo object for non-native methods.
ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
}
- JitCompileTask compile_task(method, JitCompileTask::kCompile);
+ JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
// Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
ScopedSetRuntimeThread ssrt(thread);
compile_task.Run(thread);
@@ -798,7 +801,16 @@
}
}
-void Jit::PostForkChildAction() {
+void Jit::PostForkChildAction(bool is_zygote) {
+ if (is_zygote) {
+ // Don't transition if this is for a child zygote.
+ return;
+ }
+ if (Runtime::Current()->IsSafeMode()) {
+ // Delete the thread pool; we are not going to JIT.
+ thread_pool_.reset(nullptr);
+ return;
+ }
// At this point, the compiler options have been adjusted to the particular configuration
// of the forked child. Parse them again.
jit_update_options_(jit_compiler_handle_);
@@ -806,6 +818,28 @@
// Adjust the status of code cache collection: the status from zygote was to not collect.
code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+
+ if (thread_pool_ != nullptr) {
+ // Remove any tasks that may have been inherited from the zygote.
+ thread_pool_->RemoveAllTasks(Thread::Current());
+
+ // Resume JIT compilation.
+ thread_pool_->CreateThreads();
+ }
+}
+
+void Jit::PreZygoteFork() {
+ if (thread_pool_ == nullptr) {
+ return;
+ }
+ thread_pool_->DeleteThreads();
+}
+
+void Jit::PostZygoteFork() {
+ if (thread_pool_ == nullptr) {
+ return;
+ }
+ thread_pool_->CreateThreads();
}
} // namespace jit
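
Note: PreZygoteFork(), PostZygoteFork() and PostForkChildAction() rely on
ThreadPool::DeleteThreads(), ThreadPool::CreateThreads() and
ThreadPool::RemoveAllTasks(), companion additions to thread_pool.{h,cc}
that are not shown in this diff. A sketch of the semantics assumed by the
call sites above (member names are illustrative, modeled on ThreadPool's
existing fields):

  void ThreadPool::DeleteThreads() {
    {
      MutexLock mu(Thread::Current(), task_queue_lock_);
      shutting_down_ = true;  // Ask the workers to exit their run loops.
      task_queue_condition_.Broadcast(Thread::Current());
    }
    // Deleting a worker joins its pthread. The task queue is left intact,
    // so tasks queued before the fork survive until the child removes them.
    STLDeleteElements(&threads_);
  }

  void ThreadPool::CreateThreads() {
    CHECK(threads_.empty());
    {
      MutexLock mu(Thread::Current(), task_queue_lock_);
      shutting_down_ = false;
    }
    while (GetThreadCount() < max_active_workers_) {
      threads_.push_back(
          new ThreadPoolWorker(this, name_, ThreadPoolWorker::kDefaultStackSize));
    }
  }
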
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e12b032..7ce5f07 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -285,8 +285,14 @@
// Start JIT threads.
void Start();
- // Transition to a zygote child state.
- void PostForkChildAction();
+ // Transition to a child state.
+ void PostForkChildAction(bool is_zygote);
+
+ // Prepare for forking: delete the JIT worker threads.
+ void PreZygoteFork();
+
+ // Adjust state after forking: recreate the JIT worker threads.
+ void PostZygoteFork();
private:
Jit(JitCodeCache* code_cache, JitOptions* options);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 97887cc..1d53a58 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -436,6 +436,12 @@
initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+ used_memory_for_data_ = 0;
+ used_memory_for_code_ = 0;
+ number_of_compilations_ = 0;
+ number_of_osr_compilations_ = 0;
+ number_of_collections_ = 0;
+
data_pages_ = MemMap();
exec_pages_ = MemMap();
non_exec_pages_ = MemMap();
@@ -477,7 +483,7 @@
JitCodeCache::~JitCodeCache() {}
bool JitCodeCache::ContainsPc(const void* ptr) const {
- return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
+ return exec_pages_.HasAddress(ptr) || zygote_exec_pages_.HasAddress(ptr);
}
bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -1321,7 +1327,7 @@
return true;
}
const void* code = method_header->GetCode();
- if (code_cache_->ContainsPc(code)) {
+ if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
// Use the atomic set version, as multiple threads are executing this code.
bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
}
@@ -1493,7 +1499,7 @@
// interpreter will update its entry point to the compiled code and call it.
for (ProfilingInfo* info : profiling_infos_) {
const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (ContainsPc(entry_point)) {
+ if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
info->SetSavedEntryPoint(entry_point);
// Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
// class of the method. We may be concurrently running a GC which makes accessing
@@ -1508,7 +1514,7 @@
// Change entry points of native methods back to the GenericJNI entrypoint.
for (const auto& entry : jni_stubs_map_) {
const JniStubData& data = entry.second;
- if (!data.IsCompiled()) {
+ if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
continue;
}
// Make sure a single invocation of the GenericJNI trampoline tries to recompile.
@@ -1540,7 +1546,9 @@
// Iterate over all compiled code and remove entries that are not marked.
for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
JniStubData* data = &it->second;
- if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
+ if (IsInZygoteExecSpace(data->GetCode()) ||
+ !data->IsCompiled() ||
+ GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
++it;
} else {
method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
@@ -1550,7 +1558,7 @@
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
const void* code_ptr = it->first;
uintptr_t allocation = FromCodeToAllocation(code_ptr);
- if (GetLiveBitmap()->Test(allocation)) {
+ if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
++it;
} else {
OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
@@ -1571,7 +1579,7 @@
// Also remove the saved entry point from the ProfilingInfo objects.
for (ProfilingInfo* info : profiling_infos_) {
const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
+ if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
info->GetMethod()->SetProfilingInfo(nullptr);
}
@@ -1596,6 +1604,9 @@
for (const auto& entry : jni_stubs_map_) {
const JniStubData& data = entry.second;
const void* code_ptr = data.GetCode();
+ if (IsInZygoteExecSpace(code_ptr)) {
+ continue;
+ }
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
for (ArtMethod* method : data.GetMethods()) {
if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
@@ -1607,6 +1618,9 @@
for (const auto& it : method_code_map_) {
ArtMethod* method = it.second;
const void* code_ptr = it.first;
+ if (IsInZygoteExecSpace(code_ptr)) {
+ continue;
+ }
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
@@ -1953,6 +1967,7 @@
instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
}
if (collection_in_progress_) {
+ CHECK(!IsInZygoteExecSpace(data->GetCode()));
GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
}
}
@@ -2057,6 +2072,7 @@
}
void JitCodeCache::FreeCode(uint8_t* code) {
+ CHECK(!IsInZygoteExecSpace(code));
used_memory_for_code_ -= mspace_usable_size(code);
mspace_free(exec_mspace_, code);
}
@@ -2068,6 +2084,7 @@
}
void JitCodeCache::FreeData(uint8_t* data) {
+ CHECK(!IsInZygoteDataSpace(data));
used_memory_for_data_ -= mspace_usable_size(data);
mspace_free(data_mspace_, data);
}
@@ -2091,13 +2108,11 @@
}
void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
+ if (is_zygote) {
+ // Don't transition if this is for a child zygote.
+ return;
+ }
MutexLock mu(Thread::Current(), lock_);
- // Currently, we don't expect any compilations from zygote.
- CHECK_EQ(number_of_compilations_, 0u);
- CHECK_EQ(number_of_osr_compilations_, 0u);
- CHECK(jni_stubs_map_.empty());
- CHECK(method_code_map_.empty());
- CHECK(osr_code_map_.empty());
zygote_data_pages_ = std::move(data_pages_);
zygote_exec_pages_ = std::move(exec_pages_);
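
Since the zygote itself now JITs, the code cache can be non-empty at
fork time, which is why the CHECKs asserting empty maps are gone: the
zygote's pages simply become the child's zygote spaces. The
IsInZygoteExecSpace/IsInZygoteDataSpace guards added above all reduce to
MemMap::HasAddress, a half-open range test; effectively (sketch, assuming
MemMap's usual Begin()/End() accessors):

  bool MemMap::HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();  // addr in [Begin(), End())
  }

The CHECKs added to FreeCode()/FreeData() enforce the resulting
invariant: zygote pages were never allocated from this process's
mspaces, so passing a pointer into them to mspace_free() would corrupt
the allocator. Likewise, the GC paths skip zygote code because it is
mapped into every child and must stay valid for the lifetime of the
process.
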
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 7a838fd..e2f3357 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,6 +71,7 @@
namespace jit {
+class MarkCodeClosure;
class ScopedCodeCacheWrite;
// Alignment in bits that will suit all architectures.
@@ -387,6 +388,14 @@
const MemMap* GetUpdatableCodeMapping() const;
+ bool IsInZygoteDataSpace(const void* ptr) const {
+ return zygote_data_pages_.HasAddress(ptr);
+ }
+
+ bool IsInZygoteExecSpace(const void* ptr) const {
+ return zygote_exec_pages_.HasAddress(ptr);
+ }
+
bool IsWeakAccessEnabled(Thread* self) const;
void WaitUntilInlineCacheAccessible(Thread* self)
REQUIRES(!lock_)
@@ -487,6 +496,7 @@
friend class art::JitJniStubTestHelper;
friend class ScopedCodeCacheWrite;
+ friend class MarkCodeClosure;
DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};
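
Design note: IsInZygoteDataSpace() and IsInZygoteExecSpace() are kept
private; MarkCodeClosure (defined in jit_code_cache.cc) is befriended so
the code-marking visitor can skip zygote code during collection without
widening the public JitCodeCache API.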