Reland "Put queued compilations in sets."
This reverts commit 5fe67aacaa2387410ad4c26051cba856328c99f9.
Bug: 218444629
Reason for revert: Have NotifyCompilationOf only check if the method is
being compiled.
Change-Id: I84f8a040fd094810cbfbaf0d9e1dfccbdb517968
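
The change below keeps one set of queued methods per compilation kind (baseline, optimized, OSR) in the code cache, guarded by jit_lock_, and introduces a move-only RAII guard (ScopedCompilation) that owns a method's set membership from enqueue until the compile task completes. The following is a minimal standalone sketch of that pattern; CompilationSet and ScopedMembership are illustrative names, not ART APIs:

  #include <mutex>
  #include <set>

  // Illustrative stand-in for an ArtMethod*, not an ART type.
  using MethodId = const void*;

  class CompilationSet {
   public:
    // Returns true if `m` was newly inserted, false if already queued.
    bool Add(MethodId m) {
      std::lock_guard<std::mutex> lock(mutex_);
      return methods_.insert(m).second;
    }
    void Remove(MethodId m) {
      std::lock_guard<std::mutex> lock(mutex_);
      methods_.erase(m);
    }
   private:
    std::mutex mutex_;
    std::set<MethodId> methods_;
  };

  // Move-only RAII guard over set membership, mirroring ScopedCompilation.
  class ScopedMembership {
   public:
    ScopedMembership(CompilationSet* set, MethodId m)
        : set_(set), method_(m), owns_(set->Add(m)) {}
    ScopedMembership(ScopedMembership&& other) noexcept
        : set_(other.set_), method_(other.method_), owns_(other.owns_) {
      other.owns_ = false;  // the moved-from guard no longer releases the entry
    }
    ScopedMembership(const ScopedMembership&) = delete;
    ScopedMembership& operator=(const ScopedMembership&) = delete;
    ~ScopedMembership() {
      if (owns_) {
        set_->Remove(method_);  // release on destruction, like DoneCompiling
      }
    }
    bool Owns() const { return owns_; }
   private:
    CompilationSet* set_;
    MethodId method_;
    bool owns_;
  };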
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 5e01aaa..ae3575a 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -259,10 +259,14 @@
return true;
}
-bool Jit::CompileMethod(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind,
- bool prejit) {
+bool Jit::CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ CHECK(GetCodeCache()->IsMethodBeingCompiled(method, compilation_kind));
+ }
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
@@ -323,7 +327,7 @@
<< ArtMethod::PrettyMethod(method_to_compile)
<< " kind=" << compilation_kind;
bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
- code_cache_->DoneCompiling(method_to_compile, self, compilation_kind);
+ code_cache_->DoneCompiling(method_to_compile, self);
if (!success) {
VLOG(jit) << "Failed to compile method "
<< ArtMethod::PrettyMethod(method_to_compile)
@@ -743,6 +747,48 @@
child_mapping_methods.Reset();
}
+class ScopedCompilation {
+ public:
+ ScopedCompilation(ScopedCompilation&& other) :
+ jit_(other.jit_),
+ method_(other.method_),
+ compilation_kind_(other.compilation_kind_),
+ owns_compilation_(other.owns_compilation_) {
+ other.owns_compilation_ = false;
+ }
+
+ ScopedCompilation(Jit* jit, ArtMethod* method, CompilationKind compilation_kind)
+ : jit_(jit),
+ method_(method),
+ compilation_kind_(compilation_kind),
+ owns_compilation_(true) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ if (jit_->GetCodeCache()->IsMethodBeingCompiled(method_, compilation_kind_)) {
+ owns_compilation_ = false;
+ return;
+ }
+ jit_->GetCodeCache()->AddMethodBeingCompiled(method_, compilation_kind_);
+ }
+
+ bool OwnsCompilation() const {
+ return owns_compilation_;
+ }
+
+ ~ScopedCompilation() {
+ if (owns_compilation_) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ jit_->GetCodeCache()->RemoveMethodBeingCompiled(method_, compilation_kind_);
+ }
+ }
+
+ private:
+ Jit* const jit_;
+ ArtMethod* const method_;
+ const CompilationKind compilation_kind_;
+ bool owns_compilation_;
+};
+
class JitCompileTask final : public Task {
public:
enum class TaskKind {
@@ -750,25 +796,16 @@
kPreCompile,
};
- JitCompileTask(ArtMethod* method, TaskKind task_kind, CompilationKind compilation_kind)
- : method_(method), kind_(task_kind), compilation_kind_(compilation_kind), klass_(nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
- // until compilation is done.
- // When we precompile, this is either with boot classpath methods, or main
- // class loader methods, so we don't need to keep a global reference.
- if (method->GetDeclaringClass()->GetClassLoader() != nullptr &&
- kind_ != TaskKind::kPreCompile) {
- klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
- CHECK(klass_ != nullptr);
- }
- }
-
- ~JitCompileTask() {
- if (klass_ != nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
- }
+ JitCompileTask(ArtMethod* method,
+ TaskKind task_kind,
+ CompilationKind compilation_kind,
+ ScopedCompilation&& sc)
+ : method_(method),
+ kind_(task_kind),
+ compilation_kind_(compilation_kind),
+ scoped_compilation_(std::move(sc)) {
+ DCHECK(scoped_compilation_.OwnsCompilation());
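+ // `sc` was moved from above, so it must no longer own the set entry.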
+ DCHECK(!sc.OwnsCompilation());
}
void Run(Thread* self) override {
@@ -777,7 +814,7 @@
switch (kind_) {
case TaskKind::kCompile:
case TaskKind::kPreCompile: {
- Runtime::Current()->GetJit()->CompileMethod(
+ Runtime::Current()->GetJit()->CompileMethodInternal(
method_,
self,
compilation_kind_,
@@ -797,7 +834,7 @@
ArtMethod* const method_;
const TaskKind kind_;
const CompilationKind compilation_kind_;
- jobject klass_;
+ ScopedCompilation scoped_compilation_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
@@ -1285,6 +1322,21 @@
}
}
+void Jit::AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return;
+ }
+ JitCompileTask::TaskKind task_kind = precompile
+ ? JitCompileTask::TaskKind::kPreCompile
+ : JitCompileTask::TaskKind::kCompile;
+ thread_pool_->AddTask(
+ self, new JitCompileTask(method, task_kind, compilation_kind, std::move(sc)));
+}
+
bool Jit::CompileMethodFromProfile(Thread* self,
ClassLinker* class_linker,
uint32_t method_idx,
@@ -1305,6 +1357,7 @@
// Already seen by another profile.
return false;
}
+ CompilationKind compilation_kind = CompilationKind::kOptimized;
const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
class_linker->IsQuickGenericJniStub(entry_point) ||
@@ -1315,11 +1368,15 @@
VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
<< " from profile";
method->SetPreCompiled();
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return false;
+ }
if (!add_to_queue) {
- CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ true);
+ CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
} else {
Task* task = new JitCompileTask(
- method, JitCompileTask::TaskKind::kPreCompile, CompilationKind::kOptimized);
+ method, JitCompileTask::TaskKind::kPreCompile, compilation_kind, std::move(sc));
if (compile_after_boot) {
AddPostBootTask(self, task);
} else {
@@ -1470,11 +1527,7 @@
// hotness threshold. If we're not only using the baseline compiler, enqueue a compilation
// task that will compile an optimized version of the method.
if (!options_->UseBaselineCompiler()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
}
}
@@ -1494,23 +1547,17 @@
bool was_runtime_thread_;
};
-void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
+void Jit::MethodEntered(Thread* self, ArtMethod* method) {
Runtime* runtime = Runtime::Current();
if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
if (np_method->IsCompilable()) {
- // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
- // conflicts with jitzygote optimizations.
- JitCompileTask compile_task(
- method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOptimized);
- // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
- ScopedSetRuntimeThread ssrt(thread);
- compile_task.Run(thread);
+ CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ false);
}
return;
}
- AddSamples(thread, method);
+ AddSamples(self, method);
}
void Jit::WaitForCompilationToFinish(Thread* self) {
@@ -1740,9 +1787,7 @@
if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
// If we already have compiled code for it, nterp may be stuck in a loop.
// Compile OSR.
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr));
+ AddCompileTask(self, method, CompilationKind::kOsr);
}
return;
}
@@ -1776,17 +1821,27 @@
}
if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
+ AddCompileTask(self, method, CompilationKind::kBaseline);
} else {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
}
}
+bool Jit::CompileMethod(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ // TODO: all current users of this method expect us to wait if it is being compiled.
+ if (!sc.OwnsCompilation()) {
+ return false;
+ }
+ // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
+ ScopedSetRuntimeThread ssrt(self);
+ // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
+ // conflicts with jitzygote optimizations.
+ return CompileMethodInternal(method, self, compilation_kind, prejit);
+}
+
} // namespace jit
} // namespace art
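
The enqueue path in jit.cc hinges on moving the guard into the task: AddCompileTask creates a ScopedCompilation, returns early if another request already owns the (method, kind) entry, and otherwise transfers ownership into the JitCompileTask, whose destructor releases the entry once compilation is done. Reusing the ScopedMembership sketch above, a rough model of that hand-off (Task and EnqueueTask are hypothetical):

  #include <utility>

  // Builds on the CompilationSet/ScopedMembership sketch above.
  struct Task {
    ScopedMembership membership;  // released when the task is destroyed
    void Run() { /* compile; ~Task() then releases the set entry */ }
  };

  void EnqueueTask(Task task);  // hypothetical thread-pool hand-off

  void AddCompileTaskSketch(CompilationSet* set, MethodId method) {
    ScopedMembership guard(set, method);
    if (!guard.Owns()) {
      return;  // already queued or being compiled: drop the duplicate request
    }
    // Transfer ownership; after the move, `guard` no longer removes the entry.
    EnqueueTask(Task{std::move(guard)});
  }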
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index b439c8e..fd92451 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -53,6 +53,7 @@
namespace jit {
class JitCodeCache;
+class JitCompileTask;
class JitMemoryRegion;
class JitOptions;
@@ -461,6 +462,17 @@
static bool BindCompilerMethods(std::string* error_msg);
+ void AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile = false);
+
+ bool CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// JIT compiler
static void* jit_library_handle_;
static JitCompilerInterface* jit_compiler_;
@@ -507,6 +519,8 @@
// between the zygote and apps.
std::map<ArtMethod*, uint16_t> shared_method_counters_;
+ friend class art::jit::JitCompileTask;
+
DISALLOW_COPY_AND_ASSIGN(Jit);
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 0b34688..4148660 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1594,10 +1594,30 @@
return osr_code_map_.find(method) != osr_code_map_.end();
}
+void JitCodeCache::VisitRoots(RootVisitor* visitor) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
+ for (ArtMethod* method : current_optimized_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_baseline_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_osr_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+}
+
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
Thread* self,
CompilationKind compilation_kind,
bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ // Note: the compilation kind may have been adjusted from what was passed in initially.
+ // We really just want to check that the method is indeed being compiled.
+ CHECK(IsMethodBeingCompiled(method));
+ }
const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
OatQuickMethodHeader* method_header =
@@ -1686,13 +1706,8 @@
}
}
}
- MutexLock mu(self, *Locks::jit_lock_);
- if (IsMethodBeingCompiled(method, compilation_kind)) {
- return false;
- }
- AddMethodBeingCompiled(method, compilation_kind);
- return true;
}
+ return true;
}
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
@@ -1715,9 +1730,7 @@
it->second->DecrementInlineUse();
}
-void JitCodeCache::DoneCompiling(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind) {
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
DCHECK_EQ(Thread::Current(), self);
MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
@@ -1729,8 +1742,6 @@
// Failed to compile; the JNI compiler never fails, but the cache may be full.
jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
} // else Commit() updated entrypoints of all methods in the JniStubData.
- } else {
- RemoveMethodBeingCompiled(method, compilation_kind);
}
}
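
Dropping the per-task JNI global reference (the old klass_ field) is only safe because the code cache now reports queued methods as GC roots: VisitRoots walks all three compilation sets under jit_lock_, so a queued method's declaring class cannot be unloaded while its task is pending. A simplified, self-contained model of that contract, with illustrative names that are not ART APIs:

  #include <functional>
  #include <mutex>
  #include <set>

  struct Method {
    void* declaring_class;  // stands in for the mirror::Class the GC must keep
  };

  class QueuedCompilationRoots {
   public:
    void Add(Method* m) {
      std::lock_guard<std::mutex> lock(mutex_);
      queued_.insert(m);
    }
    void Remove(Method* m) {
      std::lock_guard<std::mutex> lock(mutex_);
      queued_.erase(m);
    }
    // Called from the runtime's root visit: every queued method's class is
    // reported as a root, so class unloading cannot race with the JIT queue.
    void VisitRoots(const std::function<void(void*)>& visit) {
      std::lock_guard<std::mutex> lock(mutex_);
      for (Method* m : queued_) {
        visit(m->declaring_class);
      }
    }
   private:
    std::mutex mutex_;
    std::set<Method*> queued_;
  };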
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fb861a4..a534ba9 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -215,7 +215,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
- void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind)
+ void DoneCompiling(ArtMethod* method, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -403,6 +403,20 @@
ProfilingInfo* GetProfilingInfo(ArtMethod* method, Thread* self);
void ResetHotnessCounter(ArtMethod* method, Thread* self);
+ void VisitRoots(RootVisitor* visitor);
+
+ // Return whether `method` is being compiled with the given mode.
+ bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+ // Remove `method` from the list of methods being compiled with the given mode.
+ void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+ // Record that `method` is being compiled with the given mode.
+ void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
private:
JitCodeCache();
@@ -492,18 +506,6 @@
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Record that `method` is being compiled with the given mode.
- void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Remove `method` from the list of methods meing compiled with the given mode.
- void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Return whether `method` is being compiled with the given mode.
- bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
// Return whether `method` is being compiled in any mode.
bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 42f706a..d2eb3bd 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2457,6 +2457,9 @@
class_linker_->VisitRoots(visitor, flags);
jni_id_manager_->VisitRoots(visitor);
heap_->VisitAllocationRecords(visitor);
+ if (jit_ != nullptr) {
+ jit_->GetCodeCache()->VisitRoots(visitor);
+ }
if ((flags & kVisitRootFlagNewRoots) == 0) {
// Guaranteed to have no new roots in the constant roots.
VisitConstantRoots(visitor);
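
The runtime.cc hunk wires the new root source into Runtime::VisitRoots, behind a null check because the runtime may run without a JIT. In terms of the QueuedCompilationRoots model above, the hookup amounts to this (VisitAllRoots is a hypothetical stand-in for Runtime::VisitRoots):

  // Builds on the QueuedCompilationRoots model above.
  void VisitAllRoots(QueuedCompilationRoots* jit_roots,
                     const std::function<void(void*)>& visit) {
    // ... class linker, JNI ids, allocation records elided ...
    if (jit_roots != nullptr) {  // the JIT may not be running
      jit_roots->VisitRoots(visit);
    }
  }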