Diffstat (limited to 'runtime/jit')
-rw-r--r--  runtime/jit/jit.cc                  229
-rw-r--r--  runtime/jit/jit.h                    15
-rw-r--r--  runtime/jit/jit_code_cache.cc       328
-rw-r--r--  runtime/jit/jit_code_cache.h         42
-rw-r--r--  runtime/jit/jit_load_test.cc         38
-rw-r--r--  runtime/jit/jit_memory_region.cc      6
-rw-r--r--  runtime/jit/profile_saver.cc         23
-rw-r--r--  runtime/jit/profiling_info_test.cc    7
8 files changed, 449 insertions, 239 deletions
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 6d634ae120..b231cce0bc 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -208,7 +208,6 @@ Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
// Jit GC for now (b/147208992).
if (code_cache->GetGarbageCollectCode()) {
code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
- !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
!jit->JitAtFirstUse());
}
@@ -259,10 +258,14 @@ bool Jit::LoadCompilerLibrary(std::string* error_msg) {
return true;
}
-bool Jit::CompileMethod(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind,
- bool prejit) {
+bool Jit::CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ CHECK(GetCodeCache()->IsMethodBeingCompiled(method, compilation_kind));
+ }
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
@@ -279,9 +282,8 @@ bool Jit::CompileMethod(ArtMethod* method,
compilation_kind = CompilationKind::kOptimized;
}
- RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
// Don't compile the method if it has breakpoints.
- if (cb->IsMethodBeingInspected(method)) {
+ if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
<< " due to not being safe to jit according to runtime-callbacks. For example, there"
<< " could be breakpoints in this method.";
@@ -323,7 +325,7 @@ bool Jit::CompileMethod(ArtMethod* method,
<< ArtMethod::PrettyMethod(method_to_compile)
<< " kind=" << compilation_kind;
bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
- code_cache_->DoneCompiling(method_to_compile, self, compilation_kind);
+ code_cache_->DoneCompiling(method_to_compile, self);
if (!success) {
VLOG(jit) << "Failed to compile method "
<< ArtMethod::PrettyMethod(method_to_compile)
@@ -568,12 +570,11 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
// Before allowing the jump, make sure no code is actively inspecting the method to avoid
// jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
// disable OSR when single stepping, but that's currently hard to know at this point.
- if (Runtime::Current()->GetInstrumentation()->InterpreterStubsInstalled() ||
- Runtime::Current()->GetInstrumentation()->IsDeoptimized(method) ||
- thread->IsForceInterpreter() ||
- method->GetDeclaringClass()->IsObsoleteObject() ||
- Dbg::IsForcedInterpreterNeededForUpcall(thread, method) ||
- Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
+ // Currently, HaveLocalsChanged is not frame specific. It is possible to make it frame specific
+ // to allow OSR of frames that don't have any locals changed but it isn't worth the additional
+ // complexity.
+ if (Runtime::Current()->GetInstrumentation()->NeedsSlowInterpreterForMethod(thread, method) ||
+ Runtime::Current()->GetRuntimeCallbacks()->HaveLocalsChanged()) {
return false;
}
@@ -748,6 +749,51 @@ void Jit::NotifyZygoteCompilationDone() {
child_mapping_methods.Reset();
}
+class ScopedCompilation {
+ public:
+ ScopedCompilation(ScopedCompilation&& other) noexcept :
+ jit_(other.jit_),
+ method_(other.method_),
+ compilation_kind_(other.compilation_kind_),
+ owns_compilation_(other.owns_compilation_) {
+ other.owns_compilation_ = false;
+ }
+
+ ScopedCompilation(Jit* jit, ArtMethod* method, CompilationKind compilation_kind)
+ : jit_(jit),
+ method_(method),
+ compilation_kind_(compilation_kind),
+ owns_compilation_(true) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ // We don't want to enqueue any new tasks when the thread pool has stopped. This simplifies
+ // the implementation of the redefinition feature in jvmti.
+ if (jit_->GetThreadPool() == nullptr ||
+ !jit_->GetThreadPool()->HasStarted(Thread::Current()) ||
+ jit_->GetCodeCache()->IsMethodBeingCompiled(method_, compilation_kind_)) {
+ owns_compilation_ = false;
+ return;
+ }
+ jit_->GetCodeCache()->AddMethodBeingCompiled(method_, compilation_kind_);
+ }
+
+ bool OwnsCompilation() const {
+ return owns_compilation_;
+ }
+
+ ~ScopedCompilation() {
+ if (owns_compilation_) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ jit_->GetCodeCache()->RemoveMethodBeingCompiled(method_, compilation_kind_);
+ }
+ }
+
+ private:
+ Jit* const jit_;
+ ArtMethod* const method_;
+ const CompilationKind compilation_kind_;
+ bool owns_compilation_;
+};
+
class JitCompileTask final : public Task {
public:
enum class TaskKind {
@@ -755,25 +801,16 @@ class JitCompileTask final : public Task {
kPreCompile,
};
- JitCompileTask(ArtMethod* method, TaskKind task_kind, CompilationKind compilation_kind)
- : method_(method), kind_(task_kind), compilation_kind_(compilation_kind), klass_(nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
- // until compilation is done.
- // When we precompile, this is either with boot classpath methods, or main
- // class loader methods, so we don't need to keep a global reference.
- if (method->GetDeclaringClass()->GetClassLoader() != nullptr &&
- kind_ != TaskKind::kPreCompile) {
- klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
- CHECK(klass_ != nullptr);
- }
- }
-
- ~JitCompileTask() {
- if (klass_ != nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
- }
+ JitCompileTask(ArtMethod* method,
+ TaskKind task_kind,
+ CompilationKind compilation_kind,
+ ScopedCompilation&& sc)
+ : method_(method),
+ kind_(task_kind),
+ compilation_kind_(compilation_kind),
+ scoped_compilation_(std::move(sc)) {
+ DCHECK(scoped_compilation_.OwnsCompilation());
+ DCHECK(!sc.OwnsCompilation());
}
void Run(Thread* self) override {
@@ -782,7 +819,7 @@ class JitCompileTask final : public Task {
switch (kind_) {
case TaskKind::kCompile:
case TaskKind::kPreCompile: {
- Runtime::Current()->GetJit()->CompileMethod(
+ Runtime::Current()->GetJit()->CompileMethodInternal(
method_,
self,
compilation_kind_,
@@ -802,7 +839,7 @@ class JitCompileTask final : public Task {
ArtMethod* const method_;
const TaskKind kind_;
const CompilationKind compilation_kind_;
- jobject klass_;
+ ScopedCompilation scoped_compilation_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
@@ -888,22 +925,16 @@ class ZygoteVerificationTask final : public Task {
uint64_t start_ns = ThreadCpuNanoTime();
uint64_t number_of_classes = 0;
for (const DexFile* dex_file : boot_class_path) {
- if (dex_file->GetOatDexFile() != nullptr &&
- dex_file->GetOatDexFile()->GetOatFile() != nullptr) {
- // If backed by an .oat file, we have already run verification at
- // compile-time. Note that some classes may still have failed
- // verification there if they reference updatable mainline module
- // classes.
- continue;
- }
for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
const dex::ClassDef& class_def = dex_file->GetClassDef(i);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
- ScopedNullHandle<mirror::ClassLoader> null_loader;
- klass.Assign(linker->FindClass(self, descriptor, null_loader));
+ klass.Assign(linker->LookupResolvedType(descriptor, /* class_loader= */ nullptr));
if (klass == nullptr) {
- self->ClearException();
- LOG(WARNING) << "Could not find " << descriptor;
+ // Class not loaded yet.
+ DCHECK(!self->IsExceptionPending());
+ continue;
+ }
+ if (klass->IsVerified()) {
continue;
}
if (linker->VerifyClass(self, /* verifier_deps= */ nullptr, klass) ==
@@ -918,9 +949,9 @@ class ZygoteVerificationTask final : public Task {
CHECK(!self->IsExceptionPending());
}
}
- LOG(INFO) << "Verified "
+ LOG(INFO) << "Background verification of "
<< number_of_classes
- << " classes from mainline modules in "
+ << " classes from boot classpath took "
<< PrettyDuration(ThreadCpuNanoTime() - start_ns);
}
};
@@ -1280,16 +1311,40 @@ void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& de
return;
}
Runtime* runtime = Runtime::Current();
- // If the runtime is debuggable, no need to precompile methods.
+ // If the runtime is debuggable, don't bother precompiling methods.
+ // If system server is being profiled, don't precompile as we are going to use
+ // the JIT to count hotness. Note that --count-hotness-in-compiled-code is
+ // only forced when we also profile the boot classpath, see
+ // AndroidRuntime.cpp.
if (runtime->IsSystemServer() &&
UseJitCompilation() &&
options_->UseProfiledJitCompilation() &&
runtime->HasImageWithProfile() &&
+ !runtime->IsSystemServerProfiled() &&
!runtime->IsJavaDebuggable()) {
+ // Note: this precompilation is currently not running in production because:
+ // - UseProfiledJitCompilation() is not set by default.
+ // - System server dex files are registered *before* we set the runtime as
+ // system server (though we are in the system server process).
thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
}
}
+void Jit::AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return;
+ }
+ JitCompileTask::TaskKind task_kind = precompile
+ ? JitCompileTask::TaskKind::kPreCompile
+ : JitCompileTask::TaskKind::kCompile;
+ thread_pool_->AddTask(
+ self, new JitCompileTask(method, task_kind, compilation_kind, std::move(sc)));
+}
+
bool Jit::CompileMethodFromProfile(Thread* self,
ClassLinker* class_linker,
uint32_t method_idx,
@@ -1310,21 +1365,27 @@ bool Jit::CompileMethodFromProfile(Thread* self,
// Already seen by another profile.
return false;
}
+ CompilationKind compilation_kind = CompilationKind::kOptimized;
const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
class_linker->IsQuickGenericJniStub(entry_point) ||
- (entry_point == interpreter::GetNterpEntryPoint()) ||
- // We explicitly check for the stub. The trampoline is for methods backed by
- // a .oat file that has a compiled version of the method.
+ class_linker->IsNterpEntryPoint(entry_point) ||
+ // We explicitly check for the resolution stub, and not the resolution trampoline.
+ // The trampoline is for methods backed by a .oat file that has a compiled version of
+ // the method.
(entry_point == GetQuickResolutionStub())) {
VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
<< " from profile";
method->SetPreCompiled();
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return false;
+ }
if (!add_to_queue) {
- CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ true);
+ CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
} else {
Task* task = new JitCompileTask(
- method, JitCompileTask::TaskKind::kPreCompile, CompilationKind::kOptimized);
+ method, JitCompileTask::TaskKind::kPreCompile, compilation_kind, std::move(sc));
if (compile_after_boot) {
AddPostBootTask(self, task);
} else {
@@ -1342,7 +1403,7 @@ uint32_t Jit::CompileMethodsFromBootProfile(
const std::string& profile_file,
Handle<mirror::ClassLoader> class_loader,
bool add_to_queue) {
- unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);
+ unix_file::FdFile profile(profile_file, O_RDONLY, true);
if (profile.Fd() == -1) {
PLOG(WARNING) << "No boot profile: " << profile_file;
@@ -1392,7 +1453,7 @@ uint32_t Jit::CompileMethodsFromProfile(
// We don't generate boot profiles on device, therefore we don't
// need to lock the file.
- unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);
+ unix_file::FdFile profile(profile_file, O_RDONLY, true);
if (profile.Fd() == -1) {
PLOG(WARNING) << "No profile: " << profile_file;
@@ -1475,11 +1536,7 @@ void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
// hotness threshold. If we're not only using the baseline compiler, enqueue a compilation
// task that will compile optimize the method.
if (!options_->UseBaselineCompiler()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
}
}
@@ -1499,23 +1556,17 @@ class ScopedSetRuntimeThread {
bool was_runtime_thread_;
};
-void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
+void Jit::MethodEntered(Thread* self, ArtMethod* method) {
Runtime* runtime = Runtime::Current();
if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
if (np_method->IsCompilable()) {
- // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
- // conflicts with jitzygote optimizations.
- JitCompileTask compile_task(
- method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOptimized);
- // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
- ScopedSetRuntimeThread ssrt(thread);
- compile_task.Run(thread);
+ CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ false);
}
return;
}
- AddSamples(thread, method);
+ AddSamples(self, method);
}
void Jit::WaitForCompilationToFinish(Thread* self) {
@@ -1620,7 +1671,6 @@ void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
// Jit GC for now (b/147208992).
code_cache_->SetGarbageCollectCode(
!jit_compiler_->GenerateDebugInfo() &&
- !runtime->GetInstrumentation()->AreExitStubsInstalled() &&
!JitAtFirstUse());
if (is_system_server && runtime->HasImageWithProfile()) {
@@ -1745,17 +1795,14 @@ void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
// If we already have compiled code for it, nterp may be stuck in a loop.
// Compile OSR.
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr));
+ AddCompileTask(self, method, CompilationKind::kOsr);
}
return;
}
// Check if we have precompiled this method.
if (UNLIKELY(method->IsPreCompiled())) {
- if (!NeedsClinitCheckBeforeCall(method) ||
- method->GetDeclaringClass()->IsVisiblyInitialized()) {
+ if (!method->StillNeedsClinitCheck()) {
const void* entry_point = code_cache_->GetSavedEntryPointOfPreCompiledMethod(method);
if (entry_point != nullptr) {
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method, entry_point);
@@ -1764,7 +1811,7 @@ void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
return;
}
- static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0xff;
+ static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0x3f;
if (method->IsMemorySharedMethod()) {
MutexLock mu(self, lock_);
auto it = shared_method_counters_.find(method);
@@ -1781,16 +1828,26 @@ void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
}
if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
+ AddCompileTask(self, method, CompilationKind::kBaseline);
} else {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
+ }
+}
+
+bool Jit::CompileMethod(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ // TODO: all current users of this method expect us to wait if it is being compiled.
+ if (!sc.OwnsCompilation()) {
+ return false;
}
+ // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
+ ScopedSetRuntimeThread ssrt(self);
+ // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
+ // conflicts with jitzygote optimizations.
+ return CompileMethodInternal(method, self, compilation_kind, prejit);
}
} // namespace jit
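
The jit.cc changes above revolve around the new ScopedCompilation guard: a method is marked as "being compiled" under jit_lock_ when the guard is constructed, ownership can be moved into the queued JitCompileTask, and the marker is cleared only when the owning guard is destroyed. The stand-alone sketch below shows the same move-only RAII ownership pattern; the Registry/ScopedEntry names, int keys, and std::mutex locking are illustrative assumptions, not ART's actual classes.

// Sketch of the move-only RAII ownership pattern used by ScopedCompilation.
// Registry and ScopedEntry are hypothetical names, not ART's API.
#include <mutex>
#include <set>
#include <utility>

class Registry {
 public:
  // Returns true if `key` was not already registered, i.e. the caller now owns it.
  bool TryAdd(int key) {
    std::lock_guard<std::mutex> lock(mu_);
    return entries_.insert(key).second;
  }
  void Remove(int key) {
    std::lock_guard<std::mutex> lock(mu_);
    entries_.erase(key);
  }
 private:
  std::mutex mu_;
  std::set<int> entries_;
};

class ScopedEntry {
 public:
  ScopedEntry(Registry* registry, int key)
      : registry_(registry), key_(key), owns_(registry->TryAdd(key)) {}
  // Moving transfers ownership, so the guard can be handed to a queued task and
  // the registration is cleared only when that task's guard is destroyed.
  ScopedEntry(ScopedEntry&& other) noexcept
      : registry_(other.registry_), key_(other.key_), owns_(other.owns_) {
    other.owns_ = false;
  }
  bool Owns() const { return owns_; }
  ~ScopedEntry() {
    if (owns_) {
      registry_->Remove(key_);
    }
  }
 private:
  Registry* registry_;
  int key_;
  bool owns_;
};

As in AddCompileTask() above, a caller would construct the guard, bail out if Owns() returns false (another thread already claimed the method), and otherwise std::move the guard into the task it enqueues.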
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index b439c8ee9e..c95fd9d934 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -53,6 +53,7 @@ class String;
namespace jit {
class JitCodeCache;
+class JitCompileTask;
class JitMemoryRegion;
class JitOptions;
@@ -195,6 +196,7 @@ class JitCompilerInterface {
virtual bool GenerateDebugInfo() = 0;
virtual void ParseCompilerOptions() = 0;
virtual bool IsBaselineCompiler() const = 0;
+ virtual void SetDebuggableCompilerOption(bool value) = 0;
virtual std::vector<uint8_t> PackElfFileForJIT(ArrayRef<const JITCodeEntry*> elf_files,
ArrayRef<const void*> removed_symbols,
@@ -461,6 +463,17 @@ class Jit {
static bool BindCompilerMethods(std::string* error_msg);
+ void AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile = false);
+
+ bool CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// JIT compiler
static void* jit_library_handle_;
static JitCompilerInterface* jit_compiler_;
@@ -507,6 +520,8 @@ class Jit {
// between the zygote and apps.
std::map<ArtMethod*, uint16_t> shared_method_counters_;
+ friend class art::jit::JitCompileTask;
+
DISALLOW_COPY_AND_ASSIGN(Jit);
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 0b34688ff3..34f9045a33 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -40,7 +40,7 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
-#include "gc/allocator/dlmalloc.h"
+#include "gc/allocator/art-dlmalloc.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "handle_scope-inl.h"
@@ -127,31 +127,19 @@ class JitCodeCache::JniStubData {
DCHECK(entrypoint == OatQuickMethodHeader::FromCodePointer(GetCode())->GetEntryPoint());
instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
for (ArtMethod* m : GetMethods()) {
- // Because `m` might be in the process of being deleted:
- // - Call the dedicated method instead of the more generic UpdateMethodsCode
- // - Check the class status without a full read barrier; use ReadBarrier::IsMarked().
- bool can_set_entrypoint = true;
- if (NeedsClinitCheckBeforeCall(m)) {
- // To avoid resurrecting an unreachable object, we must not use a full read
- // barrier but we do not want to miss updating an entrypoint under common
- // circumstances, i.e. during a GC the class becomes visibly initialized,
- // the method becomes hot, we compile the thunk and want to update the
- // entrypoint while the method's declaring class field still points to the
- // from-space class object with the old status. Therefore we read the
- // declaring class without a read barrier and check if it's already marked.
- // If yes, we check the status of the to-space class object as intended.
- // Otherwise, there is no to-space object and the from-space class object
- // contains the most recent value of the status field; even if this races
- // with another thread doing a read barrier and updating the status, that's
- // no different from a race with a thread that just updates the status.
- // Such race can happen only for the zygote method pre-compilation, as we
- // otherwise compile only thunks for methods of visibly initialized classes.
- ObjPtr<mirror::Class> klass = m->GetDeclaringClass<kWithoutReadBarrier>();
- ObjPtr<mirror::Class> marked = ReadBarrier::IsMarked(klass.Ptr());
- ObjPtr<mirror::Class> checked_klass = (marked != nullptr) ? marked : klass;
- can_set_entrypoint = checked_klass->IsVisiblyInitialized();
- }
- if (can_set_entrypoint) {
+ // Because `m` might be in the process of being deleted,
+ // - use the `ArtMethod::StillNeedsClinitCheckMayBeDead()` to check if
+ // we can update the entrypoint, and
+ // - call `Instrumentation::UpdateNativeMethodsCodeToJitCode` instead of the
+ // more generic function `Instrumentation::UpdateMethodsCode()`.
+ // The `ArtMethod::StillNeedsClinitCheckMayBeDead()` checks the class status
+ // in the to-space object if any even if the method's declaring class points to
+ // the from-space class object. This way we do not miss updating an entrypoint
+ // even under uncommon circumstances, when during a GC the class becomes visibly
+ // initialized, the method becomes hot, we compile the thunk and want to update
+ // the entrypoint while the method's declaring class field still points to the
+ // from-space class object with the old status.
+ if (!m->StillNeedsClinitCheckMayBeDead()) {
instrum->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
}
}
@@ -220,9 +208,10 @@ JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
}
}
- size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
+ Runtime* runtime = Runtime::Current();
+ size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
// Check whether the provided max capacity in options is below 1GB.
- size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
+ size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
// We need to have 32 bit offsets from method headers in code cache which point to things
// in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
// Ensure we're below 1 GB to be safe.
@@ -244,6 +233,11 @@ JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
return nullptr;
}
+ if (region.HasCodeMapping()) {
+ const MemMap* exec_pages = region.GetExecPages();
+ runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
+ }
+
std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
if (is_zygote) {
// Zygote should never collect code to share the memory with the children.
@@ -278,7 +272,16 @@ JitCodeCache::JitCodeCache()
histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
}
-JitCodeCache::~JitCodeCache() {}
+JitCodeCache::~JitCodeCache() {
+ if (private_region_.HasCodeMapping()) {
+ const MemMap* exec_pages = private_region_.GetExecPages();
+ Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
+ }
+ if (shared_region_.HasCodeMapping()) {
+ const MemMap* exec_pages = shared_region_.GetExecPages();
+ Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
+ }
+}
bool JitCodeCache::PrivateRegionContainsPc(const void* ptr) const {
return private_region_.IsInExecSpace(ptr);
@@ -289,7 +292,9 @@ bool JitCodeCache::ContainsPc(const void* ptr) const {
}
bool JitCodeCache::ContainsMethod(ArtMethod* method) {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end() &&
@@ -312,7 +317,9 @@ bool JitCodeCache::ContainsMethod(ArtMethod* method) {
const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
DCHECK(method->IsNative());
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end()) {
JniStubData& data = it->second;
@@ -324,12 +331,14 @@ const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
}
const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method) {
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
if (method->IsPreCompiled()) {
const void* code_ptr = nullptr;
- if (method->GetDeclaringClass()->IsBootStrapClassLoaded()) {
+ if (method->GetDeclaringClass<kWithoutReadBarrier>()->IsBootStrapClassLoaded()) {
code_ptr = zygote_map_.GetCodeFor(method);
} else {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
auto it = saved_compiled_methods_map_.find(method);
if (it != saved_compiled_methods_map_.end()) {
code_ptr = it->second;
@@ -353,12 +362,12 @@ bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
}
static uintptr_t FromCodeToAllocation(const void* code) {
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}
static const void* FromAllocationToCode(const uint8_t* alloc) {
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
return reinterpret_cast<const void*>(alloc + RoundUp(sizeof(OatQuickMethodHeader), alignment));
}
@@ -400,7 +409,9 @@ static const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roo
}
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
for (const auto& entry : method_code_map_) {
uint32_t number_of_roots = 0;
const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
@@ -416,20 +427,20 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
} else if (object->IsString<kDefaultVerifyFlags>()) {
mirror::Object* new_object = visitor->IsMarked(object);
// We know the string is marked because it's a strongly-interned string that
- // is always alive. The IsMarked implementation of the CMS collector returns
- // null for newly allocated objects, but we know those haven't moved. Therefore,
- // only update the entry if we get a different non-null string.
+ // is always alive.
// TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
// out of the weak access/creation pause. b/32167580
- if (new_object != nullptr && new_object != object) {
- DCHECK(new_object->IsString());
+ DCHECK_NE(new_object, nullptr) << "old-string:" << object;
+ if (new_object != object) {
roots[i] = GcRoot<mirror::Object>(new_object);
}
} else {
- Runtime::ProcessWeakClass(
- reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]),
- visitor,
- Runtime::GetWeakClassSentinel());
+ mirror::Object* new_klass = visitor->IsMarked(object);
+ if (new_klass == nullptr) {
+ roots[i] = GcRoot<mirror::Object>(Runtime::GetWeakClassSentinel());
+ } else if (new_klass != object) {
+ roots[i] = GcRoot<mirror::Object>(new_klass);
+ }
}
}
}
@@ -439,7 +450,13 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
InlineCache* cache = &info->cache_[i];
for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
- Runtime::ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
+ mirror::Class* klass = cache->classes_[j].Read<kWithoutReadBarrier>();
+ if (klass != nullptr) {
+ mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
+ if (new_klass != klass) {
+ cache->classes_[j] = GcRoot<mirror::Class>(new_klass);
+ }
+ }
}
}
}
@@ -506,6 +523,7 @@ void JitCodeCache::FreeAllMethodHeaders(
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
ScopedTrace trace(__PRETTY_FUNCTION__);
+ ScopedDebugDisallowReadBarriers sddrb(self);
// We use a set to first collect all method_headers whose code need to be
// removed. We need to free the underlying code after we remove CHA dependencies
// for entries in this set. And it's more efficient to iterate through
@@ -560,7 +578,7 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
}
bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
- return kUseReadBarrier
+ return gUseReadBarrier
? self->GetWeakRefAccessEnabled()
: is_weak_access_enabled_.load(std::memory_order_seq_cst);
}
@@ -583,13 +601,13 @@ void JitCodeCache::BroadcastForInlineCacheAccess() {
}
void JitCodeCache::AllowInlineCacheAccess() {
- DCHECK(!kUseReadBarrier);
+ DCHECK(!gUseReadBarrier);
is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
BroadcastForInlineCacheAccess();
}
void JitCodeCache::DisallowInlineCacheAccess() {
- DCHECK(!kUseReadBarrier);
+ DCHECK(!gUseReadBarrier);
is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}
@@ -700,32 +718,37 @@ bool JitCodeCache::Commit(Thread* self,
// compiled code is considered invalidated by some class linking, but below we still make the
// compiled code valid for the method. Need cha_lock_ for checking all single-implementation
// flags and register dependencies.
- MutexLock cha_mu(self, *Locks::cha_lock_);
- bool single_impl_still_valid = true;
- for (ArtMethod* single_impl : cha_single_implementation_list) {
- if (!single_impl->HasSingleImplementation()) {
- // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
- // Hopefully the class hierarchy will be more stable when compilation is retried.
- single_impl_still_valid = false;
- ClearMethodCounter(method, /*was_warm=*/ false);
- break;
+ {
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock cha_mu(self, *Locks::cha_lock_);
+ bool single_impl_still_valid = true;
+ for (ArtMethod* single_impl : cha_single_implementation_list) {
+ if (!single_impl->HasSingleImplementation()) {
+ // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
+ // Hopefully the class hierarchy will be more stable when compilation is retried.
+ single_impl_still_valid = false;
+ ClearMethodCounter(method, /*was_warm=*/ false);
+ break;
+ }
}
- }
- // Discard the code if any single-implementation assumptions are now invalid.
- if (UNLIKELY(!single_impl_still_valid)) {
- VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
- return false;
- }
- DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
- << "Should not be using cha on debuggable apps/runs!";
+ // Discard the code if any single-implementation assumptions are now invalid.
+ if (UNLIKELY(!single_impl_still_valid)) {
+ VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
+ return false;
+ }
+ DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
+ << "Should not be using cha on debuggable apps/runs!";
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- for (ArtMethod* single_impl : cha_single_implementation_list) {
- class_linker->GetClassHierarchyAnalysis()->AddDependency(single_impl, method, method_header);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ for (ArtMethod* single_impl : cha_single_implementation_list) {
+ class_linker->GetClassHierarchyAnalysis()->AddDependency(
+ single_impl, method, method_header);
+ }
}
if (UNLIKELY(method->IsNative())) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
auto it = jni_stubs_map_.find(JniStubKey(method));
DCHECK(it != jni_stubs_map_.end())
<< "Entry inserted in NotifyCompilationOf() should be alive.";
@@ -736,14 +759,17 @@ bool JitCodeCache::Commit(Thread* self,
data->UpdateEntryPoints(method_header->GetEntryPoint());
} else {
if (method->IsPreCompiled() && IsSharedRegion(*region)) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
zygote_map_.Put(code_ptr, method);
} else {
+ ScopedDebugDisallowReadBarriers sddrb(self);
method_code_map_.Put(code_ptr, method);
}
if (compilation_kind == CompilationKind::kOsr) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
osr_code_map_.Put(method, code_ptr);
- } else if (NeedsClinitCheckBeforeCall(method) &&
- !method->GetDeclaringClass()->IsVisiblyInitialized()) {
+ } else if (method->StillNeedsClinitCheck()) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
// This situation currently only occurs in the jit-zygote mode.
DCHECK(!garbage_collect_code_);
DCHECK(method->IsPreCompiled());
@@ -784,7 +810,9 @@ bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
// This function is used only for testing and only with non-native methods.
CHECK(!method->IsNative());
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
bool osr = osr_code_map_.find(method) != osr_code_map_.end();
bool in_cache = RemoveMethodLocked(method, release_memory);
@@ -853,7 +881,9 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be in any threads call stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
RemoveMethodLocked(method, /* release_memory= */ true);
}
@@ -864,7 +894,9 @@ void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
// shouldn't be used since it is no longer logically in the jit code cache.
// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
if (old_method->IsNative()) {
// Update methods in jni_stubs_map_.
for (auto& entry : jni_stubs_map_) {
@@ -891,11 +923,14 @@ void JitCodeCache::TransitionToDebuggable() {
// Check that none of our methods have an entrypoint in the zygote exec
// space (this should be taken care of by
// ClassLinker::UpdateEntryPointsClassVisitor.
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
{
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
if (kIsDebugBuild) {
- for (const auto& it : method_code_map_) {
- ArtMethod* method = it.second;
+ // TODO: Check `jni_stubs_map_`?
+ for (const auto& entry : method_code_map_) {
+ ArtMethod* method = entry.second;
DCHECK(!method->IsPreCompiled());
DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
}
@@ -1032,22 +1067,6 @@ class MarkCodeClosure final : public Closure {
/* context= */ nullptr,
art::StackVisitor::StackWalkKind::kSkipInlinedFrames);
- if (kIsDebugBuild) {
- // The stack walking code queries the side instrumentation stack if it
- // sees an instrumentation exit pc, so the JIT code of methods in that stack
- // must have been seen. We check this below.
- for (const auto& it : *thread->GetInstrumentationStack()) {
- // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
- // its stack frame, it is not the method owning return_pc_. We just pass null to
- // LookupMethodHeader: the method is only checked against in debug builds.
- OatQuickMethodHeader* method_header =
- code_cache_->LookupMethodHeader(it.second.return_pc_, /* method= */ nullptr);
- if (method_header != nullptr) {
- const void* code = method_header->GetCode();
- CHECK(bitmap_->Test(FromCodeToAllocation(code)));
- }
- }
- }
barrier_->Pass(Thread::Current());
}
@@ -1156,6 +1175,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
// Start polling the liveness of compiled code to prepare for the next full collection.
if (next_collection_will_be_full) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
for (auto it : profiling_infos_) {
it.second->ResetCounter();
}
@@ -1187,6 +1207,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
ScopedTrace trace(__FUNCTION__);
+ ScopedDebugDisallowReadBarriers sddrb(self);
std::unordered_set<OatQuickMethodHeader*> method_headers;
{
MutexLock mu(self, *Locks::jit_lock_);
@@ -1234,6 +1255,7 @@ void JitCodeCache::SetGarbageCollectCode(bool value) {
}
void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+ ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
DCHECK(IsMethodBeingCompiled(method, kind));
switch (kind) {
case CompilationKind::kOsr:
@@ -1249,6 +1271,7 @@ void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind
}
void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+ ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
DCHECK(!IsMethodBeingCompiled(method, kind));
switch (kind) {
case CompilationKind::kOsr:
@@ -1264,6 +1287,7 @@ void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, CompilationKind kin
}
bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+ ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
switch (kind) {
case CompilationKind::kOsr:
return ContainsElement(current_osr_compilations_, method);
@@ -1275,12 +1299,14 @@ bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, CompilationKind kind
}
bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method) {
+ ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
return ContainsElement(current_optimized_compilations_, method) ||
ContainsElement(current_osr_compilations_, method) ||
ContainsElement(current_baseline_compilations_, method);
}
ProfilingInfo* JitCodeCache::GetProfilingInfo(ArtMethod* method, Thread* self) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
DCHECK(IsMethodBeingCompiled(method))
<< "GetProfilingInfo should only be called when the method is being compiled";
@@ -1292,6 +1318,7 @@ ProfilingInfo* JitCodeCache::GetProfilingInfo(ArtMethod* method, Thread* self) {
}
void JitCodeCache::ResetHotnessCounter(ArtMethod* method, Thread* self) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
auto it = profiling_infos_.find(method);
DCHECK(it != profiling_infos_.end());
@@ -1302,6 +1329,7 @@ void JitCodeCache::ResetHotnessCounter(ArtMethod* method, Thread* self) {
void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
ScopedTrace trace(__FUNCTION__);
{
+ ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
// Update to interpreter the methods that have baseline entrypoints and whose baseline
@@ -1390,7 +1418,9 @@ OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod*
CHECK(method != nullptr);
}
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
OatQuickMethodHeader* method_header = nullptr;
ArtMethod* found_method = nullptr; // Only for DCHECK(), not for JNI stubs.
if (method != nullptr && UNLIKELY(method->IsNative())) {
@@ -1445,7 +1475,9 @@ OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod*
}
OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
auto it = osr_code_map_.find(method);
if (it == osr_code_map_.end()) {
return nullptr;
@@ -1471,9 +1503,10 @@ ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
return info;
}
-ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
+ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& entries) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
// Check whether some other thread has concurrently created it.
auto it = profiling_infos_.find(method);
if (it != profiling_infos_.end()) {
@@ -1506,11 +1539,14 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
std::vector<ProfileMethodInfo>& methods) {
Thread* self = Thread::Current();
WaitUntilInlineCacheAccessible(self);
+ // TODO: Avoid read barriers for potentially dead methods.
+ // ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
ScopedTrace trace(__FUNCTION__);
- for (auto it : profiling_infos_) {
- ProfilingInfo* info = it.second;
- ArtMethod* method = info->GetMethod();
+ for (const auto& entry : profiling_infos_) {
+ ArtMethod* method = entry.first;
+ ProfilingInfo* info = entry.second;
+ DCHECK_EQ(method, info->GetMethod());
const DexFile* dex_file = method->GetDexFile();
const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
if (!ContainsElement(dex_base_locations, base_location)) {
@@ -1590,14 +1626,41 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
return osr_code_map_.find(method) != osr_code_map_.end();
}
+void JitCodeCache::VisitRoots(RootVisitor* visitor) {
+ if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
+ // In case of userfaultfd compaction, ArtMethods are updated concurrently
+ // via linear-alloc.
+ return;
+ }
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
+ for (ArtMethod* method : current_optimized_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_baseline_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_osr_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+}
+
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
Thread* self,
CompilationKind compilation_kind,
bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ // Note: the compilation kind may have been adjusted after what was passed initially.
+ // We really just want to check that the method is indeed being compiled.
+ CHECK(IsMethodBeingCompiled(method));
+ }
const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
OatQuickMethodHeader* method_header =
@@ -1612,7 +1675,7 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
}
}
- if (NeedsClinitCheckBeforeCall(method) && !prejit) {
+ if (method->NeedsClinitCheckBeforeCall() && !prejit) {
// We do not need a synchronization barrier for checking the visibly initialized status
// or checking the initialized status just for requesting visible initialization.
ClassStatus status = method->GetDeclaringClass()
@@ -1635,6 +1698,7 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
}
}
+ ScopedDebugDisallowReadBarriers sddrb(self);
if (compilation_kind == CompilationKind::kOsr) {
MutexLock mu(self, *Locks::jit_lock_);
if (osr_code_map_.find(method) != osr_code_map_.end()) {
@@ -1686,16 +1750,12 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
}
}
}
- MutexLock mu(self, *Locks::jit_lock_);
- if (IsMethodBeingCompiled(method, compilation_kind)) {
- return false;
- }
- AddMethodBeingCompiled(method, compilation_kind);
- return true;
}
+ return true;
}
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
auto it = profiling_infos_.find(method);
if (it == profiling_infos_.end()) {
@@ -1709,16 +1769,16 @@ ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self)
}
void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
+ ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
auto it = profiling_infos_.find(method);
DCHECK(it != profiling_infos_.end());
it->second->DecrementInlineUse();
}
-void JitCodeCache::DoneCompiling(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind) {
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
DCHECK_EQ(Thread::Current(), self);
+ ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
@@ -1729,25 +1789,40 @@ void JitCodeCache::DoneCompiling(ArtMethod* method,
// Failed to compile; the JNI compiler never fails, but the cache may be full.
jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
} // else Commit() updated entrypoints of all methods in the JniStubData.
- } else {
- RemoveMethodBeingCompiled(method, compilation_kind);
}
}
void JitCodeCache::InvalidateAllCompiledCode() {
- art::MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ art::MutexLock mu(self, *Locks::jit_lock_);
VLOG(jit) << "Invalidating all compiled code";
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- for (auto it : method_code_map_) {
- ArtMethod* meth = it.second;
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* linker = runtime->GetClassLinker();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+ // TODO: Clear `jni_stubs_map_`?
+ for (const auto& entry : method_code_map_) {
+ ArtMethod* meth = entry.second;
// We were compiled, so we must be warm.
ClearMethodCounter(meth, /*was_warm=*/true);
- if (meth->IsObsolete()) {
+ if (UNLIKELY(meth->IsObsolete())) {
linker->SetEntryPointsForObsoleteMethod(meth);
} else {
- Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(meth, /*aot_code=*/ nullptr);
+ instr->InitializeMethodsCode(meth, /*aot_code=*/ nullptr);
+ }
+ }
+
+ for (const auto& entry : zygote_map_) {
+ if (entry.method == nullptr) {
+ continue;
+ }
+ if (entry.method->IsPreCompiled()) {
+ entry.method->ClearPreCompiled();
}
+ Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(entry.method,
+ /*aot_code=*/nullptr);
}
+
saved_compiled_methods_map_.clear();
osr_code_map_.clear();
}
@@ -1765,7 +1840,9 @@ void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
ClearMethodCounter(method, /*was_warm=*/ true);
} else {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Thread* self = Thread::Current();
+ ScopedDebugDisallowReadBarriers sddrb(self);
+ MutexLock mu(self, *Locks::jit_lock_);
auto it = osr_code_map_.find(method);
if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
// Remove the OSR method, to avoid using it again.
@@ -1817,7 +1894,8 @@ void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
// We do this now and not in Jit::PostForkChildAction, as system server calls
// JitCodeCache::PostForkChildAction first, and then does some code loading
// that may result in new JIT tasks that we want to keep.
- ThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
+ Runtime* runtime = Runtime::Current();
+ ThreadPool* pool = runtime->GetJit()->GetThreadPool();
if (pool != nullptr) {
pool->RemoveAllTasks(self);
}
@@ -1828,7 +1906,7 @@ void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
// to write to them.
shared_region_.ResetWritableMappings();
- if (is_zygote || Runtime::Current()->IsSafeMode()) {
+ if (is_zygote || runtime->IsSafeMode()) {
// Don't create a private region for a child zygote. Regions are usually map shared
// (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
return;
@@ -1843,8 +1921,8 @@ void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
histogram_code_memory_use_.Reset();
histogram_profiling_info_memory_use_.Reset();
- size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
- size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
+ size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
+ size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
std::string error_msg;
if (!private_region_.Initialize(initial_capacity,
max_capacity,
@@ -1853,6 +1931,10 @@ void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
&error_msg)) {
LOG(WARNING) << "Could not create private region after zygote fork: " << error_msg;
}
+ if (private_region_.HasCodeMapping()) {
+ const MemMap* exec_pages = private_region_.GetExecPages();
+ runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
+ }
}
JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
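
One recurring change in jit_code_cache.cc above is that SweepRootTables() now sweeps weak class roots inline through the IsMarked visitor instead of calling Runtime::ProcessWeakClass(): a dead class root is replaced by the weak-class sentinel, and a moved one is updated to its to-space address (the inline-cache loop follows the same shape but stores null for dead classes). The following simplified sketch illustrates that sweep; Object, IsMarkedVisitor, and the sentinel below are stand-ins, not ART's mirror::Class/GcRoot machinery.

// Simplified sketch of the weak class-root sweep now done inline in SweepRootTables().
#include <cstddef>

struct Object {};

struct IsMarkedVisitor {
  // Returns the (possibly moved) to-space address if the object is live, nullptr if dead.
  virtual Object* IsMarked(Object* obj) = 0;
  virtual ~IsMarkedVisitor() = default;
};

static Object g_sentinel;                       // placeholder for the weak-class sentinel
Object* const kClassSentinel = &g_sentinel;

void SweepClassRoots(Object** roots, size_t count, IsMarkedVisitor* visitor) {
  for (size_t i = 0; i < count; ++i) {
    Object* old_ref = roots[i];
    if (old_ref == nullptr || old_ref == kClassSentinel) {
      continue;                                 // nothing to sweep
    }
    Object* new_ref = visitor->IsMarked(old_ref);
    if (new_ref == nullptr) {
      roots[i] = kClassSentinel;                // class died: install the sentinel
    } else if (new_ref != old_ref) {
      roots[i] = new_ref;                       // class moved: update the reference
    }
  }
}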
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fb861a4d82..a6b101be25 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -215,7 +215,7 @@ class JitCodeCache {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
- void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind)
+ void DoneCompiling(ArtMethod* method, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -230,10 +230,12 @@ class JitCodeCache {
bool PrivateRegionContainsPc(const void* pc) const;
// Return true if the code cache contains this method.
- bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
+ bool ContainsMethod(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::jit_lock_);
// Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
- const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
+ const void* GetJniStubCode(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::jit_lock_);
// Allocate a region for both code and data in the JIT code cache.
// The reserved memory is left completely uninitialized.
@@ -403,6 +405,20 @@ class JitCodeCache {
ProfilingInfo* GetProfilingInfo(ArtMethod* method, Thread* self);
void ResetHotnessCounter(ArtMethod* method, Thread* self);
+ void VisitRoots(RootVisitor* visitor);
+
+ // Return whether `method` is being compiled with the given mode.
+ bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+ // Remove `method` from the list of methods being compiled with the given mode.
+ void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+ // Record that `method` is being compiled with the given mode.
+ void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
private:
JitCodeCache();
@@ -492,18 +508,6 @@ class JitCodeCache {
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Record that `method` is being compiled with the given mode.
- void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Remove `method` from the list of methods meing compiled with the given mode.
- void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Return whether `method` is being compiled with the given mode.
- bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
// Return whether `method` is being compiled in any mode.
bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);
@@ -528,6 +532,14 @@ class JitCodeCache {
// -------------- Global JIT maps --------------------------------------- //
+ // Note: The methods held in these maps may be dead, so we must ensure that we do not use
+ // read barriers on their declaring classes as that could unnecessarily keep them alive or
+ // crash the GC, depending on the GC phase and particular GC's details. Asserting that we
+ // do not emit read barriers for these methods can be tricky as we're allowed to emit read
+ // barriers for other methods that are known to be alive, such as the method being compiled.
+ // The GC must ensure that methods in these maps are cleaned up with `RemoveMethodsIn()`
+ // before the declaring class memory is freed.
+
// Holds compiled code associated with the shorty for a JNI stub.
SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);
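
The header change moves IsMethodBeingCompiled()/AddMethodBeingCompiled()/RemoveMethodBeingCompiled() to the public section while keeping their REQUIRES(Locks::jit_lock_) contracts, and the global maps remain GUARDED_BY(Locks::jit_lock_). These are Clang thread-safety annotations checked with -Wthread-safety. Below is a minimal, self-contained example of the same annotations; the macro definitions and Mutex wrapper are illustrative, since ART ships its own annotated Mutex and macros.

// Minimal example of the GUARDED_BY / REQUIRES annotations seen in the header above.
// Build with: clang++ -Wthread-safety -c example.cc
#include <mutex>

#define CAPABILITY(x)  __attribute__((capability(x)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }
 private:
  std::mutex mu_;
};

class Cache {
 public:
  // Caller must already hold lock_, mirroring REQUIRES(Locks::jit_lock_)
  // on AddMethodBeingCompiled() and friends.
  void AddLocked(int key) REQUIRES(lock_) { last_key_ = key; ++size_; }

  void Add(int key) {
    lock_.Lock();
    AddLocked(key);   // OK: the analysis sees that lock_ is held here
    lock_.Unlock();
  }

 private:
  Mutex lock_;
  int last_key_ GUARDED_BY(lock_) = 0;
  int size_ GUARDED_BY(lock_) = 0;
};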
diff --git a/runtime/jit/jit_load_test.cc b/runtime/jit/jit_load_test.cc
new file mode 100644
index 0000000000..4b080a57a3
--- /dev/null
+++ b/runtime/jit/jit_load_test.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_runtime_test.h"
+#include "compiler_callbacks.h"
+
+namespace art {
+
+class JitLoadTest : public CommonRuntimeTest {
+ protected:
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
+ callbacks_.reset();
+ CommonRuntimeTest::SetUpRuntimeOptions(options);
+ options->push_back(std::make_pair("-Xusejit:true", nullptr));
+ }
+};
+
+
+TEST_F(JitLoadTest, JitLoad) {
+ Thread::Current()->TransitionFromSuspendedToRunnable();
+ runtime_->Start();
+ ASSERT_NE(runtime_->GetJit(), nullptr);
+}
+
+} // namespace art
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 56407f58c0..410bf7004a 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -27,7 +27,7 @@
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/systrace.h"
-#include "gc/allocator/dlmalloc.h"
+#include "gc/allocator/art-dlmalloc.h"
#include "jit/jit_scoped_code_cache_write.h"
#include "oat_quick_method_header.h"
#include "palette/palette.h"
@@ -360,7 +360,7 @@ const uint8_t* JitMemoryRegion::CommitCode(ArrayRef<const uint8_t> reserved_code
DCHECK(IsInExecSpace(reserved_code.data()));
ScopedCodeCacheWrite scc(*this);
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
size_t header_size = OatQuickMethodHeader::InstructionAlignedSize();
size_t total_size = header_size + code.size();
@@ -468,7 +468,7 @@ bool JitMemoryRegion::CommitData(ArrayRef<const uint8_t> reserved_data,
}
const uint8_t* JitMemoryRegion::AllocateCode(size_t size) {
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
void* result = mspace_memalign(exec_mspace_, alignment, size);
if (UNLIKELY(result == nullptr)) {
return nullptr;
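
Both jit_memory_region.cc and jit_code_cache.cc now use GetInstructionSetCodeAlignment() rather than GetInstructionSetAlignment() when sizing the method header that sits in front of JIT code: the code pointer is the allocation start plus the header size rounded up to that alignment, and the inverse mapping subtracts the same amount. The sketch below walks through that arithmetic with made-up sizes; kHeaderSize and kCodeAlignment are placeholders, not ART's real values.

// Rough sketch of the FromCodeToAllocation()/FromAllocationToCode() arithmetic.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t RoundUp(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);  // alignment must be a power of two
}

int main() {
  constexpr size_t kHeaderSize = 24;     // stand-in for sizeof(OatQuickMethodHeader)
  constexpr size_t kCodeAlignment = 16;  // stand-in for GetInstructionSetCodeAlignment(kRuntimeISA)

  alignas(16) static uint8_t allocation[256] = {};
  const uint8_t* code = allocation + RoundUp(kHeaderSize, kCodeAlignment);

  // Mapping back from the code pointer to the allocation start.
  const uint8_t* alloc_again = code - RoundUp(kHeaderSize, kCodeAlignment);
  assert(alloc_again == allocation);
  assert(reinterpret_cast<uintptr_t>(code) % kCodeAlignment == 0);
  return 0;
}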
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index cea654fe17..3321636122 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -23,7 +23,6 @@
#include <unistd.h>
#include "android-base/strings.h"
-
#include "art_method-inl.h"
#include "base/compiler_filter.h"
#include "base/enums.h"
@@ -32,6 +31,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
+#include "base/unix_file/fd_file.h"
#include "class_table-inl.h"
#include "dex/dex_file_loader.h"
#include "dex_reference_collection.h"
@@ -136,21 +136,20 @@ void ProfileSaver::Run() {
{
MutexLock mu(self, wait_lock_);
- const uint64_t end_time = NanoTime() + MsToNs(force_early_first_save
+ const uint64_t sleep_time = MsToNs(force_early_first_save
? options_.GetMinFirstSaveMs()
: options_.GetSaveResolvedClassesDelayMs());
- while (!Runtime::Current()->GetStartupCompleted()) {
+ const uint64_t start_time = NanoTime();
+ const uint64_t end_time = start_time + sleep_time;
+ while (!Runtime::Current()->GetStartupCompleted() || force_early_first_save) {
const uint64_t current_time = NanoTime();
if (current_time >= end_time) {
break;
}
period_condition_.TimedWait(self, NsToMs(end_time - current_time), 0);
}
- total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs();
+ total_ms_of_sleep_ += NsToMs(NanoTime() - start_time);
}
- // Tell the runtime that startup is completed if it has not already been notified.
- // TODO: We should use another thread to do this in case the profile saver is not running.
- Runtime::Current()->NotifyStartupCompleted();
FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true);
@@ -869,11 +868,17 @@ bool ProfileSaver::ProcessProfilingInfo(
}
{
ProfileCompilationInfo info(Runtime::Current()->GetArenaPool(),
- /*for_boot_image=*/ options_.GetProfileBootClassPath());
- if (!info.Load(filename, /*clear_if_invalid=*/ true)) {
+ /*for_boot_image=*/options_.GetProfileBootClassPath());
+ // Load the existing profile before saving.
+ // If the file is updated between `Load` and `Save`, the update will be lost. This is
+ // acceptable. The main reason is that the lost entries will eventually come back if the user
+ // keeps using the same methods, or they won't be needed if the user doesn't use the same
+ // methods again.
+ if (!info.Load(filename, /*clear_if_invalid=*/true)) {
LOG(WARNING) << "Could not forcefully load profile " << filename;
continue;
}
+
uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
VLOG(profiler) << "last_save_number_of_methods=" << last_save_number_of_methods
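
The ProfileSaver::Run() change above turns the initial delay into a deadline-based wait: compute the end time once, loop on the condition variable until startup completes or the deadline passes, then charge the actual elapsed time to total_ms_of_sleep_. The sketch below shows the same bounded-wait shape using std:: primitives instead of ART's Mutex/ConditionVariable, and it deliberately omits the force_early_first_save special case.

// Sketch of a bounded condition wait that reports the time actually slept.
#include <chrono>
#include <condition_variable>
#include <mutex>

using Clock = std::chrono::steady_clock;

std::mutex mu;
std::condition_variable cv;
bool startup_completed = false;  // set under mu by another thread, then cv.notify_all()

Clock::duration WaitForStartupOrDeadline(Clock::duration max_sleep) {
  std::unique_lock<std::mutex> lock(mu);
  const auto start = Clock::now();
  const auto deadline = start + max_sleep;
  while (!startup_completed && Clock::now() < deadline) {
    cv.wait_until(lock, deadline);  // may wake spuriously; the loop re-checks
  }
  return Clock::now() - start;      // actual sleep, analogous to total_ms_of_sleep_
}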
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index ce0a30fd9f..021bebfe4e 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -72,6 +72,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
Hotness::Flag flags) {
ProfileCompilationInfo info;
std::vector<ProfileMethodInfo> profile_methods;
+ profile_methods.reserve(methods.size());
ScopedObjectAccess soa(Thread::Current());
for (ArtMethod* method : methods) {
profile_methods.emplace_back(
@@ -188,7 +189,7 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
// Check that what we saved is in the profile.
ProfileCompilationInfo info1;
- ASSERT_TRUE(info1.Load(GetFd(profile)));
+ ASSERT_TRUE(info1.Load(profile.GetFilename(), /*clear_if_invalid=*/false));
ASSERT_EQ(info1.GetNumberOfMethods(), main_methods.size());
{
ScopedObjectAccess soa(self);
@@ -208,7 +209,7 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
// Check that what we saved is in the profile (methods form Main and Second).
ProfileCompilationInfo info2;
- ASSERT_TRUE(info2.Load(GetFd(profile)));
+ ASSERT_TRUE(info2.Load(profile.GetFilename(), /*clear_if_invalid=*/false));
ASSERT_EQ(info2.GetNumberOfMethods(), main_methods.size() + second_methods.size());
{
ScopedObjectAccess soa(self);
@@ -248,7 +249,7 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
// Check that what we saved is in the profile.
ProfileCompilationInfo info;
- ASSERT_TRUE(info.Load(GetFd(profile)));
+ ASSERT_TRUE(info.Load(profile.GetFilename(), /*clear_if_invalid=*/false));
ASSERT_EQ(info.GetNumberOfMethods(), main_methods.size());
{
ScopedObjectAccess soa(self);