Refine statistics around the JIT.
- Better naming.
- Now also record the time spent in code cache collections.
- Random cleanups.
bug:23128949
bug:27445008
bug:27442890
Change-Id: I1dd52544bea678af868e7c47907f7a0fc9a146c3
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 79a6d38..6ff1e2e 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -85,7 +85,7 @@
exit(EXIT_FAILURE);
}
-JitCompiler::JitCompiler() : total_time_(0) {
+JitCompiler::JitCompiler() {
compiler_options_.reset(new CompilerOptions(
CompilerOptions::kDefaultCompilerFilter,
CompilerOptions::kDefaultHugeMethodThreshold,
@@ -195,7 +195,6 @@
bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
DCHECK(!method->IsProxyMethod());
TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
- const uint64_t start_time = NanoTime();
StackHandleScope<2> hs(self);
self->AssertNoPendingException();
Runtime* runtime = Runtime::Current();
@@ -236,7 +235,6 @@
runtime->GetJitArenaPool()->TrimMaps();
}
- total_time_ += NanoTime() - start_time;
runtime->GetJit()->AddTimingLogger(logger);
return success;
}
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 5294d0e..bc134fe 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -18,13 +18,10 @@
#define ART_COMPILER_JIT_JIT_COMPILER_H_
#include "base/mutex.h"
-#include "compiler_callbacks.h"
#include "compiled_method.h"
-#include "dex/verification_results.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
-#include "oat_file.h"
namespace art {
@@ -37,23 +34,19 @@
public:
static JitCompiler* Create();
virtual ~JitCompiler();
+
+ // Compilation entrypoint. Returns whether the compilation succeeded.
bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
SHARED_REQUIRES(Locks::mutator_lock_);
- CompilerCallbacks* GetCompilerCallbacks() const;
- size_t GetTotalCompileTime() const {
- return total_time_;
- }
+
CompilerOptions* GetCompilerOptions() const {
return compiler_options_.get();
}
private:
- uint64_t total_time_;
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<CumulativeLogger> cumulative_logger_;
- std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
- std::unique_ptr<CompilerCallbacks> callbacks_;
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
std::unique_ptr<File> perf_file_;
@@ -62,8 +55,7 @@
// This is in the compiler since the runtime doesn't have access to the compiled method
// structures.
- bool AddToCodeCache(ArtMethod* method,
- const CompiledMethod* compiled_method)
+ bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 91b006a..a3b99e3 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -59,12 +59,7 @@
}
void Jit::DumpInfo(std::ostream& os) {
- os << "JIT code cache size=" << PrettySize(code_cache_->CodeCacheSize()) << "\n"
- << "JIT data cache size=" << PrettySize(code_cache_->DataCacheSize()) << "\n"
- << "JIT current capacity=" << PrettySize(code_cache_->GetCurrentCapacity()) << "\n"
- << "JIT number of compiled code=" << code_cache_->NumberOfCompiledCode() << "\n"
- << "JIT total number of compilations=" << code_cache_->NumberOfCompilations() << "\n"
- << "JIT total number of osr compilations=" << code_cache_->NumberOfOsrCompilations() << "\n";
+ code_cache_->Dump(os);
cumulative_timings_.Dump(os);
}
@@ -97,7 +92,7 @@
return nullptr;
}
jit->save_profiling_info_ = options->GetSaveProfilingInfo();
- LOG(INFO) << "JIT created with initial_capacity="
+ VLOG(jit) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
<< ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
<< ", compile_threshold=" << options->GetCompileThreshold()
@@ -174,7 +169,6 @@
// of that proxy method, as the compiler does not expect a proxy method.
ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
- VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to code cache";
return false;
}
bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index e0380bd..8c69bc8 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -24,6 +24,7 @@
#include "debugger_interface.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
+#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
@@ -129,7 +130,9 @@
used_memory_for_data_(0),
used_memory_for_code_(0),
number_of_compilations_(0),
- number_of_osr_compilations_(0) {
+ number_of_osr_compilations_(0),
+ number_of_deoptimizations_(0),
+ number_of_collections_(0) {
DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -363,16 +366,6 @@
return reinterpret_cast<uint8_t*>(method_header);
}
-size_t JitCodeCache::NumberOfCompilations() {
- MutexLock mu(Thread::Current(), lock_);
- return number_of_compilations_;
-}
-
-size_t JitCodeCache::NumberOfOsrCompilations() {
- MutexLock mu(Thread::Current(), lock_);
- return number_of_osr_compilations_;
-}
-
size_t JitCodeCache::CodeCacheSize() {
MutexLock mu(Thread::Current(), lock_);
return CodeCacheSizeLocked();
@@ -391,11 +384,6 @@
return used_memory_for_data_;
}
-size_t JitCodeCache::NumberOfCompiledCode() {
- MutexLock mu(Thread::Current(), lock_);
- return method_code_map_.size();
-}
-
void JitCodeCache::ClearData(Thread* self, void* data) {
MutexLock mu(self, lock_);
FreeData(reinterpret_cast<uint8_t*>(data));
@@ -577,6 +565,7 @@
if (WaitForPotentialCollectionToComplete(self)) {
return;
} else {
+ number_of_collections_++;
live_bitmap_.reset(CodeCacheBitmap::Create(
"code-cache-bitmap",
reinterpret_cast<uintptr_t>(code_map_->Begin()),
@@ -585,63 +574,69 @@
}
}
- bool do_full_collection = false;
+ TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
{
- MutexLock mu(self, lock_);
- do_full_collection = ShouldDoFullCollection();
- }
+ TimingLogger::ScopedTiming st("Code cache collection", &logger);
- if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
- LOG(INFO) << "Do "
- << (do_full_collection ? "full" : "partial")
- << " code cache collection, code="
- << PrettySize(CodeCacheSize())
- << ", data=" << PrettySize(DataCacheSize());
- }
-
- DoCollection(self, /* collect_profiling_info */ do_full_collection);
-
- if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
- LOG(INFO) << "After code cache collection, code="
- << PrettySize(CodeCacheSize())
- << ", data=" << PrettySize(DataCacheSize());
- }
-
- {
- MutexLock mu(self, lock_);
-
- // Increase the code cache only when we do partial collections.
- // TODO: base this strategy on how full the code cache is?
- if (do_full_collection) {
- last_collection_increased_code_cache_ = false;
- } else {
- last_collection_increased_code_cache_ = true;
- IncreaseCodeCacheCapacity();
+ bool do_full_collection = false;
+ {
+ MutexLock mu(self, lock_);
+ do_full_collection = ShouldDoFullCollection();
}
- bool next_collection_will_be_full = ShouldDoFullCollection();
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "Do "
+ << (do_full_collection ? "full" : "partial")
+ << " code cache collection, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
- // Start polling the liveness of compiled code to prepare for the next full collection.
- // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
- // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
- if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
- next_collection_will_be_full) {
- // Save the entry point of methods we have compiled, and update the entry
- // point of those methods to the interpreter. If the method is invoked, the
- // interpreter will update its entry point to the compiled code and call it.
- for (ProfilingInfo* info : profiling_infos_) {
- const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (ContainsPc(entry_point)) {
- info->SetSavedEntryPoint(entry_point);
- info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
- }
+ DoCollection(self, /* collect_profiling_info */ do_full_collection);
+
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "After code cache collection, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
+
+ {
+ MutexLock mu(self, lock_);
+
+ // Increase the code cache only when we do partial collections.
+ // TODO: base this strategy on how full the code cache is?
+ if (do_full_collection) {
+ last_collection_increased_code_cache_ = false;
+ } else {
+ last_collection_increased_code_cache_ = true;
+ IncreaseCodeCacheCapacity();
}
- DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+ bool next_collection_will_be_full = ShouldDoFullCollection();
+
+ // Start polling the liveness of compiled code to prepare for the next full collection.
+ // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+ // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+ if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
+ next_collection_will_be_full) {
+ // Save the entry point of methods we have compiled, and update the entry
+ // point of those methods to the interpreter. If the method is invoked, the
+ // interpreter will update its entry point to the compiled code and call it.
+ for (ProfilingInfo* info : profiling_infos_) {
+ const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (ContainsPc(entry_point)) {
+ info->SetSavedEntryPoint(entry_point);
+ info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
+ }
+
+ DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+ }
+ live_bitmap_.reset(nullptr);
+ NotifyCollectionDone(self);
}
- live_bitmap_.reset(nullptr);
- NotifyCollectionDone(self);
}
+ Runtime::Current()->GetJit()->AddTimingLogger(logger);
}
void JitCodeCache::RemoveUnusedAndUnmarkedCode(Thread* self) {
@@ -874,17 +869,27 @@
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+ VLOG(jit) << PrettyMethod(method) << " is already compiled";
return false;
}
MutexLock mu(self, lock_);
if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
+ VLOG(jit) << PrettyMethod(method) << " is already osr compiled";
return false;
}
+
ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
- if (info == nullptr || info->IsMethodBeingCompiled()) {
+ if (info == nullptr) {
+ VLOG(jit) << PrettyMethod(method) << " needs a ProfilingInfo to be compiled";
return false;
}
+
+ if (info->IsMethodBeingCompiled()) {
+ VLOG(jit) << PrettyMethod(method) << " is already being compiled";
+ return false;
+ }
+
info->SetIsMethodBeingCompiled(true);
return true;
}
@@ -924,6 +929,8 @@
osr_code_map_.erase(it);
}
}
+ MutexLock mu(Thread::Current(), lock_);
+ number_of_deoptimizations_++;
}
uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
@@ -953,5 +960,18 @@
mspace_free(data_mspace_, data);
}
+void JitCodeCache::Dump(std::ostream& os) {
+ MutexLock mu(Thread::Current(), lock_);
+ os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
+ << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
+ << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
+ << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
+ << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
+ << "Total number of JIT compilations for on stack replacement: "
+ << number_of_osr_compilations_ << "\n"
+ << "Total number of deoptimizations: " << number_of_deoptimizations_ << "\n"
+ << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
+}
+
} // namespace jit
} // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index aa1b139..2a41a70 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -67,14 +67,6 @@
// Number of bytes allocated in the data cache.
size_t DataCacheSize() REQUIRES(!lock_);
- // Number of compiled code in the code cache. Note that this is not the number
- // of methods that got JIT compiled, as we might have collected some.
- size_t NumberOfCompiledCode() REQUIRES(!lock_);
-
- // Number of compilations done throughout the lifetime of the JIT.
- size_t NumberOfCompilations() REQUIRES(!lock_);
- size_t NumberOfOsrCompilations() REQUIRES(!lock_);
-
bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -185,6 +177,8 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) REQUIRES(!lock_);
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
@@ -256,6 +250,11 @@
REQUIRES(lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void FreeCode(uint8_t* code) REQUIRES(lock_);
+ uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
+ void FreeData(uint8_t* data) REQUIRES(lock_);
+ uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
@@ -307,19 +306,21 @@
// The size in bytes of used memory for the code portion of the code cache.
size_t used_memory_for_code_ GUARDED_BY(lock_);
- void FreeCode(uint8_t* code) REQUIRES(lock_);
- uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
- void FreeData(uint8_t* data) REQUIRES(lock_);
- uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
-
// Number of compilations done throughout the lifetime of the JIT.
size_t number_of_compilations_ GUARDED_BY(lock_);
+
+ // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
size_t number_of_osr_compilations_ GUARDED_BY(lock_);
+ // Number of deoptimizations done throughout the lifetime of the JIT.
+ size_t number_of_deoptimizations_ GUARDED_BY(lock_);
+
+ // Number of code cache collections done throughout the lifetime of the JIT.
+ size_t number_of_collections_ GUARDED_BY(lock_);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
-
} // namespace jit
} // namespace art