Add more statistics for memory use of the JIT.
Collect data for stack maps, profiling info, and compiled code.
Bug: 27520994
Change-Id: Ic87361230c96ce0090027a37d750e948d806c597
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 53bd38d..a42b887 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -892,7 +892,7 @@
}
size_t stack_map_size = codegen->ComputeStackMapsSize();
- uint8_t* stack_map_data = code_cache->ReserveData(self, stack_map_size);
+ uint8_t* stack_map_data = code_cache->ReserveData(self, stack_map_size, method);
if (stack_map_data == nullptr) {
return false;
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index af47da6..c681ed7 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -40,6 +40,9 @@
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
+static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
+
#define CHECKED_MPROTECT(memory, size, prot) \
do { \
int rc = mprotect(memory, size, prot); \
@@ -134,7 +137,10 @@
number_of_compilations_(0),
number_of_osr_compilations_(0),
number_of_deoptimizations_(0),
- number_of_collections_(0) {
+ number_of_collections_(0),
+ histogram_stack_map_memory_use_("Memory used for stack maps", 16),
+ histogram_code_memory_use_("Memory used for compiled code", 16),
+ histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -377,6 +383,13 @@
<< " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
<< reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
<< reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+ histogram_code_memory_use_.AddValue(code_size);
+ if (code_size > kCodeSizeLogThreshold) {
+ LOG(INFO) << "JIT allocated "
+ << PrettySize(code_size)
+ << " for compiled code of "
+ << PrettyMethod(method);
+ }
}
return reinterpret_cast<uint8_t*>(method_header);
@@ -405,7 +418,7 @@
FreeData(reinterpret_cast<uint8_t*>(data));
}
-uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
+uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size, ArtMethod* method) {
size = RoundUp(size, sizeof(void*));
uint8_t* result = nullptr;
@@ -425,15 +438,14 @@
result = AllocateData(size);
}
- return result;
-}
-
-uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
- uint8_t* result = ReserveData(self, end - begin);
- if (result == nullptr) {
- return nullptr; // Out of space in the data cache.
+ MutexLock mu(self, lock_);
+ histogram_stack_map_memory_use_.AddValue(size);
+ if (size > kStackMapSizeLogThreshold) {
+ LOG(INFO) << "JIT allocated "
+ << PrettySize(size)
+ << " for stack maps of "
+ << PrettyMethod(method);
}
- std::copy(begin, end, result);
return result;
}
@@ -868,6 +880,7 @@
method->SetProfilingInfo(info);
profiling_infos_.push_back(info);
+ histogram_profiling_info_memory_use_.AddValue(profile_info_size);
return info;
}
@@ -1021,6 +1034,9 @@
<< number_of_osr_compilations_ << "\n"
<< "Total number of deoptimizations: " << number_of_deoptimizations_ << "\n"
<< "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
+ histogram_stack_map_memory_use_.PrintMemoryUse(os);
+ histogram_code_memory_use_.PrintMemoryUse(os);
+ histogram_profiling_info_memory_use_.PrintMemoryUse(os);
}
} // namespace jit
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 98dd70d..a54f04f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -20,6 +20,7 @@
#include "instrumentation.h"
#include "atomic.h"
+#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/bitmap.h"
@@ -109,7 +110,7 @@
bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
// Reserve a region of data of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveData(Thread* self, size_t size)
+ uint8_t* ReserveData(Thread* self, size_t size, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -118,12 +119,6 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- // Add a data array of size (end - begin) with the associated contents, returns null if there
- // is no more room.
- uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!lock_);
-
CodeCacheBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
@@ -332,6 +327,15 @@
// Number of code cache collections done throughout the lifetime of the JIT.
size_t number_of_collections_ GUARDED_BY(lock_);
+ // Histogram for keeping track of stack map size statistics.
+ Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);
+
+ // Histogram for keeping track of code size statistics.
+ Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);
+
+ // Histogram for keeping track of profiling info statistics.
+ Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};