Increase code cache after 1 full collection.
Also add a max capacity option.
Change-Id: Icd442b72e9be0c6b091b588b4c4473c69b7cde10
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index f34b5ed..34fb790 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -457,8 +457,10 @@
EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJIT);
}
{
- EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * KB), "-Xjitcodecachesize:16K", M::JITCodeCacheCapacity);
- EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * MB), "-Xjitcodecachesize:16M", M::JITCodeCacheCapacity);
+ EXPECT_SINGLE_PARSE_VALUE(
+ MemoryKiB(16 * KB), "-Xjitcodecacheinitialcapacity:16K", M::JITCodeCacheInitialCapacity);
+ EXPECT_SINGLE_PARSE_VALUE(
+ MemoryKiB(16 * MB), "-Xjitcodecacheinitialcapacity:16M", M::JITCodeCacheInitialCapacity);
}
{
EXPECT_SINGLE_PARSE_VALUE(12345u, "-Xjitthreshold:12345", M::JITCompileThreshold);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 77f606d..e754a52 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -20,6 +20,8 @@
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "memory_tool_malloc_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -318,10 +320,17 @@
// Implement the dlmalloc morecore callback.
void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) {
- Heap* heap = Runtime::Current()->GetHeap();
+ Runtime* runtime = Runtime::Current();
+ Heap* heap = runtime->GetHeap();
::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
// Support for multiple DlMalloc provided by a slow path.
if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
+ if (LIKELY(runtime->GetJit() != nullptr)) {
+ jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
+ if (code_cache->OwnsSpace(mspace)) {
+ return code_cache->MoreCore(mspace, increment);
+ }
+ }
dlmalloc_space = nullptr;
for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index f691151..ecbf13c 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -34,8 +34,10 @@
JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
auto* jit_options = new JitOptions;
jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT);
- jit_options->code_cache_capacity_ =
- options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
+ jit_options->code_cache_initial_capacity_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
+ jit_options->code_cache_max_capacity_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
jit_options->compile_threshold_ =
options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
jit_options->warmup_threshold_ =
@@ -69,13 +71,15 @@
if (!jit->LoadCompiler(error_msg)) {
return nullptr;
}
- jit->code_cache_.reset(JitCodeCache::Create(options->GetCodeCacheCapacity(), error_msg));
+ jit->code_cache_.reset(JitCodeCache::Create(
+ options->GetCodeCacheInitialCapacity(), options->GetCodeCacheMaxCapacity(), error_msg));
if (jit->GetCodeCache() == nullptr) {
return nullptr;
}
- LOG(INFO) << "JIT created with code_cache_capacity="
- << PrettySize(options->GetCodeCacheCapacity())
- << " compile_threshold=" << options->GetCompileThreshold();
+ LOG(INFO) << "JIT created with initial_capacity="
+ << PrettySize(options->GetCodeCacheInitialCapacity())
+ << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
+ << ", compile_threshold=" << options->GetCompileThreshold();
return jit.release();
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 1f89f9b..fc76549 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -102,8 +102,11 @@
size_t GetWarmupThreshold() const {
return warmup_threshold_;
}
- size_t GetCodeCacheCapacity() const {
- return code_cache_capacity_;
+ size_t GetCodeCacheInitialCapacity() const {
+ return code_cache_initial_capacity_;
+ }
+ size_t GetCodeCacheMaxCapacity() const {
+ return code_cache_max_capacity_;
}
bool DumpJitInfoOnShutdown() const {
return dump_info_on_shutdown_;
@@ -117,13 +120,18 @@
private:
bool use_jit_;
- size_t code_cache_capacity_;
+ size_t code_cache_initial_capacity_;
+ size_t code_cache_max_capacity_;
size_t compile_threshold_;
size_t warmup_threshold_;
bool dump_info_on_shutdown_;
- JitOptions() : use_jit_(false), code_cache_capacity_(0), compile_threshold_(0),
- dump_info_on_shutdown_(false) { }
+ JitOptions()
+ : use_jit_(false),
+ code_cache_initial_capacity_(0),
+ code_cache_max_capacity_(0),
+ compile_threshold_(0),
+ dump_info_on_shutdown_(false) { }
DISALLOW_COPY_AND_ASSIGN(JitOptions);
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index a291a09..da79109 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -44,73 +44,89 @@
} \
} while (false) \
-JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
- CHECK_GT(capacity, 0U);
- CHECK_LT(capacity, kMaxCapacity);
+JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
+ size_t max_capacity,
+ std::string* error_msg) {
+ CHECK_GE(max_capacity, initial_capacity);
+ // We need to have 32 bit offsets from method headers in code cache which point to things
+ // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
+  // Ensure we're at or below 1 GB to be safe.
+ if (max_capacity > 1 * GB) {
+ std::ostringstream oss;
+    oss << "Maximum code cache capacity is limited to 1 GB, "
+ << PrettySize(max_capacity) << " is too big";
+ *error_msg = oss.str();
+ return nullptr;
+ }
+
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
MemMap* data_map = MemMap::MapAnonymous(
- "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
+ "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str);
if (data_map == nullptr) {
std::ostringstream oss;
- oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
+ oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
return nullptr;
}
+  // Align both capacities to a multiple of two pages, as that's the granularity the mspaces use.
+ initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+ max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+
// Data cache is 1 / 2 of the map.
// TODO: Make this variable?
- size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
- size_t code_size = data_map->Size() - data_size;
+ size_t data_size = max_capacity / 2;
+ size_t code_size = max_capacity - data_size;
+ DCHECK_EQ(code_size + data_size, max_capacity);
uint8_t* divider = data_map->Begin() + data_size;
- // We need to have 32 bit offsets from method headers in code cache which point to things
- // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
if (code_map == nullptr) {
std::ostringstream oss;
- oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
+ oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
return nullptr;
}
- DCHECK_EQ(code_map->Size(), code_size);
DCHECK_EQ(code_map->Begin(), divider);
- return new JitCodeCache(code_map, data_map);
+ data_size = initial_capacity / 2;
+ code_size = initial_capacity - data_size;
+ DCHECK_EQ(code_size + data_size, initial_capacity);
+ return new JitCodeCache(code_map, data_map, code_size, data_size, max_capacity);
}
-JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
+JitCodeCache::JitCodeCache(MemMap* code_map,
+ MemMap* data_map,
+ size_t initial_code_capacity,
+ size_t initial_data_capacity,
+ size_t max_capacity)
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache variable", lock_),
collection_in_progress_(false),
code_map_(code_map),
- data_map_(data_map) {
+ data_map_(data_map),
+ max_capacity_(max_capacity),
+ current_capacity_(initial_code_capacity + initial_data_capacity),
+ code_end_(initial_code_capacity),
+ data_end_(initial_data_capacity),
+ has_done_one_collection_(false) {
- code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
- data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
+ code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
+ data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
PLOG(FATAL) << "create_mspace_with_base failed";
}
- // Prevent morecore requests from the mspace.
- mspace_set_footprint_limit(code_mspace_, code_map_->Size());
- mspace_set_footprint_limit(data_mspace_, data_map_->Size());
+ SetFootprintLimit(current_capacity_);
CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
- live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
- reinterpret_cast<uintptr_t>(code_map_->Begin()),
- reinterpret_cast<uintptr_t>(code_map_->End())));
-
- if (live_bitmap_.get() == nullptr) {
- PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
- }
-
- VLOG(jit) << "Created jit code cache: data size="
- << PrettySize(data_map_->Size())
- << ", code size="
- << PrettySize(code_map_->Size());
+ VLOG(jit) << "Created jit code cache: initial data size="
+ << PrettySize(initial_data_capacity)
+ << ", initial code size="
+ << PrettySize(initial_code_capacity);
}
bool JitCodeCache::ContainsPc(const void* ptr) const {
@@ -433,13 +449,48 @@
Barrier* const barrier_;
};
-void JitCodeCache::GarbageCollectCache(Thread* self) {
- if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
- LOG(INFO) << "Clearing code cache, code="
- << PrettySize(CodeCacheSize())
- << ", data=" << PrettySize(DataCacheSize());
+void JitCodeCache::NotifyCollectionDone(Thread* self) {
+ collection_in_progress_ = false;
+ lock_cond_.Broadcast(self);
+}
+
+void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
+ size_t per_space_footprint = new_footprint / 2;
+ DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
+ DCHECK_EQ(per_space_footprint * 2, new_footprint);
+ mspace_set_footprint_limit(data_mspace_, per_space_footprint);
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ mspace_set_footprint_limit(code_mspace_, per_space_footprint);
+ }
+}
+
+bool JitCodeCache::IncreaseCodeCacheCapacity() {
+ if (current_capacity_ == max_capacity_) {
+ return false;
}
+ // Double the capacity if we're below 1MB, or increase it by 1MB if
+ // we're above.
+ if (current_capacity_ < 1 * MB) {
+ current_capacity_ *= 2;
+ } else {
+ current_capacity_ += 1 * MB;
+ }
+ if (current_capacity_ > max_capacity_) {
+ current_capacity_ = max_capacity_;
+ }
+
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
+ }
+
+ SetFootprintLimit(current_capacity_);
+
+ return true;
+}
+
+void JitCodeCache::GarbageCollectCache(Thread* self) {
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
// Wait for an existing collection, or let everyone know we are starting one.
@@ -452,6 +503,28 @@
collection_in_progress_ = true;
}
}
+
+ // Check if we just need to grow the capacity. If we don't, allocate the bitmap while
+ // we hold the lock.
+ {
+ MutexLock mu(self, lock_);
+ if (has_done_one_collection_ && IncreaseCodeCacheCapacity()) {
+ has_done_one_collection_ = false;
+ NotifyCollectionDone(self);
+ return;
+ } else {
+ live_bitmap_.reset(CodeCacheBitmap::Create(
+ "code-cache-bitmap",
+ reinterpret_cast<uintptr_t>(code_map_->Begin()),
+ reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+ }
+ }
+
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "Clearing code cache, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
// Walk over all compiled methods and set the entry points of these
// methods to interpreter.
{
@@ -500,7 +573,6 @@
}
}
}
- GetLiveBitmap()->Bitmap::Clear();
// Free all profiling info.
for (ProfilingInfo* info : profiling_infos_) {
@@ -509,8 +581,9 @@
}
profiling_infos_.clear();
- collection_in_progress_ = false;
- lock_cond_.Broadcast(self);
+ live_bitmap_.reset(nullptr);
+ has_done_one_collection_ = true;
+ NotifyCollectionDone(self);
}
if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
@@ -589,5 +662,20 @@
return info;
}
+// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
+// is already held.
+void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
+ if (code_mspace_ == mspace) {
+ size_t result = code_end_;
+ code_end_ += increment;
+ return reinterpret_cast<void*>(result + code_map_->Begin());
+ } else {
+ DCHECK_EQ(data_mspace_, mspace);
+ size_t result = data_end_;
+ data_end_ += increment;
+ return reinterpret_cast<void*>(result + data_map_->Begin());
+ }
+}
+
} // namespace jit
} // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 131446c..13481e0 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -41,20 +41,20 @@
class JitInstrumentationCache;
-// Alignment that will suit all architectures.
+// Alignment in bits that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
class JitCodeCache {
public:
- static constexpr size_t kMaxCapacity = 1 * GB;
+ static constexpr size_t kMaxCapacity = 64 * MB;
// Put the default to a very low amount for debug builds to stress the code cache
// collection.
- static constexpr size_t kDefaultCapacity = kIsDebugBuild ? 20 * KB : 2 * MB;
+ static constexpr size_t kInitialCapacity = kIsDebugBuild ? 16 * KB : 64 * KB;
// Create the code cache with a code + data capacity equal to "capacity", error message is passed
// in the out arg error_msg.
- static JitCodeCache* Create(size_t capacity, std::string* error_msg);
+ static JitCodeCache* Create(size_t initial_capacity, size_t max_capacity, std::string* error_msg);
// Number of bytes allocated in the code cache.
size_t CodeCacheSize() REQUIRES(!lock_);
@@ -133,9 +133,19 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
+ return mspace == code_mspace_ || mspace == data_mspace_;
+ }
+
+ void* MoreCore(const void* mspace, intptr_t increment);
+
private:
- // Take ownership of code_mem_map.
- JitCodeCache(MemMap* code_map, MemMap* data_map);
+ // Take ownership of maps.
+ JitCodeCache(MemMap* code_map,
+ MemMap* data_map,
+ size_t initial_code_capacity,
+ size_t initial_data_capacity,
+ size_t max_capacity);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
@@ -172,6 +182,16 @@
// Number of bytes allocated in the data cache.
size_t DataCacheSizeLocked() REQUIRES(lock_);
+ // Notify all waiting threads that a collection is done.
+ void NotifyCollectionDone(Thread* self) REQUIRES(lock_);
+
+ // Try to increase the current capacity of the code cache. Return whether we
+ // succeeded at doing so.
+ bool IncreaseCodeCacheCapacity() REQUIRES(lock_);
+
+ // Set the footprint limit of the code cache.
+ void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
@@ -193,6 +213,21 @@
// ProfilingInfo objects we have allocated.
std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
+ // The maximum capacity in bytes this code cache can go to.
+ size_t max_capacity_ GUARDED_BY(lock_);
+
+ // The current capacity in bytes of the code cache.
+ size_t current_capacity_ GUARDED_BY(lock_);
+
+ // The current footprint in bytes of the code portion of the code cache.
+ size_t code_end_ GUARDED_BY(lock_);
+
+ // The current footprint in bytes of the data portion of the code cache.
+ size_t data_end_ GUARDED_BY(lock_);
+
+ // Whether a collection has already been done on the current capacity.
+ bool has_done_one_collection_ GUARDED_BY(lock_);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index ae16c7f..2e1fc95 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -152,9 +152,12 @@
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
.IntoKey(M::UseJIT)
- .Define("-Xjitcodecachesize:_")
+ .Define("-Xjitcodecacheinitialcapacity:_")
.WithType<MemoryKiB>()
- .IntoKey(M::JITCodeCacheCapacity)
+ .IntoKey(M::JITCodeCacheInitialCapacity)
+ .Define("-Xjitcodecachemaxcapacity:_")
+ .WithType<MemoryKiB>()
+ .IntoKey(M::JITCodeCacheMaxCapacity)
.Define("-Xjitthreshold:_")
.WithType<unsigned int>()
.IntoKey(M::JITCompileThreshold)
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 3489834..9051eda 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -69,7 +69,8 @@
RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold, jit::Jit::kDefaultWarmupThreshold)
-RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheCapacity, jit::JitCodeCache::kDefaultCapacity)
+RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
+RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
HSpaceCompactForOOMMinIntervalsMs,\
MsToNs(100 * 1000)) // 100s