Move the memory-related regions of the JIT cache into their own class.
- Move the JIT lock into the global locks list.
- The JIT cache now has two regions: one inherited from the zygote, and its own.
Bug: 119800099
Test: boot, test.py
Change-Id: I833ba2b5a3d3c10fa01286cdf52603fa174a2cc7
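
Context for the diff below: JitCodeCache's memory management (mappings, mspaces, capacities, used-memory counters) moves behind two region objects, private_region_ for the process's own cache and shared_region_ for the mappings inherited from the zygote, and the per-cache lock_ is replaced by the global Locks::jit_lock_. The region class itself is not part of this file; judging only from the calls made on private_region_ below, its public surface is presumably something like the sketch that follows (the class name, the header, and any member not actually called in this diff are assumptions):

    // Hypothetical sketch of the region class introduced by this change; the
    // real name and header are not visible in this diff.
    #include <cstddef>
    #include <cstdint>
    #include <string>

    class MemMap;  // ART's memory mapping wrapper (art::MemMap).

    class JitMemoryRegion {  // Name assumed, not taken from this diff.
     public:
      // Capacity bookkeeping, previously JitCodeCache::InitializeState().
      void InitializeState(size_t initial_capacity, size_t max_capacity);
      // Creates the data/exec (and optional non-exec) mappings, previously
      // JitCodeCache::InitializeMappings().
      bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg);
      // Sets up the dlmalloc mspaces on the mappings, previously
      // JitCodeCache::InitializeSpaces().
      void InitializeSpaces();

      // Allocation, previously JitCodeCache::{Allocate,Free}{Code,Data}() and MoreCore().
      uint8_t* AllocateCode(size_t size);
      void FreeCode(uint8_t* code);
      uint8_t* AllocateData(size_t size);
      void FreeData(uint8_t* data);
      void* MoreCore(const void* mspace, intptr_t increment);

      // Queries the rewritten call sites below rely on.
      bool IsInExecSpace(const void* ptr) const;
      bool HasDualCodeMapping() const;
      const MemMap* GetUpdatableCodeMapping() const;
      const MemMap* GetExecPages() const;
      template <typename T> T* GetExecutableAddress(T* src_ptr);
      size_t GetCurrentCapacity() const;
      size_t GetMaxCapacity() const;
      size_t GetUsedMemoryForCode() const;
      size_t GetUsedMemoryForData() const;
      bool IncreaseCodeCacheCapacity();
    };
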
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2505111..f430d58 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,7 +19,6 @@
#include <sstream>
#include <android-base/logging.h>
-#include <android-base/unique_fd.h>
#include "arch/context.h"
#include "art_method-inl.h"
@@ -47,6 +46,7 @@
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
+#include "jit/jit_scoped_code_cache_write.h"
#include "linear_alloc.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
@@ -57,37 +57,12 @@
#include "thread-current-inl.h"
#include "thread_list.h"
-using android::base::unique_fd;
-
namespace art {
namespace jit {
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
-// Data cache will be half of the capacity
-// Code cache will be the other half of the capacity.
-// TODO: Make this variable?
-static constexpr size_t kCodeAndDataCapacityDivider = 2;
-
-static constexpr int kProtR = PROT_READ;
-static constexpr int kProtRW = PROT_READ | PROT_WRITE;
-static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
-static constexpr int kProtRX = PROT_READ | PROT_EXEC;
-
-namespace {
-
-// Translate an address belonging to one memory map into an address in a second. This is useful
-// when there are two virtual memory ranges for the same physical memory range.
-template <typename T>
-T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
- CHECK(src.HasAddress(src_ptr));
- uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
- return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
-}
-
-} // namespace
-
class JitCodeCache::JniStubKey {
public:
explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -189,171 +164,6 @@
std::vector<ArtMethod*> methods_;
};
-bool JitCodeCache::InitializeMappings(bool rwx_memory_allowed,
- bool is_zygote,
- std::string* error_msg) {
- ScopedTrace trace(__PRETTY_FUNCTION__);
-
- const size_t capacity = max_capacity_;
- const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
- const size_t exec_capacity = capacity - data_capacity;
-
- // File descriptor enabling dual-view mapping of code section.
- unique_fd mem_fd;
-
- // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
- // for it.
- if (!is_zygote) {
- // Bionic supports memfd_create, but the call may fail on older kernels.
- mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
- if (mem_fd.get() < 0) {
- std::ostringstream oss;
- oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
- if (!rwx_memory_allowed) {
- // Without using RWX page permissions, the JIT cannot fall back to a single mapping, as it
- // requires transitioning the code pages to RWX for updates.
- *error_msg = oss.str();
- return false;
- }
- VLOG(jit) << oss.str();
- }
- }
-
- if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
- std::ostringstream oss;
- oss << "Failed to initialize memory file: " << strerror(errno);
- *error_msg = oss.str();
- return false;
- }
-
- std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
- std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
-
- std::string error_str;
- // Map name specific for android_os_Debug.cpp accounting.
- // Map in low 4gb to simplify accessing root tables for x86_64.
- // We could do PC-relative addressing to avoid this problem, but that
- // would require reserving code and data area before submitting, which
- // means more windows for the code memory to be RWX.
- int base_flags;
- MemMap data_pages;
- if (mem_fd.get() >= 0) {
- // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
- // for data and non-writable view of JIT code pages. We use the memory file descriptor to
- // enable dual mapping - we'll create a second mapping using the descriptor below. The
- // mappings will look like:
- //
- // VA PA
- //
- // +---------------+
- // | non exec code |\
- // +---------------+ \
- // : :\ \
- // +---------------+.\.+---------------+
- // | exec code | \| code |
- // +---------------+...+---------------+
- // | data | | data |
- // +---------------+...+---------------+
- //
- // In this configuration code updates are written to the non-executable view of the code
- // cache, and the executable view of the code cache has fixed RX memory protections.
- //
- // This memory needs to be mapped shared as the code portions will have two mappings.
- base_flags = MAP_SHARED;
- data_pages = MemMap::MapFile(
- data_capacity + exec_capacity,
- kProtRW,
- base_flags,
- mem_fd,
- /* start= */ 0,
- /* low_4gb= */ true,
- data_cache_name.c_str(),
- &error_str);
- } else {
- // Single view of JIT code cache case. Create an initial mapping of data pages large enough
- // for data and JIT code pages. The mappings will look like:
- //
- // VA PA
- //
- // +---------------+...+---------------+
- // | exec code | | code |
- // +---------------+...+---------------+
- // | data | | data |
- // +---------------+...+---------------+
- //
- // In this configuration code updates are written to the executable view of the code cache,
- // and the executable view of the code cache transitions RX to RWX for the update and then
- // back to RX after the update.
- base_flags = MAP_PRIVATE | MAP_ANON;
- data_pages = MemMap::MapAnonymous(
- data_cache_name.c_str(),
- data_capacity + exec_capacity,
- kProtRW,
- /* low_4gb= */ true,
- &error_str);
- }
-
- if (!data_pages.IsValid()) {
- std::ostringstream oss;
- oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
- *error_msg = oss.str();
- return false;
- }
-
- MemMap exec_pages;
- MemMap non_exec_pages;
- if (exec_capacity > 0) {
- uint8_t* const divider = data_pages.Begin() + data_capacity;
- // Set initial permission for executable view to catch any SELinux permission problems early
- // (for processes that cannot map WX pages). Otherwise, this region does not need to be
- // executable as there is no code in the cache yet.
- exec_pages = data_pages.RemapAtEnd(divider,
- exec_cache_name.c_str(),
- kProtRX,
- base_flags | MAP_FIXED,
- mem_fd.get(),
- (mem_fd.get() >= 0) ? data_capacity : 0,
- &error_str);
- if (!exec_pages.IsValid()) {
- std::ostringstream oss;
- oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
- *error_msg = oss.str();
- return false;
- }
-
- if (mem_fd.get() >= 0) {
- // For dual view, create the secondary view of code memory used for updating code. This view
- // is never executable.
- std::string name = exec_cache_name + "-rw";
- non_exec_pages = MemMap::MapFile(exec_capacity,
- kProtR,
- base_flags,
- mem_fd,
- /* start= */ data_capacity,
- /* low_4GB= */ false,
- name.c_str(),
- &error_str);
- if (!non_exec_pages.IsValid()) {
- static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
- if (rwx_memory_allowed) {
- // Log and continue as single view JIT (requires RWX memory).
- VLOG(jit) << kFailedNxView;
- } else {
- *error_msg = kFailedNxView;
- return false;
- }
- }
- }
- } else {
- // Profiling only. No memory for code required.
- }
-
- data_pages_ = std::move(data_pages);
- exec_pages_ = std::move(exec_pages);
- non_exec_pages_ = std::move(non_exec_pages);
- return true;
-}
-
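
The dual-view scheme described in the removed comment and diagram above (and presumably implemented by the region class from now on) boils down to mapping one memfd-backed buffer twice: a shared RW view that code updates are written through, and a shared RX view that the CPU executes from. A minimal standalone sketch of the idea, not ART code and Linux-only (SELinux or kernel policy can still forbid the executable mapping):

    // Standalone illustration of dual-view JIT memory: one physical buffer,
    // a writable view for patching and an executable view for running.
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t size = 4096;
      // memfd_create(2) via syscall() so this builds against older libcs.
      int fd = static_cast<int>(syscall(SYS_memfd_create, "jit-sketch", 0));
      if (fd < 0 || ftruncate(fd, static_cast<off_t>(size)) != 0) {
        std::perror("memfd");
        return 1;
      }
      // Writable, never-executable view: the compiler writes code here.
      void* rw_view = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      // Executable, never-writable view: the CPU runs code from here.
      void* rx_view = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
      if (rw_view == MAP_FAILED || rx_view == MAP_FAILED) {
        std::perror("mmap");
        return 1;
      }
      // Both views alias the same physical pages, so a byte stored through
      // the RW view is visible when read back through the RX view.
      static_cast<unsigned char*>(rw_view)[0] = 0xC3;  // x86 'ret', as an example byte
      std::printf("rx view sees: %#x\n", static_cast<unsigned char*>(rx_view)[0]);
      return 0;
    }
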
JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
bool rwx_memory_allowed,
bool is_zygote,
@@ -385,19 +195,20 @@
std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
- MutexLock mu(Thread::Current(), jit_code_cache->lock_);
- jit_code_cache->InitializeState(initial_capacity, max_capacity);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ jit_code_cache->private_region_.InitializeState(initial_capacity, max_capacity);
// Zygote should never collect code to share the memory with the children.
if (is_zygote) {
jit_code_cache->garbage_collect_code_ = false;
}
- if (!jit_code_cache->InitializeMappings(rwx_memory_allowed, is_zygote, error_msg)) {
+ if (!jit_code_cache->private_region_.InitializeMappings(
+ rwx_memory_allowed, is_zygote, error_msg)) {
return nullptr;
}
- jit_code_cache->InitializeSpaces();
+ jit_code_cache->private_region_.InitializeSpaces();
VLOG(jit) << "Created jit code cache: initial capacity="
<< PrettySize(initial_capacity)
@@ -408,82 +219,24 @@
}
JitCodeCache::JitCodeCache()
- : lock_("Jit code cache", kJitCodeCacheLock),
- lock_cond_("Jit code cache condition variable", lock_),
+ : is_weak_access_enabled_(true),
+ inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
+ lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
collection_in_progress_(false),
last_collection_increased_code_cache_(false),
garbage_collect_code_(true),
- used_memory_for_data_(0),
- used_memory_for_code_(0),
number_of_compilations_(0),
number_of_osr_compilations_(0),
number_of_collections_(0),
histogram_stack_map_memory_use_("Memory used for stack maps", 16),
histogram_code_memory_use_("Memory used for compiled code", 16),
- histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
- is_weak_access_enabled_(true),
- inline_cache_cond_("Jit inline cache condition variable", lock_),
- zygote_data_pages_(),
- zygote_exec_pages_(),
- zygote_data_mspace_(nullptr),
- zygote_exec_mspace_(nullptr) {
-}
-
-void JitCodeCache::InitializeState(size_t initial_capacity, size_t max_capacity) {
- CHECK_GE(max_capacity, initial_capacity);
- CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
- // Align both capacities to page size, as that's the unit mspaces use.
- initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
- max_capacity = RoundDown(max_capacity, 2 * kPageSize);
-
- used_memory_for_data_ = 0;
- used_memory_for_code_ = 0;
- number_of_compilations_ = 0;
- number_of_osr_compilations_ = 0;
- number_of_collections_ = 0;
-
- data_pages_ = MemMap();
- exec_pages_ = MemMap();
- non_exec_pages_ = MemMap();
- initial_capacity_ = initial_capacity;
- max_capacity_ = max_capacity;
- current_capacity_ = initial_capacity,
- data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
- exec_end_ = initial_capacity - data_end_;
-}
-
-void JitCodeCache::InitializeSpaces() {
- // Initialize the data heap
- data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
- CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
-
- // Initialize the code heap
- MemMap* code_heap = nullptr;
- if (non_exec_pages_.IsValid()) {
- code_heap = &non_exec_pages_;
- } else if (exec_pages_.IsValid()) {
- code_heap = &exec_pages_;
- }
- if (code_heap != nullptr) {
- // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
- // heap, will take and initialize pages in create_mspace_with_base().
- CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
- exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
- CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
- SetFootprintLimit(initial_capacity_);
- // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
- // perform the update and there are no other times write access is required.
- CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
- } else {
- exec_mspace_ = nullptr;
- SetFootprintLimit(initial_capacity_);
- }
+ histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
}
JitCodeCache::~JitCodeCache() {}
bool JitCodeCache::ContainsPc(const void* ptr) const {
- return exec_pages_.HasAddress(ptr) || zygote_exec_pages_.HasAddress(ptr);
+ return private_region_.IsInExecSpace(ptr) || shared_region_.IsInExecSpace(ptr);
}
bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -498,7 +251,7 @@
}
bool JitCodeCache::ContainsMethod(ArtMethod* method) {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end() &&
@@ -518,7 +271,7 @@
const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
DCHECK(method->IsNative());
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end()) {
JniStubData& data = it->second;
@@ -568,34 +321,6 @@
return nullptr;
}
-class ScopedCodeCacheWrite : ScopedTrace {
- public:
- explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache)
- : ScopedTrace("ScopedCodeCacheWrite"),
- code_cache_(code_cache) {
- ScopedTrace trace("mprotect all");
- const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
- if (updatable_pages != nullptr) {
- int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
- CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
- }
- }
-
- ~ScopedCodeCacheWrite() {
- ScopedTrace trace("mprotect code");
- const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
- if (updatable_pages != nullptr) {
- int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
- CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
- }
- }
-
- private:
- const JitCodeCache* const code_cache_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
-};
-
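
ScopedCodeCacheWrite is not deleted by this change; it moves into jit/jit_scoped_code_cache_write.h (see the new include at the top of this diff) and is constructed with the region rather than the whole cache. A sketch of its presumed shape after the move, reusing the removed body together with ART's ScopedTrace, CheckedCall and kProt* helpers (so it is not standalone code, and the region accessor names are assumptions):

    // Presumed shape of the relocated RAII helper: the only substantive change
    // is that the updatable mapping is looked up on the region, not the cache.
    class ScopedCodeCacheWrite : ScopedTrace {
     public:
      explicit ScopedCodeCacheWrite(const JitMemoryRegion& region)
          : ScopedTrace("ScopedCodeCacheWrite"),
            region_(region) {
        ScopedTrace trace("mprotect all");
        const MemMap* const updatable_pages = region_.GetUpdatableCodeMapping();
        if (updatable_pages != nullptr) {
          int prot = region_.HasDualCodeMapping() ? kProtRW : kProtRWX;
          CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
        }
      }

      ~ScopedCodeCacheWrite() {
        ScopedTrace trace("mprotect code");
        const MemMap* const updatable_pages = region_.GetUpdatableCodeMapping();
        if (updatable_pages != nullptr) {
          int prot = region_.HasDualCodeMapping() ? kProtR : kProtRX;
          CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
        }
      }

     private:
      const JitMemoryRegion& region_;

      DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
    };
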
uint8_t* JitCodeCache::CommitCode(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
@@ -741,7 +466,7 @@
}
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
for (const auto& entry : method_code_map_) {
uint32_t number_of_roots = 0;
uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
@@ -790,15 +515,10 @@
// It does nothing if we are not using native debugger.
RemoveNativeDebugInfoForJit(Thread::Current(), code_ptr);
if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
- FreeData(GetRootTable(code_ptr));
+ private_region_.FreeData(GetRootTable(code_ptr));
} // else this is a JNI stub without any data.
- uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
- if (HasDualCodeMapping()) {
- code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
- }
-
- FreeCode(code_allocation);
+ private_region_.FreeCode(reinterpret_cast<uint8_t*>(allocation));
}
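
One subtlety hidden by this simplification: the executable-to-writable address translation that used to happen here has to live inside the region's FreeCode() now, since callers hand it a pointer into the executable view while the mspace owns the writable view. A sketch of the presumed region-side implementation (GetNonExecutableAddress() is a hypothetical helper name):

    // Assumed shape, combining the removed JitCodeCache::FreeCode() with the
    // TranslateAddress() call deleted just above.
    void JitMemoryRegion::FreeCode(uint8_t* code) {
      code = GetNonExecutableAddress(code);  // exec view -> writable view when dual-mapped
      used_memory_for_code_ -= mspace_usable_size(code);
      mspace_free(exec_mspace_, code);
    }
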
void JitCodeCache::FreeAllMethodHeaders(
@@ -807,14 +527,14 @@
// first since once we do FreeCode() below, the memory can be reused
// so it's possible for the same method_header to start representing
// different compile code.
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
{
MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
->RemoveDependentsWithMethodHeaders(method_headers);
}
- ScopedCodeCacheWrite scc(this);
+ ScopedCodeCacheWrite scc(private_region_);
for (const OatQuickMethodHeader* method_header : method_headers) {
FreeCodeAndData(method_header->GetCode());
}
@@ -828,12 +548,12 @@
// the CHA dependency map just once with an unordered_set.
std::unordered_set<OatQuickMethodHeader*> method_headers;
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
// We do not check if a code cache GC is in progress, as this method comes
// with the classlinker_classes_lock_ held, and suspending ourselves could
// lead to a deadlock.
{
- ScopedCodeCacheWrite scc(this);
+ ScopedCodeCacheWrite scc(private_region_);
for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
it->second.RemoveMethodsIn(alloc);
if (it->second.GetMethods().empty()) {
@@ -866,7 +586,7 @@
ProfilingInfo* info = *it;
if (alloc.ContainsUnsafe(info->GetMethod())) {
info->GetMethod()->SetProfilingInfo(nullptr);
- FreeData(reinterpret_cast<uint8_t*>(info));
+ private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
it = profiling_infos_.erase(it);
} else {
++it;
@@ -887,7 +607,7 @@
return;
}
ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
while (!IsWeakAccessEnabled(self)) {
inline_cache_cond_.Wait(self);
}
@@ -895,7 +615,7 @@
void JitCodeCache::BroadcastForInlineCacheAccess() {
Thread* self = Thread::Current();
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
inline_cache_cond_.Broadcast(self);
}
@@ -940,23 +660,13 @@
void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
while (collection_in_progress_) {
- lock_.Unlock(self);
+ Locks::jit_lock_->Unlock(self);
{
ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
WaitForPotentialCollectionToComplete(self);
}
- lock_.Lock(self);
- }
-}
-
-const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
- if (HasDualCodeMapping()) {
- return &non_exec_pages_;
- } else if (HasCodeMapping()) {
- return &exec_pages_;
- } else {
- return nullptr;
+ Locks::jit_lock_->Lock(self);
}
}
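
GetUpdatableCodeMapping() presumably moves to the region unchanged apart from member renames, still answering "which view may be written to": the non-executable view when dual-mapped, the single executable view otherwise, or nothing in the profiling-only configuration. For reference, the assumed region-side version:

    // Assumed equivalent of the helper deleted above.
    const MemMap* JitMemoryRegion::GetUpdatableCodeMapping() const {
      if (HasDualCodeMapping()) {
        return &non_exec_pages_;
      } else if (HasCodeMapping()) {
        return &exec_pages_;
      } else {
        return nullptr;
      }
    }
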
@@ -983,12 +693,12 @@
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
// We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
// finish.
WaitForPotentialCollectionToCompleteRunnable(self);
{
- ScopedCodeCacheWrite scc(this);
+ ScopedCodeCacheWrite scc(private_region_);
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
@@ -997,7 +707,7 @@
// AllocateCode allocates memory in non-executable region for alignment header and code. The
// header size may include alignment padding.
- uint8_t* nox_memory = AllocateCode(total_size);
+ uint8_t* nox_memory = private_region_.AllocateCode(total_size);
if (nox_memory == nullptr) {
return nullptr;
}
@@ -1008,9 +718,7 @@
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
// From here code_ptr points to executable code.
- if (HasDualCodeMapping()) {
- code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
- }
+ code_ptr = private_region_.GetExecutableAddress(code_ptr);
new (method_header) OatQuickMethodHeader(
(stack_map != nullptr) ? code_ptr - stack_map : 0u,
@@ -1022,9 +730,7 @@
}
// Update method_header pointer to executable code region.
- if (HasDualCodeMapping()) {
- method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
- }
+ method_header = private_region_.GetExecutableAddress(method_header);
// Both instruction and data caches need flushing to the point of unification where both share
// a common view of memory. Flushing the data cache ensures the dirty cachelines from the
@@ -1041,7 +747,7 @@
// For reference, this behavior is caused by this commit:
// https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
//
- if (HasDualCodeMapping()) {
+ if (private_region_.HasDualCodeMapping()) {
// Flush the data cache lines associated with the non-executable copy of the code just added.
FlushDataCache(nox_memory, nox_memory + total_size);
}
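
GetExecutableAddress() presumably folds the deleted TranslateAddress() helper together with the HasDualCodeMapping() checks removed at the call sites: a no-op in the single-mapping case, a writable-view to executable-view translation otherwise. A sketch under that assumption:

    // Assumed implementation; mirrors the TranslateAddress() helper removed at
    // the top of this file.
    template <typename T>
    T* JitMemoryRegion::GetExecutableAddress(T* src_ptr) {
      if (!HasDualCodeMapping()) {
        return src_ptr;  // Single mapping: the pointer is already in the executable view.
      }
      CHECK(non_exec_pages_.HasAddress(src_ptr));
      uint8_t* const raw_ptr = reinterpret_cast<uint8_t*>(src_ptr);
      return reinterpret_cast<T*>(raw_ptr - non_exec_pages_.Begin() + exec_pages_.Begin());
    }
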
@@ -1162,7 +868,7 @@
}
size_t JitCodeCache::CodeCacheSize() {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
return CodeCacheSizeLocked();
}
@@ -1170,7 +876,7 @@
// This function is used only for testing and only with non-native methods.
CHECK(!method->IsNative());
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
bool osr = osr_code_map_.find(method) != osr_code_map_.end();
bool in_cache = RemoveMethodLocked(method, release_memory);
@@ -1200,7 +906,7 @@
}
bool in_cache = false;
- ScopedCodeCacheWrite ccw(this);
+ ScopedCodeCacheWrite ccw(private_region_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
@@ -1240,7 +946,7 @@
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be in any thread's call stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
RemoveMethodLocked(method, /* release_memory= */ true);
}
@@ -1251,7 +957,7 @@
// shouldn't be used since it is no longer logically in the jit code cache.
// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
if (old_method->IsNative()) {
// Update methods in jni_stubs_map_.
for (auto& entry : jni_stubs_map_) {
@@ -1288,7 +994,7 @@
}
void JitCodeCache::ClearEntryPointsInZygoteExecSpace() {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
// Iterate over profiling infos to know which methods may have been JITted. Note that
// to be JITted, a method must have a profiling info.
for (ProfilingInfo* info : profiling_infos_) {
@@ -1306,24 +1012,24 @@
}
size_t JitCodeCache::CodeCacheSizeLocked() {
- return used_memory_for_code_;
+ return private_region_.GetUsedMemoryForCode();
}
size_t JitCodeCache::DataCacheSize() {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
return DataCacheSizeLocked();
}
size_t JitCodeCache::DataCacheSizeLocked() {
- return used_memory_for_data_;
+ return private_region_.GetUsedMemoryForData();
}
void JitCodeCache::ClearData(Thread* self,
uint8_t* stack_map_data,
uint8_t* roots_data) {
DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
- MutexLock mu(self, lock_);
- FreeData(reinterpret_cast<uint8_t*>(roots_data));
+ MutexLock mu(self, *Locks::jit_lock_);
+ private_region_.FreeData(reinterpret_cast<uint8_t*>(roots_data));
}
size_t JitCodeCache::ReserveData(Thread* self,
@@ -1338,21 +1044,21 @@
{
ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
WaitForPotentialCollectionToComplete(self);
- result = AllocateData(size);
+ result = private_region_.AllocateData(size);
}
if (result == nullptr) {
// Retry.
GarbageCollectCache(self);
ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
WaitForPotentialCollectionToComplete(self);
- result = AllocateData(size);
+ result = private_region_.AllocateData(size);
}
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
histogram_stack_map_memory_use_.AddValue(size);
if (size > kStackMapSizeLogThreshold) {
LOG(INFO) << "JIT allocated "
@@ -1429,40 +1135,6 @@
lock_cond_.Broadcast(self);
}
-void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
- size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
- DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
- DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
- mspace_set_footprint_limit(data_mspace_, data_space_footprint);
- if (HasCodeMapping()) {
- ScopedCodeCacheWrite scc(this);
- mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
- }
-}
-
-bool JitCodeCache::IncreaseCodeCacheCapacity() {
- if (current_capacity_ == max_capacity_) {
- return false;
- }
-
- // Double the capacity if we're below 1MB, or increase it by 1MB if
- // we're above.
- if (current_capacity_ < 1 * MB) {
- current_capacity_ *= 2;
- } else {
- current_capacity_ += 1 * MB;
- }
- if (current_capacity_ > max_capacity_) {
- current_capacity_ = max_capacity_;
- }
-
- VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
-
- SetFootprintLimit(current_capacity_);
-
- return true;
-}
-
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
Barrier barrier(0);
size_t threads_running_checkpoint = 0;
@@ -1477,10 +1149,10 @@
}
bool JitCodeCache::ShouldDoFullCollection() {
- if (current_capacity_ == max_capacity_) {
+ if (private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity()) {
// Always do a full collection when the code cache is full.
return true;
- } else if (current_capacity_ < kReservedCapacity) {
+ } else if (private_region_.GetCurrentCapacity() < kReservedCapacity) {
// Always do partial collection when the code cache size is below the reserved
// capacity.
return false;
@@ -1498,9 +1170,9 @@
// Wait for an existing collection, or let everyone know we are starting one.
{
ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
if (!garbage_collect_code_) {
- IncreaseCodeCacheCapacity();
+ private_region_.IncreaseCodeCacheCapacity();
return;
} else if (WaitForPotentialCollectionToComplete(self)) {
return;
@@ -1508,8 +1180,9 @@
number_of_collections_++;
live_bitmap_.reset(CodeCacheBitmap::Create(
"code-cache-bitmap",
- reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
- reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
+ reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
+ reinterpret_cast<uintptr_t>(
+ private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
collection_in_progress_ = true;
}
}
@@ -1520,7 +1193,7 @@
bool do_full_collection = false;
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
do_full_collection = ShouldDoFullCollection();
}
@@ -1537,7 +1210,7 @@
<< ", data=" << PrettySize(DataCacheSize());
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
// Increase the code cache only when we do partial collections.
// TODO: base this strategy on how full the code cache is?
@@ -1545,7 +1218,7 @@
last_collection_increased_code_cache_ = false;
} else {
last_collection_increased_code_cache_ = true;
- IncreaseCodeCacheCapacity();
+ private_region_.IncreaseCodeCacheCapacity();
}
bool next_collection_will_be_full = ShouldDoFullCollection();
@@ -1597,8 +1270,8 @@
ScopedTrace trace(__FUNCTION__);
std::unordered_set<OatQuickMethodHeader*> method_headers;
{
- MutexLock mu(self, lock_);
- ScopedCodeCacheWrite scc(this);
+ MutexLock mu(self, *Locks::jit_lock_);
+ ScopedCodeCacheWrite scc(private_region_);
// Iterate over all compiled code and remove entries that are not marked.
for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
JniStubData* data = &it->second;
@@ -1627,13 +1300,13 @@
}
bool JitCodeCache::GetGarbageCollectCode() {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
return garbage_collect_code_;
}
void JitCodeCache::SetGarbageCollectCode(bool value) {
Thread* self = Thread::Current();
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
if (garbage_collect_code_ != value) {
if (garbage_collect_code_) {
// When dynamically disabling the garbage collection, we need
@@ -1652,7 +1325,7 @@
void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
ScopedTrace trace(__FUNCTION__);
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
if (collect_profiling_info) {
// Clear the profiling info of methods that do not have compiled code as entrypoint.
// Also remove the saved entry point from the ProfilingInfo objects.
@@ -1722,7 +1395,7 @@
RemoveUnmarkedCode(self);
if (collect_profiling_info) {
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
// Free all profiling infos of methods not compiled nor being compiled.
auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
[this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
@@ -1738,7 +1411,7 @@
info->GetMethod()->SetProfilingInfo(info);
} else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
// No need for this ProfilingInfo object anymore.
- FreeData(reinterpret_cast<uint8_t*>(info));
+ private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
return true;
}
return false;
@@ -1762,7 +1435,7 @@
CHECK(method != nullptr);
}
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
OatQuickMethodHeader* method_header = nullptr;
ArtMethod* found_method = nullptr; // Only for DCHECK(), not for JNI stubs.
if (method != nullptr && UNLIKELY(method->IsNative())) {
@@ -1811,7 +1484,7 @@
}
OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
auto it = osr_code_map_.find(method);
if (it == osr_code_map_.end()) {
return nullptr;
@@ -1829,19 +1502,19 @@
if (!retry_allocation) {
// If we are allocating for the interpreter, just try to lock, to avoid
// lock contention with the JIT.
- if (lock_.ExclusiveTryLock(self)) {
+ if (Locks::jit_lock_->ExclusiveTryLock(self)) {
info = AddProfilingInfoInternal(self, method, entries);
- lock_.ExclusiveUnlock(self);
+ Locks::jit_lock_->ExclusiveUnlock(self);
}
} else {
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
info = AddProfilingInfoInternal(self, method, entries);
}
if (info == nullptr) {
GarbageCollectCache(self);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
info = AddProfilingInfoInternal(self, method, entries);
}
}
@@ -1861,7 +1534,7 @@
return info;
}
- uint8_t* data = AllocateData(profile_info_size);
+ uint8_t* data = private_region_.AllocateData(profile_info_size);
if (data == nullptr) {
return nullptr;
}
@@ -1877,28 +1550,15 @@
return info;
}
-// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
-// is already held.
-void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
- if (mspace == exec_mspace_) {
- DCHECK(exec_mspace_ != nullptr);
- const MemMap* const code_pages = GetUpdatableCodeMapping();
- void* result = code_pages->Begin() + exec_end_;
- exec_end_ += increment;
- return result;
- } else {
- DCHECK_EQ(data_mspace_, mspace);
- void* result = data_pages_.Begin() + data_end_;
- data_end_ += increment;
- return result;
- }
+void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
+ return private_region_.MoreCore(mspace, increment);
}
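
MoreCore() stays the morecore hook that dlmalloc calls to grow either mspace; the bump-pointer logic deleted here presumably moves into the region as-is, e.g.:

    // Assumed region-side hook, mirroring the code removed above: grow the
    // requesting space by bumping its end offset within the reserved mapping.
    void* JitMemoryRegion::MoreCore(const void* mspace, intptr_t increment) {
      if (mspace == exec_mspace_) {
        DCHECK(exec_mspace_ != nullptr);
        const MemMap* const code_pages = GetUpdatableCodeMapping();
        void* result = code_pages->Begin() + exec_end_;
        exec_end_ += increment;
        return result;
      }
      DCHECK_EQ(data_mspace_, mspace);
      void* result = data_pages_.Begin() + data_end_;
      data_end_ += increment;
      return result;
    }
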
void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
std::vector<ProfileMethodInfo>& methods) {
Thread* self = Thread::Current();
WaitUntilInlineCacheAccessible(self);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
ScopedTrace trace(__FUNCTION__);
uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
for (const ProfilingInfo* info : profiling_infos_) {
@@ -1979,7 +1639,7 @@
}
bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
return osr_code_map_.find(method) != osr_code_map_.end();
}
@@ -2002,7 +1662,7 @@
}
}
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
return false;
}
@@ -2063,7 +1723,7 @@
}
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
if (!info->IncrementInlineUse()) {
@@ -2075,7 +1735,7 @@
}
void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
DCHECK(info != nullptr);
info->DecrementInlineUse();
@@ -2083,7 +1743,7 @@
void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
DCHECK_EQ(Thread::Current(), self);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
DCHECK(it != jni_stubs_map_.end());
@@ -2124,7 +1784,7 @@
method, GetQuickToInterpreterBridge());
ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
} else {
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
auto it = osr_code_map_.find(method);
if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
// Remove the OSR method, to avoid using it again.
@@ -2133,49 +1793,14 @@
}
}
-uint8_t* JitCodeCache::AllocateCode(size_t allocation_size) {
- // Each allocation should be on its own set of cache lines. The allocation must be large enough
- // for header, code, and any padding.
- uint8_t* result = reinterpret_cast<uint8_t*>(
- mspace_memalign(exec_mspace_, kJitCodeAlignment, allocation_size));
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
- size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
- // Ensure the header ends up at expected instruction alignment.
- DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
- used_memory_for_code_ += mspace_usable_size(result);
- return result;
-}
-
-void JitCodeCache::FreeCode(uint8_t* code) {
- if (IsInZygoteExecSpace(code)) {
- // No need to free, this is shared memory.
- return;
- }
- used_memory_for_code_ -= mspace_usable_size(code);
- mspace_free(exec_mspace_, code);
-}
-
-uint8_t* JitCodeCache::AllocateData(size_t data_size) {
- void* result = mspace_malloc(data_mspace_, data_size);
- used_memory_for_data_ += mspace_usable_size(result);
- return reinterpret_cast<uint8_t*>(result);
-}
-
-void JitCodeCache::FreeData(uint8_t* data) {
- if (IsInZygoteDataSpace(data)) {
- // No need to free, this is shared memory.
- return;
- }
- used_memory_for_data_ -= mspace_usable_size(data);
- mspace_free(data_mspace_, data);
-}
-
void JitCodeCache::Dump(std::ostream& os) {
- MutexLock mu(Thread::Current(), lock_);
- os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
- << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ os << "Current JIT code cache size: " << PrettySize(private_region_.GetUsedMemoryForCode())
+ << "\n"
+ << "Current JIT data cache size: " << PrettySize(private_region_.GetUsedMemoryForData())
+ << "\n"
<< "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
- << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
+ << "Current JIT capacity: " << PrettySize(private_region_.GetCurrentCapacity()) << "\n"
<< "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
<< "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
<< "Total number of JIT compilations: " << number_of_compilations_ << "\n"
@@ -2192,25 +1817,28 @@
// Don't transition if this is for a child zygote.
return;
}
- MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
- zygote_data_pages_ = std::move(data_pages_);
- zygote_exec_pages_ = std::move(exec_pages_);
- zygote_data_mspace_ = data_mspace_;
- zygote_exec_mspace_ = exec_mspace_;
+ shared_region_ = std::move(private_region_);
+
+ // Reset all statistics to be specific to this process.
+ number_of_compilations_ = 0;
+ number_of_osr_compilations_ = 0;
+ number_of_collections_ = 0;
size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
- InitializeState(initial_capacity, max_capacity);
+ private_region_.InitializeState(initial_capacity, max_capacity);
std::string error_msg;
- if (!InitializeMappings(/* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
+ if (!private_region_.InitializeMappings(
+ /* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
LOG(WARNING) << "Could not reset JIT state after zygote fork: " << error_msg;
return;
}
- InitializeSpaces();
+ private_region_.InitializeSpaces();
}
} // namespace jit