Use dlmalloc mspaces for the JIT code cache.
- Also tidy up some code in the JIT compiler.
- And mprotect the code space so that it is writable only while allocating
  (see the sketch below).
- Remove jit_code_cache_test, which exercised the removed bump-pointer API.
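
A minimal standalone sketch of the W^X discipline this adopts (illustrative
only, not ART code; the ScopedWritable name and the mmap setup are assumptions
of the sketch): the code region stays R+X and is flipped to R+W+X only for the
duration of a write, mirroring ScopedCodeCacheWrite in the diff.

    // wx_sketch.cc -- standalone illustration, not part of this change.
    #include <sys/mman.h>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static void Protect(void* base, size_t size, int prot) {
      if (mprotect(base, size, prot) != 0) {
        perror("mprotect");  // mprotect sets errno on failure.
        abort();
      }
    }

    // Same idea as ScopedCodeCacheWrite: RWX inside the scope, R+X on exit.
    class ScopedWritable {
     public:
      ScopedWritable(void* base, size_t size) : base_(base), size_(size) {
        Protect(base_, size_, PROT_READ | PROT_WRITE | PROT_EXEC);
      }
      ~ScopedWritable() { Protect(base_, size_, PROT_READ | PROT_EXEC); }
     private:
      void* const base_;
      const size_t size_;
    };

    int main() {
      const size_t kSize = 4096;  // One page is enough for the sketch.
      void* code = mmap(nullptr, kSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (code == MAP_FAILED) { perror("mmap"); return 1; }
      Protect(code, kSize, PROT_READ | PROT_EXEC);  // Steady state: not writable.
      {
        ScopedWritable writable(code, kSize);  // Writable only in this scope.
        memset(code, 0xC3, 16);                // "Commit" some code bytes.
      }
      munmap(code, kSize);
      return 0;
    }

The sketch assumes the kernel allows RWX anonymous mappings; kernels or
policies that enforce strict W^X would reject the initial mmap.
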
Change-Id: I46ea5c029aec489f2af63452de31db3736aebc20
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 3a0d814..b1572cc 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -56,7 +56,7 @@
mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
- CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
+ CHECK(mark_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
<< bitmap_index;
}
for (auto& freed : recent_freed_objects_) {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4c53162..7e95e71 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -25,37 +25,77 @@
namespace art {
namespace jit {
+static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtData = PROT_READ | PROT_WRITE;
+static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+
+#define CHECKED_MPROTECT(memory, size, prot) \
+ do { \
+ int rc = mprotect(memory, size, prot); \
+ if (UNLIKELY(rc != 0)) { \
+ PLOG(FATAL) << "Failed to mprotect jit code cache"; \
+ } \
+  } while (false)
+
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
CHECK_GT(capacity, 0U);
CHECK_LT(capacity, kMaxCapacity);
std::string error_str;
// The map name is used by android_os_Debug.cpp for memory accounting.
- MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
- PROT_READ | PROT_WRITE | PROT_EXEC, false, false, &error_str);
- if (map == nullptr) {
+ MemMap* data_map = MemMap::MapAnonymous(
+ "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
+ if (data_map == nullptr) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
*error_msg = oss.str();
return nullptr;
}
- return new JitCodeCache(map);
+
+  // Data cache is 1/4 of the map.
+ // TODO: Make this variable?
+ size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
+ size_t code_size = data_map->Size() - data_size;
+ uint8_t* divider = data_map->Begin() + data_size;
+
+  // Method headers in the code cache hold 32-bit offsets to entries in the data
+  // cache, so the two maps must lie within 4GB of each other; carving both out of
+  // one reservation guarantees this.
+ MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
+ if (code_map == nullptr) {
+ std::ostringstream oss;
+    oss << "Failed to create code cache: " << error_str << " size=" << capacity;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ DCHECK_EQ(code_map->Size(), code_size);
+ DCHECK_EQ(code_map->Begin(), divider);
+ return new JitCodeCache(code_map, data_map);
}
-JitCodeCache::JitCodeCache(MemMap* mem_map)
- : lock_("Jit code cache", kJitCodeCacheLock), num_methods_(0) {
- VLOG(jit) << "Created jit code cache size=" << PrettySize(mem_map->Size());
- mem_map_.reset(mem_map);
- uint8_t* divider = mem_map->Begin() + RoundUp(mem_map->Size() / 4, kPageSize);
- // Data cache is 1 / 4 of the map. TODO: Make this variable?
- // Put data at the start.
- data_cache_ptr_ = mem_map->Begin();
- data_cache_end_ = divider;
- data_cache_begin_ = data_cache_ptr_;
- mprotect(data_cache_ptr_, data_cache_end_ - data_cache_begin_, PROT_READ | PROT_WRITE);
- // Code cache after.
- code_cache_begin_ = divider;
- code_cache_ptr_ = divider;
- code_cache_end_ = mem_map->End();
+JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
+ : lock_("Jit code cache", kJitCodeCacheLock),
+ code_map_(code_map),
+ data_map_(data_map),
+ num_methods_(0) {
+ VLOG(jit) << "Created jit code cache: data size="
+ << PrettySize(data_map_->Size())
+ << ", code size="
+ << PrettySize(code_map_->Size());
+
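+  // Back each map with its own dlmalloc mspace; mspace_malloc then handles
+  // allocation and accounting instead of the previous bump pointers.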
+ code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
+ data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
+
+ if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
+ PLOG(FATAL) << "create_mspace_with_base failed";
+ }
+
+ // Prevent morecore requests from the mspace.
+ mspace_set_footprint_limit(code_mspace_, code_map_->Size());
+ mspace_set_footprint_limit(data_mspace_, data_map_->Size());
+
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
}
bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
@@ -63,44 +103,93 @@
}
bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
- return ptr >= code_cache_begin_ && ptr < code_cache_end_;
+ return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
-void JitCodeCache::FlushInstructionCache() {
- UNIMPLEMENTED(FATAL);
- // TODO: Investigate if we need to do this.
- // __clear_cache(reinterpret_cast<char*>(code_cache_begin_), static_cast<int>(CodeCacheSize()));
-}
-
-uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
- MutexLock mu(self, lock_);
- if (size > CodeCacheRemain()) {
- return nullptr;
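+// RAII helper that makes the code cache writable (RWX) for the duration of a
+// scope and restores it to R+X on destruction.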
+class ScopedCodeCacheWrite {
+ public:
+ explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
}
+ ~ScopedCodeCacheWrite() {
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ }
+ private:
+ MemMap* const code_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
+};
+
+uint8_t* JitCodeCache::CommitCode(Thread* self,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size) {
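+  // The extra 32 bytes leave room to align code_ptr below to the instruction
+  // set alignment while keeping the OatQuickMethodHeader just in front of it.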
+ size_t total_size = RoundUp(sizeof(OatQuickMethodHeader) + code_size + 32, sizeof(void*));
+ OatQuickMethodHeader* method_header = nullptr;
+ uint8_t* code_ptr;
+
+ MutexLock mu(self, lock_);
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ uint8_t* result = reinterpret_cast<uint8_t*>(mspace_malloc(code_mspace_, total_size));
+ if (result == nullptr) {
+ return nullptr;
+ }
+ code_ptr = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<size_t>(result + sizeof(OatQuickMethodHeader)),
+ GetInstructionSetAlignment(kRuntimeISA)));
+
+ std::copy(code, code + code_size, code_ptr);
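+    // The OatQuickMethodHeader lives immediately before the code it describes.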
+ method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+ new (method_header) OatQuickMethodHeader(
+ (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
+ (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
+ (gc_map == nullptr) ? 0 : code_ptr - gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code_size);
+ }
+
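+  // Flush the instruction cache so the newly written code is visible to
+  // instruction fetch (required on ARM; typically a no-op on x86).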
+ __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
+ reinterpret_cast<char*>(code_ptr + code_size));
+
++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
- code_cache_ptr_ += size;
- return code_cache_ptr_ - size;
+ return reinterpret_cast<uint8_t*>(method_header);
+}
+
+size_t JitCodeCache::CodeCacheSize() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
+}
+
+size_t JitCodeCache::DataCacheSize() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
}
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
- MutexLock mu(self, lock_);
size = RoundUp(size, sizeof(void*));
- if (size > DataCacheRemain()) {
- return nullptr;
- }
- data_cache_ptr_ += size;
- return data_cache_ptr_ - size;
+ MutexLock mu(self, lock_);
+ return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
}
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
- MutexLock mu(self, lock_);
- const size_t size = RoundUp(end - begin, sizeof(void*));
- if (size > DataCacheRemain()) {
+ uint8_t* result = ReserveData(self, end - begin);
+ if (result == nullptr) {
return nullptr; // Out of space in the data cache.
}
- std::copy(begin, end, data_cache_ptr_);
- data_cache_ptr_ += size;
- return data_cache_ptr_ - size;
+ std::copy(begin, end, result);
+ return result;
}
const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index f485e4a..fa90c18 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc/allocator/dlmalloc.h"
#include "gc_root.h"
#include "jni.h"
#include "oat_file.h"
@@ -48,34 +49,26 @@
// in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
- const uint8_t* CodeCachePtr() const {
- return code_cache_ptr_;
- }
-
- size_t CodeCacheSize() const {
- return code_cache_ptr_ - code_cache_begin_;
- }
-
- size_t CodeCacheRemain() const {
- return code_cache_end_ - code_cache_ptr_;
- }
-
- const uint8_t* DataCachePtr() const {
- return data_cache_ptr_;
- }
-
- size_t DataCacheSize() const {
- return data_cache_ptr_ - data_cache_begin_;
- }
-
- size_t DataCacheRemain() const {
- return data_cache_end_ - data_cache_ptr_;
- }
-
size_t NumMethods() const {
return num_methods_;
}
+ size_t CodeCacheSize() REQUIRES(!lock_);
+
+ size_t DataCacheSize() REQUIRES(!lock_);
+
+ // Allocate and write code and its metadata to the code cache.
+ uint8_t* CommitCode(Thread* self,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size)
+ REQUIRES(!lock_);
+
// Return true if the code cache contains the code pointer which is the entrypoint of the method.
bool ContainsMethod(ArtMethod* method) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -83,9 +76,6 @@
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
- // Reserve a region of code of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
-
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
@@ -105,25 +95,19 @@
private:
-  // Takes ownership of code_mem_map.
- explicit JitCodeCache(MemMap* code_mem_map);
-
- // Unimplemented, TODO: Determine if it is necessary.
- void FlushInstructionCache();
+  // Takes ownership of code_map and data_map.
+  JitCodeCache(MemMap* code_map, MemMap* data_map);
// Lock which guards the code and data caches.
Mutex lock_;
- // Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
- // headers in code cache which point to things in the data cache. If the maps are more than 4GB
- // apart, having multiple maps wouldn't work.
- std::unique_ptr<MemMap> mem_map_;
- // Code cache section.
- uint8_t* code_cache_ptr_;
- const uint8_t* code_cache_begin_;
- const uint8_t* code_cache_end_;
- // Data cache section.
- uint8_t* data_cache_ptr_;
- const uint8_t* data_cache_begin_;
- const uint8_t* data_cache_end_;
+ // Mem map which holds code.
+ std::unique_ptr<MemMap> code_map_;
+ // Mem map which holds data (stack maps and profiling info).
+ std::unique_ptr<MemMap> data_map_;
+ // The opaque mspace for allocating code.
+ void* code_mspace_;
+ // The opaque mspace for allocating data.
+ void* data_mspace_;
+ // Number of compiled methods.
size_t num_methods_;
// This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
deleted file mode 100644
index c76dc11..0000000
--- a/runtime/jit/jit_code_cache_test.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_runtime_test.h"
-
-#include "art_method-inl.h"
-#include "class_linker.h"
-#include "jit_code_cache.h"
-#include "scoped_thread_state_change.h"
-#include "thread-inl.h"
-
-namespace art {
-namespace jit {
-
-class JitCodeCacheTest : public CommonRuntimeTest {
- public:
-};
-
-TEST_F(JitCodeCacheTest, TestCoverage) {
- std::string error_msg;
- constexpr size_t kSize = 1 * MB;
- std::unique_ptr<JitCodeCache> code_cache(
- JitCodeCache::Create(kSize, &error_msg));
- ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
- ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
- ASSERT_EQ(code_cache->CodeCacheSize(), 0u);
- ASSERT_GT(code_cache->CodeCacheRemain(), 0u);
- ASSERT_TRUE(code_cache->DataCachePtr() != nullptr);
- ASSERT_EQ(code_cache->DataCacheSize(), 0u);
- ASSERT_GT(code_cache->DataCacheRemain(), 0u);
- ASSERT_EQ(code_cache->CodeCacheRemain() + code_cache->DataCacheRemain(), kSize);
- ASSERT_EQ(code_cache->NumMethods(), 0u);
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- uint8_t* const reserved_code = code_cache->ReserveCode(soa.Self(), 4 * KB);
- ASSERT_TRUE(reserved_code != nullptr);
- ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
- ASSERT_EQ(code_cache->NumMethods(), 1u);
- Runtime* const runtime = Runtime::Current();
- ClassLinker* const class_linker = runtime->GetClassLinker();
- ArtMethod* method = &class_linker->AllocArtMethodArray(soa.Self(),
- runtime->GetLinearAlloc(),
- 1)->At(0);
- ASSERT_FALSE(code_cache->ContainsMethod(method));
- method->SetEntryPointFromQuickCompiledCode(reserved_code);
- ASSERT_TRUE(code_cache->ContainsMethod(method));
- ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
- // Save the code and then change it.
- code_cache->SaveCompiledCode(method, reserved_code);
- method->SetEntryPointFromQuickCompiledCode(nullptr);
- ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
- const uint8_t data_arr[] = {1, 2, 3, 4, 5};
- uint8_t* data_ptr = code_cache->AddDataArray(soa.Self(), data_arr, data_arr + sizeof(data_arr));
- ASSERT_TRUE(data_ptr != nullptr);
- ASSERT_EQ(memcmp(data_ptr, data_arr, sizeof(data_arr)), 0);
-}
-
-TEST_F(JitCodeCacheTest, TestOverflow) {
- std::string error_msg;
- constexpr size_t kSize = 1 * MB;
- std::unique_ptr<JitCodeCache> code_cache(
- JitCodeCache::Create(kSize, &error_msg));
- ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
- ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
- size_t code_bytes = 0;
- size_t data_bytes = 0;
- constexpr size_t kCodeArrSize = 4 * KB;
- constexpr size_t kDataArrSize = 4 * KB;
- uint8_t data_arr[kDataArrSize];
- std::fill_n(data_arr, arraysize(data_arr), 53);
- // Add code and data until we are full.
- uint8_t* code_ptr = nullptr;
- uint8_t* data_ptr = nullptr;
- do {
- code_ptr = code_cache->ReserveCode(Thread::Current(), kCodeArrSize);
- data_ptr = code_cache->AddDataArray(Thread::Current(), data_arr, data_arr + kDataArrSize);
- if (code_ptr != nullptr) {
- code_bytes += kCodeArrSize;
- }
- if (data_ptr != nullptr) {
- data_bytes += kDataArrSize;
- }
- } while (code_ptr != nullptr || data_ptr != nullptr);
- // Make sure we added a reasonable amount
- CHECK_GT(code_bytes, 0u);
- CHECK_LE(code_bytes, kSize);
- CHECK_GT(data_bytes, 0u);
- CHECK_LE(data_bytes, kSize);
- CHECK_GE(code_bytes + data_bytes, kSize * 4 / 5);
-}
-
-} // namespace jit
-} // namespace art