-rw-r--r--  libartbase/base/mem_map.cc       29
-rw-r--r--  libartbase/base/mem_map.h        10
-rw-r--r--  libartbase/base/mem_map_test.cc  47
-rw-r--r--  libartbase/base/memfd.cc         16
-rw-r--r--  libartbase/base/memfd.h           3
-rw-r--r--  runtime/class_linker.cc          10
-rw-r--r--  runtime/class_linker.h            2
-rw-r--r--  runtime/jit/jit_code_cache.cc   408
-rw-r--r--  runtime/jit/jit_code_cache.h     44
-rw-r--r--  runtime/oat_file_manager.cc       2
-rw-r--r--  tools/veridex/flow_analysis.cc    4
11 files changed, 176 insertions(+), 399 deletions(-)
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 92551f17b6..1bf553d293 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -692,24 +692,6 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
int tail_prot,
std::string* error_msg,
bool use_debug_name) {
- return RemapAtEnd(new_end,
- tail_name,
- tail_prot,
- MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
- /* fd */ -1,
- /* offset */ 0,
- error_msg,
- use_debug_name);
-}
-
-MemMap MemMap::RemapAtEnd(uint8_t* new_end,
- const char* tail_name,
- int tail_prot,
- int flags,
- int fd,
- off_t offset,
- std::string* error_msg,
- bool use_debug_name) {
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -733,6 +715,9 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
DCHECK_ALIGNED(tail_base_size, kPageSize);
+ unique_fd fd;
+ int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
+
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
// Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
// removes old mappings for the overlapping region. This makes the operation atomic
@@ -741,13 +726,13 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
tail_base_size,
tail_prot,
flags,
- fd,
- offset));
+ fd.get(),
+ 0));
if (actual == MAP_FAILED) {
PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
- *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
+ *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
"maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
- fd);
+ fd.get());
return Invalid();
}
// Update *this.
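
The note kept above is the invariant RemapAtEnd relies on: mmap() with MAP_FIXED atomically replaces whatever was mapped in the overlapped range, so the tail never has to be munmap()ed first. A minimal standalone sketch of that behavior (hypothetical sizes and names, not ART code):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>

    int main() {
      const size_t page = sysconf(_SC_PAGESIZE);
      const size_t total = 4 * page;
      // One anonymous read-write reservation.
      void* base = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(base != MAP_FAILED);
      // Remap the last page in place: MAP_FIXED atomically replaces the old
      // pages at that address, so no explicit munmap() of the tail is needed.
      void* tail = static_cast<char*>(base) + 3 * page;
      void* remapped = mmap(tail, page, PROT_READ,
                            MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
      assert(remapped == tail);
      munmap(base, total);
      return 0;
    }
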
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 309da27319..20eda324e1 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -261,16 +261,6 @@ class MemMap {
std::string* error_msg,
bool use_debug_name = true);
- // Unmap the pages of a file at end and remap them to create another memory map.
- MemMap RemapAtEnd(uint8_t* new_end,
- const char* tail_name,
- int tail_prot,
- int tail_flags,
- int fd,
- off_t offset,
- std::string* error_msg,
- bool use_debug_name = true);
-
// Take ownership of pages at the beginning of the mapping. The mapping must be an
// anonymous reservation mapping, owning entire pages. The `byte_count` must not
// exceed the size of this reservation.
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index bf143d472d..ab3d18ff04 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -455,53 +455,6 @@ TEST_F(MemMapTest, RemapAtEnd32bit) {
}
#endif
-TEST_F(MemMapTest, RemapFileViewAtEnd) {
- CommonInit();
- std::string error_msg;
- ScratchFile scratch_file;
-
- // Create a scratch file 3 pages large.
- constexpr size_t kMapSize = 3 * kPageSize;
- std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
- memset(data.get(), 1, kPageSize);
- memset(&data[0], 0x55, kPageSize);
- memset(&data[kPageSize], 0x5a, kPageSize);
- memset(&data[2 * kPageSize], 0xaa, kPageSize);
- ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
-
- MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
- PROT_READ,
- MAP_PRIVATE,
- scratch_file.GetFd(),
- /*start*/0,
- /*low_4gb*/true,
- scratch_file.GetFilename().c_str(),
- &error_msg);
- ASSERT_TRUE(map.IsValid()) << error_msg;
- ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(map.Size(), kMapSize);
- ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
- ASSERT_EQ(data[0], *map.Begin());
- ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
- ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));
-
- for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
- MemMap tail = map.RemapAtEnd(map.Begin() + offset,
- "bad_offset_map",
- PROT_READ,
- MAP_PRIVATE | MAP_FIXED,
- scratch_file.GetFd(),
- offset,
- &error_msg);
- ASSERT_TRUE(tail.IsValid()) << error_msg;
- ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(offset, map.Size());
- ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
- ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
- ASSERT_EQ(data[offset], *tail.Begin());
- }
-}
-
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
// Some MIPS32 hardware (namely the Creator Ci20 development board)
// cannot allocate in the 2GB-4GB region.
diff --git a/libartbase/base/memfd.cc b/libartbase/base/memfd.cc
index 1afcd7b311..7c2040147e 100644
--- a/libartbase/base/memfd.cc
+++ b/libartbase/base/memfd.cc
@@ -17,7 +17,9 @@
#include "memfd.h"
#include <errno.h>
+#include <stdio.h>
#include <sys/syscall.h>
+#include <sys/utsname.h>
#include <unistd.h>
#include "macros.h"
@@ -37,6 +39,20 @@ namespace art {
#if defined(__NR_memfd_create)
int memfd_create(const char* name, unsigned int flags) {
+ // Check kernel version supports memfd_create(). Some older kernels segfault executing
+ // memfd_create() rather than returning ENOSYS (b/116769556).
+ static constexpr int kRequiredMajor = 3;
+ static constexpr int kRequiredMinor = 17;
+ struct utsname uts;
+ int major, minor;
+ if (uname(&uts) != 0 ||
+ strcmp(uts.sysname, "Linux") != 0 ||
+ sscanf(uts.release, "%d.%d", &major, &minor) != 2 ||
+ (major < kRequiredMajor || (major == kRequiredMajor && minor < kRequiredMinor))) {
+ errno = ENOSYS;
+ return -1;
+ }
+
return syscall(__NR_memfd_create, name, flags);
}
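
Callers are expected to treat a negative return as "memfd_create not usable" and fall back to plain anonymous memory, which is exactly what the JIT code cache did before this revert. A hedged usage sketch (hypothetical caller, not part of this change):

    #include <unistd.h>
    #include "base/memfd.h"  // the art::memfd_create wrapper shown above

    // Returns a memfd-backed descriptor sized to `capacity`, or -1 when the
    // kernel is too old or lacks the syscall; callers then fall back to
    // MAP_ANONYMOUS memory instead.
    static int TryCreateJitMemFd(size_t capacity) {
      int fd = art::memfd_create("jit-cache", /* flags */ 0);
      if (fd < 0) {
        // Covers both a missing syscall and the version guard above (b/116769556).
        return -1;
      }
      if (ftruncate(fd, capacity) != 0) {
        close(fd);
        return -1;
      }
      return fd;
    }
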
diff --git a/libartbase/base/memfd.h b/libartbase/base/memfd.h
index 4367198185..91db0b2d8f 100644
--- a/libartbase/base/memfd.h
+++ b/libartbase/base/memfd.h
@@ -19,7 +19,8 @@
namespace art {
- // Call memfd(2) if available on platform and return result.
+// Call memfd_create(2) if available on the platform and return the result. This call also
+// performs a kernel version check for safety on older kernels (b/116769556).
int memfd_create(const char* name, unsigned int flags);
} // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index bcc3a22c86..d95f71a315 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1251,6 +1251,8 @@ void AppImageClassLoadersAndDexCachesHelper::Update(
ClassTable::ClassSet* new_class_set)
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedTrace app_image_timing("AppImage:Updating");
+
Thread* const self = Thread::Current();
gc::Heap* const heap = Runtime::Current()->GetHeap();
const ImageHeader& header = space->GetImageHeader();
@@ -1311,7 +1313,7 @@ void AppImageClassLoadersAndDexCachesHelper::Update(
}
if (ClassLinker::kAppImageMayContainStrings) {
// Fixup all the literal strings happens at app images which are supposed to be interned.
- ScopedTrace timing("Fixup String Intern in image and dex_cache");
+ ScopedTrace timing("AppImage:InternString");
const auto& image_header = space->GetImageHeader();
const auto bitmap = space->GetMarkBitmap(); // bitmap of objects
const uint8_t* target_base = space->GetMemMap()->Begin();
@@ -1324,7 +1326,7 @@ void AppImageClassLoadersAndDexCachesHelper::Update(
bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_intern_visitor);
}
if (kVerifyArtMethodDeclaringClasses) {
- ScopedTrace timing("Verify declaring classes");
+ ScopedTrace timing("AppImage:VerifyDeclaringClasses");
ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
VerifyDeclaringClassVisitor visitor;
header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
@@ -1842,7 +1844,7 @@ bool ClassLinker::AddImageSpace(
// Force every app image class's SubtypeCheck to be at least kIninitialized.
//
// See also ImageWriter::FixupClass.
- ScopedTrace trace("Recalculate app image SubtypeCheck bitstrings");
+ ScopedTrace trace("AppImage:RecacluateSubtypeCheckBitstrings");
MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
for (const ClassTable::TableSlot& root : temp_set) {
SubtypeCheck<ObjPtr<mirror::Class>>::EnsureInitialized(root.Read());
@@ -1862,7 +1864,7 @@ bool ClassLinker::AddImageSpace(
if (kIsDebugBuild && app_image) {
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
- ScopedTrace trace("VerifyAppImage");
+ ScopedTrace trace("AppImage:Verify");
VerifyAppImage(header, class_loader, dex_caches, class_table, space);
}
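
The relabelled sections ("AppImage:Updating", "AppImage:InternString", "AppImage:Verify", ...) group all app-image work under a common prefix in systrace. ScopedTrace itself is a small RAII wrapper over the platform trace calls; a rough standalone equivalent using the NDK tracing API (an approximation for illustration, not ART's implementation) would be:

    #include <android/trace.h>  // NDK tracing, API level 23+; link with -landroid

    // RAII scope that shows up as a named slice in systrace/perfetto,
    // similar in spirit to art::ScopedTrace.
    class ScopedAppImageTrace {
     public:
      explicit ScopedAppImageTrace(const char* name) { ATrace_beginSection(name); }
      ~ScopedAppImageTrace() { ATrace_endSection(); }
    };

    void UpdateAppImage() {
      ScopedAppImageTrace trace("AppImage:Updating");
      // ... work accounted to the "AppImage:Updating" slice ...
    }
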
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index efe29d3127..e06a398089 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -111,7 +111,7 @@ class AllocatorVisitor {
class ClassLinker {
public:
- static constexpr bool kAppImageMayContainStrings = false;
+ static constexpr bool kAppImageMayContainStrings = true;
explicit ClassLinker(InternTable* intern_table);
virtual ~ClassLinker();
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1119317867..33d228f255 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,15 +18,12 @@
#include <sstream>
-#include "android-base/unique_fd.h"
-
#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/logging.h" // For VLOG.
#include "base/membarrier.h"
-#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
@@ -55,32 +52,16 @@
#include "thread-current-inl.h"
#include "thread_list.h"
-using android::base::unique_fd;
-
namespace art {
namespace jit {
+static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+static constexpr int kProtData = PROT_READ | PROT_WRITE;
+static constexpr int kProtProfile = PROT_READ;
+
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
-static constexpr int kProtR = PROT_READ;
-static constexpr int kProtRW = PROT_READ | PROT_WRITE;
-static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
-static constexpr int kProtRX = PROT_READ | PROT_EXEC;
-
-namespace {
-
-// Translate an address belonging to one memory map into an address in a second. This is useful
-// when there are two virtual memory ranges for the same physical memory range.
-template <typename T>
-T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
- CHECK(src.HasAddress(src_ptr));
- uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
- return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
-}
-
-} // namespace
-
class JitCodeCache::JniStubKey {
public:
explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -209,41 +190,17 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// Register for membarrier expedited sync core if JIT will be generating code.
if (!used_only_for_profile_data) {
- if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
- // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
- // flushed and it's used when adding code to the JIT. The memory used by the new code may
- // have just been released and, in theory, the old code could still be in a pipeline.
- VLOG(jit) << "Kernel does not support membarrier sync-core";
- }
- }
-
- // File descriptor enabling dual-view mapping of code section.
- unique_fd mem_fd;
-
- // Bionic supports memfd_create, but the call may fail on older kernels.
- mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
- if (mem_fd.get() < 0) {
- VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
- << strerror(errno);
- }
-
- if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
- std::ostringstream oss;
- oss << "Failed to initialize memory file: " << strerror(errno);
- *error_msg = oss.str();
- return nullptr;
+ art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore);
}
- // Data cache will be half of the initial allocation.
- // Code cache will be the other half of the initial allocation.
- // TODO: Make this variable?
-
- // Align both capacities to page size, as that's the unit mspaces use.
- initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
- max_capacity = RoundDown(max_capacity, 2 * kPageSize);
- const size_t data_capacity = max_capacity / 2;
- const size_t exec_capacity = used_only_for_profile_data ? 0 : max_capacity - data_capacity;
- DCHECK_LE(data_capacity + exec_capacity, max_capacity);
+ // Decide how we should map the code and data sections.
+ // If we use the code cache just for profiling we do not need to map the code section as
+ // executable.
+ // NOTE 1: this is yet another workaround to bypass strict SELinux policies in order to be able
+ // to profile the system server.
+ // NOTE 2: We could just not create the code section at all, but that would require
+ // special-casing too many code paths.
+ int memmap_flags_prot_code = used_only_for_profile_data ? kProtProfile : kProtCode;
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
@@ -251,149 +208,71 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// We could do PC-relative addressing to avoid this problem, but that
// would require reserving code and data area before submitting, which
// means more windows for the code memory to be RWX.
- int base_flags;
- MemMap data_pages;
- if (mem_fd.get() >= 0) {
- // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
- // for data and non-writable view of JIT code pages. We use the memory file descriptor to
- // enable dual mapping - we'll create a second mapping using the descriptor below. The
- // mappings will look like:
- //
- // VA PA
- //
- // +---------------+
- // | non exec code |\
- // +---------------+ \
- // : :\ \
- // +---------------+.\.+---------------+
- // | exec code | \| code |
- // +---------------+...+---------------+
- // | data | | data |
- // +---------------+...+---------------+
- //
- // In this configuration code updates are written to the non-executable view of the code
- // cache, and the executable view of the code cache has fixed RX memory protections.
- //
- // This memory needs to be mapped shared as the code portions will have two mappings.
- base_flags = MAP_SHARED;
- data_pages = MemMap::MapFile(
- data_capacity + exec_capacity,
- kProtRW,
- base_flags,
- mem_fd,
- /* start */ 0,
- /* low_4gb */ true,
- "data-code-cache",
- &error_str);
- } else {
- // Single view of JIT code cache case. Create an initial mapping of data pages large enough
- // for data and JIT code pages. The mappings will look like:
- //
- // VA PA
- //
- // +---------------+...+---------------+
- // | exec code | | code |
- // +---------------+...+---------------+
- // | data | | data |
- // +---------------+...+---------------+
- //
- // In this configuration code updates are written to the executable view of the code cache,
- // and the executable view of the code cache transitions RX to RWX for the update and then
- // back to RX after the update.
- base_flags = MAP_PRIVATE | MAP_ANON;
- data_pages = MemMap::MapAnonymous(
- "data-code-cache",
- /* addr */ nullptr,
- data_capacity + exec_capacity,
- kProtRW,
- /* low_4gb */ true,
- /* reuse */ false,
- /* reservation */ nullptr,
- &error_str);
- }
-
- if (!data_pages.IsValid()) {
+ MemMap data_map = MemMap::MapAnonymous(
+ "data-code-cache",
+ /* addr */ nullptr,
+ max_capacity,
+ kProtData,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ /* reservation */ nullptr,
+ &error_str);
+ if (!data_map.IsValid()) {
std::ostringstream oss;
oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
return nullptr;
}
- MemMap exec_pages;
- MemMap non_exec_pages;
- if (exec_capacity > 0) {
- uint8_t* const divider = data_pages.Begin() + data_capacity;
- // Set initial permission for executable view to catch any SELinux permission problems early
- // (for processes that cannot map WX pages). Otherwise, this region does not need to be
- // executable as there is no code in the cache yet.
- exec_pages = data_pages.RemapAtEnd(divider,
- "jit-code-cache",
- kProtRX,
- base_flags | MAP_FIXED,
- mem_fd.get(),
- (mem_fd.get() >= 0) ? data_capacity : 0,
- &error_str);
- if (!exec_pages.IsValid()) {
- std::ostringstream oss;
- oss << "Failed to create read execute code cache: " << error_str << " size=" << max_capacity;
- *error_msg = oss.str();
- return nullptr;
- }
+ // Align both capacities to page size, as that's the unit mspaces use.
+ initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+ max_capacity = RoundDown(max_capacity, 2 * kPageSize);
- if (mem_fd.get() >= 0) {
- // For dual view, create the secondary view of code memory used for updating code. This view
- // is never executable.
- non_exec_pages = MemMap::MapFile(exec_capacity,
- kProtR,
- base_flags,
- mem_fd,
- /* start */ data_capacity,
- /* low_4GB */ false,
- "jit-code-cache-rw",
- &error_str);
- if (!exec_pages.IsValid()) {
- std::ostringstream oss;
- oss << "Failed to create read write code cache: " << error_str << " size=" << max_capacity;
- *error_msg = oss.str();
- return nullptr;
- }
- }
- } else {
- // Profiling only. No memory for code required.
- DCHECK(used_only_for_profile_data);
+ // Data cache is 1 / 2 of the map.
+ // TODO: Make this variable?
+ size_t data_size = max_capacity / 2;
+ size_t code_size = max_capacity - data_size;
+ DCHECK_EQ(code_size + data_size, max_capacity);
+ uint8_t* divider = data_map.Begin() + data_size;
+
+ MemMap code_map = data_map.RemapAtEnd(
+ divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str);
+ if (!code_map.IsValid()) {
+ std::ostringstream oss;
+ oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
+ *error_msg = oss.str();
+ return nullptr;
}
-
- const size_t initial_data_capacity = initial_capacity / 2;
- const size_t initial_exec_capacity =
- (exec_capacity == 0) ? 0 : (initial_capacity - initial_data_capacity);
-
+ DCHECK_EQ(code_map.Begin(), divider);
+ data_size = initial_capacity / 2;
+ code_size = initial_capacity - data_size;
+ DCHECK_EQ(code_size + data_size, initial_capacity);
return new JitCodeCache(
- std::move(data_pages),
- std::move(exec_pages),
- std::move(non_exec_pages),
- initial_data_capacity,
- initial_exec_capacity,
+ std::move(code_map),
+ std::move(data_map),
+ code_size,
+ data_size,
max_capacity,
- garbage_collect_code);
+ garbage_collect_code,
+ memmap_flags_prot_code);
}
-JitCodeCache::JitCodeCache(MemMap&& data_pages,
- MemMap&& exec_pages,
- MemMap&& non_exec_pages,
+JitCodeCache::JitCodeCache(MemMap&& code_map,
+ MemMap&& data_map,
+ size_t initial_code_capacity,
size_t initial_data_capacity,
- size_t initial_exec_capacity,
size_t max_capacity,
- bool garbage_collect_code)
+ bool garbage_collect_code,
+ int memmap_flags_prot_code)
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
- data_pages_(std::move(data_pages)),
- exec_pages_(std::move(exec_pages)),
- non_exec_pages_(std::move(non_exec_pages)),
+ code_map_(std::move(code_map)),
+ data_map_(std::move(data_map)),
max_capacity_(max_capacity),
- current_capacity_(initial_exec_capacity + initial_data_capacity),
+ current_capacity_(initial_code_capacity + initial_data_capacity),
+ code_end_(initial_code_capacity),
data_end_(initial_data_capacity),
- exec_end_(initial_exec_capacity),
last_collection_increased_code_cache_(false),
garbage_collect_code_(garbage_collect_code),
used_memory_for_data_(0),
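
The comment block deleted above described the dual-view scheme this change reverts: the same memfd-backed pages mapped twice, a writable view for emitting code and a read/execute view for running it. For context, the core of that technique in isolation (a hypothetical sketch, not the removed ART code):

    #include <sys/mman.h>
    #include <unistd.h>

    // Map the same memfd pages at two addresses: one RW view for code
    // updates, one RX view for execution (the dual view being removed here).
    bool MapDualView(int mem_fd, size_t size, void** writable, void** executable) {
      if (ftruncate(mem_fd, size) != 0) {
        return false;
      }
      *writable = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, 0);
      *executable = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, mem_fd, 0);
      return *writable != MAP_FAILED && *executable != MAP_FAILED;
    }
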
@@ -405,46 +284,40 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
histogram_code_memory_use_("Memory used for compiled code", 16),
histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
is_weak_access_enabled_(true),
- inline_cache_cond_("Jit inline cache condition variable", lock_) {
-
- DCHECK_GE(max_capacity, initial_exec_capacity + initial_data_capacity);
-
- // Initialize the data heap
- data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
- CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
-
- // Initialize the code heap
- MemMap* code_heap = nullptr;
- if (non_exec_pages_.IsValid()) {
- code_heap = &non_exec_pages_;
- } else if (exec_pages_.IsValid()) {
- code_heap = &exec_pages_;
- }
- if (code_heap != nullptr) {
- // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
- // heap, will take and initialize pages in create_mspace_with_base().
- CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
- exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
- CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
- SetFootprintLimit(current_capacity_);
- // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
- // perform the update and there are no other times write access is required.
- CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
- } else {
- exec_mspace_ = nullptr;
- SetFootprintLimit(current_capacity_);
+ inline_cache_cond_("Jit inline cache condition variable", lock_),
+ memmap_flags_prot_code_(memmap_flags_prot_code) {
+
+ DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
+ code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
+ data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
+
+ if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
+ PLOG(FATAL) << "create_mspace_with_base failed";
}
+ SetFootprintLimit(current_capacity_);
+
+ CheckedCall(mprotect,
+ "mprotect jit code cache",
+ code_map_.Begin(),
+ code_map_.Size(),
+ memmap_flags_prot_code_);
+ CheckedCall(mprotect,
+ "mprotect jit data cache",
+ data_map_.Begin(),
+ data_map_.Size(),
+ kProtData);
+
VLOG(jit) << "Created jit code cache: initial data size="
<< PrettySize(initial_data_capacity)
<< ", initial code size="
- << PrettySize(initial_exec_capacity);
+ << PrettySize(initial_code_capacity);
}
JitCodeCache::~JitCodeCache() {}
bool JitCodeCache::ContainsPc(const void* ptr) const {
- return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
+ return code_map_.Begin() <= ptr && ptr < code_map_.End();
}
bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -512,20 +385,22 @@ class ScopedCodeCacheWrite : ScopedTrace {
: ScopedTrace("ScopedCodeCacheWrite"),
code_cache_(code_cache) {
ScopedTrace trace("mprotect all");
- const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
- if (updatable_pages != nullptr) {
- int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
- CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
- }
+ CheckedCall(
+ mprotect,
+ "make code writable",
+ code_cache_->code_map_.Begin(),
+ code_cache_->code_map_.Size(),
+ code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
}
~ScopedCodeCacheWrite() {
ScopedTrace trace("mprotect code");
- const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
- if (updatable_pages != nullptr) {
- int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
- CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
- }
+ CheckedCall(
+ mprotect,
+ "make code protected",
+ code_cache_->code_map_.Begin(),
+ code_cache_->code_map_.Size(),
+ code_cache_->memmap_flags_prot_code_);
}
private:
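
With the dual view gone, ScopedCodeCacheWrite is back to the single-view W^X protocol: the one code mapping is made writable for the duration of an update and restored to its normal protection (RX, or R for a profiling-only cache) afterwards. A self-contained sketch of that RAII pattern (generic, not the ART class):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    // Temporarily adds PROT_WRITE to a code region and restores the original
    // protection when the scope ends (single-view W^X toggling).
    class ScopedWritableCode {
     public:
      ScopedWritableCode(void* begin, size_t size, int normal_prot)
          : begin_(begin), size_(size), normal_prot_(normal_prot) {
        if (mprotect(begin_, size_, normal_prot_ | PROT_WRITE) != 0) {
          std::perror("mprotect +W");
        }
      }
      ~ScopedWritableCode() {
        if (mprotect(begin_, size_, normal_prot_) != 0) {
          std::perror("mprotect -W");
        }
      }
     private:
      void* const begin_;
      const size_t size_;
      const int normal_prot_;  // e.g. PROT_READ | PROT_EXEC, or PROT_READ only
    };
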
@@ -727,13 +602,7 @@ void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
FreeData(GetRootTable(code_ptr));
} // else this is a JNI stub without any data.
-
- uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
- if (HasDualCodeMapping()) {
- code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
- }
-
- FreeCode(code_allocation);
+ FreeCode(reinterpret_cast<uint8_t*>(allocation));
}
void JitCodeCache::FreeAllMethodHeaders(
@@ -884,16 +753,6 @@ void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
}
}
-const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
- if (HasDualCodeMapping()) {
- return &non_exec_pages_;
- } else if (HasCodeMapping()) {
- return &exec_pages_;
- } else {
- return nullptr;
- }
-}
-
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
@@ -914,52 +773,31 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
DCheckRootsAreValid(roots);
}
+ size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ // Ensure the header ends up at expected instruction alignment.
+ size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+ size_t total_size = header_size + code_size;
+
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
-
+ uint8_t* memory = nullptr;
MutexLock mu(self, lock_);
// We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
// finish.
WaitForPotentialCollectionToCompleteRunnable(self);
{
ScopedCodeCacheWrite scc(this);
-
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
- // Ensure the header ends up at expected instruction alignment.
- size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
- size_t total_size = header_size + code_size;
-
- // AllocateCode allocates memory in non-executable region for alignment header and code. The
- // header size may include alignment padding.
- uint8_t* nox_memory = AllocateCode(total_size);
- if (nox_memory == nullptr) {
+ memory = AllocateCode(total_size);
+ if (memory == nullptr) {
return nullptr;
}
+ code_ptr = memory + header_size;
- // code_ptr points to non-executable code.
- code_ptr = nox_memory + header_size;
std::copy(code, code + code_size, code_ptr);
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-
- // From here code_ptr points to executable code.
- if (non_exec_pages_.IsValid()) {
- code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
- }
-
new (method_header) OatQuickMethodHeader(
(stack_map != nullptr) ? code_ptr - stack_map : 0u,
code_size);
-
- DCHECK(!Runtime::Current()->IsAotCompiler());
- if (has_should_deoptimize_flag) {
- method_header->SetHasShouldDeoptimizeFlag();
- }
-
- // Update method_header pointer to executable code region.
- if (non_exec_pages_.IsValid()) {
- method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
- }
-
// Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
// trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
// This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
@@ -975,14 +813,16 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
// shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
// notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
// hardware support that broadcasts TLB invalidations and so their kernels have no software
- // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
- // address this (see mbarrier(2)). The membarrier here will fail on prior kernels and on
- // platforms lacking the appropriate support.
+ // based TLB shootdown.
art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);
+ DCHECK(!Runtime::Current()->IsAotCompiler());
+ if (has_should_deoptimize_flag) {
+ method_header->SetHasShouldDeoptimizeFlag();
+ }
+
number_of_compilations_++;
}
-
// We need to update the entry point in the runnable state for the instrumentation.
{
// The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
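
The comment above spells out the two steps that make freshly written code safe to execute: flush the instruction cache for the new range, then issue a sync-core membarrier so other cores discard stale pipeline state. A minimal sketch of those two calls (Linux only; assumes the process registered for the private expedited sync-core command at startup, as Create() does above):

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <cstddef>

    // Make code just written to [begin, begin + size) visible to execution.
    void CommitNewCode(char* begin, size_t size) {
      // 1. Flush the data cache and invalidate the instruction cache for the range.
      __builtin___clear_cache(begin, begin + size);
      // 2. Interrupt the process's other cores so they re-fetch instructions;
      //    this fails harmlessly on kernels older than 4.16.
      syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0);
    }
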
@@ -1327,9 +1167,9 @@ void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
DCHECK_EQ(per_space_footprint * 2, new_footprint);
mspace_set_footprint_limit(data_mspace_, per_space_footprint);
- if (HasCodeMapping()) {
+ {
ScopedCodeCacheWrite scc(this);
- mspace_set_footprint_limit(exec_mspace_, per_space_footprint);
+ mspace_set_footprint_limit(code_mspace_, per_space_footprint);
}
}
@@ -1404,8 +1244,8 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
number_of_collections_++;
live_bitmap_.reset(CodeCacheBitmap::Create(
"code-cache-bitmap",
- reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
- reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
+ reinterpret_cast<uintptr_t>(code_map_.Begin()),
+ reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
collection_in_progress_ = true;
}
}
@@ -1774,17 +1614,15 @@ ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNU
// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
- if (mspace == exec_mspace_) {
- DCHECK(exec_mspace_ != nullptr);
- const MemMap* const code_pages = GetUpdatableCodeMapping();
- void* result = code_pages->Begin() + exec_end_;
- exec_end_ += increment;
- return result;
+ if (code_mspace_ == mspace) {
+ size_t result = code_end_;
+ code_end_ += increment;
+ return reinterpret_cast<void*>(result + code_map_.Begin());
} else {
DCHECK_EQ(data_mspace_, mspace);
- void* result = data_pages_.Begin() + data_end_;
+ size_t result = data_end_;
data_end_ += increment;
- return result;
+ return reinterpret_cast<void*>(result + data_map_.Begin());
}
}
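
MoreCore is the sbrk-style growth hook dlmalloc calls back into for each mspace: it hands out the next `increment` bytes of the already reserved map and bumps the matching end cursor (code_end_ or data_end_). For readers unfamiliar with the mspace API used here, a stripped-down usage sketch (assumes dlmalloc built with MSPACES, as ART's copy is; the header path is illustrative):

    #include <sys/mman.h>
    #include "dlmalloc.h"  // create_mspace_with_base, mspace_memalign, mspace_free

    void MspaceDemo() {
      const size_t capacity = 1 << 20;
      // Pre-reserve the backing memory, as the code cache does with MemMap.
      void* base = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      // Hand an initial slice to dlmalloc; growth beyond it goes through the
      // MORECORE hook (JitCodeCache::MoreCore above), which stays inside the
      // same reservation by bumping an end cursor.
      void* msp = create_mspace_with_base(base, capacity / 2, /*locked=*/false);
      void* chunk = mspace_memalign(msp, 16, 4096);  // aligned, like AllocateCode
      mspace_free(msp, chunk);
      munmap(base, capacity);
    }
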
@@ -2011,7 +1849,7 @@ void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
uint8_t* result = reinterpret_cast<uint8_t*>(
- mspace_memalign(exec_mspace_, alignment, code_size));
+ mspace_memalign(code_mspace_, alignment, code_size));
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
// Ensure the header ends up at expected instruction alignment.
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
@@ -2021,7 +1859,7 @@ uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
void JitCodeCache::FreeCode(uint8_t* code) {
used_memory_for_code_ -= mspace_usable_size(code);
- mspace_free(exec_mspace_, code);
+ mspace_free(code_mspace_, code);
}
uint8_t* JitCodeCache::AllocateData(size_t data_size) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 76ad8db886..e2aa01c121 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -223,7 +223,7 @@ class JitCodeCache {
REQUIRES_SHARED(Locks::mutator_lock_);
bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
- return mspace == data_mspace_ || mspace == exec_mspace_;
+ return mspace == code_mspace_ || mspace == data_mspace_;
}
void* MoreCore(const void* mspace, intptr_t increment);
@@ -279,13 +279,13 @@ class JitCodeCache {
private:
// Take ownership of maps.
- JitCodeCache(MemMap&& data_pages,
- MemMap&& exec_pages,
- MemMap&& non_exec_pages,
+ JitCodeCache(MemMap&& code_map,
+ MemMap&& data_map,
+ size_t initial_code_capacity,
size_t initial_data_capacity,
- size_t initial_exec_capacity,
size_t max_capacity,
- bool garbage_collect_code);
+ bool garbage_collect_code,
+ int memmap_flags_prot_code);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
@@ -381,16 +381,6 @@ class JitCodeCache {
uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
void FreeData(uint8_t* data) REQUIRES(lock_);
- bool HasDualCodeMapping() const {
- return non_exec_pages_.IsValid();
- }
-
- bool HasCodeMapping() const {
- return exec_pages_.IsValid();
- }
-
- const MemMap* GetUpdatableCodeMapping() const;
-
bool IsWeakAccessEnabled(Thread* self) const;
void WaitUntilInlineCacheAccessible(Thread* self)
REQUIRES(!lock_)
@@ -405,17 +395,14 @@ class JitCodeCache {
ConditionVariable lock_cond_ GUARDED_BY(lock_);
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(lock_);
+ // Mem map which holds code.
+ MemMap code_map_;
// Mem map which holds data (stack maps and profiling info).
- MemMap data_pages_;
- // Mem map which holds code and has executable permission.
- MemMap exec_pages_;
- // Mem map which holds code with non executable permission. Only valid for dual view JIT when
- // this is the non-executable view of code used to write updates.
- MemMap non_exec_pages_;
+ MemMap data_map_;
+ // The opaque mspace for allocating code.
+ void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
void* data_mspace_ GUARDED_BY(lock_);
- // The opaque mspace for allocating code.
- void* exec_mspace_ GUARDED_BY(lock_);
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
// Holds compiled code associated with the shorty for a JNI stub.
@@ -433,12 +420,12 @@ class JitCodeCache {
// The current capacity in bytes of the code cache.
size_t current_capacity_ GUARDED_BY(lock_);
+ // The current footprint in bytes of the code portion of the code cache.
+ size_t code_end_ GUARDED_BY(lock_);
+
// The current footprint in bytes of the data portion of the code cache.
size_t data_end_ GUARDED_BY(lock_);
- // The current footprint in bytes of the code portion of the code cache.
- size_t exec_end_ GUARDED_BY(lock_);
-
// Whether the last collection round increased the code cache.
bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
@@ -477,6 +464,9 @@ class JitCodeCache {
// Condition to wait on for accessing inline caches.
ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+ // Mapping flags for the code section.
+ const int memmap_flags_prot_code_;
+
friend class art::JitJniStubTestHelper;
friend class ScopedCodeCacheWrite;
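
The GUARDED_BY(lock_) annotations on the renamed members (code_map_, code_mspace_, code_end_, ...) are Clang thread-safety annotations: with -Wthread-safety the compiler warns when an annotated field is accessed without holding the named lock. A tiny self-contained illustration of the mechanism (std::mutex stand-in, not ART's Mutex):

    // Compile with: clang++ -Wthread-safety -c example.cc
    #include <cstddef>
    #include <mutex>

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))

    class CAPABILITY("mutex") Lock {
     public:
      void Acquire() __attribute__((acquire_capability())) { mu_.lock(); }
      void Release() __attribute__((release_capability())) { mu_.unlock(); }
     private:
      std::mutex mu_;
    };

    class Cache {
     public:
      void Bump(size_t n) {
        lock_.Acquire();
        code_end_ += n;     // OK: lock_ is held here.
        lock_.Release();
        // code_end_ += n;  // Would trigger a -Wthread-safety warning.
      }
     private:
      Lock lock_;
      size_t code_end_ GUARDED_BY(lock_) = 0;
    };
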
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 92d2d44699..a9ef9a3fa9 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -526,6 +526,8 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (source_oat_file != nullptr) {
bool added_image_space = false;
if (source_oat_file->IsExecutable()) {
+ ScopedTrace app_image_timing("AppImage:Loading");
+
// We need to throw away the image space if we are debuggable but the oat-file source of the
// image is not otherwise we might get classes with inlined methods or other such things.
std::unique_ptr<gc::space::ImageSpace> image_space;
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index f5eb4ea67d..69f7def329 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -495,7 +495,7 @@ void VeriFlowAnalysis::ProcessDexInstruction(const Instruction& instruction) {
case Instruction::DIV_INT_LIT8:
case Instruction::REM_INT_LIT8:
case Instruction::SHL_INT_LIT8:
- case Instruction::SHR_INT_LIT8: {
+ case Instruction::SHR_INT_LIT8:
case Instruction::USHR_INT_LIT8: {
UpdateRegister(instruction.VRegA(), VeriClass::integer_);
break;
@@ -537,7 +537,7 @@ void VeriFlowAnalysis::ProcessDexInstruction(const Instruction& instruction) {
case Instruction::CMPG_FLOAT:
case Instruction::CMPG_DOUBLE:
case Instruction::CMPL_FLOAT:
- case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPL_DOUBLE: {
UpdateRegister(instruction.VRegA(), VeriClass::integer_);
break;
}
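
Both veridex hunks rebalance the same switch: the opening `{` of a shared fall-through block must sit on the last case label of the group, but the old code opened it one label early (SHR_INT_LIT8) and omitted it where it was needed (CMPL_DOUBLE). A minimal illustration of the corrected pattern (generic, not the veridex code):

    #include <cstdio>

    void Classify(int op) {
      switch (op) {
        case 1:
        case 2:
        case 3: {  // Block opens on the last grouped label; 1 and 2 fall through into it.
          std::printf("integer literal op\n");
          break;
        }
        default: {
          std::printf("other op\n");
          break;
        }
      }
    }
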