Diffstat (limited to 'runtime')
 -rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 3
 -rw-r--r--  runtime/arch/stub_test.cc | 10
 -rw-r--r--  runtime/base/array_ref.h | 6
 -rw-r--r--  runtime/base/mutex.cc | 6
 -rw-r--r--  runtime/base/mutex.h | 7
 -rw-r--r--  runtime/class_linker.cc | 18
 -rw-r--r--  runtime/class_linker.h | 1
 -rw-r--r--  runtime/dex/compact_dex_file.h | 6
 -rw-r--r--  runtime/dex/dex_file.h | 7
 -rw-r--r--  runtime/dex/dex_file_layout.h | 2
 -rw-r--r--  runtime/dex/standard_dex_file.h | 4
 -rw-r--r--  runtime/fault_handler.cc | 26
 -rw-r--r--  runtime/gc/allocator/dlmalloc.cc | 1
 -rw-r--r--  runtime/gc/allocator/dlmalloc.h | 1
 -rw-r--r--  runtime/gc/heap-inl.h | 11
 -rw-r--r--  runtime/gc/heap.cc | 15
 -rw-r--r--  runtime/gc/heap_test.cc | 1
 -rw-r--r--  runtime/hidden_api.h | 22
 -rw-r--r--  runtime/instrumentation.cc | 4
 -rw-r--r--  runtime/interpreter/interpreter_common.cc | 7
 -rw-r--r--  runtime/jit/debugger_interface.cc | 62
 -rw-r--r--  runtime/jit/debugger_interface.h | 20
 -rw-r--r--  runtime/jit/jit_code_cache.cc | 4
 -rw-r--r--  runtime/mem_map.cc | 115
 -rw-r--r--  runtime/mem_map.h | 33
 -rw-r--r--  runtime/mem_map_test.cc | 200
 -rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 2
 -rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 3
 -rw-r--r--  runtime/native_stack_dump.cc | 4
 -rw-r--r--  runtime/quicken_info.h | 16
 -rw-r--r--  runtime/runtime.cc | 15
 -rw-r--r--  runtime/runtime.h | 13
 -rw-r--r--  runtime/vdex_file.cc | 3
 33 files changed, 566 insertions(+), 82 deletions(-)
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 63f4f6cb8c..58e0e44813 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -2207,8 +2207,9 @@ ENTRY art_quick_instrumentation_entry
# Deliver exception if we got nullptr as function.
move $t9, $v0 # $t9 holds reference to code
ld $a0, 8($sp) # Restore arg0.
+ dla $v0, art_quick_instrumentation_exit
RESTORE_SAVE_REFS_AND_ARGS_FRAME
- dla $ra, art_quick_instrumentation_exit
+ move $ra, $v0
jic $t9, 0 # call method, returning to art_quick_instrumentation_exit
.Ldeliver_instrumentation_entry_exception:
RESTORE_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index bd51809c22..4be4b12611 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -186,10 +186,9 @@ class StubTest : public CommonRuntimeTest {
"stp x2, x3, [sp, #16]\n\t"
"stp x4, x5, [sp, #32]\n\t"
"stp x6, x7, [sp, #48]\n\t"
- // To be extra defensive, store x20. We do this because some of the stubs might make a
+ // To be extra defensive, store x20,x21. We do this because some of the stubs might make a
// transition into the runtime via the blr instruction below and *not* save x20.
- "str x20, [sp, #64]\n\t"
- // 8 byte buffer
+ "stp x20, x21, [sp, #64]\n\t"
"sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
".cfi_adjust_cfa_offset 16\n\t"
@@ -288,7 +287,7 @@ class StubTest : public CommonRuntimeTest {
"ldp x2, x3, [sp, #16]\n\t"
"ldp x4, x5, [sp, #32]\n\t"
"ldp x6, x7, [sp, #48]\n\t"
- "ldr x20, [sp, #64]\n\t"
+ "ldp x20, x21, [sp, #64]\n\t"
"add sp, sp, #80\n\t" // Free stack space, now sp as on entry
".cfi_adjust_cfa_offset -80\n\t"
@@ -312,8 +311,9 @@ class StubTest : public CommonRuntimeTest {
// -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
// which means we should unclobber one of the callee-saved registers that are unused.
// Here we use x20.
+ // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
- "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
+ "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
diff --git a/runtime/base/array_ref.h b/runtime/base/array_ref.h
index ef86512cf7..2753c81bd5 100644
--- a/runtime/base/array_ref.h
+++ b/runtime/base/array_ref.h
@@ -106,6 +106,12 @@ class ArrayRef {
return *this = ArrayRef(other);
}
+ template <typename U>
+ static ArrayRef Cast(const ArrayRef<U>& src) {
+ return ArrayRef(reinterpret_cast<const T*>(src.data()),
+ src.size() * sizeof(T) / sizeof(U));
+ }
+
// Destructor.
~ArrayRef() = default;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 9f17ad051c..a4c32dd814 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -74,6 +74,7 @@ Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
@@ -1073,6 +1074,7 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ != nullptr);
DCHECK(user_code_suspension_lock_ != nullptr);
DCHECK(dex_lock_ != nullptr);
+ DCHECK(native_debug_interface_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1228,6 +1230,10 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+ UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+ DCHECK(native_debug_interface_lock_ == nullptr);
+ native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d541b79a98..bf27b7f17c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -58,10 +58,12 @@ class Thread;
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
+ kNativeDebugInterfaceLock,
kSwapMutexesLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
+ kSignalHandlingLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
@@ -745,8 +747,11 @@ class Locks {
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+ // Guards the magic global variables used by native tools (e.g. libunwind).
+ static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
// Have an exclusive logging thread.
- static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+ static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
// List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
// avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 32d304073c..800427d6ab 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -73,6 +73,7 @@
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "java_vm_ext.h"
+#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profile_compilation_info.h"
@@ -3432,6 +3433,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
data.weak_root = dex_cache_jweak;
data.dex_file = dex_cache->GetDexFile();
data.class_table = ClassTableForClassLoader(class_loader);
+ RegisterDexFileForNative(self, data.dex_file->Begin());
DCHECK(data.class_table != nullptr);
// Make sure to hold the dex cache live in the class table. This case happens for the boot class
// path dex caches without an image.
@@ -8368,7 +8370,6 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer) {
DexFile::MethodHandleType handle_type =
@@ -8492,19 +8493,20 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
return nullptr;
}
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
int32_t index = 0;
-
if (receiver_count != 0) {
// Insert receiver
method_params->Set(index++, target_method->GetDeclaringClass());
}
-
- DexFileParameterIterator it(*dex_file, target_method->GetPrototype());
+ DexFileParameterIterator it(*target_method->GetDexFile(), target_method->GetPrototype());
+ Handle<mirror::DexCache> target_method_dex_cache(hs.NewHandle(target_method->GetDexCache()));
+ Handle<mirror::ClassLoader> target_method_class_loader(hs.NewHandle(target_method->GetClassLoader()));
while (it.HasNext()) {
+ DCHECK_LT(index, num_params);
const dex::TypeIndex type_idx = it.GetTypeIdx();
- ObjPtr<mirror::Class> klass = ResolveType(type_idx, dex_cache, class_loader);
+ ObjPtr<mirror::Class> klass = ResolveType(type_idx,
+ target_method_dex_cache,
+ target_method_class_loader);
if (nullptr == klass) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -8554,7 +8556,7 @@ ObjPtr<mirror::MethodHandle> ClassLinker::ResolveMethodHandle(Thread* self,
case DexFile::MethodHandleType::kInvokeConstructor:
case DexFile::MethodHandleType::kInvokeDirect:
case DexFile::MethodHandleType::kInvokeInterface:
- return ResolveMethodHandleForMethod(self, dex_file, method_handle, referrer);
+ return ResolveMethodHandleForMethod(self, method_handle, referrer);
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 3e3425f5ac..16fa1ce801 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -979,7 +979,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::MethodHandle* ResolveMethodHandleForMethod(Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/dex/compact_dex_file.h b/runtime/dex/compact_dex_file.h
index 1ecff04cba..31aeb27872 100644
--- a/runtime/dex/compact_dex_file.h
+++ b/runtime/dex/compact_dex_file.h
@@ -245,6 +245,12 @@ class CompactDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+ // TODO This is completely a guess. We really need to do better. b/72402467
+ // We ask for 64 megabytes which should be big enough for any realistic dex file.
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return 64 * MB;
+ }
+
const Header& GetHeader() const {
return down_cast<const Header&>(DexFile::GetHeader());
}
diff --git a/runtime/dex/dex_file.h b/runtime/dex/dex_file.h
index 7e2fe98923..cf8c840b59 100644
--- a/runtime/dex/dex_file.h
+++ b/runtime/dex/dex_file.h
@@ -456,6 +456,13 @@ class DexFile {
// Returns true if the dex file supports default methods.
virtual bool SupportsDefaultMethods() const = 0;
+ // Returns the maximum size in bytes needed to store an equivalent dex file strictly conforming to
+ // the dex file specification. That is the size if we wanted to get rid of all the
+ // quickening/compact-dexing/etc.
+ //
+ // TODO This should really be an exact size! b/72402467
+ virtual size_t GetDequickenedSize() const = 0;
+
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
DCHECK(header_ != nullptr) << GetLocation();
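A hypothetical caller-side sketch of the GetDequickenedSize() virtual declared above; the helper name and buffer are illustrative only and not part of the patch. It simply pre-sizes storage for a fully dequickened, standard-conforming copy of the dex file (64 MB for compact dex, Size() for standard dex, per the overrides in this change).

    #include <cstdint>
    #include <vector>

    #include "dex/dex_file.h"

    // Hypothetical helper: reserve enough space up front for the dequickened output.
    static std::vector<uint8_t> AllocateDequickenedStorage(const art::DexFile& dex_file) {
      std::vector<uint8_t> storage;
      storage.reserve(dex_file.GetDequickenedSize());
      return storage;
    }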
diff --git a/runtime/dex/dex_file_layout.h b/runtime/dex/dex_file_layout.h
index a7b9051f24..793e3b5de7 100644
--- a/runtime/dex/dex_file_layout.h
+++ b/runtime/dex/dex_file_layout.h
@@ -83,7 +83,7 @@ class DexLayoutSection {
}
void CombineSection(uint32_t start_offset, uint32_t end_offset) {
- DCHECK_LT(start_offset, end_offset);
+ DCHECK_LE(start_offset, end_offset);
if (start_offset_ == end_offset_) {
start_offset_ = start_offset;
end_offset_ = end_offset;
diff --git a/runtime/dex/standard_dex_file.h b/runtime/dex/standard_dex_file.h
index 94ef1f2a8e..e0e9f2f11c 100644
--- a/runtime/dex/standard_dex_file.h
+++ b/runtime/dex/standard_dex_file.h
@@ -83,6 +83,10 @@ class StandardDexFile : public DexFile {
uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return Size();
+ }
+
private:
StandardDexFile(const uint8_t* base,
size_t size,
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 9d6e5de803..3015b10103 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -17,6 +17,7 @@
#include "fault_handler.h"
#include <setjmp.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
@@ -183,8 +184,31 @@ bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* co
return false;
}
+static const char* SignalCodeName(int sig, int code) {
+ if (sig != SIGSEGV) {
+ return "UNKNOWN";
+ } else {
+ switch (code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+ default: return "UNKNOWN";
+ }
+ }
+}
+static std::ostream& PrintSignalInfo(std::ostream& os, siginfo_t* info) {
+ os << " si_signo: " << info->si_signo << " (" << strsignal(info->si_signo) << ")\n"
+ << " si_code: " << info->si_code
+ << " (" << SignalCodeName(info->si_signo, info->si_code) << ")";
+ if (info->si_signo == SIGSEGV) {
+ os << "\n" << " si_addr: " << info->si_addr;
+ }
+ return os;
+}
+
bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
- VLOG(signals) << "Handling fault";
+ if (VLOG_IS_ON(signals)) {
+ PrintSignalInfo(VLOG_STREAM(signals) << "Handling fault:" << "\n", info);
+ }
#ifdef TEST_NESTED_SIGNAL
// Simulate a crash in a handler.
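The helpers above are file-static and only reached from FaultManager::HandleFault when VLOG(signals) is enabled. Below is a hypothetical, self-contained illustration of what they consume (it would only compile inside fault_handler.cc itself, since PrintSignalInfo is static); the function name is illustrative only.

    #include <csignal>
    #include <iostream>

    static void IllustrateSignalInfoDump() {
      siginfo_t info = {};
      info.si_signo = SIGSEGV;
      info.si_code = SEGV_MAPERR;  // reported as "SEGV_MAPERR" by SignalCodeName()
      info.si_addr = nullptr;      // the faulting address, printed only for SIGSEGV
      PrintSignalInfo(std::cerr << "Handling fault:" << "\n", &info);
    }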
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 65062208d6..4570e9c1b8 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -37,6 +37,7 @@ static void art_heap_usage_error(const char* function, void* p);
#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#include "../../../external/dlmalloc/malloc.c"
// Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
// of libbase, so undefine it now.
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index 29b96ee96c..b12691ad0e 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -32,6 +32,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wredundant-decls"
+#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#include "../../external/dlmalloc/malloc.h"
#pragma GCC diagnostic pop
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 52dd104ac8..6735961591 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -106,8 +106,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else {
- // bytes allocated that takes bulk thread-local buffer allocations into account.
- size_t bytes_tl_bulk_allocated = 0;
+ // Bytes allocated that takes bulk thread-local buffer allocations into account.
+ size_t bytes_tl_bulk_allocated = 0u;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
@@ -154,12 +154,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
- new_num_bytes_allocated = num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated) +
- bytes_tl_bulk_allocated;
+ size_t num_bytes_allocated_before =
+ num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated);
+ new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
if (bytes_tl_bulk_allocated > 0) {
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
- TraceHeapSize(new_num_bytes_allocated + bytes_tl_bulk_allocated);
+ TraceHeapSize(new_num_bytes_allocated);
}
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
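A small worked example of the accounting fix above, with made-up numbers (a sketch, not part of the patch): new_num_bytes_allocated already includes the freshly obtained TLAB, so the removed TraceHeapSize() argument counted those bytes twice.

    #include <cstddef>

    static size_t ExampleTracedHeapSize() {
      size_t num_bytes_allocated_before = 1000;  // value returned by FetchAndAddRelaxed()
      size_t bytes_tl_bulk_allocated = 4096;     // size of the newly obtained TLAB
      size_t new_num_bytes_allocated =
          num_bytes_allocated_before + bytes_tl_bulk_allocated;  // 5096, TLAB already included
      // The fixed code traces 5096; the removed line traced 5096 + 4096 == 9192,
      // over-reporting the heap size by exactly one TLAB.
      return new_num_bytes_allocated;
    }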
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b1932d1a29..cf5bd4aed2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1890,7 +1890,10 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
count_requested_homogeneous_space_compaction_++;
// Store performed homogeneous space compaction at a new request arrival.
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
- Locks::mutator_lock_->AssertNotHeld(self);
+ // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
+ // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
+ // http://b/71769596
+ // Locks::mutator_lock_->AssertNotHeld(self);
{
ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
@@ -1968,7 +1971,10 @@ void Heap::TransitionCollector(CollectorType collector_type) {
Runtime* const runtime = Runtime::Current();
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
- Locks::mutator_lock_->AssertNotHeld(self);
+ // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
+ // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
+ // http://b/71769596
+ // Locks::mutator_lock_->AssertNotHeld(self);
// Busy wait until we can GC (StartGC can fail if we have a non-zero
// compacting_gc_disable_count_, this should rarely occurs).
for (;;) {
@@ -2511,7 +2517,10 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
}
}
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
- Locks::mutator_lock_->AssertNotHeld(self);
+ // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
+ // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
+ // http://b/71769596
+ // Locks::mutator_lock_->AssertNotHeld(self);
if (self->IsHandlingStackOverflow()) {
// If we are throwing a stack overflow error we probably don't have enough remaining stack
// space to run the GC.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 6d426c2dd0..9d8e5d23eb 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -81,6 +81,7 @@ class ZygoteHeapTest : public CommonRuntimeTest {
void SetUpRuntimeOptions(RuntimeOptions* options) {
CommonRuntimeTest::SetUpRuntimeOptions(options);
options->push_back(std::make_pair("-Xzygote", nullptr));
+ options->push_back(std::make_pair("-Xno-hidden-api-checks", nullptr));
}
};
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index f476028d5f..05e68e66dd 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -27,6 +27,7 @@ namespace hiddenapi {
enum Action {
kAllow,
kAllowButWarn,
+ kAllowButWarnAndToast,
kDeny
};
@@ -35,8 +36,9 @@ inline Action GetMemberAction(uint32_t access_flags) {
case HiddenApiAccessFlags::kWhitelist:
return kAllow;
case HiddenApiAccessFlags::kLightGreylist:
- case HiddenApiAccessFlags::kDarkGreylist:
return kAllowButWarn;
+ case HiddenApiAccessFlags::kDarkGreylist:
+ return kAllowButWarnAndToast;
case HiddenApiAccessFlags::kBlacklist:
return kDeny;
}
@@ -70,8 +72,9 @@ inline bool ShouldBlockAccessToMember(T* member,
std::function<bool(Thread*)> fn_caller_in_boot)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(member != nullptr);
+ Runtime* runtime = Runtime::Current();
- if (!Runtime::Current()->AreHiddenApiChecksEnabled()) {
+ if (!runtime->AreHiddenApiChecksEnabled()) {
// Exit early. Nothing to enforce.
return false;
}
@@ -90,20 +93,23 @@ inline bool ShouldBlockAccessToMember(T* member,
}
// Member is hidden and we are not in the boot class path. Act accordingly.
- if (action == kAllowButWarn) {
+ if (action == kDeny) {
+ return true;
+ } else {
+ DCHECK(action == kAllowButWarn || action == kAllowButWarnAndToast);
+
// Allow access to this member but print a warning. Depending on a runtime
// flag, we might move the member into whitelist and skip the warning the
// next time the member is used.
- Runtime::Current()->SetPendingHiddenApiWarning(true);
- if (Runtime::Current()->ShouldDedupeHiddenApiWarnings()) {
+ if (runtime->ShouldDedupeHiddenApiWarnings()) {
member->SetAccessFlags(HiddenApiAccessFlags::EncodeForRuntime(
member->GetAccessFlags(), HiddenApiAccessFlags::kWhitelist));
}
WarnAboutMemberAccess(member);
+ if (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag()) {
+ Runtime::Current()->SetPendingHiddenApiWarning(true);
+ }
return false;
- } else {
- DCHECK_EQ(action, hiddenapi::kDeny);
- return true;
}
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2101f6837b..24cedb093b 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1384,8 +1384,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
- << " at PC " << reinterpret_cast<void*>(*return_pc);
+ VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
+ << " at PC " << reinterpret_cast<void*>(*return_pc);
}
if (kVerboseInstrumentation) {
LOG(INFO) << "Returning from " << method->PrettyMethod()
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index d53da215a2..12b8c38bbb 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -1183,10 +1183,9 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
}
Handle<mirror::Object> object(hs.NewHandle(result.GetL()));
-
- // Check the result is not null.
if (UNLIKELY(object.IsNull())) {
- ThrowNullPointerException("CallSite == null");
+ // This will typically be for LambdaMetafactory which is not supported.
+ ThrowNullPointerException("Bootstrap method returned null");
return nullptr;
}
@@ -1202,7 +1201,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
// Check the call site target is not null as we're going to invoke it.
Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
if (UNLIKELY(target.IsNull())) {
- ThrowNullPointerException("CallSite target == null");
+ ThrowNullPointerException("Target for call-site is null");
return nullptr;
}
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 0e295e2442..d60f70a54f 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -68,11 +68,65 @@ extern "C" {
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
+
+ // Incremented whenever __jit_debug_descriptor is modified.
+ uint32_t __jit_debug_descriptor_timestamp = 0;
+
+ struct DEXFileEntry {
+ DEXFileEntry* next_;
+ DEXFileEntry* prev_;
+ const void* dexfile_;
+ };
+
+ DEXFileEntry* __art_debug_dexfiles = nullptr;
+
+ // Incremented whenever __art_debug_dexfiles is modified.
+ uint32_t __art_debug_dexfiles_timestamp = 0;
}
-Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+static size_t g_jit_debug_mem_usage
+ GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
+
+static std::unordered_map<const void*, DEXFileEntry*> g_dexfile_entries
+ GUARDED_BY(Locks::native_debug_interface_lock_);
+
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ if (g_dexfile_entries.count(dexfile_header) == 0) {
+ DEXFileEntry* entry = new DEXFileEntry();
+ CHECK(entry != nullptr);
+ entry->dexfile_ = dexfile_header;
+ entry->prev_ = nullptr;
+ entry->next_ = __art_debug_dexfiles;
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry;
+ }
+ __art_debug_dexfiles = entry;
+ __art_debug_dexfiles_timestamp++;
+ g_dexfile_entries.emplace(dexfile_header, entry);
+ }
+}
-static size_t g_jit_debug_mem_usage = 0;
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ auto it = g_dexfile_entries.find(dexfile_header);
+ // We register dex files in the class linker and free them in DexFile_closeDexFile,
+ // but might be cases where we load the dex file without using it in the class linker.
+ if (it != g_dexfile_entries.end()) {
+ DEXFileEntry* entry = it->second;
+ if (entry->prev_ != nullptr) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __art_debug_dexfiles = entry->next_;
+ }
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry->prev_;
+ }
+ __art_debug_dexfiles_timestamp++;
+ delete entry;
+ g_dexfile_entries.erase(it);
+ }
+}
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
DCHECK_NE(symfile.size(), 0u);
@@ -96,6 +150,7 @@ JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
__jit_debug_descriptor.first_entry_ = entry;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
return entry;
}
@@ -114,6 +169,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
g_jit_debug_mem_usage -= sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
delete[] entry->symfile_addr_;
delete entry;
@@ -121,7 +177,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
// Mapping from code address to entry. Used to manage life-time of the entries.
static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries
- GUARDED_BY(g_jit_debug_mutex);
+ GUARDED_BY(Locks::native_debug_interface_lock_);
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
DCHECK(entry != nullptr);
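A minimal, hypothetical sketch of a consumer of the list maintained above. Real tools (e.g. libunwind or a debugger) read these symbols out-of-process; the struct below merely mirrors the DEXFileEntry layout from the extern "C" block in this file, and the function name is illustrative only.

    #include <cstdint>
    #include <cstdio>

    struct DEXFileEntry {  // must stay in sync with debugger_interface.cc
      DEXFileEntry* next_;
      DEXFileEntry* prev_;
      const void* dexfile_;
    };

    extern "C" DEXFileEntry* __art_debug_dexfiles;
    extern "C" uint32_t __art_debug_dexfiles_timestamp;

    void DumpRegisteredDexFiles() {
      std::printf("dex file list generation: %u\n", __art_debug_dexfiles_timestamp);
      for (DEXFileEntry* entry = __art_debug_dexfiles; entry != nullptr; entry = entry->next_) {
        std::printf("dex data starts at %p\n", entry->dexfile_);
      }
    }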
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 9aec988f67..8c4bb3fdf4 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -30,36 +30,42 @@ extern "C" {
struct JITCodeEntry;
}
-extern Mutex g_jit_debug_mutex;
+// Notify native tools (e.g. libunwind) that DEX file has been opened.
+// The pointer needs to point the start of the dex data (not the DexFile* object).
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
+
+// Notify native tools (e.g. libunwind) that DEX file has been closed.
+// The pointer needs to point the start of the dex data (not the DexFile* object).
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
// Notify native debugger about new JITed code by passing in-memory ELF.
// It takes ownership of the in-memory ELF file.
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Notify native debugger that JITed code has been removed.
// It also releases the associated in-memory ELF file.
void DeleteJITCodeEntry(JITCodeEntry* entry)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It registers given code address as being described by the given entry.
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It de-registers given code address as being described by the given entry.
void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Find the registered JITCodeEntry for given code address.
// There can be only one entry per address at any given time.
JITCodeEntry* GetJITCodeEntry(uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Returns approximate memory used by all JITCodeEntries.
size_t GetJITCodeEntryMemUsage()
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
} // namespace art
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7f0447732e..c8c13cb20f 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -549,7 +549,7 @@ void JitCodeCache::FreeCode(const void* code_ptr) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = GetJITCodeEntry(reinterpret_cast<uintptr_t>(code_ptr));
if (entry != nullptr) {
DecrementJITCodeEntryRefcount(entry, reinterpret_cast<uintptr_t>(code_ptr));
@@ -1825,7 +1825,7 @@ void JitCodeCache::FreeData(uint8_t* data) {
void JitCodeCache::Dump(std::ostream& os) {
MutexLock mu(Thread::Current(), lock_);
- MutexLock mu2(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
<< "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
<< "Current JIT mini-debug-info size: " << PrettySize(GetJITCodeEntryMemUsage()) << "\n"
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 55e9c390cd..26acef06d6 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -396,6 +396,91 @@ MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
+template<typename A, typename B>
+static ptrdiff_t PointerDiff(A* a, B* b) {
+ return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
+}
+
+bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+#if !HAVE_MREMAP_SYSCALL
+ UNUSED(source_ptr);
+ *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
+ return false;
+#else // !HAVE_MREMAP_SYSCALL
+ CHECK(source_ptr != nullptr);
+ CHECK(*source_ptr != nullptr);
+ if (!MemMap::kCanReplaceMapping) {
+ *error = "Unable to perform atomic replace due to runtime environment!";
+ return false;
+ }
+ MemMap* source = *source_ptr;
+ // neither can be reuse.
+ if (source->reuse_ || reuse_) {
+ *error = "One or both mappings is not a real mmap!";
+ return false;
+ }
+ // TODO Support redzones.
+ if (source->redzone_size_ != 0 || redzone_size_ != 0) {
+ *error = "source and dest have different redzone sizes";
+ return false;
+ }
+ // Make sure they have the same offset from the actual mmap'd address
+ if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
+ *error =
+ "source starts at a different offset from the mmap. Cannot atomically replace mappings";
+ return false;
+ }
+ // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (it's like
+ // memcpy but the check is explicit and actually done).
+ if (source->BaseBegin() > BaseBegin() &&
+ reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
+ reinterpret_cast<uint8_t*>(source->BaseBegin())) {
+ *error = "destination memory pages overlap with source memory pages";
+ return false;
+ }
+ // Change the protection to match the new location.
+ int old_prot = source->GetProtect();
+ if (!source->Protect(GetProtect())) {
+ *error = "Could not change protections for source to those required for dest.";
+ return false;
+ }
+
+ // Do the mremap.
+ void* res = mremap(/*old_address*/source->BaseBegin(),
+ /*old_size*/source->BaseSize(),
+ /*new_size*/source->BaseSize(),
+ /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
+ /*new_address*/BaseBegin());
+ if (res == MAP_FAILED) {
+ int saved_errno = errno;
+ // Wasn't able to move mapping. Change the protection of source back to the original one and
+ // return.
+ source->Protect(old_prot);
+ *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
+ return false;
+ }
+ CHECK(res == BaseBegin());
+
+ // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will unmap
+ // them later.
+ size_t new_base_size = std::max(source->base_size_, base_size_);
+
+ // Delete the old source, don't unmap it though (set reuse) since it is already gone.
+ *source_ptr = nullptr;
+ size_t source_size = source->size_;
+ source->already_unmapped_ = true;
+ delete source;
+ source = nullptr;
+
+ size_ = source_size;
+ base_size_ = new_base_size;
+ // Reduce base_size if needed (this will unmap the extra pages).
+ SetSize(source_size);
+
+ return true;
+#endif // !HAVE_MREMAP_SYSCALL
+}
+
MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
size_t byte_count,
int prot,
@@ -499,9 +584,11 @@ MemMap::~MemMap() {
if (!reuse_) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
- int result = munmap(base_begin_, base_size_);
- if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ if (!already_unmapped_) {
+ int result = munmap(base_begin_, base_size_);
+ if (result == -1) {
+ PLOG(FATAL) << "munmap failed";
+ }
}
}
@@ -523,7 +610,7 @@ MemMap::~MemMap() {
MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
size_t base_size, int prot, bool reuse, size_t redzone_size)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
- prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
+ prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
if (size_ == 0) {
CHECK(begin_ == nullptr);
CHECK(base_begin_ == nullptr);
@@ -794,19 +881,21 @@ void MemMap::Shutdown() {
}
void MemMap::SetSize(size_t new_size) {
- if (new_size == base_size_) {
+ CHECK_LE(new_size, size_);
+ size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
+ kPageSize);
+ if (new_base_size == base_size_) {
+ size_ = new_size;
return;
}
- CHECK_ALIGNED(new_size, kPageSize);
- CHECK_EQ(base_size_, size_) << "Unsupported";
- CHECK_LE(new_size, base_size_);
+ CHECK_LT(new_base_size, base_size_);
MEMORY_TOOL_MAKE_UNDEFINED(
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
- new_size),
- base_size_ - new_size);
- CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
- base_size_ - new_size), 0) << new_size << " " << base_size_;
- base_size_ = new_size;
+ new_base_size),
+ base_size_ - new_base_size);
+ CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+ base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+ base_size_ = new_base_size;
size_ = new_size;
}
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 5603963eac..0ecb414614 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -39,8 +39,12 @@ namespace art {
#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
+#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
+// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
+// present.
+#define HAVE_MREMAP_SYSCALL false
#endif
// Used to keep track of mmap segments.
@@ -52,6 +56,32 @@ static constexpr bool kMadviseZeroes = false;
// Otherwise, calls might see uninitialized values.
class MemMap {
public:
+ static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
+
+ // Replace the data in this memmmap with the data in the memmap pointed to by source. The caller
+ // relinquishes ownership of the source mmap.
+ //
+ // For the call to be successful:
+ // * The range [dest->Begin, dest->Begin() + source->Size()] must not overlap with
+ // [source->Begin(), source->End()].
+ // * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
+ // with them.
+ // * kCanReplaceMapping must be true.
+ // * Neither source nor dest may use manual redzones.
+ // * Both source and dest must have the same offset from the nearest page boundary.
+ // * mremap must succeed when called on the mappings.
+ //
+ // If this call succeeds it will return true and:
+ // * Deallocate *source
+ // * Sets *source to nullptr
+ // * The protection of this will remain the same.
+ // * The size of this will be the size of the source
+ // * The data in this will be the data from source.
+ //
+ // If this call fails it will return false and make no changes to *source or this. The ownership
+ // of the source mmap is returned to the caller.
+ bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
@@ -246,6 +276,9 @@ class MemMap {
// unmapping.
const bool reuse_;
+ // When already_unmapped_ is true the destructor will not call munmap.
+ bool already_unmapped_;
+
const size_t redzone_size_;
#if USE_ART_LOW_4G_ALLOCATOR
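A condensed usage sketch of the ReplaceWith() contract documented above; the mem_map_test.cc changes below exercise the same flow in full. The map names and helper are illustrative, and error handling is trimmed.

    #include <sys/mman.h>

    #include <memory>
    #include <string>

    #include "globals.h"  // for art::kPageSize
    #include "mem_map.h"

    static void ReplaceWithSketch() {
      std::string error_msg;
      std::unique_ptr<art::MemMap> dest(art::MemMap::MapAnonymous(
          "replace-dest", nullptr, art::kPageSize, PROT_READ,
          /*low_4gb*/ false, /*reuse*/ false, &error_msg));
      art::MemMap* source = art::MemMap::MapAnonymous(
          "replace-source", nullptr, art::kPageSize, PROT_READ | PROT_WRITE,
          /*low_4gb*/ false, /*reuse*/ false, &error_msg);
      if (dest->ReplaceWith(&source, &error_msg)) {
        // Success: source has been deallocated and set to nullptr; dest now has the size and
        // data of the old source, while keeping its own protection.
      } else {
        // Failure: neither mapping was modified and the caller keeps ownership of source.
        delete source;
      }
    }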
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a4ebb16d09..3adbf18a7a 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -19,6 +19,7 @@
#include <sys/mman.h>
#include <memory>
+#include <random>
#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"
@@ -36,6 +37,25 @@ class MemMapTest : public CommonRuntimeTest {
return mem_map->base_size_;
}
+ static bool IsAddressMapped(void* addr) {
+ bool res = msync(addr, 1, MS_SYNC) == 0;
+ if (!res && errno != ENOMEM) {
+ PLOG(FATAL) << "Unexpected error occurred on msync";
+ }
+ return res;
+ }
+
+ static std::vector<uint8_t> RandomData(size_t size) {
+ std::random_device rd;
+ std::uniform_int_distribution<uint8_t> dist;
+ std::vector<uint8_t> res;
+ res.resize(size);
+ for (size_t i = 0; i < size; i++) {
+ res[i] = dist(rd);
+ }
+ return res;
+ }
+
static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
// Find a valid map address and unmap it before returning.
std::string error_msg;
@@ -143,6 +163,186 @@ TEST_F(MemMapTest, Start) {
}
#endif
+// We need mremap to be able to test ReplaceMapping at all
+#if HAVE_MREMAP_SYSCALL
+TEST_F(MemMapTest, ReplaceMapping_SameSize) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ void* source_addr = source->Begin();
+ void* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 5 * kPageSize, // Need to make it larger
+ // initially so we know
+ // there won't be mappings
+ // in the way we we move
+ // source.
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ 3 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source with random data.
+ std::vector<uint8_t> data = RandomData(3 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ // Make the dest smaller so that we know we'll have space.
+ dest->SetSize(kPageSize);
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), kPageSize);
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(
+ MemMap::MapAnonymous(
+ "MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
+ // the way we we move source.
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ // Resize down to 1 page so we can remap the rest.
+ dest->SetSize(kPageSize);
+ // Create source from the last 2 pages
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ dest->Begin() + kPageSize,
+ 2 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ MemMap* orig_source = source;
+ ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source and dest with random data.
+ std::vector<uint8_t> data = RandomData(2 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+ std::vector<uint8_t> dest_data = RandomData(kPageSize);
+ memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_TRUE(source == orig_source);
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(source->Size(), data.size());
+ ASSERT_EQ(dest->Size(), dest_data.size());
+
+ ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
+ ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
+
+ delete source;
+}
+#endif // HAVE_MREMAP_SYSCALL
+
TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 0f430874cf..6ea9a7ad62 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -30,6 +30,7 @@
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "jit/debugger_interface.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -331,6 +332,7 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
int32_t i = kDexFileIndexStart; // Oat file is at index 0.
for (const DexFile* dex_file : dex_files) {
if (dex_file != nullptr) {
+ DeregisterDexFileForNative(soa.Self(), dex_file->Begin());
// Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
// are calls to DexFile.close while the ART DexFile is still in use.
if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e58fd9dac9..648a464b6e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -350,6 +350,9 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
<< "SystemServer should be forked with DISABLE_HIDDEN_API_CHECKS";
Runtime::Current()->SetHiddenApiChecksEnabled(do_hidden_api_checks);
+ // Clear the hidden API warning flag, in case it was set.
+ Runtime::Current()->SetPendingHiddenApiWarning(false);
+
if (instruction_set != nullptr && !is_system_server) {
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 2fef70b2ae..099d77edaa 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -393,6 +393,10 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
std::vector<std::string> kernel_stack_frames;
Split(kernel_stack, '\n', &kernel_stack_frames);
+ if (kernel_stack_frames.empty()) {
+ os << prefix << "(" << kernel_stack_filename << " is empty)\n";
+ return;
+ }
// We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff",
// which looking at the source appears to be the kernel's way of saying "that's all, folks!".
kernel_stack_frames.pop_back();
diff --git a/runtime/quicken_info.h b/runtime/quicken_info.h
index 52eca61c06..32f70054ba 100644
--- a/runtime/quicken_info.h
+++ b/runtime/quicken_info.h
@@ -50,16 +50,17 @@ class QuickenInfoOffsetTableAccessor {
return index % kElementsPerIndex == 0;
}
- explicit QuickenInfoOffsetTableAccessor(const uint8_t* data, uint32_t max_index)
- : table_(reinterpret_cast<const uint32_t*>(data)),
- num_indices_(RoundUp(max_index, kElementsPerIndex) / kElementsPerIndex) {}
+ QuickenInfoOffsetTableAccessor(const ArrayRef<const uint8_t>& data, uint32_t max_index)
+ : table_(ArrayRef<const TableType>::Cast(data).SubArray(
+ 0,
+ RoundUp(max_index, kElementsPerIndex) / kElementsPerIndex)) {}
size_t SizeInBytes() const {
return NumIndices() * sizeof(table_[0]);
}
uint32_t NumIndices() const {
- return num_indices_;
+ return table_.size();
}
// Returns the offset for the index at or before the desired index. If the offset is for an index
@@ -69,17 +70,12 @@ class QuickenInfoOffsetTableAccessor {
return table_[index / kElementsPerIndex];
}
- const uint8_t* DataEnd() const {
- return reinterpret_cast<const uint8_t*>(table_ + NumIndices());
- }
-
static uint32_t Alignment() {
return alignof(TableType);
}
private:
- const TableType* table_;
- uint32_t num_indices_;
+ const ArrayRef<const TableType> table_;
};
// QuickenInfoTable is a table of 16 bit dex indices. There is one slot for every instruction that
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 5a3a6f0cf4..3afd320f05 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -268,6 +268,7 @@ Runtime::Runtime()
do_hidden_api_checks_(true),
pending_hidden_api_warning_(false),
dedupe_hidden_api_warnings_(true),
+ always_set_hidden_api_warning_flag_(false),
dump_native_stack_on_sig_quit_(true),
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
@@ -1171,11 +1172,15 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
target_sdk_version_ = runtime_options.GetOrDefault(Opt::TargetSdkVersion);
- // Check whether to enforce hidden API access checks. Zygote needs to be exempt
- // but checks may be enabled for forked processes (see dalvik_system_ZygoteHooks).
- if (is_zygote_ || runtime_options.Exists(Opt::NoHiddenApiChecks)) {
- do_hidden_api_checks_ = false;
- }
+ // Check whether to enforce hidden API access checks. The checks are disabled
+ // by default and we only enable them if:
+ // (a) runtime was started with a flag that enables the checks, or
+ // (b) Zygote forked a new process that is not exempt (see ZygoteHooks).
+ // TODO(dbrazdil): Turn the NoHiddenApiChecks negative flag into a positive one
+ // to clean up this logic.
+ do_hidden_api_checks_ = IsAotCompiler() && !runtime_options.Exists(Opt::NoHiddenApiChecks);
+ DCHECK(!is_zygote_ || !do_hidden_api_checks_)
+ << "Zygote should not be started with hidden API checks";
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 184e4e5b91..7ab9be5c5b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -544,6 +544,14 @@ class Runtime {
return dedupe_hidden_api_warnings_;
}
+ void AlwaysSetHiddenApiWarningFlag() {
+ always_set_hidden_api_warning_flag_ = true;
+ }
+
+ bool ShouldAlwaysSetHiddenApiWarningFlag() const {
+ return always_set_hidden_api_warning_flag_;
+ }
+
bool IsDexFileFallbackEnabled() const {
return allow_dex_file_fallback_;
}
@@ -992,6 +1000,11 @@ class Runtime {
// This is only used for testing.
bool dedupe_hidden_api_warnings_;
+ // Hidden API can print warnings into the log and/or set a flag read by the
+ // framework to show a UI warning. If this flag is set, always set the flag
+ // when there is a warning. This is only used for testing.
+ bool always_set_hidden_api_warning_flag_;
+
// Whether threads should dump their native stack on SIGQUIT.
bool dump_native_stack_on_sig_quit_;
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 7428e98dbb..0829c5422e 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -228,8 +228,7 @@ QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(
const ArrayRef<const uint8_t>& quickening_info) const {
// The offset a is in preheader right before the dex file.
const uint32_t offset = GetQuickeningInfoTableOffset(source_dex_begin);
- const uint8_t* data_ptr = quickening_info.data() + offset;
- return QuickenInfoOffsetTableAccessor(data_ptr, num_method_ids);
+ return QuickenInfoOffsetTableAccessor(quickening_info.SubArray(offset), num_method_ids);
}
QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(