summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/optimizing/instruction_builder.cc15
-rw-r--r--libartbase/Android.bp5
-rw-r--r--libdexfile/Android.bp4
-rw-r--r--libprofile/Android.bp5
-rw-r--r--runtime/base/mutex.cc47
-rw-r--r--runtime/base/mutex.h4
-rw-r--r--runtime/gc/collector/concurrent_copying.cc64
-rw-r--r--runtime/gc/heap.cc4
-rw-r--r--runtime/gc/heap.h3
-rw-r--r--runtime/jit/jit.cc48
-rw-r--r--runtime/jit/jit.h39
-rw-r--r--runtime/jit/jit_code_cache.cc43
-rw-r--r--runtime/jit/jit_code_cache.h9
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc10
-rw-r--r--runtime/runtime.cc75
-rw-r--r--runtime/runtime.h4
-rw-r--r--test/174-escaping-instance-of-bad-class/expected.txt2
-rw-r--r--test/174-escaping-instance-of-bad-class/src/Main.java21
-rw-r--r--test/551-checker-clinit/src/Main.java22
-rw-r--r--tools/veridex/Android.bp1
20 files changed, 249 insertions, 176 deletions
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index bd94789144..63b2705b43 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1304,15 +1304,18 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
}
}
- // We can avoid the class initialization check for `cls` in static methods in the
- // very same class. Instance methods of the same class can run on an escaped instance
+ // We can avoid the class initialization check for `cls` in static methods and constructors
+ // in the very same class; invoking a static method involves a class initialization check
+ // and so does the instance allocation that must be executed before invoking a constructor.
+ // Other instance methods of the same class can run on an escaped instance
// of an erroneous class. Even a superclass may need to be checked as the subclass
// can be completely initialized while the superclass is initializing and the subclass
// remains initialized when the superclass initializer throws afterwards. b/62478025
// Note: The HClinitCheck+HInvokeStaticOrDirect merging can still apply.
ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
- bool is_static = (dex_compilation_unit_->GetAccessFlags() & kAccStatic) != 0u;
- if (is_static && outermost_cls == cls.Get()) {
+ bool is_outer_static_or_constructor =
+ (outer_compilation_unit_->GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u;
+ if (is_outer_static_or_constructor && outermost_cls == cls.Get()) {
return true;
}
// Remember if the compiled class is a subclass of `cls`. By the time this is used
@@ -1325,7 +1328,9 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
// TODO: We should walk over the entire inlined method chain, but we don't pass that
// information to the builder.
ObjPtr<mirror::Class> innermost_cls = ResolveCompilingClass(soa);
- if (is_static && innermost_cls == cls.Get()) {
+ bool is_inner_static_or_constructor =
+ (dex_compilation_unit_->GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u;
+ if (is_inner_static_or_constructor && innermost_cls == cls.Get()) {
return true;
}
is_subclass = is_subclass || IsSubClass(innermost_cls, cls.Get());
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 0c6b1a2ef6..5010f683d7 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -54,10 +54,6 @@ cc_defaults {
"libziparchive",
"libz",
],
- shared_libs: [
- // For android::FileMap used by libziparchive.
- "libutils",
- ],
},
host: {
shared_libs: [
@@ -88,7 +84,6 @@ cc_defaults {
"libbase",
"libcutils",
"liblog",
- "libutils",
"libz",
"libziparchive",
],
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 49b1278fcb..7f25f02dea 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -43,9 +43,6 @@ cc_defaults {
"libziparchive",
"libz",
],
- shared_libs: [
- "libutils",
- ],
},
host: {
shared_libs: [
@@ -78,7 +75,6 @@ cc_defaults {
"libbase",
"libcutils",
"liblog",
- "libutils",
"libz",
"libziparchive",
],
diff --git a/libprofile/Android.bp b/libprofile/Android.bp
index edd9fa858b..a8d8b2f0f5 100644
--- a/libprofile/Android.bp
+++ b/libprofile/Android.bp
@@ -28,10 +28,6 @@ cc_defaults {
"libziparchive",
"libz",
],
- shared_libs: [
- // For android::FileMap used by libziparchive.
- "libutils",
- ],
},
host: {
shared_libs: [
@@ -61,7 +57,6 @@ cc_defaults {
static_libs: [
"libbase",
"libcutils",
- "libutils",
"libz",
"libziparchive",
],
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 9952283272..0adcb37a59 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -895,40 +895,37 @@ void ConditionVariable::Broadcast(Thread* self) {
// guard_.AssertExclusiveHeld(self);
DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
- if (num_waiters_ > 0) {
- sequence_++; // Indicate the broadcast occurred.
- bool done = false;
- do {
- int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
- // Requeue waiters onto mutex. The waiter holds the contender count on the mutex high ensuring
- // mutex unlocks will awaken the requeued waiter thread.
- done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
- reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
- guard_.state_.Address(), cur_sequence) != -1;
- if (!done) {
- if (errno != EAGAIN && errno != EINTR) {
- PLOG(FATAL) << "futex cmp requeue failed for " << name_;
- }
- }
- } while (!done);
- }
+ RequeueWaiters(std::numeric_limits<int32_t>::max());
#else
CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}
-void ConditionVariable::Signal(Thread* self) {
- DCHECK(self == nullptr || self == Thread::Current());
- guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
+void ConditionVariable::RequeueWaiters(int32_t count) {
if (num_waiters_ > 0) {
sequence_++; // Indicate a signal occurred.
- // Futex wake 1 waiter who will then come and in contend on mutex. It'd be nice to requeue them
- // to avoid this, however, requeueing can only move all waiters.
- int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
- // Check something was woken or else we changed sequence_ before they had chance to wait.
- CHECK((num_woken == 0) || (num_woken == 1));
+ // Move waiters from the condition variable's futex to the guard's futex,
+ // so that they will be woken up when the mutex is released.
+ bool done = futex(sequence_.Address(),
+ FUTEX_REQUEUE,
+ /* Threads to wake */ 0,
+ /* Threads to requeue*/ reinterpret_cast<const timespec*>(count),
+ guard_.state_.Address(),
+ 0) != -1;
+ if (!done && errno != EAGAIN && errno != EINTR) {
+ PLOG(FATAL) << "futex requeue failed for " << name_;
+ }
}
+}
+#endif
+
+
+void ConditionVariable::Signal(Thread* self) {
+ DCHECK(self == nullptr || self == Thread::Current());
+ guard_.AssertExclusiveHeld(self);
+#if ART_USE_FUTEXES
+ RequeueWaiters(1);
#else
CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d127d0f01f..7711be9c90 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -480,7 +480,9 @@ class ConditionVariable {
ConditionVariable(const char* name, Mutex& mutex);
~ConditionVariable();
+ // Requires the mutex to be held.
void Broadcast(Thread* self);
+ // Requires the mutex to be held.
void Signal(Thread* self);
// TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
// pointer copy, thereby defeating annotalysis.
@@ -505,6 +507,8 @@ class ConditionVariable {
// Number of threads that have come into to wait, not the length of the waiters on the futex as
// waiters may have been requeued onto guard_. Guarded by guard_.
volatile int32_t num_waiters_;
+
+ void RequeueWaiters(int32_t count);
#else
pthread_cond_t cond_;
#endif
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index be1014c0f0..e0bbf43622 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1650,36 +1650,15 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc
inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
- auto find_space_from_ref = [this] (mirror::Object* ref)
- REQUIRES_SHARED(Locks::mutator_lock_) -> space::Space* {
- for (const auto& space : heap_->GetContinuousSpaces()) {
- if (space->Contains(ref)) {
- return space;
- }
- }
- for (const auto& space : heap_->GetDiscontinuousSpaces()) {
- if (space->Contains(ref)) {
- return space;
- }
- }
- return nullptr;
- };
- if (kUseBakerReadBarrier &&
- kIsDebugBuild &&
- to_ref->GetReadBarrierState() != ReadBarrier::GrayState()) {
- space::Space* space = find_space_from_ref(to_ref);
- LOG(FATAL_WITHOUT_ABORT) << " " << to_ref
- << " " << to_ref->GetReadBarrierState()
- << " is_marked=" << IsMarked(to_ref)
- << " type=" << to_ref->PrettyTypeOf()
- << " is_young_gc=" << young_gen_;
- if (space == region_space_) {
- LOG(FATAL) << " region_type=" << rtype;
- } else if (space != nullptr) {
- LOG(FATAL) << " space=" << space->GetName();
- } else {
- LOG(FATAL) << "no space";
- }
+ if (kUseBakerReadBarrier) {
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " to_ref=" << to_ref
+ << " rb_state=" << to_ref->GetReadBarrierState()
+ << " is_marked=" << IsMarked(to_ref)
+ << " type=" << to_ref->PrettyTypeOf()
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+ << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+ << " region_type=" << rtype;
}
bool add_to_live_bytes = false;
// Invariant: There should be no object from a newly-allocated
@@ -1715,22 +1694,15 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Scan<false>(to_ref);
}
}
- if (kUseBakerReadBarrier &&
- kIsDebugBuild &&
- to_ref->GetReadBarrierState() != ReadBarrier::GrayState()) {
- space::Space* space = find_space_from_ref(to_ref);
- LOG(FATAL_WITHOUT_ABORT) << " " << to_ref
- << " " << to_ref->GetReadBarrierState()
- << " is_marked=" << IsMarked(to_ref)
- << " type=" << to_ref->PrettyTypeOf()
- << " is_young_gc=" << young_gen_;
- if (space == region_space_) {
- LOG(FATAL) << " region_type=" << rtype;
- } else if (space != nullptr) {
- LOG(FATAL) << " space=" << space->GetName();
- } else {
- LOG(FATAL) << "no space";
- }
+ if (kUseBakerReadBarrier) {
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " to_ref=" << to_ref
+ << " rb_state=" << to_ref->GetReadBarrierState()
+ << " is_marked=" << IsMarked(to_ref)
+ << " type=" << to_ref->PrettyTypeOf()
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+ << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+ << " region_type=" << rtype;
}
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
mirror::Object* referent = nullptr;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 467b22c509..f0f81fc67e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1271,6 +1271,10 @@ space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
return nullptr;
}
+std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
+ space::Space* space = FindSpaceFromAddress(addr);
+ return (space != nullptr) ? space->GetName() : "no space";
+}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
// If we're in a stack overflow, do not create a new exception. It would require running the
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6c4b9367d1..c3ee5267b5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -554,6 +554,9 @@ class Heap {
space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string DumpSpaceNameFromAddress(const void* addr) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
// Do a pending collector transition.
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ef893eec30..e876a1bc89 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -168,34 +168,26 @@ void Jit::AddTimingLogger(const TimingLogger& logger) {
cumulative_timings_.AddLogger(logger);
}
-Jit::Jit(JitOptions* options) : options_(options),
- cumulative_timings_("JIT timings"),
- memory_use_("Memory used for compilation", 16),
- lock_("JIT memory use lock") {}
-
-Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
- DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
- std::unique_ptr<Jit> jit(new Jit(options));
- if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
- return nullptr;
- }
- bool code_cache_only_for_profile_data = !options->UseJitCompilation();
- jit->code_cache_.reset(JitCodeCache::Create(
- options->GetCodeCacheInitialCapacity(),
- options->GetCodeCacheMaxCapacity(),
- jit->generate_debug_info_,
- code_cache_only_for_profile_data,
- error_msg));
- if (jit->GetCodeCache() == nullptr) {
+Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
+ : code_cache_(code_cache),
+ options_(options),
+ cumulative_timings_("JIT timings"),
+ memory_use_("Memory used for compilation", 16),
+ lock_("JIT memory use lock") {}
+
+Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
+ CHECK(jit_compiler_handle_ != nullptr) << "Jit::LoadLibrary() needs to be called first";
+ std::unique_ptr<Jit> jit(new Jit(code_cache, options));
+ if (jit_compiler_handle_ == nullptr) {
return nullptr;
}
+
VLOG(jit) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
<< ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
<< ", compile_threshold=" << options->GetCompileThreshold()
<< ", profile_saver_options=" << options->GetProfileSaverOptions();
-
jit->CreateThreadPool();
// Notify native debugger about the classes already loaded before the creation of the jit.
@@ -203,7 +195,7 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
return jit.release();
}
-bool Jit::LoadCompilerLibrary(std::string* error_msg) {
+bool Jit::BindCompilerMethods(std::string* error_msg) {
jit_library_handle_ = dlopen(
kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
if (jit_library_handle_ == nullptr) {
@@ -243,7 +235,7 @@ bool Jit::LoadCompilerLibrary(std::string* error_msg) {
}
bool Jit::LoadCompiler(std::string* error_msg) {
- if (jit_library_handle_ == nullptr && !LoadCompilerLibrary(error_msg)) {
+ if (jit_library_handle_ == nullptr && !BindCompilerMethods(error_msg)) {
return false;
}
bool will_generate_debug_symbols = false;
@@ -308,6 +300,11 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
return success;
}
+bool Jit::ShouldGenerateDebugInfo() {
+ CHECK(CompilerIsLoaded());
+ return generate_debug_info_;
+}
+
void Jit::CreateThreadPool() {
// There is a DCHECK in the 'AddSamples' method to ensure the tread pool
// is not null when we instrument.
@@ -347,10 +344,7 @@ void Jit::DeleteThreadPool() {
void Jit::StartProfileSaver(const std::string& filename,
const std::vector<std::string>& code_paths) {
if (options_->GetSaveProfilingInfo()) {
- ProfileSaver::Start(options_->GetProfileSaverOptions(),
- filename,
- code_cache_.get(),
- code_paths);
+ ProfileSaver::Start(options_->GetProfileSaverOptions(), filename, code_cache_, code_paths);
}
}
@@ -391,7 +385,7 @@ void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
return;
}
jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit->generate_debug_info_) {
+ if (generate_debug_info_) {
DCHECK(jit->jit_types_loaded_ != nullptr);
jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index edaf348cc4..b0ea19b303 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -100,6 +100,10 @@ class JitOptions {
return use_jit_compilation_;
}
+ bool RWXMemoryAllowed() const {
+ return rwx_memory_allowed_;
+ }
+
void SetUseJitCompilation(bool b) {
use_jit_compilation_ = b;
}
@@ -121,6 +125,10 @@ class JitOptions {
compile_threshold_ = 0;
}
+ void SetRWXMemoryAllowed(bool rwx_allowed) {
+ rwx_memory_allowed_ = rwx_allowed;
+ }
+
private:
bool use_jit_compilation_;
size_t code_cache_initial_capacity_;
@@ -132,6 +140,7 @@ class JitOptions {
uint16_t invoke_transition_weight_;
bool dump_info_on_shutdown_;
int thread_pool_pthread_priority_;
+ bool rwx_memory_allowed_;
ProfileSaverOptions profile_saver_options_;
JitOptions()
@@ -144,7 +153,8 @@ class JitOptions {
priority_thread_weight_(0),
invoke_transition_weight_(0),
dump_info_on_shutdown_(false),
- thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority) {}
+ thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority),
+ rwx_memory_allowed_(true) {}
DISALLOW_COPY_AND_ASSIGN(JitOptions);
};
@@ -157,20 +167,24 @@ class Jit {
static constexpr int16_t kJitRecheckOSRThreshold = 100;
virtual ~Jit();
- static Jit* Create(JitOptions* options, std::string* error_msg);
+
+ // Create JIT itself.
+ static Jit* Create(JitCodeCache* code_cache, JitOptions* options);
+
bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CreateThreadPool();
const JitCodeCache* GetCodeCache() const {
- return code_cache_.get();
+ return code_cache_;
}
JitCodeCache* GetCodeCache() {
- return code_cache_.get();
+ return code_cache_;
}
+ void CreateThreadPool();
void DeleteThreadPool();
+
// Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative
// loggers.
void DumpInfo(std::ostream& os) REQUIRES(!lock_);
@@ -268,7 +282,13 @@ class Jit {
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
- static bool LoadCompilerLibrary(std::string* error_msg);
+ // Load and initialize compiler.
+ static bool LoadCompiler(std::string* error_msg);
+
+ static bool CompilerIsLoaded() { return jit_compiler_handle_ != nullptr; }
+
+ // Return whether debug info should be generated. Requires LoadCompiler() to have been called.
+ static bool ShouldGenerateDebugInfo();
ThreadPool* GetThreadPool() const {
return thread_pool_.get();
@@ -281,9 +301,9 @@ class Jit {
void Start();
private:
- explicit Jit(JitOptions* options);
+ Jit(JitCodeCache* code_cache, JitOptions* options);
- static bool LoadCompiler(std::string* error_msg);
+ static bool BindCompilerMethods(std::string* error_msg);
// JIT compiler
static void* jit_library_handle_;
@@ -296,9 +316,10 @@ class Jit {
// We make this static to simplify the interaction with libart-compiler.so.
static bool generate_debug_info_;
+ // JIT resources owned by runtime.
+ jit::JitCodeCache* const code_cache_;
const JitOptions* const options_;
- std::unique_ptr<jit::JitCodeCache> code_cache_;
std::unique_ptr<ThreadPool> thread_pool_;
// Performance monitoring.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 082b311fbe..a15a9be6f5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -185,18 +185,12 @@ class JitCodeCache::JniStubData {
JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t max_capacity,
- bool generate_debug_info,
bool used_only_for_profile_data,
+ bool rwx_memory_allowed,
std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
CHECK_GE(max_capacity, initial_capacity);
- // With 'perf', we want a 1-1 mapping between an address and a method.
- // We aren't able to keep method pointers live during the instrumentation method entry trampoline
- // so we will just disable jit-gc if we are doing that.
- bool garbage_collect_code = !generate_debug_info &&
- !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
-
// We need to have 32 bit offsets from method headers in code cache which point to things
// in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
// Ensure we're below 1 GB to be safe.
@@ -224,8 +218,15 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// Bionic supports memfd_create, but the call may fail on older kernels.
mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
if (mem_fd.get() < 0) {
- VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
- << strerror(errno);
+ std::ostringstream oss;
+ oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
+ if (!rwx_memory_allowed) {
+ // Without using RWX page permissions, the JIT cannot fall back to a single mapping, as it
+ // requires transitioning the code pages to RWX for updates.
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ VLOG(jit) << oss.str();
}
if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
@@ -350,8 +351,14 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
"jit-code-cache-rw",
&error_str);
if (!non_exec_pages.IsValid()) {
- // Log and continue as single view JIT.
- VLOG(jit) << "Failed to map non-executable view of JIT code cache";
+ static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
+ if (rwx_memory_allowed) {
+ // Log and continue as single view JIT (requires RWX memory).
+ VLOG(jit) << kFailedNxView;
+ } else {
+ *error_msg = kFailedNxView;
+ return nullptr;
+ }
}
}
} else {
@@ -369,8 +376,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
std::move(non_exec_pages),
initial_data_capacity,
initial_exec_capacity,
- max_capacity,
- garbage_collect_code);
+ max_capacity);
}
JitCodeCache::JitCodeCache(MemMap&& data_pages,
@@ -378,8 +384,7 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
MemMap&& non_exec_pages,
size_t initial_data_capacity,
size_t initial_exec_capacity,
- size_t max_capacity,
- bool garbage_collect_code)
+ size_t max_capacity)
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
@@ -391,7 +396,7 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
data_end_(initial_data_capacity),
exec_end_(initial_exec_capacity),
last_collection_increased_code_cache_(false),
- garbage_collect_code_(garbage_collect_code),
+ garbage_collect_code_(true),
used_memory_for_data_(0),
used_memory_for_code_(0),
number_of_compilations_(0),
@@ -431,6 +436,12 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
SetFootprintLimit(current_capacity_);
}
+ // With 'perf', we want a 1-1 mapping between an address and a method.
+ // We aren't able to keep method pointers live during the instrumentation method entry trampoline
+ // so we will just disable jit-gc if we are doing that.
+ garbage_collect_code_ = !Jit::ShouldGenerateDebugInfo() &&
+ !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
+
VLOG(jit) << "Created jit code cache: initial data size="
<< PrettySize(initial_data_capacity)
<< ", initial code size="
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 76ad8db886..126fd441db 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -92,8 +92,8 @@ class JitCodeCache {
// in the out arg error_msg.
static JitCodeCache* Create(size_t initial_capacity,
size_t max_capacity,
- bool generate_debug_info,
bool used_only_for_profile_data,
+ bool rwx_memory_allowed,
std::string* error_msg);
~JitCodeCache();
@@ -261,8 +261,8 @@ class JitCodeCache {
void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
- // Dynamically change whether we want to garbage collect code. Should only be used
- // by tests.
+ // Dynamically change whether we want to garbage collect code. Should only be used during JIT
+ // initialization or by tests.
void SetGarbageCollectCode(bool value) {
garbage_collect_code_ = value;
}
@@ -284,8 +284,7 @@ class JitCodeCache {
MemMap&& non_exec_pages,
size_t initial_data_capacity,
size_t initial_exec_capacity,
- size_t max_capacity,
- bool garbage_collect_code);
+ size_t max_capacity);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index f5c0704098..4d3ad620cc 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -282,6 +282,15 @@ static jlong ZygoteHooks_nativePreFork(JNIEnv* env, jclass) {
return reinterpret_cast<jlong>(ThreadForEnv(env));
}
+static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ // The JIT code cache for system server is created while the runtime is still single-threaded.
+ // System server has a window where it can create executable pages for this purpose, but this is
+ // turned off after this hook. Consequently, the only JIT mode supported is the dual-view JIT
+ // where one mapping is R->RW and the other is RX. Single view requires RX->RWX->RX.
+ Runtime::Current()->CreateJitCodeCache(/*rwx_memory_allowed=*/false);
+}
+
static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
jclass,
jlong token,
@@ -419,6 +428,7 @@ static void ZygoteHooks_stopZygoteNoThreadCreation(JNIEnv* env ATTRIBUTE_UNUSED,
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
+ NATIVE_METHOD(ZygoteHooks, nativePostForkSystemServer, "()V"),
NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JIZZLjava/lang/String;)V"),
NATIVE_METHOD(ZygoteHooks, startZygoteNoThreadCreation, "()V"),
NATIVE_METHOD(ZygoteHooks, stopZygoteNoThreadCreation, "()V"),
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index c3121269c4..34b84f52c6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -407,6 +407,7 @@ Runtime::~Runtime() {
if (jit_ != nullptr) {
VLOG(jit) << "Deleting jit";
jit_.reset(nullptr);
+ jit_code_cache_.reset(nullptr);
}
// Shutdown the fault manager if it was initialized.
@@ -785,17 +786,15 @@ bool Runtime::Start() {
// TODO(calin): We use the JIT class as a proxy for JIT compilation and for
// recoding profiles. Maybe we should consider changing the name to be more clear it's
// not only about compiling. b/28295073.
- if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
+ if (!safe_mode_ && (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo())) {
+ // Try to load compiler pre zygote to reduce PSS. b/27744947
std::string error_msg;
- if (!IsZygote()) {
- // If we are the zygote then we need to wait until after forking to create the code cache
- // due to SELinux restrictions on r/w/x memory regions.
- CreateJit();
- } else if (jit_options_->UseJitCompilation()) {
- if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
- // Try to load compiler pre zygote to reduce PSS. b/27744947
- LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
- }
+ if (!jit::Jit::LoadCompiler(&error_msg)) {
+ LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
+ } else if (!IsZygote()) {
+ // If we are the zygote then we need to wait until after forking to create the code cache
+ // due to SELinux restrictions on r/w/x memory regions.
+ CreateJitCodeCache(/*rwx_memory_allowed=*/true);
}
}
@@ -892,29 +891,26 @@ void Runtime::InitNonZygoteOrPostFork(
}
}
- // Create the thread pools.
- heap_->CreateThreadPool();
- // Reset the gc performance data at zygote fork so that the GCs
- // before fork aren't attributed to an app.
- heap_->ResetGcPerformanceInfo();
-
- // We may want to collect profiling samples for system server, but we never want to JIT there.
if (is_system_server) {
- jit_options_->SetUseJitCompilation(false);
jit_options_->SetSaveProfilingInfo(profile_system_server);
if (profile_system_server) {
jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
VLOG(profiler) << "Enabling system server profiles";
}
}
- if (!safe_mode_ &&
- (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
- jit_ == nullptr) {
+
+ if (jit_ == nullptr) {
// Note that when running ART standalone (not zygote, nor zygote fork),
// the jit may have already been created.
CreateJit();
}
+ // Create the thread pools.
+ heap_->CreateThreadPool();
+ // Reset the gc performance data at zygote fork so that the GCs
+ // before fork aren't attributed to an app.
+ heap_->ResetGcPerformanceInfo();
+
StartSignalCatcher();
// Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
@@ -2484,18 +2480,43 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::strin
argv->push_back(feature_string);
}
-void Runtime::CreateJit() {
- CHECK(!IsAotCompiler());
+void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {
if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
DCHECK(!jit_options_->UseJitCompilation());
}
+
+ if (safe_mode_ || (!jit_options_->UseJitCompilation() && !jit_options_->GetSaveProfilingInfo())) {
+ return;
+ }
+
+ // SystemServer has execmem blocked by SELinux, so it cannot use RWX page permissions after the
+ // code cache is initialized.
+ jit_options_->SetRWXMemoryAllowed(rwx_memory_allowed);
+
std::string error_msg;
- jit::Jit* jit = jit::Jit::Create(jit_options_.get(), &error_msg);
- DoAndMaybeSwitchInterpreter([=](){ jit_.reset(jit); });
- if (jit_.get() == nullptr) {
- LOG(WARNING) << "Failed to create JIT " << error_msg;
+ bool profiling_only = !jit_options_->UseJitCompilation();
+ jit_code_cache_.reset(jit::JitCodeCache::Create(jit_options_->GetCodeCacheInitialCapacity(),
+ jit_options_->GetCodeCacheMaxCapacity(),
+ profiling_only,
+ jit_options_->RWXMemoryAllowed(),
+ &error_msg));
+ if (jit_code_cache_.get() == nullptr) {
+ LOG(WARNING) << "Failed to create JIT Code Cache: " << error_msg;
+ }
+}
+
+void Runtime::CreateJit() {
+ if (jit_code_cache_.get() == nullptr) {
return;
}
+
+ jit::Jit* jit = jit::Jit::Create(jit_code_cache_.get(), jit_options_.get());
+ DoAndMaybeSwitchInterpreter([=](){ jit_.reset(jit); });
+ if (jit == nullptr) {
+ LOG(WARNING) << "Failed to allocate JIT";
+ // Release JIT code cache resources (several MB of memory).
+ jit_code_cache_.reset(nullptr);
+ }
}
bool Runtime::CanRelocate() const {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index e27c87d616..4fb0d2ede0 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -56,6 +56,7 @@ enum class EnforcementPolicy;
namespace jit {
class Jit;
+class JitCodeCache;
class JitOptions;
} // namespace jit
@@ -614,6 +615,8 @@ class Runtime {
return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
}
+ void CreateJitCodeCache(bool rwx_memory_allowed);
+
// Create the JIT and instrumentation and code cache.
void CreateJit();
@@ -906,6 +909,7 @@ class Runtime {
std::unique_ptr<JavaVMExt> java_vm_;
std::unique_ptr<jit::Jit> jit_;
+ std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
std::unique_ptr<jit::JitOptions> jit_options_;
// Fault message, printed when we get a SIGSEGV.
diff --git a/test/174-escaping-instance-of-bad-class/expected.txt b/test/174-escaping-instance-of-bad-class/expected.txt
index e287759d44..611d698f1a 100644
--- a/test/174-escaping-instance-of-bad-class/expected.txt
+++ b/test/174-escaping-instance-of-bad-class/expected.txt
@@ -1,6 +1,8 @@
Bad.foo()
Bad.instanceValue = 33
Caught NoClassDefFoundError.
+Bad.bar()
+Caught NoClassDefFoundError.
BadSuper.foo()
BadSuper.superInstanceValue = 1
Caught NoClassDefFoundError.
diff --git a/test/174-escaping-instance-of-bad-class/src/Main.java b/test/174-escaping-instance-of-bad-class/src/Main.java
index 4346152f6c..4f66a31837 100644
--- a/test/174-escaping-instance-of-bad-class/src/Main.java
+++ b/test/174-escaping-instance-of-bad-class/src/Main.java
@@ -39,6 +39,17 @@ public class Main {
ncdfe.printStackTrace();
}
}
+ // Call bar() on the escaped instance of Bad.
+ try {
+ bad.bar();
+ } catch (NoClassDefFoundError ncdfe) {
+ // On RI, the NCDFE has no cause. On ART, the badClinit is the cause.
+ if (ncdfe.getCause() == badClinit || ncdfe.getCause() == null) {
+ System.out.println("Caught NoClassDefFoundError.");
+ } else {
+ ncdfe.printStackTrace();
+ }
+ }
}
public static void hierarchyTest() {
@@ -117,9 +128,19 @@ class Bad {
System.out.println("Bad.instanceValue = " + instanceValue);
System.out.println("Bad.staticValue = " + staticValue);
}
+ public void bar() {
+ System.out.println("Bad.bar()");
+ System.out.println("Bad.staticValue [indirect] = " + Helper.$inline$getBadStaticValue());
+ }
public Bad(int iv) { instanceValue = iv; }
public int instanceValue;
public static int staticValue;
+
+ public static class Helper {
+ public static int $inline$getBadStaticValue() {
+ return Bad.staticValue;
+ }
+ }
}
class BadSuper {
diff --git a/test/551-checker-clinit/src/Main.java b/test/551-checker-clinit/src/Main.java
index ab92cd03fd..0eea8009a6 100644
--- a/test/551-checker-clinit/src/Main.java
+++ b/test/551-checker-clinit/src/Main.java
@@ -57,7 +57,7 @@ class Sub extends Main {
}
/// CHECK-START: void Sub.invokeSubClass() builder (after)
- /// CHECK: ClinitCheck
+ /// CHECK: ClinitCheck
public void invokeSubClass() {
int a = SubSub.foo;
}
@@ -71,3 +71,23 @@ class SubSub {
}
public static int foo = 42;
}
+
+class NonTrivial {
+ public static int staticFoo = 42;
+ public int instanceFoo;
+
+ static {
+ System.out.println("NonTrivial.<clinit>");
+ }
+
+ /// CHECK-START: void NonTrivial.<init>() builder (after)
+ /// CHECK-NOT: ClinitCheck
+
+ /// CHECK-START: void NonTrivial.<init>() builder (after)
+ /// CHECK: StaticFieldGet
+ public NonTrivial() {
+ // ClinitCheck is eliminated because this is a constructor and therefore the
+ // corresponding new-instance in the caller must have performed the check.
+ instanceFoo = staticFoo;
+ }
+}
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index 92ace035cf..c375138b08 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -30,7 +30,6 @@ cc_binary {
"libartbase",
"libbase",
"liblog",
- "libutils",
"libz",
"libziparchive",
],