-rw-r--r--  runtime/base/mutex.h           12
-rw-r--r--  runtime/instrumentation.cc     11
-rw-r--r--  runtime/instrumentation.h       2
-rw-r--r--  runtime/jit/jit_code_cache.cc  36
-rw-r--r--  runtime/jit/jit_code_cache.h    7
5 files changed, 45 insertions(+), 23 deletions(-)
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index ee47e7ce56..ced0cb1d91 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -65,10 +65,14 @@ enum LockLevel : uint8_t {
kAbortLock,
kNativeDebugInterfaceLock,
kSignalHandlingLock,
+ // A generic lock level for mutexes that should not allow any additional mutexes to be
+ // acquired after a mutex at this level is taken.
+ kGenericBottomLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
kMarkSweepMarkStackLock,
+ kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
@@ -94,7 +98,6 @@ enum LockLevel : uint8_t {
kOatFileManagerLock,
kTracingUniqueMethodsLock,
kTracingStreamingLock,
- kDeoptimizedMethodsLock,
kClassLoaderClassesLock,
kDefaultMutexLevel,
kDexLock,
@@ -105,7 +108,6 @@ enum LockLevel : uint8_t {
kMonitorPoolLock,
kClassLinkerClassesLock, // TODO rename.
kDexToDexCompilerLock,
- kJitCodeCacheLock,
kCHALock,
kSubtypeCheckLock,
kBreakpointLock,
@@ -736,6 +738,12 @@ class Locks {
// Guard accesses to the JNI function table override.
static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
+ // When declaring any Mutex, add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check that
+ // the code does not try to acquire a higher-level Mutex while it is held. NB: due to the way
+ // annotalysis works, this only encodes that the mutex is below jni_function_table_lock_, even
+ // though the kGenericBottomLock level is lower than that.
+ #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::jni_function_table_lock_)
+
// Have an exclusive aborting thread.
static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
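
A minimal sketch of how the new level and macro fit together when declaring a
bottom-level mutex (the class and member names here are hypothetical; only
BOTTOM_MUTEX_ACQUIRED_AFTER and kGenericBottomLock come from this change):

  // Hypothetical declaration site, not part of this change.
  class SomeRuntimeComponent {
   public:
    SomeRuntimeComponent()
        : bottom_lock_("some bottom lock", art::kGenericBottomLock) {}

   private:
    // Annotalysis now flags any code path that acquires another mutex
    // while bottom_lock_ is held.
    art::Mutex bottom_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
  };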
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index d25893f4c9..4196e19383 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -162,7 +162,7 @@ Instrumentation::Instrumentation()
have_branch_listeners_(false),
have_invoke_virtual_or_interface_listeners_(false),
have_exception_handled_listeners_(false),
- deoptimized_methods_lock_("deoptimized methods lock", kDeoptimizedMethodsLock),
+ deoptimized_methods_lock_("deoptimized methods lock", kGenericBottomLock),
deoptimization_enabled_(false),
interpreter_handler_table_(kMainHandlerTable),
quick_alloc_entry_points_instrumentation_counter_(0),
@@ -225,14 +225,7 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
new_quick_code = GetQuickToInterpreterBridge();
} else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- // It would be great to search the JIT for its implementation here but we cannot due to the
- // locks we hold. Instead just set to the interpreter bridge and that code will search the JIT
- // when it gets called and replace the entrypoint then.
- if (NeedDebugVersionFor(method)) {
- new_quick_code = GetQuickToInterpreterBridge();
- } else {
- new_quick_code = class_linker->GetQuickOatCodeFor(method);
- }
+ new_quick_code = GetCodeForInvoke(method);
} else {
new_quick_code = GetQuickResolutionStub();
}
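
For reference, a rough sketch of the kind of logic a GetCodeForInvoke-style
helper centralizes, reconstructed from the branch removed above (illustrative
only, not the helper's actual body; with the locks it needs now bottom-level,
the real helper is also free to search the JIT for the implementation):

  const void* GetCodeForInvokeSketch(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (NeedDebugVersionFor(method)) {
      // Debuggable methods must run through the interpreter bridge.
      return GetQuickToInterpreterBridge();
    }
    return Runtime::Current()->GetClassLinker()->GetQuickOatCodeFor(method);
  }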
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 0e366d3672..e5d8800fcc 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -720,7 +720,7 @@ class Instrumentation {
// The set of methods being deoptimized (by the debugger) which must be executed with the
// interpreter only.
- mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ mutable ReaderWriterMutex deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
bool deoptimization_enabled_;
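
A sketch of what the annotations buy on the read side: GUARDED_BY forces every
access to deoptimized_methods_ to hold the lock, and BOTTOM_MUTEX_ACQUIRED_AFTER
lets annotalysis reject taking any further lock while it is held
(IsDeoptimizedSketch is an illustrative name, not the real accessor):

  bool Instrumentation::IsDeoptimizedSketch(ArtMethod* method) {
    // Read-side access; annotalysis requires the lock to be held here.
    ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
    return deoptimized_methods_.find(method) != deoptimized_methods_.end();
  }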
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 52262f4d61..86582ef804 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -507,21 +507,31 @@ static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
}
-static void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
+static void DCheckRootsAreValid(Handle<mirror::ObjectArray<mirror::Object>> roots)
+ REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!kIsDebugBuild) {
+ return;
+ }
const uint32_t length = roots->GetLength();
- // Put all roots in `roots_data`.
for (uint32_t i = 0; i < length; ++i) {
ObjPtr<mirror::Object> object = roots->Get(i);
- if (kIsDebugBuild) {
- // Ensure the string is strongly interned. b/32995596
- if (object->IsString()) {
- ObjPtr<mirror::String> str = ObjPtr<mirror::String>::DownCast(object);
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
- }
+ // Ensure the string is strongly interned. b/32995596
+ if (object->IsString()) {
+ ObjPtr<mirror::String> str = ObjPtr<mirror::String>::DownCast(object);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
}
+ }
+}
+
+void JitCodeCache::FillRootTable(uint8_t* roots_data,
+ Handle<mirror::ObjectArray<mirror::Object>> roots) {
+ GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
+ const uint32_t length = roots->GetLength();
+ // Put all roots in `roots_data`.
+ for (uint32_t i = 0; i < length; ++i) {
+ ObjPtr<mirror::Object> object = roots->Get(i);
gc_roots[i] = GcRoot<mirror::Object>(object);
}
}
@@ -853,6 +863,12 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
single_impl, method, method_header);
}
+ if (!method->IsNative()) {
+ // We need to do this before grabbing lock_ because the check has to look strings up in the
+ // InternTable, and no further lock may be acquired once lock_ is held. Native methods do
+ // not have roots.
+ DCheckRootsAreValid(roots);
+ }
+
// The following needs to be guarded by cha_lock_ also. Otherwise it's
// possible that the compiled code is considered invalidated by some class linking,
// but below we still make the compiled code valid for the method.
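
The ordering constraint this enforces, in simplified form: the debug check must
run before lock_ is taken, because LookupStrong acquires intern_table_lock_,
which can no longer be taken once the bottom-level lock_ is held (a simplified
sketch of CommitCodeInternal's shape, not its full body):

  if (!method->IsNative()) {
    DCheckRootsAreValid(roots);      // May take intern_table_lock_ internally.
  }
  MutexLock mu(self, lock_);         // Bottom lock: no further mutex allowed.
  FillRootTable(roots_data, roots);  // Satisfies FillRootTable's REQUIRES(lock_).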
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 19de978cd4..ee6111a430 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -315,6 +315,11 @@ class JitCodeCache {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Adds the given roots to the roots_data. This is a member function only so that annotalysis
+ // can verify that lock_ is held.
+ void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
+ REQUIRES(lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& entries)
@@ -391,7 +396,7 @@ class JitCodeCache {
class JniStubData;
// Lock for guarding allocations, collections, and the method_code_map_.
- Mutex lock_;
+ Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
// Condition to wait on during collection.
ConditionVariable lock_cond_ GUARDED_BY(lock_);
// Whether there is a code cache collection in progress.
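
For completeness, the wait pattern that lock_cond_ GUARDED_BY(lock_) supports
looks roughly like this (a sketch assuming ART's MutexLock/ConditionVariable
API and a collection_in_progress_ flag guarded by lock_):

  MutexLock mu(self, lock_);
  while (collection_in_progress_) {
    lock_cond_.Wait(self);  // Atomically releases lock_ while waiting.
  }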