-rw-r--r--  runtime/art_method.h                                 6
-rw-r--r--  runtime/interpreter/interpreter.cc                  11
-rw-r--r--  runtime/interpreter/interpreter_common.h            18
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc   5
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc       6
-rw-r--r--  runtime/modifiers.h                                   7
-rw-r--r--  runtime/stack.cc                                      6
-rw-r--r--  runtime/stack.h                                      33
-rw-r--r--  runtime/verifier/method_verifier.cc                   9
9 files changed, 54 insertions, 47 deletions
diff --git a/runtime/art_method.h b/runtime/art_method.h
index d239b423c2..a012a5a9ca 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -351,6 +351,12 @@ class ArtMethod FINAL {
SetAccessFlags(GetAccessFlags() | kAccSkipAccessChecks);
}
+ // Should this method be run in the interpreter and count locks (e.g., failed structured-
+  // locking verification)?
+  bool MustCountLocks() {
+    return (GetAccessFlags() & kAccMustCountLocks) != 0;
+  }
+
// Returns true if this method could be overridden by a default method.
bool IsOverridableByDefaultMethod() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 97dbe5d219..81a396a925 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -303,8 +303,13 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ // Lock counting is a special version of accessibility checks, and for simplicity and
+ // reduction of template parameters, we gate it behind access-checks mode.
+ ArtMethod* method = shadow_frame.GetMethod();
+ DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks());
+
bool transaction_active = Runtime::Current()->IsActiveTransaction();
- if (LIKELY(shadow_frame.GetMethod()->SkipAccessChecks())) {
+ if (LIKELY(method->SkipAccessChecks())) {
// Enter the "without access check" interpreter.
if (kInterpreterImplKind == kMterpImplKind) {
if (transaction_active) {
@@ -487,6 +492,10 @@ void EnterInterpreterFromDeoptimize(Thread* self,
// Are we executing the first shadow frame?
bool first = true;
while (shadow_frame != nullptr) {
+ // We do not want to recover lock state for lock counting when deoptimizing. Currently,
+ // the compiler should not have compiled a method that failed structured-locking checks.
+ DCHECK(!shadow_frame->GetMethod()->MustCountLocks());
+
self->SetTopOfShadowStack(shadow_frame);
const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
const uint32_t dex_pc = shadow_frame->GetDexPC();
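The comment and DCHECK added above encode a simple dispatch rule: lock counting rides on the access-checks interpreter, so a method can never both skip access checks and require counting. Below is a minimal, standalone sketch of that rule, not part of the commit; UseNoAccessCheckInterpreter is a hypothetical helper name.

#include <cassert>

// Hypothetical, simplified model of the dispatch decision in Execute():
// a method that must count locks stays on the access-checks (slow)
// interpreter path, so the fast path needs no lock-counting variant.
bool UseNoAccessCheckInterpreter(bool skip_access_checks, bool must_count_locks) {
  // Mirrors the DCHECK in the diff: never both at once.
  assert(!(skip_access_checks && must_count_locks));
  return skip_access_checks;
}

int main() {
  // A method verified for structured locking takes the fast path.
  assert(UseNoAccessCheckInterpreter(/*skip_access_checks=*/true, /*must_count_locks=*/false));
  // A method that failed structured-locking verification stays on the
  // access-checks path, where DoMonitorEnter/Exit count its locks.
  assert(!UseNoAccessCheckInterpreter(/*skip_access_checks=*/false, /*must_count_locks=*/true));
  return 0;
}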
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index e5b89e2f98..69376fd7a1 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -95,7 +95,9 @@ static inline void DoMonitorEnter(Thread* self,
StackHandleScope<1> hs(self);
Handle<Object> h_ref(hs.NewHandle(ref));
h_ref->MonitorEnter(self);
- frame->GetLockCountData().AddMonitor<kMonitorCounting>(self, h_ref.Get());
+ if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ frame->GetLockCountData().AddMonitor(self, h_ref.Get());
+ }
}
template <bool kMonitorCounting>
@@ -107,7 +109,19 @@ static inline void DoMonitorExit(Thread* self,
StackHandleScope<1> hs(self);
Handle<Object> h_ref(hs.NewHandle(ref));
h_ref->MonitorExit(self);
- frame->GetLockCountData().RemoveMonitorOrThrow<kMonitorCounting>(self, h_ref.Get());
+ if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
+ }
+}
+
+template <bool kMonitorCounting>
+static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_) {
+ if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self);
+ }
+ return true;
}
void AbortTransactionF(Thread* self, const char* fmt, ...)
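The new DoMonitorCheckOnExit helper combines a compile-time template flag with the per-method MustCountLocks() check. Below is a minimal, standalone sketch of that two-level gate under simplified assumptions: Method, Frame, and LockCountData here are illustrative stand-ins, not ART's classes.

#include <iostream>

// Simplified stand-ins for ArtMethod/ShadowFrame/LockCountData.
struct Method { bool must_count_locks; };
struct LockCountData { bool CheckAllMonitorsReleasedOrThrow() { return true; } };
struct Frame {
  Method* method;
  LockCountData lock_count_data;
  Method* GetMethod() { return method; }
  LockCountData& GetLockCountData() { return lock_count_data; }
};

// Sketch of the DoMonitorCheckOnExit pattern: the template flag prunes the
// code for interpreter variants built without counting, and the per-method
// flag restricts the bookkeeping to methods that actually need it.
template <bool kMonitorCounting>
bool DoMonitorCheckOnExitSketch(Frame* frame) {
  if (kMonitorCounting && frame->GetMethod()->must_count_locks) {
    return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow();
  }
  return true;  // Nothing to verify for methods that passed structured locking.
}

int main() {
  Method m{/*must_count_locks=*/true};
  Frame f{&m, {}};
  std::cout << DoMonitorCheckOnExitSketch<true>(&f) << "\n";   // counted path
  std::cout << DoMonitorCheckOnExitSketch<false>(&f) << "\n";  // counting compiled out
  return 0;
}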
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 13cfb9877d..f03036b6a8 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -104,8 +104,7 @@ namespace interpreter {
} HANDLE_INSTRUCTION_END();
#define HANDLE_MONITOR_CHECKS() \
- if (!shadow_frame.GetLockCountData(). \
-          CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \
HANDLE_PENDING_EXCEPTION(); \
}
@@ -2584,7 +2583,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
instrumentation);
if (found_dex_pc == DexFile::kDexNoIndex) {
// Structured locking is to be enforced for abnormal termination, too.
- shadow_frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self);
+ DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame);
return JValue(); /* Handled in caller. */
} else {
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 4323d4f425..18330babe0 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -34,8 +34,7 @@ namespace interpreter {
instrumentation); \
if (found_dex_pc == DexFile::kDexNoIndex) { \
/* Structured locking is to be enforced for abnormal termination, too. */ \
- shadow_frame.GetLockCountData(). \
-          CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
+ DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame); \
if (interpret_one_instruction) { \
/* Signal mterp to return to caller */ \
shadow_frame.SetDexPC(DexFile::kDexNoIndex); \
@@ -57,8 +56,7 @@ namespace interpreter {
} while (false)
#define HANDLE_MONITOR_CHECKS() \
- if (!shadow_frame.GetLockCountData(). \
-          CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \
HANDLE_PENDING_EXCEPTION(); \
}
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 6dd182a11b..fd7a125bc3 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -60,10 +60,13 @@ static constexpr uint32_t kAccDefault = 0x00400000; // method (ru
// This is set by the class linker during LinkInterfaceMethods. Prior to that point we do not know
// if any particular method needs to be a default conflict. Used to figure out at runtime if
// invoking this method will throw an exception.
-static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime)
+static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime)
// Set by the verifier for a method we do not want the compiler to compile.
-static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime)
+static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime)
+
+// Set by the verifier for a method that could not be verified to follow structured locking.
+static constexpr uint32_t kAccMustCountLocks = 0x02000000; // method (runtime)
// Special runtime-only flags.
// Interface and all its super-interfaces with default methods have been recursively initialized.
diff --git a/runtime/stack.cc b/runtime/stack.cc
index e56d35a91e..a5ca527aa2 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -950,7 +950,7 @@ int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
}
}
-void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) {
+void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) {
if (obj == nullptr) {
return;
}
@@ -967,7 +967,7 @@ void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) {
monitors_->push_back(obj);
}
-void LockCountData::RemoveMonitorInternal(Thread* self, const mirror::Object* obj) {
+void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) {
if (obj == nullptr) {
return;
}
@@ -1000,7 +1000,7 @@ void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALY
obj->MonitorExit(self);
}
-bool LockCountData::CheckAllMonitorsReleasedInternal(Thread* self) {
+bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) {
DCHECK(self != nullptr);
if (monitors_ != nullptr) {
if (!monitors_->empty()) {
diff --git a/runtime/stack.h b/runtime/stack.h
index 7301184a9e..e77ab4647e 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -80,39 +80,18 @@ class LockCountData {
public:
// Add the given object to the list of monitors, that is, objects that have been locked. This
// will not throw (but be skipped if there is an exception pending on entry).
- template <bool kLockCounting>
- void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(self != nullptr);
- if (!kLockCounting) {
- return;
- }
- AddMonitorInternal(self, obj);
- }
+ void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
// Try to remove the given object from the monitor list, indicating an unlock operation.
// This will throw an IllegalMonitorStateException (clearing any already pending exception), in
// case that there wasn't a lock recorded for the object.
- template <bool kLockCounting>
void RemoveMonitorOrThrow(Thread* self,
- const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(self != nullptr);
- if (!kLockCounting) {
- return;
- }
- RemoveMonitorInternal(self, obj);
- }
+ const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
// Check whether all acquired monitors have been released. This will potentially throw an
// IllegalMonitorStateException, clearing any already pending exception. Returns true if the
// check shows that everything is OK wrt/ lock counting, false otherwise.
- template <bool kLockCounting>
- bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(self != nullptr);
- if (!kLockCounting) {
- return true;
- }
- return CheckAllMonitorsReleasedInternal(self);
- }
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
template <typename T, typename... Args>
void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -125,12 +104,6 @@ class LockCountData {
}
private:
- // Internal implementations.
- void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void RemoveMonitorInternal(Thread* self, const mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
-
// Stores references to the locked-on objects. As noted, this should be visited during thread
// marking.
std::unique_ptr<std::vector<mirror::Object*>> monitors_;
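The LockCountData declarations above drop the kLockCounting template parameter; the per-call gate now lives in the interpreter helpers. Below is a minimal, standalone model of the bookkeeping the class performs; LockCountSketch and its return-value error reporting are simplified stand-ins for the real class, which records mirror::Object* and throws IllegalMonitorStateException.

#include <algorithm>
#include <iostream>
#include <vector>

// Simplified model of per-frame lock counting.
class LockCountSketch {
 public:
  void AddMonitor(const void* obj) { monitors_.push_back(obj); }

  // Returns false if the object was never recorded as locked, which in the
  // runtime corresponds to throwing IllegalMonitorStateException.
  bool RemoveMonitor(const void* obj) {
    auto it = std::find(monitors_.begin(), monitors_.end(), obj);
    if (it == monitors_.end()) {
      return false;
    }
    monitors_.erase(it);
    return true;
  }

  // True iff every recorded lock has been released (structured locking held).
  bool AllMonitorsReleased() const { return monitors_.empty(); }

 private:
  std::vector<const void*> monitors_;
};

int main() {
  int a, b;
  LockCountSketch data;
  data.AddMonitor(&a);
  data.AddMonitor(&b);
  data.RemoveMonitor(&b);
  std::cout << "balanced: " << data.AllMonitorsReleased() << "\n";  // 0: &a still held
  data.RemoveMonitor(&a);
  std::cout << "balanced: " << data.AllMonitorsReleased() << "\n";  // 1
  return 0;
}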
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 8802e62435..d05ae42b0e 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -401,8 +401,13 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
}
}
- if (method != nullptr && verifier.HasInstructionThatWillThrow()) {
- method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
+ if (method != nullptr) {
+ if (verifier.HasInstructionThatWillThrow()) {
+ method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
+ }
+ if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) {
+ method->SetAccessFlags(method->GetAccessFlags() | kAccMustCountLocks);
+ }
}
} else {
// Bad method data.
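Taken together, the verifier marks a method with kAccMustCountLocks when it records a structured-locking failure, and the interpreter later consults that flag via MustCountLocks(). Below is a minimal, standalone sketch of the flag handling; the constants come from the modifiers.h hunk above, while VerifyResult and UpdateAccessFlags are hypothetical names used only for illustration.

#include <cstdint>
#include <iostream>

// Flag values from runtime/modifiers.h in the diff above.
constexpr uint32_t kAccCompileDontBother = 0x01000000;
constexpr uint32_t kAccMustCountLocks    = 0x02000000;

// Stand-in for the verifier outcome; the real code tests
// verifier.encountered_failure_types_ against VerifyError::VERIFY_ERROR_LOCKING.
struct VerifyResult {
  bool has_throwing_instruction;
  bool has_locking_failure;
};

// Sketch of the post-verification flag update in method_verifier.cc:
// a locking failure marks the method so the interpreter counts its locks.
uint32_t UpdateAccessFlags(uint32_t access_flags, const VerifyResult& result) {
  if (result.has_throwing_instruction) {
    access_flags |= kAccCompileDontBother;
  }
  if (result.has_locking_failure) {
    access_flags |= kAccMustCountLocks;
  }
  return access_flags;
}

int main() {
  uint32_t flags = UpdateAccessFlags(0u, {/*has_throwing_instruction=*/false,
                                          /*has_locking_failure=*/true});
  // Same bit test ArtMethod::MustCountLocks() performs on the stored flags.
  std::cout << std::boolalpha << ((flags & kAccMustCountLocks) != 0) << "\n";  // true
  return 0;
}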