ART: Lock counting in the interpreter
To support structured locking when balanced-locking verification
has failed, count lock and unlock operations in the interpreter.
Bug: 23502994
Change-Id: I2bb915da6e3d43c49723a943b42d4d5a7c939aa1
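
Note on the scheme (illustrative only, not part of the change): each lock
acquisition records one entry in a per-frame vector, duplicate entries stand
for recursive locks, and any entries left over when the method returns (or
terminates abruptly) indicate a structured-locking violation. A minimal
standalone C++ sketch of that bookkeeping, using hypothetical Object and
LockCounter stand-ins rather than the real mirror::Object/LockCountData types:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for mirror::Object; illustrative only.
    struct Object {};

    class LockCounter {
     public:
      // Record a lock acquisition. Duplicates model recursive locks.
      void AddMonitor(Object* obj) { monitors_.push_back(obj); }

      // Record an unlock. Returns false if no matching lock was recorded,
      // the case that leads to IllegalMonitorStateException in the change.
      bool RemoveMonitor(Object* obj) {
        auto it = std::find(monitors_.begin(), monitors_.end(), obj);
        if (it == monitors_.end()) {
          return false;
        }
        monitors_.erase(it);
        return true;
      }

      // True if every recorded lock has been matched by an unlock.
      bool AllReleased() const { return monitors_.empty(); }

     private:
      std::vector<Object*> monitors_;
    };

    int main() {
      LockCounter counter;
      Object o;
      counter.AddMonitor(&o);
      counter.AddMonitor(&o);   // Recursive lock: a second entry for the same object.
      counter.RemoveMonitor(&o);
      std::printf("balanced after one unlock: %d\n", counter.AllReleased());   // 0
      counter.RemoveMonitor(&o);
      std::printf("balanced after two unlocks: %d\n", counter.AllReleased());  // 1
      std::printf("unlock without a lock: %d\n", counter.RemoveMonitor(&o));   // 0
      return 0;
    }

In the actual change, the return paths (the HANDLE_MONITOR_CHECKS sites) also
unlock any leftover monitors before throwing IllegalMonitorStateException, so
a method that violates structured locking cannot exit while still holding
locks.
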
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5d3ac73..c5492f1 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -719,7 +719,7 @@
uint16_t num_regs = code_item->registers_size_;
// No last shadow coming from quick.
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, nullptr, method, 0);
+ CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2a76f94..b010504 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -333,7 +333,7 @@
// Set up shadow frame with matching number of reference slots to vregs.
ShadowFrame* last_shadow_frame = self->GetManagedStack()->GetTopShadowFrame();
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, 0);
+ CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
self->PushShadowFrame(shadow_frame);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 6f5b843..44eb29e 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -619,7 +619,7 @@
// Allocate shadow frame on the stack.
const char* old_cause = self->StartAssertNoThreadSuspension("DoCallCommon");
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, 0);
+ CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
// Initialize new shadow frame by copying the registers from the callee shadow frame.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index f57bddb..a5a8d81 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -45,6 +45,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
+#include "stack.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -79,12 +80,20 @@
void ThrowNullPointerExceptionFromInterpreter()
SHARED_REQUIRES(Locks::mutator_lock_);
-static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+template <bool kMonitorCounting>
+static inline void DoMonitorEnter(Thread* self,
+ ShadowFrame* frame,
+ Object* ref) NO_THREAD_SAFETY_ANALYSIS {
ref->MonitorEnter(self);
+ frame->GetLockCountData().AddMonitor<kMonitorCounting>(self, ref);
}
-static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+template <bool kMonitorCounting>
+static inline void DoMonitorExit(Thread* self,
+ ShadowFrame* frame,
+ Object* ref) NO_THREAD_SAFETY_ANALYSIS {
ref->MonitorExit(self);
+ frame->GetLockCountData().RemoveMonitorOrThrow<kMonitorCounting>(self, ref);
}
void AbortTransactionF(Thread* self, const char* fmt, ...)
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 9677d79..4265b50 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -89,6 +89,11 @@
UnexpectedOpcode(inst, shadow_frame); \
} HANDLE_INSTRUCTION_END();
+#define HANDLE_MONITOR_CHECKS() \
+ if (!shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ }
/**
* Interpreter based on computed goto tables.
@@ -275,6 +280,7 @@
HANDLE_INSTRUCTION_START(RETURN_VOID_NO_BARRIER) {
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -289,6 +295,7 @@
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -304,6 +311,7 @@
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -318,6 +326,7 @@
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -331,6 +340,7 @@
HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
const uint8_t vreg_index = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
if (do_assignability_check && obj_result != nullptr) {
@@ -468,7 +478,7 @@
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorEnter(self, obj);
+ DoMonitorEnter<do_access_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
}
}
@@ -480,7 +490,7 @@
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorExit(self, obj);
+ DoMonitorExit<do_access_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
}
}
@@ -2544,6 +2554,8 @@
uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
instrumentation);
if (found_dex_pc == DexFile::kDexNoIndex) {
+ // Structured locking is to be enforced for abnormal termination, too.
+ shadow_frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self);
return JValue(); /* Handled in caller. */
} else {
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 083dfb5..76d4bb0fc 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -31,6 +31,9 @@
inst->GetDexPc(insns), \
instrumentation); \
if (found_dex_pc == DexFile::kDexNoIndex) { \
+ /* Structured locking is to be enforced for abnormal termination, too. */ \
+ shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
return JValue(); /* Handled in caller. */ \
} else { \
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); \
@@ -47,6 +50,12 @@
} \
} while (false)
+#define HANDLE_MONITOR_CHECKS() \
+ if (!shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ }
+
// Code to run before each dex instruction.
#define PREAMBLE() \
do { \
@@ -182,6 +191,7 @@
PREAMBLE();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -194,6 +204,7 @@
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -207,6 +218,7 @@
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -219,6 +231,7 @@
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -230,6 +243,7 @@
PREAMBLE();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != nullptr) {
@@ -366,7 +380,7 @@
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorEnter(self, obj);
+ DoMonitorEnter<do_assignability_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
@@ -378,7 +392,7 @@
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorExit(self, obj);
+ DoMonitorExit<do_assignability_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 1d21a64..d93a57d 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -1051,4 +1051,87 @@
}
}
+void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+
+ // If there's an error during enter, we won't have locked the monitor. So check there's no
+ // exception.
+ if (self->IsExceptionPending()) {
+ return;
+ }
+
+ if (monitors_ == nullptr) {
+ monitors_.reset(new std::vector<mirror::Object*>());
+ }
+ monitors_->push_back(obj);
+}
+
+void LockCountData::RemoveMonitorInternal(Thread* self, const mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ bool found_object = false;
+ if (monitors_ != nullptr) {
+ // We need to remove one entry for obj, as duplicates are used for counting recursive locks.
+ // We arbitrarily choose the first one.
+ auto it = std::find(monitors_->begin(), monitors_->end(), obj);
+ if (it != monitors_->end()) {
+ monitors_->erase(it);
+ found_object = true;
+ }
+ }
+ if (!found_object) {
+ // The object wasn't found. Time for an IllegalMonitorStateException.
+ // The order here isn't fully clear. Assume that any other pending exception is swallowed.
+ // TODO: Maybe make already pending exception a suppressed exception.
+ self->ClearException();
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not lock monitor on object of type '%s' before unlocking",
+ PrettyTypeOf(const_cast<mirror::Object*>(obj)).c_str());
+ }
+}
+
+// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show
+// that the object was locked.
+void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
+ obj->MonitorExit(self);
+}
+
+bool LockCountData::CheckAllMonitorsReleasedInternal(Thread* self) {
+ DCHECK(self != nullptr);
+ if (monitors_ != nullptr) {
+ if (!monitors_->empty()) {
+ // There may be an exception pending, if the method is terminating abruptly. Clear it.
+ // TODO: Should we add this as a suppressed exception?
+ self->ClearException();
+
+ // OK, there are monitors that are still locked. To enforce structured locking (and avoid
+ // deadlocks) we unlock all of them before we raise the IllegalMonitorStateException.
+ for (mirror::Object* obj : *monitors_) {
+ MonitorExitHelper(self, obj);
+ // If this raised an exception, ignore it. TODO: Should we add these as suppressed
+ // exceptions?
+ if (self->IsExceptionPending()) {
+ self->ClearException();
+ }
+ }
+ // Raise an exception, just give the first object as the sample.
+ mirror::Object* first = (*monitors_)[0];
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not unlock monitor on object of type '%s'",
+ PrettyTypeOf(first).c_str());
+
+ // To make sure this path is not triggered again, clean out the monitors.
+ monitors_->clear();
+
+ return false;
+ }
+ }
+ return true;
+}
+
} // namespace art
diff --git a/runtime/stack.h b/runtime/stack.h
index 31acf0e..32a4765 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -21,6 +21,8 @@
#include <string>
#include "arch/instruction_set.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
#include "mirror/object_reference.h"
@@ -66,6 +68,72 @@
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
+// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
+// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
+// thread roots).
+// Note: implementation is split so that the call sites may be optimized to no-ops in case no
+// lock counting is necessary. The actual implementation is in the cc file to avoid
+// dependencies.
+class LockCountData {
+ public:
+ // Add the given object to the list of monitors, that is, objects that have been locked. This
+ // will not throw (but will be skipped if there is an exception pending on entry).
+ template <bool kLockCounting>
+ void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return;
+ }
+ AddMonitorInternal(self, obj);
+ }
+
+ // Try to remove the given object from the monitor list, indicating an unlock operation.
+ // This will throw an IllegalMonitorStateException (clearing any already pending exception) if
+ // there was no lock recorded for the object.
+ template <bool kLockCounting>
+ void RemoveMonitorOrThrow(Thread* self,
+ const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return;
+ }
+ RemoveMonitorInternal(self, obj);
+ }
+
+ // Check whether all acquired monitors have been released. This will potentially throw an
+ // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
+ // check shows that everything is OK wrt/ lock counting, false otherwise.
+ template <bool kLockCounting>
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return true;
+ }
+ return CheckAllMonitorsReleasedInternal(self);
+ }
+
+ template <typename T, typename... Args>
+ void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (monitors_ != nullptr) {
+ // Visitors may change the Object*. Be careful with the foreach loop.
+ for (mirror::Object*& obj : *monitors_) {
+ visitor(/* inout */ &obj, std::forward<Args>(args)...);
+ }
+ }
+ }
+
+ private:
+ // Internal implementations.
+ void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void RemoveMonitorInternal(Thread* self, const mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Stores references to the locked-on objects. As noted, this should be visited during thread
+ // marking.
+ std::unique_ptr<std::vector<mirror::Object*>> monitors_;
+};
+
// ShadowFrame has 2 possible layouts:
// - interpreter - separate VRegs and reference arrays. References are in the reference array.
// - JNI - just VRegs, but where every VReg holds a reference.
@@ -272,6 +340,10 @@
}
}
+ LockCountData& GetLockCountData() {
+ return lock_count_data_;
+ }
+
static size_t LinkOffset() {
return OFFSETOF_MEMBER(ShadowFrame, link_);
}
@@ -330,6 +402,7 @@
ShadowFrame* link_;
ArtMethod* method_;
uint32_t dex_pc_;
+ LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
// This is a two-part array:
// - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 12b2e88..a8e2b23 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2656,6 +2656,8 @@
}
}
}
+ // Mark the monitors recorded for lock counting, as required for structured locking checks.
+ shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this);
}
private: