author     2019-03-20 15:52:13 -0700
committer  2019-06-10 18:33:25 +0000
commit     b7c640d364d32b79cb52d04750b063667a9a0c86 (patch)
tree       9caafc96879f83f8e5dd3cd45b9005be6e2b7deb /runtime/interpreter/interpreter_common.h
parent     3ffb5b1576f8af0c361284ebd8d2d54c70ede3ff (diff)
JVMTI Force early return
Add support for the can_force_early_return jvmti capability. This allows
one to force Java frames to exit early. Exited frames have all of their
normal locks released.
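For illustration only, an agent might exercise the new capability roughly as in the
sketch below. It uses only standard JVMTI calls; the suspended target thread, the
helper name ForceTopFrameToReturnInt and the value 42 are made-up examples, and
error handling is omitted.

    // Sketch of a JVMTI agent forcing the top Java frame of a suspended thread
    // to return early. Standard JVMTI API; names and values are examples only.
    #include <jvmti.h>

    static void ForceTopFrameToReturnInt(jvmtiEnv* jvmti, jthread thread) {
      jvmtiCapabilities caps = {};
      caps.can_force_early_return = 1;  // the capability added by this change
      caps.can_suspend = 1;             // needed to suspend the target thread
      jvmti->AddCapabilities(&caps);

      jvmti->SuspendThread(thread);
      // Make the current int-returning method on 'thread' return 42 immediately;
      // the runtime releases the frame's monitors as part of the forced exit.
      jvmti->ForceEarlyReturnInt(thread, 42);
      jvmti->ResumeThread(thread);
    }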
We implement this by modifying the existing method exit events to allow
one to modify the exit value during the callback. ForceEarlyReturn is
then implemented by adding internal-only events that change the return
value of methods once they return (using kForcePopFrame), avoiding the
need to modify the interpreter itself very deeply. This also makes it
simple to continue using the standard deoptimization functions to force
the actual return.
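Conceptually the mechanism looks like the sketch below; PendingForcedReturn and
OnMethodExit are hypothetical stand-ins for illustration, not ART's real
instrumentation classes.

    // Hypothetical sketch: a method-exit callback that receives the return
    // value by reference and overwrites it when the frame was force-popped.
    #include <cstdint>
    #include <optional>

    struct JValue { int64_t j; };  // stand-in for art::JValue

    // Stand-in for the internal per-thread "forced return" state set up by
    // ForceEarlyReturn before it requests the frame pop.
    struct PendingForcedReturn {
      std::optional<JValue> value;
    };

    void OnMethodExit(PendingForcedReturn& pending,
                      bool frame_was_force_popped,  // i.e. kForcePopFrame was used
                      JValue& return_value) {
      if (frame_was_force_popped && pending.value.has_value()) {
        return_value = *pending.value;  // substitute the forced return value
        pending.value.reset();          // one-shot: clear the internal event
      }
    }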
To simplify book-keeping, the internal event is refcounted, not
associated with any specific jvmtiEnv, and only settable on specific
threads. The internal event is added by the ForceEarlyReturn function
and then removed by the MethodExit event when we update the return
value.
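A rough sketch of that refcounted, per-thread bookkeeping (ThreadEventRefCount and
its methods are made-up names, not ART code):

    // Hypothetical per-thread refcount for the internal method-exit event.
    #include <cstdint>
    #include <mutex>
    #include <unordered_map>

    class ThreadEventRefCount {
     public:
      // Called by ForceEarlyReturn: request the internal event on 'tid'.
      void Add(uint64_t tid) {
        std::lock_guard<std::mutex> lock(mu_);
        if (counts_[tid]++ == 0) {
          // First request on this thread: actually enable the event here.
        }
      }
      // Called from the MethodExit handler once the return value is updated.
      // Assumes a matching Add() was made for this thread earlier.
      void Remove(uint64_t tid) {
        std::lock_guard<std::mutex> lock(mu_);
        if (--counts_[tid] == 0) {
          counts_.erase(tid);
          // Last request gone: disable the event for this thread again.
        }
      }
     private:
      std::mutex mu_;
      std::unordered_map<uint64_t, uint32_t> counts_;
    };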
Bug: 130028055
Test: ./test.py --host
Change-Id: Ifa44605b4e8032605f503a654ddf4bd2fc6b60bf
Diffstat (limited to 'runtime/interpreter/interpreter_common.h')
-rw-r--r-- | runtime/interpreter/interpreter_common.h | 93
1 file changed, 93 insertions, 0 deletions
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 19da77dd3a..752965feff 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -17,6 +17,8 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
 
+#include "android-base/macros.h"
+#include "instrumentation.h"
 #include "interpreter.h"
 #include "interpreter_intrinsics.h"
 
@@ -59,6 +61,7 @@
 #include "stack.h"
 #include "thread.h"
 #include "unstarted_runtime.h"
+#include "verifier/method_verifier.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -132,6 +135,96 @@ bool UseFastInterpreterToInterpreterInvoke(ArtMethod* method)
 NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+
+// Sends the normal method exit event.
+// Returns true if the events succeeded and false if there is a pending exception.
+template <typename T> bool SendMethodExitEvents(
+    Thread* self,
+    const instrumentation::Instrumentation* instrumentation,
+    ShadowFrame& frame,
+    ObjPtr<mirror::Object> thiz,
+    ArtMethod* method,
+    uint32_t dex_pc,
+    T& result) REQUIRES_SHARED(Locks::mutator_lock_);
+
+static inline ALWAYS_INLINE WARN_UNUSED bool
+NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
+}
+
+// NO_INLINE so we won't bloat the interpreter with this very cold lock-release code.
+template <bool kMonitorCounting>
+static NO_INLINE void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(shadow_frame->GetForcePopFrame());
+  // Unlock all monitors.
+  if (kMonitorCounting && shadow_frame->GetMethod()->MustCountLocks()) {
+    // Get the monitors from the shadow-frame monitor-count data.
+    shadow_frame->GetLockCountData().VisitMonitors(
+        [&](mirror::Object** obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+          // Since we don't use the 'obj' pointer after the DoMonitorExit everything should be fine
+          // WRT suspension.
+          DoMonitorExit<kMonitorCounting>(self, shadow_frame, *obj);
+        });
+  } else {
+    std::vector<verifier::MethodVerifier::DexLockInfo> locks;
+    verifier::MethodVerifier::FindLocksAtDexPc(shadow_frame->GetMethod(),
+                                               shadow_frame->GetDexPC(),
+                                               &locks,
+                                               Runtime::Current()->GetTargetSdkVersion());
+    for (const auto& reg : locks) {
+      if (UNLIKELY(reg.dex_registers.empty())) {
+        LOG(ERROR) << "Unable to determine reference locked by "
+                   << shadow_frame->GetMethod()->PrettyMethod() << " at pc "
+                   << shadow_frame->GetDexPC();
+      } else {
+        DoMonitorExit<kMonitorCounting>(
+            self, shadow_frame, shadow_frame->GetVRegReference(*reg.dex_registers.begin()));
+      }
+    }
+  }
+}
+
+enum class MonitorState {
+  kNoMonitorsLocked,
+  kCountingMonitors,
+  kNormalMonitors,
+};
+
+template<MonitorState kMonitorState>
+static inline ALWAYS_INLINE WARN_UNUSED bool PerformNonStandardReturn(
+    Thread* self,
+    ShadowFrame& frame,
+    JValue& result,
+    const instrumentation::Instrumentation* instrumentation,
+    uint16_t num_dex_inst,
+    uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
+  static constexpr bool kMonitorCounting = (kMonitorState == MonitorState::kCountingMonitors);
+  ObjPtr<mirror::Object> thiz(frame.GetThisObject(num_dex_inst));
+  if (UNLIKELY(frame.GetForcePopFrame())) {
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Object> h_thiz(hs.NewHandle(thiz));
+    DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+    if (UNLIKELY(self->IsExceptionPending())) {
+      LOG(WARNING) << "Suppressing exception for non-standard method exit: "
+                   << self->GetException()->Dump();
+      self->ClearException();
+    }
+    if (kMonitorState != MonitorState::kNoMonitorsLocked) {
+      UnlockHeldMonitors<kMonitorCounting>(self, &frame);
+    }
+    DoMonitorCheckOnExit<kMonitorCounting>(self, &frame);
+    result = JValue();
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation))) {
+      SendMethodExitEvents(
+          self, instrumentation, frame, h_thiz.Get(), frame.GetMethod(), dex_pc, result);
+    }
+    return true;
+  }
+  return false;
+}
+
 // Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
 // Returns true on success, otherwise throws an exception and returns false.
 template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp, bool is_quick = false>