JVMTI Force early return
Add support for the can_force_early_return JVMTI capability. This allows
an agent to force Java frames to exit early. Frames that are forced to
exit release all of the monitors they hold, just as on a normal return.
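
For example, an agent could use this roughly as follows (a hypothetical
sketch; 'jvmti' is assumed to be a jvmtiEnv* that has acquired the
capability and 'thread' a jthread already suspended via SuspendThread):

  // Force the top-most frame of 'thread' to return 42 immediately.
  jvmtiError err = jvmti->ForceEarlyReturnInt(thread, 42);
  if (err != JVMTI_ERROR_NONE) {
    // e.g. JVMTI_ERROR_THREAD_NOT_SUSPENDED, JVMTI_ERROR_TYPE_MISMATCH,
    // or JVMTI_ERROR_OPAQUE_FRAME for native methods.
  }
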
We implement this by extending the existing method-exit events so that
the exit value can be modified during the callback. ForceEarlyReturn is
then built on an internal-only event that rewrites the return value of a
method as it returns (the return itself is forced with kForcePopFrame),
avoiding the need to modify the actual interpreter very deeply. This
also makes it simple to keep using the standard deoptimization functions
to force the actual return.
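
In outline the interaction looks like this (a rough sketch of the
design, not the literal code):

  // ForceEarlyReturn*(thread, value):
  //   1. Enable the internal method-exit event on 'thread' and record
  //      'value', keyed by the target ShadowFrame.
  //   2. Mark the frame with kForcePopFrame and deoptimize the thread
  //      so the interpreter unwinds the frame at its next opportunity.
  // MethodExited(frame, return_value):
  //   If a value was recorded for 'frame', write it into 'return_value',
  //   erase the record and disable the internal event on the thread.
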
To simplify book-keeping, the internal event is refcounted, is not
associated with any specific jvmtiEnv, and is only settable on specific
threads. The internal event is enabled by the ForceEarlyReturn function
and then disabled by the MethodExit event once it has updated the
return value.
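
The refcount book-keeping is roughly (mirroring SetInternalEvent in the
change below):

  // SetInternalEvent(thread, event, mode):
  //   JVMTI_ENABLE:  ++refcount[event]; ++thread_refcount[event][thread];
  //   JVMTI_DISABLE: --refcount[event]; --thread_refcount[event][thread];
  // The instrumentation listeners are only added or removed when the
  // global count crosses 0 <-> 1; per-thread deoptimization state only
  // changes when the per-thread count does.
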
Bug: 130028055
Test: ./test.py --host
Change-Id: Ifa44605b4e8032605f503a654ddf4bd2fc6b60bf
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index e889f98..e4ce825 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -344,50 +344,40 @@
return StackUtil::NotifyFramePop(env, thread, depth);
}
- static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env,
- jthread thread ATTRIBUTE_UNUSED,
- jobject value ATTRIBUTE_UNUSED) {
+ static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env, jthread thread, jobject value) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
}
- static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env,
- jthread thread ATTRIBUTE_UNUSED,
- jint value ATTRIBUTE_UNUSED) {
+ static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env, jthread thread, jint value) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
}
- static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env,
- jthread thread ATTRIBUTE_UNUSED,
- jlong value ATTRIBUTE_UNUSED) {
+ static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env, jthread thread, jlong value) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
}
- static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env,
- jthread thread ATTRIBUTE_UNUSED,
- jfloat value ATTRIBUTE_UNUSED) {
+ static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env, jthread thread, jfloat value) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
}
- static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env,
- jthread thread ATTRIBUTE_UNUSED,
- jdouble value ATTRIBUTE_UNUSED) {
+ static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env, jthread thread, jdouble value) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
}
- static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::ForceEarlyReturn<std::nullptr_t>(env, gEventHandler, thread, nullptr);
}
static jvmtiError FollowReferences(jvmtiEnv* env,
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 7433e54..083ba6d 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -278,7 +278,7 @@
.can_generate_native_method_bind_events = 1,
.can_generate_garbage_collection_events = 1,
.can_generate_object_free_events = 1,
- .can_force_early_return = 0,
+ .can_force_early_return = 1,
.can_get_owned_monitor_stack_depth_info = 1,
.can_get_constant_pool = 0,
.can_set_native_method_prefix = 0,
@@ -296,6 +296,7 @@
// can_redefine_any_class:
// can_redefine_classes:
// can_pop_frame:
+// can_force_early_return:
// We need to ensure that inlined code is either not present or can always be deoptimized. This
// is not guaranteed for non-debuggable processes since we might have inlined bootclasspath code
// on a threads stack.
@@ -333,7 +334,7 @@
.can_generate_native_method_bind_events = 0,
.can_generate_garbage_collection_events = 0,
.can_generate_object_free_events = 0,
- .can_force_early_return = 0,
+ .can_force_early_return = 1,
.can_get_owned_monitor_stack_depth_info = 0,
.can_get_constant_pool = 0,
.can_set_native_method_prefix = 0,
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 8e06fe3..627129a 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -362,7 +362,7 @@
// have to deal with use-after-free or the frames being reallocated later.
art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return env->notify_frames.erase(frame) != 0 &&
- !frame->GetForcePopFrame() &&
+ !frame->GetSkipMethodExitEvents() &&
ShouldDispatchOnThread<ArtJvmtiEvent::kFramePop>(env, thread);
}
@@ -619,6 +619,7 @@
return (added && caps.can_access_local_variables == 1) ||
caps.can_generate_breakpoint_events == 1 ||
caps.can_pop_frame == 1 ||
+ caps.can_force_early_return == 1 ||
(caps.can_retransform_classes == 1 &&
IsEventEnabledAnywhere(event) &&
env->event_masks.IsEnabledAnywhere(event));
@@ -639,7 +640,7 @@
if (caps.can_generate_breakpoint_events == 1) {
HandleBreakpointEventsChanged(added);
}
- if (caps.can_pop_frame == 1 && added) {
+ if ((caps.can_pop_frame == 1 || caps.can_force_early_return == 1) && added) {
// TODO We should keep track of how many of these have been enabled and remove it if there are
// no more possible users. This isn't expected to be too common.
art::Runtime::Current()->SetNonStandardExitsEnabled();
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 40e8b80..7174e1b 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -29,9 +29,14 @@
* questions.
*/
+#include <android-base/thread_annotations.h>
+
+#include "base/locks.h"
+#include "base/mutex.h"
#include "events-inl.h"
#include <array>
+#include <functional>
#include <sys/time.h>
#include "arch/context.h"
@@ -41,21 +46,29 @@
#include "base/mutex.h"
#include "deopt_manager.h"
#include "dex/dex_file_types.h"
+#include "events.h"
#include "gc/allocation_listener.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle_scope-inl.h"
+#include "indirect_reference_table.h"
#include "instrumentation.h"
+#include "interpreter/shadow_frame.h"
#include "jni/jni_env_ext-inl.h"
#include "jni/jni_internal.h"
+#include "jvalue-inl.h"
+#include "jvalue.h"
+#include "jvmti.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "monitor-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
#include "stack.h"
+#include "thread.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "ti_phase.h"
@@ -571,7 +584,34 @@
class JvmtiMethodTraceListener final : public art::instrumentation::InstrumentationListener {
public:
- explicit JvmtiMethodTraceListener(EventHandler* handler) : event_handler_(handler) {}
+ explicit JvmtiMethodTraceListener(EventHandler* handler)
+ : event_handler_(handler),
+ non_standard_exits_lock_("JVMTI NonStandard Exits list lock",
+ art::LockLevel::kGenericBottomLock) {}
+
+ void AddDelayedNonStandardExitEvent(const art::ShadowFrame* frame, bool is_object, jvalue val)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_) {
+ art::Thread* self = art::Thread::Current();
+ jobject to_cleanup = nullptr;
+ jobject new_val = is_object ? self->GetJniEnv()->NewGlobalRef(val.l) : nullptr;
+ {
+ art::MutexLock mu(self, non_standard_exits_lock_);
+ NonStandardExitEventInfo saved{ nullptr, { .j = 0 } };
+ if (is_object) {
+ saved.return_val_obj_ = new_val;
+ saved.return_val_.l = saved.return_val_obj_;
+ } else {
+ saved.return_val_.j = val.j;
+ }
+ // Only objects need cleanup.
+ if (UNLIKELY(is_object && non_standard_exits_.find(frame) != non_standard_exits_.end())) {
+ to_cleanup = non_standard_exits_.find(frame)->second.return_val_obj_;
+ }
+ non_standard_exits_.insert_or_assign(frame, saved);
+ }
+ self->GetJniEnv()->DeleteGlobalRef(to_cleanup);
+ }
// Call-back for when a method is entered.
void MethodEntered(art::Thread* self,
@@ -589,15 +629,44 @@
}
}
+ // TODO Maybe try to combine this with below using templates?
// Callback for when a method is exited with a reference return value.
void MethodExited(art::Thread* self,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED,
- art::Handle<art::mirror::Object> return_value)
+ art::instrumentation::OptionalFrame frame,
+ art::MutableHandle<art::mirror::Object>& return_value)
REQUIRES_SHARED(art::Locks::mutator_lock_) override {
- if (!method->IsRuntimeMethod() &&
- event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
+ if (method->IsRuntimeMethod()) {
+ return;
+ }
+ if (frame.has_value() && UNLIKELY(event_handler_->IsEventEnabledAnywhere(
+ ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue))) {
+ DCHECK(!frame->get().GetSkipMethodExitEvents());
+ bool has_return = false;
+ jobject ret_val = nullptr;
+ {
+ art::MutexLock mu(self, non_standard_exits_lock_);
+ const art::ShadowFrame* sframe = &frame.value().get();
+ const auto it = non_standard_exits_.find(sframe);
+ if (it != non_standard_exits_.end()) {
+ ret_val = it->second.return_val_obj_;
+ return_value.Assign(self->DecodeJObject(ret_val));
+ non_standard_exits_.erase(it);
+ has_return = true;
+ }
+ }
+ if (has_return) {
+ ScopedLocalRef<jthread> thr(self->GetJniEnv(),
+ self->GetJniEnv()->NewLocalRef(self->GetPeer()));
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ self->GetJniEnv()->DeleteGlobalRef(ret_val);
+ event_handler_->SetInternalEvent(
+ thr.get(), ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
+ }
+ }
+ if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
DCHECK_EQ(
method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize)->GetReturnTypePrimitive(),
art::Primitive::kPrimNot) << method->PrettyMethod();
@@ -621,14 +690,36 @@
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED,
- const art::JValue& return_value)
- REQUIRES_SHARED(art::Locks::mutator_lock_) override {
- if (!method->IsRuntimeMethod() &&
- event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
+ art::instrumentation::OptionalFrame frame,
+ art::JValue& return_value) REQUIRES_SHARED(art::Locks::mutator_lock_) override {
+ if (frame.has_value() &&
+ UNLIKELY(event_handler_->IsEventEnabledAnywhere(
+ ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue))) {
+ DCHECK(!frame->get().GetSkipMethodExitEvents());
+ bool has_return = false;
+ {
+ art::MutexLock mu(self, non_standard_exits_lock_);
+ const art::ShadowFrame* sframe = &frame.value().get();
+ const auto it = non_standard_exits_.find(sframe);
+ if (it != non_standard_exits_.end()) {
+ return_value.SetJ(it->second.return_val_.j);
+ non_standard_exits_.erase(it);
+ has_return = true;
+ }
+ }
+ if (has_return) {
+ ScopedLocalRef<jthread> thr(self->GetJniEnv(),
+ self->GetJniEnv()->NewLocalRef(self->GetPeer()));
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ event_handler_->SetInternalEvent(
+ thr.get(), ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
+ }
+ }
+ if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
DCHECK_NE(
method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize)->GetReturnTypePrimitive(),
art::Primitive::kPrimNot) << method->PrettyMethod();
- DCHECK(!self->IsExceptionPending());
+ DCHECK(!self->IsExceptionPending()) << self->GetException()->Dump();
jvalue val;
art::JNIEnvExt* jnienv = self->GetJniEnv();
// 64bit integer is the largest value in the union so we should be fine simply copying it into
@@ -944,23 +1035,65 @@
}
private:
+ struct NonStandardExitEventInfo {
+ // If non-null, this is a global reference to the object to be returned.
+ jobject return_val_obj_;
+ // The return-value to be passed to the MethodExit event.
+ jvalue return_val_;
+ };
+
EventHandler* const event_handler_;
+
+ mutable art::Mutex non_standard_exits_lock_
+ ACQUIRED_BEFORE(art::Locks::instrument_entrypoints_lock_);
+
+ std::unordered_map<const art::ShadowFrame*, NonStandardExitEventInfo> non_standard_exits_
+ GUARDED_BY(non_standard_exits_lock_);
};
-static uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event) {
+uint32_t EventHandler::GetInstrumentationEventsFor(ArtJvmtiEvent event) {
switch (event) {
case ArtJvmtiEvent::kMethodEntry:
return art::instrumentation::Instrumentation::kMethodEntered;
- case ArtJvmtiEvent::kMethodExit:
- return art::instrumentation::Instrumentation::kMethodExited |
- art::instrumentation::Instrumentation::kMethodUnwind;
+ case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
+ // TODO We would like to return only kMethodExited here, but supporting that alone is difficult.
+ // return art::instrumentation::Instrumentation::kMethodExited;
+ case ArtJvmtiEvent::kMethodExit: {
+ DCHECK(event == ArtJvmtiEvent::kMethodExit ||
+ event == ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue)
+ << "event = " << static_cast<uint32_t>(event);
+ ArtJvmtiEvent other = event == ArtJvmtiEvent::kMethodExit
+ ? ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue
+ : ArtJvmtiEvent::kMethodExit;
+ if (LIKELY(!IsEventEnabledAnywhere(other))) {
+ return art::instrumentation::Instrumentation::kMethodExited |
+ art::instrumentation::Instrumentation::kMethodUnwind;
+ } else {
+ // The event needs to be kept around/is already enabled by the other jvmti event that uses
+ // the same instrumentation event.
+ return 0u;
+ }
+ }
case ArtJvmtiEvent::kFieldModification:
return art::instrumentation::Instrumentation::kFieldWritten;
case ArtJvmtiEvent::kFieldAccess:
return art::instrumentation::Instrumentation::kFieldRead;
case ArtJvmtiEvent::kBreakpoint:
- case ArtJvmtiEvent::kSingleStep:
- return art::instrumentation::Instrumentation::kDexPcMoved;
+ case ArtJvmtiEvent::kSingleStep: {
+ // Breakpoint and single-step share the same underlying art-instrumentation event, so we must
+ // skip adding the listeners when the other event already has them installed. Each event still
+ // gets its own deoptimization request, however.
+ DCHECK(event == ArtJvmtiEvent::kBreakpoint || event == ArtJvmtiEvent::kSingleStep);
+ ArtJvmtiEvent other = event == ArtJvmtiEvent::kBreakpoint ? ArtJvmtiEvent::kSingleStep
+ : ArtJvmtiEvent::kBreakpoint;
+ if (LIKELY(!IsEventEnabledAnywhere(other))) {
+ return art::instrumentation::Instrumentation::kDexPcMoved;
+ } else {
+ // The event needs to be kept around/is already enabled by the other jvmti event that uses
+ // the same instrumentation event.
+ return 0u;
+ }
+ }
case ArtJvmtiEvent::kFramePop:
return art::instrumentation::Instrumentation::kWatchedFramePop;
case ArtJvmtiEvent::kException:
@@ -999,6 +1132,7 @@
case ArtJvmtiEvent::kFieldAccess:
case ArtJvmtiEvent::kSingleStep:
case ArtJvmtiEvent::kFramePop:
+ case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
return thread == nullptr ? DeoptRequirement::kFull : DeoptRequirement::kThread;
case ArtJvmtiEvent::kVmInit:
case ArtJvmtiEvent::kVmDeath:
@@ -1076,18 +1210,8 @@
bool enable) {
// Add the actual listeners.
uint32_t new_events = GetInstrumentationEventsFor(event);
- if (new_events == art::instrumentation::Instrumentation::kDexPcMoved) {
- // Need to skip adding the listeners if the event is breakpoint/single-step since those events
- // share the same art-instrumentation underlying event. We need to give them their own deopt
- // request though so the test waits until here.
- DCHECK(event == ArtJvmtiEvent::kBreakpoint || event == ArtJvmtiEvent::kSingleStep);
- ArtJvmtiEvent other = event == ArtJvmtiEvent::kBreakpoint ? ArtJvmtiEvent::kSingleStep
- : ArtJvmtiEvent::kBreakpoint;
- if (IsEventEnabledAnywhere(other)) {
- // The event needs to be kept around/is already enabled by the other jvmti event that uses the
- // same instrumentation event.
- return;
- }
+ if (new_events == 0) {
+ return;
}
art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
@@ -1204,6 +1328,7 @@
case ArtJvmtiEvent::kExceptionCatch:
case ArtJvmtiEvent::kBreakpoint:
case ArtJvmtiEvent::kSingleStep:
+ case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
SetupTraceListener(method_trace_listener_.get(), event, enable);
return;
case ArtJvmtiEvent::kMonitorContendedEnter:
@@ -1278,6 +1403,90 @@
}
}
+static bool IsInternalEvent(ArtJvmtiEvent event) {
+ return static_cast<uint32_t>(event) >=
+ static_cast<uint32_t>(ArtJvmtiEvent::kMinInternalEventTypeVal);
+}
+
+jvmtiError EventHandler::SetInternalEvent(jthread thread,
+ ArtJvmtiEvent event,
+ jvmtiEventMode mode) {
+ CHECK(IsInternalEvent(event)) << static_cast<uint32_t>(event);
+
+ art::Thread* self = art::Thread::Current();
+ art::Thread* target = nullptr;
+ ScopedNoUserCodeSuspension snucs(self);
+ // The overall state across all threads and jvmtiEnvs. This is used to control the state of the
+ // instrumentation handlers since we only want each added once.
+ bool old_state;
+ bool new_state;
+ // The state for just the current 'thread' (including null) across all jvmtiEnvs. This is used to
+ // control the deoptimization state since we do refcounting for that and need to perform different
+ // actions depending on if the event is limited to a single thread or global.
+ bool old_thread_state;
+ bool new_thread_state;
+ {
+ // From now on we know we cannot get suspended by user-code.
+ // NB This does a SuspendCheck (during thread state change) so we need to
+ // make sure we don't have the 'suspend_lock' locked here.
+ art::ScopedObjectAccess soa(self);
+ art::WriterMutexLock el_mu(self, envs_lock_);
+ art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ jvmtiError err = ERR(INTERNAL);
+ if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ return err;
+ } else if (target->IsStillStarting() || target->GetState() == art::ThreadState::kStarting) {
+ target->Dump(LOG_STREAM(WARNING) << "Is not alive: ");
+ return ERR(THREAD_NOT_ALIVE);
+ }
+
+ // Make sure we have a valid jthread to pass to deopt-manager.
+ ScopedLocalRef<jthread> thread_lr(
+ soa.Env(), thread != nullptr ? nullptr : soa.AddLocalReference<jthread>(target->GetPeer()));
+ if (thread == nullptr) {
+ thread = thread_lr.get();
+ }
+ CHECK(thread != nullptr);
+
+ {
+ DCHECK_GE(GetInternalEventRefcount(event) + (mode == JVMTI_ENABLE ? 1 : -1), 0)
+ << "Refcount: " << GetInternalEventRefcount(event);
+ DCHECK_GE(GetInternalEventThreadRefcount(event, target) + (mode == JVMTI_ENABLE ? 1 : -1), 0)
+ << "Refcount: " << GetInternalEventThreadRefcount(event, target);
+ DCHECK_GE(GetInternalEventRefcount(event), GetInternalEventThreadRefcount(event, target));
+ old_state = GetInternalEventRefcount(event) > 0;
+ old_thread_state = GetInternalEventThreadRefcount(event, target) > 0;
+ if (mode == JVMTI_ENABLE) {
+ new_state = IncrInternalEventRefcount(event) > 0;
+ new_thread_state = IncrInternalEventThreadRefcount(event, target) > 0;
+ } else {
+ new_state = DecrInternalEventRefcount(event) > 0;
+ new_thread_state = DecrInternalEventThreadRefcount(event, target) > 0;
+ }
+ if (old_state != new_state) {
+ global_mask.Set(event, new_state);
+ }
+ }
+ }
+ // Handle any special work required for the event type. We still hold the
+ // user_code_suspension_lock_ so there won't be any interleaving here.
+ if (new_state != old_state) {
+ HandleEventType(event, mode == JVMTI_ENABLE);
+ }
+ if (old_thread_state != new_thread_state) {
+ HandleEventDeopt(event, thread, new_thread_state);
+ }
+ return OK;
+}
+
+static bool IsDirectlySettableEvent(ArtJvmtiEvent event) {
+ return !IsInternalEvent(event);
+}
+
+static bool EventIsNormal(ArtJvmtiEvent event) {
+ return EventMask::EventIsInRange(event) && IsDirectlySettableEvent(event);
+}
+
jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
jthread thread,
ArtJvmtiEvent event,
@@ -1286,7 +1495,7 @@
return ERR(ILLEGAL_ARGUMENT);
}
- if (!EventMask::EventIsInRange(event)) {
+ if (!EventIsNormal(event)) {
return ERR(INVALID_EVENT_TYPE);
}
@@ -1385,6 +1594,46 @@
}
}
+void EventHandler::AddDelayedNonStandardExitEvent(const art::ShadowFrame* frame,
+ bool is_object,
+ jvalue val) {
+ method_trace_listener_->AddDelayedNonStandardExitEvent(frame, is_object, val);
+}
+
+static size_t GetInternalEventIndex(ArtJvmtiEvent event) {
+ CHECK(IsInternalEvent(event));
+ return static_cast<size_t>(event) - static_cast<size_t>(ArtJvmtiEvent::kMinInternalEventTypeVal);
+}
+
+int32_t EventHandler::DecrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
+ return --GetInternalEventThreadRefcount(event, target);
+}
+
+int32_t EventHandler::IncrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
+ return ++GetInternalEventThreadRefcount(event, target);
+}
+
+int32_t& EventHandler::GetInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
+ auto& refs = internal_event_thread_refcount_[GetInternalEventIndex(event)];
+ UniqueThread target_ut{target, target->GetTid()};
+ if (refs.find(target_ut) == refs.end()) {
+ refs.insert({target_ut, 0});
+ }
+ return refs.at(target_ut);
+}
+
+int32_t EventHandler::DecrInternalEventRefcount(ArtJvmtiEvent event) {
+ return --internal_event_refcount_[GetInternalEventIndex(event)];
+}
+
+int32_t EventHandler::IncrInternalEventRefcount(ArtJvmtiEvent event) {
+ return ++internal_event_refcount_[GetInternalEventIndex(event)];
+}
+
+int32_t EventHandler::GetInternalEventRefcount(ArtJvmtiEvent event) const {
+ return internal_event_refcount_[GetInternalEventIndex(event)];
+}
+
void EventHandler::Shutdown() {
// Need to remove the method_trace_listener_ if it's there.
art::Thread* self = art::Thread::Current();
@@ -1398,7 +1647,8 @@
EventHandler::EventHandler()
: envs_lock_("JVMTI Environment List Lock", art::LockLevel::kPostMutatorTopLockLevel),
- frame_pop_enabled(false) {
+ frame_pop_enabled(false),
+ internal_event_refcount_({0}) {
alloc_listener_.reset(new JvmtiAllocationListener(this));
ddm_listener_.reset(new JvmtiDdmChunkListener(this));
gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index d54c87a..ac86d0c 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -18,14 +18,17 @@
#define ART_OPENJDKJVMTI_EVENTS_H_
#include <bitset>
+#include <unordered_map>
#include <vector>
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
+#include "android-base/thread_annotations.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "jvmti.h"
+#include "managed_stack.h"
#include "thread.h"
namespace openjdkjvmti {
@@ -73,17 +76,43 @@
kGarbageCollectionFinish = JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
kObjectFree = JVMTI_EVENT_OBJECT_FREE,
kVmObjectAlloc = JVMTI_EVENT_VM_OBJECT_ALLOC,
+ // Internal event to mark a ClassFileLoadHook as one created with the can_retransform_classes
+ // capability.
kClassFileLoadHookRetransformable = JVMTI_MAX_EVENT_TYPE_VAL + 1,
kDdmPublishChunk = JVMTI_MAX_EVENT_TYPE_VAL + 2,
- kMaxEventTypeVal = kDdmPublishChunk,
+ kMaxNormalEventTypeVal = kDdmPublishChunk,
+
+ // All events that follow are used internally to implement JVMTI functions. They are not
+ // settable directly by agents.
+ kMinInternalEventTypeVal = kMaxNormalEventTypeVal + 1,
+
+ // Internal event we use to implement the ForceEarlyReturn functions.
+ kForceEarlyReturnUpdateReturnValue = kMinInternalEventTypeVal,
+ kMaxInternalEventTypeVal = kForceEarlyReturnUpdateReturnValue,
+
+ kMaxEventTypeVal = kMaxInternalEventTypeVal,
};
+constexpr jint kInternalEventCount = static_cast<jint>(ArtJvmtiEvent::kMaxInternalEventTypeVal) -
+ static_cast<jint>(ArtJvmtiEvent::kMinInternalEventTypeVal) + 1;
+
using ArtJvmtiEventDdmPublishChunk = void (*)(jvmtiEnv *jvmti_env,
JNIEnv* jni_env,
jint data_type,
jint data_len,
const jbyte* data);
+// It is not enough to store a Thread pointer, as these may be reused. Use the pointer and the
+// thread id.
+// Note: We could just use the tid like tracing does.
+using UniqueThread = std::pair<art::Thread*, uint32_t>;
+
+struct UniqueThreadHasher {
+ std::size_t operator()(const UniqueThread& k) const {
+ return std::hash<uint32_t>{}(k.second) ^ (std::hash<void*>{}(k.first) << 1);
+ }
+};
+
struct ArtJvmtiEventCallbacks : jvmtiEventCallbacks {
ArtJvmtiEventCallbacks() : DdmPublishChunk(nullptr) {
memset(this, 0, sizeof(jvmtiEventCallbacks));
@@ -141,10 +170,6 @@
// The per-thread enabled events.
- // It is not enough to store a Thread pointer, as these may be reused. Use the pointer and the
- // thread id.
- // Note: We could just use the tid like tracing does.
- using UniqueThread = std::pair<art::Thread*, uint32_t>;
// TODO: Native thread objects are immovable, so we can use them as keys in an (unordered) map,
// if necessary.
std::vector<std::pair<UniqueThread, EventMask>> thread_event_masks;
@@ -198,6 +223,16 @@
return global_mask.Test(event);
}
+ // Sets an internal event. Unlike normal JVMTI events, internal events are not associated with
+ // any particular jvmtiEnv and are refcounted. The refcounting lets us easily enable an event
+ // at the start of a JVMTI function and disable it again from within the requested event
+ // callback. Since these events are used to implement various JVMTI functions they always have
+ // a single target thread. If target is null the current thread is used.
+ jvmtiError SetInternalEvent(jthread target,
+ ArtJvmtiEvent event,
+ jvmtiEventMode mode)
+ REQUIRES(!envs_lock_, !art::Locks::mutator_lock_);
+
jvmtiError SetEvent(ArtJvmTiEnv* env,
jthread thread,
ArtJvmtiEvent event,
@@ -246,9 +281,15 @@
inline void DispatchEventOnEnv(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const
REQUIRES(!envs_lock_);
+ void AddDelayedNonStandardExitEvent(const art::ShadowFrame* frame, bool is_object, jvalue val)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
+
private:
void SetupTraceListener(JvmtiMethodTraceListener* listener, ArtJvmtiEvent event, bool enable);
+ uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event);
+
// Specifically handle the FramePop event which it might not always be possible to turn off.
void SetupFramePopTraceListener(bool enable);
@@ -325,6 +366,21 @@
bool OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event);
+ int32_t GetInternalEventRefcount(ArtJvmtiEvent event) const REQUIRES(envs_lock_);
+ // Increment internal event refcount for the given event and return the new count.
+ int32_t IncrInternalEventRefcount(ArtJvmtiEvent event) REQUIRES(envs_lock_);
+ // Decrement internal event refcount for the given event and return the new count.
+ int32_t DecrInternalEventRefcount(ArtJvmtiEvent event) REQUIRES(envs_lock_);
+
+ int32_t& GetInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target)
+ REQUIRES(envs_lock_, art::Locks::thread_list_lock_);
+ // Increment the per-thread internal event refcount for the given event; returns the new count.
+ int32_t IncrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target)
+ REQUIRES(envs_lock_, art::Locks::thread_list_lock_);
+ // Decrement the per-thread internal event refcount for the given event; returns the new count.
+ int32_t DecrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target)
+ REQUIRES(envs_lock_, art::Locks::thread_list_lock_);
+
// List of all JvmTiEnv objects that have been created, in their creation order. It is a std::list
// since we mostly access it by iterating over the entire thing, only ever append to the end, and
// need to be able to remove arbitrary elements from it.
@@ -348,6 +404,16 @@
// continue to listen to this event even if it has been disabled.
// TODO We could remove the listeners once all jvmtiEnvs have drained their shadow-frame vectors.
bool frame_pop_enabled;
+
+ // The overall refcount for each internal event across all threads.
+ std::array<int32_t, kInternalEventCount> internal_event_refcount_ GUARDED_BY(envs_lock_);
+ // The refcount for each thread for each internal event.
+ // TODO We should clean both this and the normal EventMask lists up when threads end.
+ std::array<std::unordered_map<UniqueThread, int32_t, UniqueThreadHasher>, kInternalEventCount>
+ internal_event_thread_refcount_
+ GUARDED_BY(envs_lock_) GUARDED_BY(art::Locks::thread_list_lock_);
+
+ friend class JvmtiMethodTraceListener;
};
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 75f0556..38257f1 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -32,10 +32,13 @@
#include "ti_stack.h"
#include <algorithm>
+#include <initializer_list>
#include <list>
#include <unordered_map>
#include <vector>
+#include "android-base/macros.h"
+#include "android-base/thread_annotations.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -44,21 +47,35 @@
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
+#include "base/locks.h"
+#include "base/macros.h"
#include "base/mutex.h"
#include "deopt_manager.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
+#include "dex/dex_instruction-inl.h"
+#include "dex/primitive.h"
+#include "events.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
+#include "instrumentation.h"
+#include "interpreter/shadow_frame-inl.h"
+#include "interpreter/shadow_frame.h"
#include "jni/jni_env_ext.h"
#include "jni/jni_internal.h"
+#include "jvalue-inl.h"
+#include "jvalue.h"
+#include "jvmti.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
#include "stack.h"
+#include "thread.h"
+#include "thread_state.h"
#include "ti_logging.h"
#include "ti_thread.h"
#include "thread-current-inl.h"
@@ -1087,96 +1104,333 @@
return OK;
}
+namespace {
+
+enum class NonStandardExitType {
+ kPopFrame,
+ kForceReturn,
+};
+
+template<NonStandardExitType kExitType>
+class NonStandardExitFrames {
+ public:
+ NonStandardExitFrames(art::Thread* self, jvmtiEnv* env, jthread thread)
+ REQUIRES(!art::Locks::thread_suspend_count_lock_)
+ ACQUIRE_SHARED(art::Locks::mutator_lock_)
+ ACQUIRE(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
+ : snucs_(self) {
+ // We keep the user_code_suspension_lock_ held for our entire lifetime.
+ art::Locks::user_code_suspension_lock_->AssertExclusiveHeld(self);
+
+ // From now on we know we cannot get suspended by user-code.
+ // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+ // have the 'suspend_lock' locked here.
+ old_state_ = self->TransitionFromSuspendedToRunnable();
+ art::ScopedObjectAccessUnchecked soau(self);
+
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
+
+ if (!ThreadUtil::GetAliveNativeThread(thread, soau, &target_, &result_)) {
+ return;
+ }
+ {
+ art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (target_ != self && target_->GetUserCodeSuspendCount() == 0) {
+ // The target thread must be suspended by user code, unless it is the current thread.
+ result_ = ERR(THREAD_NOT_SUSPENDED);
+ return;
+ }
+ }
+ JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target_);
+ constexpr art::StackVisitor::StackWalkKind kWalkKind =
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
+ if (tls_data != nullptr &&
+ tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
+ tls_data->disable_pop_frame_depth ==
+ art::StackVisitor::ComputeNumFrames(target_, kWalkKind)) {
+ JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
+ << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
+ << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
+ << "more information.";
+ result_ = ERR(OPAQUE_FRAME);
+ return;
+ }
+ // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
+ // done.
+ std::unique_ptr<art::Context> context(art::Context::Create());
+ FindFrameAtDepthVisitor final_frame(target_, context.get(), 0);
+ FindFrameAtDepthVisitor penultimate_frame(target_, context.get(), 1);
+ final_frame.WalkStack();
+ penultimate_frame.WalkStack();
+
+ if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
+ // Cannot do it if there is only one frame!
+ JVMTI_LOG(INFO, env) << "Can not pop final frame off of a stack";
+ result_ = ERR(NO_MORE_FRAMES);
+ return;
+ }
+
+ art::ArtMethod* called_method = final_frame.GetMethod();
+ art::ArtMethod* calling_method = penultimate_frame.GetMethod();
+ if (!CheckFunctions(env, calling_method, called_method)) {
+ return;
+ }
+ DCHECK(!called_method->IsNative()) << called_method->PrettyMethod();
+
+ // From here we are sure to succeed.
+ result_ = OK;
+
+ // Get/create a shadow frame
+ final_frame_ = final_frame.GetOrCreateShadowFrame(&created_final_frame_);
+ penultimate_frame_ =
+ (calling_method->IsNative()
+ ? nullptr
+ : penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame_));
+
+ final_frame_id_ = final_frame.GetFrameId();
+ penultimate_frame_id_ = penultimate_frame.GetFrameId();
+
+ CHECK_NE(final_frame_, penultimate_frame_) << "Frames at different depths not different!";
+ }
+
+ bool CheckFunctions(jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called)
+ REQUIRES(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ ~NonStandardExitFrames() RELEASE_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!art::Locks::thread_list_lock_)
+ RELEASE(art::Locks::user_code_suspension_lock_) {
+ art::Thread* self = art::Thread::Current();
+ DCHECK_EQ(old_state_, art::ThreadState::kNative)
+ << "Unexpected thread state on entering PopFrame!";
+ self->TransitionFromRunnableToSuspended(old_state_);
+ }
+
+ ScopedNoUserCodeSuspension snucs_;
+ art::ShadowFrame* final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
+ art::ShadowFrame* penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
+ bool created_final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
+ bool created_penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
+ uint32_t final_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
+ uint32_t penultimate_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
+ art::Thread* target_ GUARDED_BY(art::Locks::thread_list_lock_) = nullptr;
+ art::ThreadState old_state_ = art::ThreadState::kTerminated;
+ jvmtiError result_ = ERR(INTERNAL);
+};
+
+template <>
+bool NonStandardExitFrames<NonStandardExitType::kForceReturn>::CheckFunctions(
+ jvmtiEnv* env, art::ArtMethod* calling ATTRIBUTE_UNUSED, art::ArtMethod* called) {
+ if (UNLIKELY(called->IsNative())) {
+ result_ = ERR(OPAQUE_FRAME);
+ JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod()
+ << " because it is native.";
+ return false;
+ } else {
+ return true;
+ }
+}
+
+template <>
+bool NonStandardExitFrames<NonStandardExitType::kPopFrame>::CheckFunctions(
+ jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called) {
+ if (UNLIKELY(calling->IsNative() || called->IsNative())) {
+ result_ = ERR(OPAQUE_FRAME);
+ JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod() << " to "
+ << calling->PrettyMethod() << " because at least one of them is native.";
+ return false;
+ } else {
+ return true;
+ }
+}
+
+class SetupMethodExitEvents {
+ public:
+ SetupMethodExitEvents(art::Thread* self,
+ EventHandler* event_handler,
+ jthread target) REQUIRES(!art::Locks::mutator_lock_,
+ !art::Locks::user_code_suspension_lock_,
+ !art::Locks::thread_list_lock_)
+ : self_(self), event_handler_(event_handler), target_(target) {
+ DCHECK(target != nullptr);
+ art::Locks::mutator_lock_->AssertNotHeld(self_);
+ art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
+ art::Locks::thread_list_lock_->AssertNotHeld(self_);
+ event_handler_->SetInternalEvent(
+ target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_ENABLE);
+ }
+
+ ~SetupMethodExitEvents() REQUIRES(!art::Locks::mutator_lock_,
+ !art::Locks::user_code_suspension_lock_,
+ !art::Locks::thread_list_lock_) {
+ art::Locks::mutator_lock_->AssertNotHeld(self_);
+ art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
+ art::Locks::thread_list_lock_->AssertNotHeld(self_);
+ if (failed_) {
+ event_handler_->SetInternalEvent(
+ target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
+ }
+ }
+
+ void NotifyFailure() {
+ failed_ = true;
+ }
+
+ private:
+ art::Thread* self_;
+ EventHandler* event_handler_;
+ jthread target_;
+ bool failed_ = false;
+};
+
+template <typename T>
+void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
+
+template <typename T>
+void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value) {
+ art::JValue val = art::JValue::FromPrimitive(value);
+ jvalue jval{ .j = val.GetJ() };
+ handler->AddDelayedNonStandardExitEvent(frame, false, jval);
+}
+
+template <>
+void AddDelayedMethodExitEvent<std::nullptr_t>(EventHandler* handler,
+ art::ShadowFrame* frame,
+ std::nullptr_t null_val ATTRIBUTE_UNUSED) {
+ jvalue jval;
+ memset(&jval, 0, sizeof(jval));
+ handler->AddDelayedNonStandardExitEvent(frame, false, jval);
+}
+
+template <>
+void AddDelayedMethodExitEvent<jobject>(EventHandler* handler,
+ art::ShadowFrame* frame,
+ jobject obj) {
+ jvalue jval{ .l = art::Thread::Current()->GetJniEnv()->NewGlobalRef(obj) };
+ handler->AddDelayedNonStandardExitEvent(frame, true, jval);
+}
+
+template <typename T>
+bool ValidReturnType(art::Thread* self, art::ObjPtr<art::mirror::Class> return_type, T value)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
+
+#define SIMPLE_VALID_RETURN_TYPE(type, ...) \
+ template <> \
+ bool ValidReturnType<type>(art::Thread * self ATTRIBUTE_UNUSED, \
+ art::ObjPtr<art::mirror::Class> return_type, \
+ type value ATTRIBUTE_UNUSED) { \
+ static constexpr std::initializer_list<art::Primitive::Type> types{ __VA_ARGS__ }; \
+ return std::find(types.begin(), types.end(), return_type->GetPrimitiveType()) != types.end(); \
+ }
+
+SIMPLE_VALID_RETURN_TYPE(jlong, art::Primitive::kPrimLong);
+SIMPLE_VALID_RETURN_TYPE(jfloat, art::Primitive::kPrimFloat);
+SIMPLE_VALID_RETURN_TYPE(jdouble, art::Primitive::kPrimDouble);
+SIMPLE_VALID_RETURN_TYPE(std::nullptr_t, art::Primitive::kPrimVoid);
+SIMPLE_VALID_RETURN_TYPE(jint,
+ art::Primitive::kPrimInt,
+ art::Primitive::kPrimChar,
+ art::Primitive::kPrimBoolean,
+ art::Primitive::kPrimShort,
+ art::Primitive::kPrimByte);
+#undef SIMPLE_VALID_RETURN_TYPE
+
+template <>
+bool ValidReturnType<jobject>(art::Thread* self,
+ art::ObjPtr<art::mirror::Class> return_type,
+ jobject return_value) {
+ if (return_type->IsPrimitive()) {
+ return false;
+ }
+ if (return_value == nullptr) {
+ // Null can be used for anything.
+ return true;
+ }
+ return return_type->IsAssignableFrom(self->DecodeJObject(return_value)->GetClass());
+}
+
+} // namespace
+
jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
art::Thread* self = art::Thread::Current();
- art::Thread* target;
-
- ScopedNoUserCodeSuspension snucs(self);
- // From now on we know we cannot get suspended by user-code.
- // NB This does a SuspendCheck (during thread state change) so we need to make
- // sure we don't have the 'suspend_lock' locked here.
- art::ScopedObjectAccess soa(self);
- art::Locks::thread_list_lock_->ExclusiveLock(self);
- jvmtiError err = ERR(INTERNAL);
- if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ NonStandardExitFrames<NonStandardExitType::kPopFrame> frames(self, env, thread);
+ if (frames.result_ != OK) {
art::Locks::thread_list_lock_->ExclusiveUnlock(self);
- return err;
+ return frames.result_;
}
- {
- art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
- if (target == self || target->GetUserCodeSuspendCount() == 0) {
- // We cannot be the current thread for this function.
- art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
- art::Locks::thread_list_lock_->ExclusiveUnlock(self);
- return ERR(THREAD_NOT_SUSPENDED);
- }
- art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
- }
- JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
- constexpr art::StackVisitor::StackWalkKind kWalkKind =
- art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
- if (tls_data != nullptr &&
- tls_data->disable_pop_frame_depth !=
- JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
- tls_data->disable_pop_frame_depth ==
- art::StackVisitor::ComputeNumFrames(target, kWalkKind)) {
- JVMTI_LOG(WARNING, env)
- << "Disallowing frame pop due to in-progress class-load/prepare. "
- << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
- << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
- << "more information.";
- art::Locks::thread_list_lock_->ExclusiveUnlock(self);
- return ERR(OPAQUE_FRAME);
- }
- // We hold the user_code_suspension_lock_ so the target thread is staying
- // suspended until we are done.
- std::unique_ptr<art::Context> context(art::Context::Create());
- FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
- FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
- final_frame.WalkStack();
- penultimate_frame.WalkStack();
-
- if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
- // Cannot do it if there is only one frame!
- art::Locks::thread_list_lock_->ExclusiveUnlock(self);
- return ERR(NO_MORE_FRAMES);
- }
-
- art::ArtMethod* called_method = final_frame.GetMethod();
- art::ArtMethod* calling_method = penultimate_frame.GetMethod();
- if (calling_method->IsNative() || called_method->IsNative()) {
- art::Locks::thread_list_lock_->ExclusiveUnlock(self);
- return ERR(OPAQUE_FRAME);
- }
- // From here we are sure to succeed.
-
- // Get/create a shadow frame
- bool created_final_frame = false;
- bool created_penultimate_frame = false;
- art::ShadowFrame* called_shadow_frame =
- final_frame.GetOrCreateShadowFrame(&created_final_frame);
- art::ShadowFrame* calling_shadow_frame =
- penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);
-
- CHECK_NE(called_shadow_frame, calling_shadow_frame)
- << "Frames at different depths not different!";
-
// Tell the shadow-frame to return immediately and skip all exit events.
- called_shadow_frame->SetForcePopFrame(true);
- calling_shadow_frame->SetForceRetryInstruction(true);
-
- // Make sure can we will go to the interpreter and use the shadow frames. The
- // early return for the final frame will force everything to the interpreter
- // so we only need to instrument if it was not present.
- if (created_final_frame) {
- art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ frames.penultimate_frame_->SetForceRetryInstruction(true);
+ frames.final_frame_->SetForcePopFrame(true);
+ frames.final_frame_->SetSkipMethodExitEvents(true);
+ if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
+ art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
DeoptManager::Get()->DeoptimizeThread(self);
});
- target->RequestSynchronousCheckpoint(&fc);
+ frames.target_->RequestSynchronousCheckpoint(&fc);
} else {
art::Locks::thread_list_lock_->ExclusiveUnlock(self);
}
return OK;
}
+template <typename T>
+jvmtiError
+StackUtil::ForceEarlyReturn(jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value) {
+ art::Thread* self = art::Thread::Current();
+ // We don't want to use the null == current-thread idiom here, since for the events we use
+ // internally to implement force-early-return null instead means all threads. Just explicitly
+ // resolve the current jthread if needed.
+ ScopedLocalRef<jthread> cur_thread(self->GetJniEnv(), nullptr);
+ if (UNLIKELY(thread == nullptr)) {
+ art::ScopedObjectAccess soa(self);
+ cur_thread.reset(soa.AddLocalReference<jthread>(self->GetPeer()));
+ thread = cur_thread.get();
+ }
+ // Set up the method-exit events used to implement early return before taking the locks.
+ // Destructor ordering ensures they are torn down again if anything goes wrong.
+ SetupMethodExitEvents smee(self, event_handler, thread);
+ NonStandardExitFrames<NonStandardExitType::kForceReturn> frames(self, env, thread);
+ if (frames.result_ != OK) {
+ smee.NotifyFailure();
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+ return frames.result_;
+ } else if (!ValidReturnType<T>(
+ self, frames.final_frame_->GetMethod()->ResolveReturnType(), value)) {
+ smee.NotifyFailure();
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+ return ERR(TYPE_MISMATCH);
+ } else if (frames.final_frame_->GetForcePopFrame()) {
+ // TODO We should really support this.
+ smee.NotifyFailure();
+ std::string thread_name;
+ frames.target_->GetThreadName(thread_name);
+ JVMTI_LOG(WARNING, env) << "PopFrame or force-return already pending on thread " << thread_name;
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+ return ERR(OPAQUE_FRAME);
+ }
+ // Tell the shadow-frame to return immediately; unlike PopFrame we keep the exit events.
+ frames.final_frame_->SetForcePopFrame(true);
+ AddDelayedMethodExitEvent<T>(event_handler, frames.final_frame_, value);
+ if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
+ art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DeoptManager::Get()->DeoptimizeThread(self);
+ });
+ frames.target_->RequestSynchronousCheckpoint(&fc);
+ } else {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+ }
+ return OK;
+}
+
+// Instantiate the ForceEarlyReturn templates.
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jint);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jlong);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jfloat);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jdouble);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jobject);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, std::nullptr_t);
+
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_stack.h b/openjdkjvmti/ti_stack.h
index 55c4269..918aa4c 100644
--- a/openjdkjvmti/ti_stack.h
+++ b/openjdkjvmti/ti_stack.h
@@ -37,6 +37,7 @@
#include "art_method.h"
#include "base/mutex.h"
+#include "events.h"
#include "stack.h"
namespace openjdkjvmti {
@@ -83,6 +84,10 @@
static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth);
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread);
+
+ template <typename T>
+ static jvmtiError ForceEarlyReturn(
+ jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value);
};
struct FindFrameAtDepthVisitor : art::StackVisitor {
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 6c50a20..f2ae996 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -229,6 +229,7 @@
const art::ScopedObjectAccessAlreadyRunnable& soa,
/*out*/ art::Thread** thr,
/*out*/ jvmtiError* err) {
+ art::ScopedExceptionStorage sse(soa.Self());
if (thread == nullptr) {
*thr = art::Thread::Current();
return true;
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index c5443bf..5bf8a3f 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -39,6 +39,7 @@
#include "base/macros.h"
#include "base/mutex.h"
+#include "handle.h"
#include "thread.h"
namespace art {
@@ -46,6 +47,9 @@
class ScopedObjectAccessAlreadyRunnable;
class Thread;
class Closure;
+namespace mirror {
+class Throwable;
+} // namespace mirror
} // namespace art
namespace openjdkjvmti {