Diffstat (limited to 'runtime/thread.h')
-rw-r--r--  runtime/thread.h  198
1 file changed, 122 insertions(+), 76 deletions(-)
diff --git a/runtime/thread.h b/runtime/thread.h
index 6b1c16c907..d979cce6c1 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -46,6 +46,7 @@
#include "reflective_handle_scope.h"
#include "runtime_globals.h"
#include "runtime_stats.h"
+#include "suspend_reason.h"
#include "thread_state.h"
namespace unwindstack {
@@ -101,7 +102,6 @@ class RootVisitor;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class StackedShadowFrameRecord;
-enum class SuspendReason : char;
class Thread;
class ThreadList;
enum VisitRootFlags : uint8_t;
@@ -133,6 +133,7 @@ enum class ThreadFlag : uint32_t {
kEmptyCheckpointRequest = 1u << 2,
// Register that at least 1 suspend barrier needs to be passed.
+ // Changes to this flag are guarded by suspend_count_lock_.
kActiveSuspendBarrier = 1u << 3,
// Marks that a "flip function" needs to be executed on this thread.
@@ -140,7 +141,7 @@ enum class ThreadFlag : uint32_t {
// Marks that the "flip function" is being executed by another thread.
//
- // This is used to guards against multiple threads trying to run the
+ // This is used to guard against multiple threads trying to run the
// "flip function" for the same thread while the thread is suspended.
//
// This is not needed when the thread is running the flip function
@@ -187,6 +188,13 @@ enum class WeakRefAccessState : int32_t {
kDisabled
};
+// See Thread.tlsPtr_.active_suspend1_barriers below for explanation.
+struct WrappedSuspend1Barrier {
+ WrappedSuspend1Barrier() : barrier_(1), next_(nullptr) {}
+ AtomicInteger barrier_;
+ struct WrappedSuspend1Barrier* next_ GUARDED_BY(Locks::thread_suspend_count_lock_);
+};
+
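To make the ownership rules concrete: each such barrier node lives on the requesting thread's stack, and the target only decrements and unlinks. A minimal self-contained sketch in plain C++ (names like Suspend1Node and DrainAll are illustrative assumptions, not ART code):

    #include <atomic>

    // Stand-in for WrappedSuspend1Barrier: the barrier starts at 1 and is
    // decremented exactly once by the target thread when it suspends.
    struct Suspend1Node {
      std::atomic<int> barrier{1};
      Suspend1Node* next = nullptr;
    };

    // Requester side (with the suspend-count lock held in the real code):
    // link a stack-resident node at the head of the target's list.
    void Push(Suspend1Node*& head, Suspend1Node* node) {
      node->next = head;
      head = node;
    }

    // Target side: pass every barrier. `next` must be read *before* the
    // decrement: once the barrier is passed, the requester may return and
    // reclaim the node, which lives on its stack.
    void DrainAll(Suspend1Node*& head) {
      Suspend1Node* n = head;
      head = nullptr;
      while (n != nullptr) {
        Suspend1Node* next = n->next;
        n->barrier.fetch_sub(1, std::memory_order_release);
        n = next;
      }
    }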
// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
@@ -306,7 +314,7 @@ class Thread {
}
bool IsSuspended() const {
- StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
+ StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_acquire);
return state_and_flags.GetState() != ThreadState::kRunnable &&
state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
}
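The switch from a relaxed to an acquire load matters because a requester that observes the suspended state typically goes on to read data the target wrote before suspending. A small model of the pairing, assuming (as the C++ memory model requires for this guarantee) that the target publishes its state with a release store:

    #include <atomic>

    std::atomic<int> state{0};  // 0 = runnable, 1 = suspended (model only)
    int target_data = 0;        // written by the target before it suspends

    void TargetSuspends() {
      target_data = 42;                           // pre-suspension writes...
      state.store(1, std::memory_order_release);  // ...published by the release
    }

    bool RequesterSeesSuspended() {
      if (state.load(std::memory_order_acquire) == 1) {  // pairs with release
        return target_data == 42;                        // guaranteed true
      }
      return false;
    }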
@@ -322,20 +330,32 @@ class Thread {
return tls32_.define_class_counter;
}
- // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
- // release thread_suspend_count_lock_ internally.
+ // Increment suspend count and optionally install at most one suspend barrier.
+ // Should not be invoked on another thread while the target's flip_function is not null.
ALWAYS_INLINE
- bool ModifySuspendCount(Thread* self,
- int delta,
- AtomicInteger* suspend_barrier,
- SuspendReason reason)
- WARN_UNUSED
+ void IncrementSuspendCount(Thread* self,
+ AtomicInteger* suspend_all_barrier,
+ WrappedSuspend1Barrier* suspend1_barrier,
+ SuspendReason reason)
+ REQUIRES(Locks::thread_suspend_count_lock_);
+
+ // The same, but with the reason defaulted to kInternal and both barriers to nullptr.
+ ALWAYS_INLINE void IncrementSuspendCount(Thread* self)
REQUIRES(Locks::thread_suspend_count_lock_);
+ ALWAYS_INLINE void DecrementSuspendCount(Thread* self,
+ SuspendReason reason = SuspendReason::kInternal)
+ REQUIRES(Locks::thread_suspend_count_lock_);
+
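Putting the increment/decrement pair together, the intended requester-side protocol looks roughly like the following sketch (a std::mutex and a bare int stand in for thread_suspend_count_lock_ and the real suspend count; this is a simplification, not the ART implementation):

    #include <atomic>
    #include <mutex>

    std::mutex suspend_count_lock;  // stands in for Locks::thread_suspend_count_lock_

    // Suspend: bump the count and register a barrier under the lock, then
    // wait for the target to pass the barrier *outside* the lock.
    void SuspendOther(int& suspend_count, std::atomic<int>& barrier) {
      {
        std::lock_guard<std::mutex> lg(suspend_count_lock);
        ++suspend_count;  // IncrementSuspendCount() with a suspend1 barrier
      }
      while (barrier.load(std::memory_order_acquire) != 0) {
        // The real code blocks on a futex rather than spinning.
      }
    }

    // Resume: drop the count under the same lock.
    void ResumeOther(int& suspend_count) {
      std::lock_guard<std::mutex> lg(suspend_count_lock);
      --suspend_count;  // DecrementSuspendCount()
    }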
+ private:
+ NO_RETURN static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread);
+
+ public:
// Requests a checkpoint closure to run on another thread. The closure will be run when the
// thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
// originating from a compiler generated suspend point check. This returns true if the closure
- // was added and will (eventually) be executed. It returns false otherwise.
+ // was added and will (eventually) be executed. It returns false if the thread was found to be
+ // suspended, in which case the closure will not be run by this mechanism.
//
// Since multiple closures can be queued and some closures can delay other threads from running,
// no closure should attempt to suspend another thread while running.
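The false return is what lets callers fall back to running the closure on the suspended thread's behalf. A self-contained model of that contract (TargetModel and friends are illustrative; the real ThreadList logic takes considerably more care around locks and races):

    #include <functional>
    #include <vector>

    enum class State { kRunnable, kSuspended };

    // Minimal model of the checkpoint contract described above (not ART
    // code): a request succeeds only against a runnable target.
    struct TargetModel {
      State state = State::kRunnable;
      std::vector<std::function<void()>> pending;
      bool RequestCheckpoint(std::function<void()> fn) {
        if (state != State::kRunnable) return false;  // false => suspended
        pending.push_back(std::move(fn));  // runs at the next suspend point
        return true;
      }
    };

    // Caller-side fallback: a suspended target cannot reach a suspend point,
    // so the requester runs the closure on the target's behalf.
    void RunCheckpointOn(TargetModel& target, std::function<void()> fn) {
      if (!target.RequestCheckpoint(fn)) {
        fn();
      }
    }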
@@ -365,8 +385,14 @@ class Thread {
bool RequestEmptyCheckpoint()
REQUIRES(Locks::thread_suspend_count_lock_);
+ Closure* GetFlipFunction() {
+ return tlsPtr_.flip_function.load(std::memory_order_relaxed);
+ }
+
// Set the flip function. This is done with all threads suspended, except for the calling thread.
- void SetFlipFunction(Closure* function);
+ void SetFlipFunction(Closure* function)
+ REQUIRES(Locks::thread_suspend_count_lock_)
+ REQUIRES(Locks::thread_list_lock_);
// Ensure that thread flip function started running. If no other thread is executing
// it, the calling thread shall run the flip function and then notify other threads
@@ -1203,9 +1229,6 @@ class Thread {
tlsPtr_.held_mutexes[level] = mutex;
}
- void ClearSuspendBarrier(AtomicInteger* target)
- REQUIRES(Locks::thread_suspend_count_lock_);
-
bool ReadFlag(ThreadFlag flag) const {
return GetStateAndFlags(std::memory_order_relaxed).IsFlagSet(flag);
}
@@ -1451,6 +1474,7 @@ class Thread {
private:
explicit Thread(bool daemon);
+ // A runnable thread should only be deleted by the thread itself.
~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
void Destroy();
@@ -1478,7 +1502,8 @@ class Thread {
// Avoid use, callers should use SetState.
// Used only by `Thread` destructor and stack trace collection in semi-space GC (currently
- // disabled by `kStoreStackTraces = false`).
+ // disabled by `kStoreStackTraces = false`). May not be called on a runnable thread other
+ // than Thread::Current().
// NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in
// different states, so clang cannot perform the thread safety analysis.
ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS {
@@ -1487,12 +1512,13 @@ class Thread {
if (old_state == new_state) {
// Nothing to do.
} else if (old_state == ThreadState::kRunnable) {
+ DCHECK_EQ(this, Thread::Current());
// Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
// case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
// attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
TransitionToSuspendedAndRunCheckpoints(new_state);
// Since we transitioned to a suspended state, check the pass barrier requests.
- PassActiveSuspendBarriers();
+ CheckActiveSuspendBarriers();
} else {
while (true) {
StateAndFlags new_state_and_flags = old_state_and_flags;
@@ -1565,9 +1591,26 @@ class Thread {
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void PassActiveSuspendBarriers()
+ // Call PassActiveSuspendBarriers() if there are active barriers. Only called on current thread.
+ ALWAYS_INLINE void CheckActiveSuspendBarriers()
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+ // Decrement all "suspend barriers" for the current thread, notifying threads that requested our
+ // suspension. Only called on current thread.
+ bool PassActiveSuspendBarriers()
+ REQUIRES(!Locks::thread_suspend_count_lock_);
+
+ // Add an entry to active_suspend1_barriers.
+ ALWAYS_INLINE void AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
+ REQUIRES(Locks::thread_suspend_count_lock_);
+
+ // Remove the last-added entry, i.e. the list head, from active_suspend1_barriers.
+ // Only makes sense if we're still holding thread_suspend_count_lock_ since insertion.
+ ALWAYS_INLINE void RemoveFirstSuspend1Barrier()
+ REQUIRES(Locks::thread_suspend_count_lock_);
+
+ ALWAYS_INLINE bool HasActiveSuspendBarrier() REQUIRES(Locks::thread_suspend_count_lock_);
+
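These suspend barriers behave like a stripped-down art::Barrier: an atomic count that arrivals decrement and the requester waits on. A sketch using C++20 atomic wait/notify in place of the raw futex the runtime uses:

    #include <atomic>

    // Target side: arrive at the barrier; the last arrival wakes the waiter.
    void PassBarrier(std::atomic<int>& barrier) {
      if (barrier.fetch_sub(1, std::memory_order_release) == 1) {
        barrier.notify_all();
      }
    }

    // Requester side: block until every expected arrival has happened.
    void WaitForBarrier(std::atomic<int>& barrier) {
      int cur = barrier.load(std::memory_order_acquire);
      while (cur != 0) {
        barrier.wait(cur, std::memory_order_acquire);  // sleeps while value == cur
        cur = barrier.load(std::memory_order_acquire);
      }
    }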
// Registers the current thread as the jit sensitive thread. Should be called just once.
static void SetJitSensitiveThread() {
if (jit_sensitive_thread_ == nullptr) {
@@ -1582,13 +1625,6 @@ class Thread {
is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}
- bool ModifySuspendCountInternal(Thread* self,
- int delta,
- AtomicInteger* suspend_barrier,
- SuspendReason reason)
- WARN_UNUSED
- REQUIRES(Locks::thread_suspend_count_lock_);
-
// Runs a single checkpoint function. If there are no more pending checkpoint functions it will
// clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
// the kCheckpointRequest flag is cleared.
@@ -1597,9 +1633,6 @@ class Thread {
REQUIRES_SHARED(Locks::mutator_lock_);
void RunEmptyCheckpoint();
- bool PassActiveSuspendBarriers(Thread* self)
- REQUIRES(!Locks::thread_suspend_count_lock_);
-
// Install the protected region for implicit stack checks.
void InstallImplicitProtection();
@@ -1885,45 +1918,47 @@ class Thread {
} tls64_;
struct PACKED(sizeof(void*)) tls_ptr_sized_values {
- tls_ptr_sized_values() : card_table(nullptr),
- exception(nullptr),
- stack_end(nullptr),
- managed_stack(),
- suspend_trigger(nullptr),
- jni_env(nullptr),
- tmp_jni_env(nullptr),
- self(nullptr),
- opeer(nullptr),
- jpeer(nullptr),
- stack_begin(nullptr),
- stack_size(0),
- deps_or_stack_trace_sample(),
- wait_next(nullptr),
- monitor_enter_object(nullptr),
- top_handle_scope(nullptr),
- class_loader_override(nullptr),
- long_jump_context(nullptr),
- instrumentation_stack(nullptr),
- stacked_shadow_frame_record(nullptr),
- deoptimization_context_stack(nullptr),
- frame_id_to_shadow_frame(nullptr),
- name(nullptr),
- pthread_self(0),
- last_no_thread_suspension_cause(nullptr),
- checkpoint_function(nullptr),
- thread_local_start(nullptr),
- thread_local_pos(nullptr),
- thread_local_end(nullptr),
- thread_local_limit(nullptr),
- thread_local_objects(0),
- thread_local_alloc_stack_top(nullptr),
- thread_local_alloc_stack_end(nullptr),
- mutator_lock(nullptr),
- flip_function(nullptr),
- method_verifier(nullptr),
- thread_local_mark_stack(nullptr),
- async_exception(nullptr),
- top_reflective_handle_scope(nullptr) {
+ tls_ptr_sized_values() : card_table(nullptr),
+ exception(nullptr),
+ stack_end(nullptr),
+ managed_stack(),
+ suspend_trigger(nullptr),
+ jni_env(nullptr),
+ tmp_jni_env(nullptr),
+ self(nullptr),
+ opeer(nullptr),
+ jpeer(nullptr),
+ stack_begin(nullptr),
+ stack_size(0),
+ deps_or_stack_trace_sample(),
+ wait_next(nullptr),
+ monitor_enter_object(nullptr),
+ top_handle_scope(nullptr),
+ class_loader_override(nullptr),
+ long_jump_context(nullptr),
+ instrumentation_stack(nullptr),
+ stacked_shadow_frame_record(nullptr),
+ deoptimization_context_stack(nullptr),
+ frame_id_to_shadow_frame(nullptr),
+ name(nullptr),
+ pthread_self(0),
+ last_no_thread_suspension_cause(nullptr),
+ checkpoint_function(nullptr),
+ active_suspendall_barrier(nullptr),
+ active_suspend1_barriers(nullptr),
+ thread_local_pos(nullptr),
+ thread_local_end(nullptr),
+ thread_local_start(nullptr),
+ thread_local_limit(nullptr),
+ thread_local_objects(0),
+ thread_local_alloc_stack_top(nullptr),
+ thread_local_alloc_stack_end(nullptr),
+ mutator_lock(nullptr),
+ flip_function(nullptr),
+ method_verifier(nullptr),
+ thread_local_mark_stack(nullptr),
+ async_exception(nullptr),
+ top_reflective_handle_scope(nullptr) {
std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
}
@@ -2033,20 +2068,30 @@ class Thread {
// requests another checkpoint, it goes to the checkpoint overflow list.
Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
- // Pending barriers that require passing or NULL if non-pending. Installation guarding by
- // Locks::thread_suspend_count_lock_.
- // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
- // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
- AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
-
- // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
- uint8_t* thread_local_start;
+ // After a thread observes a suspend request and enters a suspended state,
+ // it notifies the requestor by arriving at a "suspend barrier". This consists of decrementing
+ // the atomic integer representing the barrier. (This implementation was introduced in 2015 to
+ // minimize cost. There may be other options.) These atomic integer barriers are always
+ // stored on the requesting thread's stack. They are referenced from the target thread's
+ // data structure in one of two ways; in either case the data structure referring to these
+ // barriers is guarded by suspend_count_lock:
+ // 1. A SuspendAll barrier is directly referenced from the target thread. Only one of these
+ // can be active at a time:
+ AtomicInteger* active_suspendall_barrier GUARDED_BY(Locks::thread_suspend_count_lock_);
+ // 2. For individual thread suspensions, active barriers are embedded in a struct that is used
+ // to link together all suspend requests for this thread. Unlike the SuspendAll case, each
+ // barrier is referenced by a single target thread, and thus can appear only on a single list.
+ // The struct as a whole is still stored on the requesting thread's stack.
+ WrappedSuspend1Barrier* active_suspend1_barriers GUARDED_BY(Locks::thread_suspend_count_lock_);
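For case 1, the SuspendAll counter is shared: the requester initializes it to the number of threads that must arrive, and every target decrements the same integer. A minimal illustration (SuspendAllRequest is an assumed name, not an ART type):

    #include <atomic>

    // One counter on the requester's stack, initialized to the number of
    // threads that must suspend; each target arrives exactly once.
    struct SuspendAllRequest {
      std::atomic<int> pending;
      explicit SuspendAllRequest(int thread_count) : pending(thread_count) {}
    };

    // Target side: the last thread to arrive wakes the requester (C++20
    // notify standing in for the futex wake the runtime performs).
    void ArriveAtSuspendAll(SuspendAllRequest& req) {
      if (req.pending.fetch_sub(1, std::memory_order_release) == 1) {
        req.pending.notify_all();
      }
    }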
// thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
+ // Thread-local allocation pointer. Can be moved above the preceding two to correct alignment.
+ uint8_t* thread_local_start;
+
// Thread local limit is how much we can expand the thread local buffer to, it is greater or
// equal to thread_local_end.
uint8_t* thread_local_limit;
@@ -2073,7 +2118,8 @@ class Thread {
BaseMutex* held_mutexes[kLockLevelCount];
// The function used for thread flip.
- Closure* flip_function;
+ // Set only while holding Locks::thread_suspend_count_lock_. May be cleared while being read.
+ std::atomic<Closure*> flip_function;
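Because the pointer can be cleared while being read, racing readers must take ownership atomically rather than test-then-use. One hypothetical way to get at-most-once execution (the actual runtime coordinates through the flip-function thread flags described earlier; this is only a simplified illustration with a stand-in Closure type):

    #include <atomic>

    struct Closure {
      virtual void Run() = 0;
      virtual ~Closure() = default;
    };

    // Swap in nullptr to claim the closure; exactly one racing thread wins.
    bool TryRunAndClear(std::atomic<Closure*>& flip_function) {
      Closure* c = flip_function.exchange(nullptr, std::memory_order_acq_rel);
      if (c == nullptr) {
        return false;  // someone else claimed it, or none was set
      }
      c->Run();
      return true;
    }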
// Current method verifier, used for root marking.
verifier::MethodVerifier* method_verifier;