Diffstat (limited to 'runtime/thread.h')
-rw-r--r-- runtime/thread.h | 218
1 file changed, 75 insertions(+), 143 deletions(-)
diff --git a/runtime/thread.h b/runtime/thread.h
index 044e9dbd14..dda4c086f4 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -47,7 +47,6 @@
#include "reflective_handle_scope.h"
#include "runtime_globals.h"
#include "runtime_stats.h"
-#include "suspend_reason.h"
#include "thread_state.h"
namespace unwindstack {
@@ -103,6 +102,7 @@ class RootVisitor;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class StackedShadowFrameRecord;
+enum class SuspendReason : char;
class Thread;
class ThreadList;
enum VisitRootFlags : uint8_t;
@@ -134,7 +134,6 @@ enum class ThreadFlag : uint32_t {
kEmptyCheckpointRequest = 1u << 2,
// Register that at least 1 suspend barrier needs to be passed.
- // Changes to this flag are guarded by suspend_count_lock_ .
kActiveSuspendBarrier = 1u << 3,
// Marks that a "flip function" needs to be executed on this thread.
@@ -142,7 +141,7 @@ enum class ThreadFlag : uint32_t {
// Marks that the "flip function" is being executed by another thread.
//
- // This is used to guard against multiple threads trying to run the
+ // This is used to guard against multiple threads trying to run the
// "flip function" for the same thread while the thread is suspended.
//
// This is not needed when the thread is running the flip function
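The guard described in this hunk is essentially a claim-by-CAS pattern: only the thread that wins a compare-and-swap on the flag gets to run the flip function. A minimal, self-contained sketch of that pattern (using a plain std::atomic<bool> and a hypothetical TryRunFlipOnce helper, not ART's packed StateAndFlags word) might look like:

#include <atomic>
#include <functional>

// Hypothetical stand-in for the kRunningFlipFunction flag; not ART's
// actual StateAndFlags encoding.
std::atomic<bool> running_flip{false};

// Lets exactly one of several racing threads run the one-shot flip closure.
// Returns true if this caller won the race and executed it.
bool TryRunFlipOnce(const std::function<void()>& flip) {
  bool expected = false;
  if (!running_flip.compare_exchange_strong(expected, true,
                                            std::memory_order_acq_rel)) {
    return false;  // Another thread already claimed the flip.
  }
  flip();
  return true;
}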
@@ -189,13 +188,6 @@ enum class WeakRefAccessState : int32_t {
kDisabled
};
-// See Thread.tlsPtr_.active_suspend1_barriers below for explanation.
-struct WrappedSuspend1Barrier {
- WrappedSuspend1Barrier() : barrier_(1), next_(nullptr) {}
- AtomicInteger barrier_;
- struct WrappedSuspend1Barrier* next_ GUARDED_BY(Locks::thread_suspend_count_lock_);
-};
-
// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
@@ -323,7 +315,7 @@ class Thread {
}
bool IsSuspended() const {
- StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_acquire);
+ StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
return state_and_flags.GetState() != ThreadState::kRunnable &&
state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
}
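IsSuspended can answer both questions from one load because the thread state and the flag bits live in a single atomic word. As a self-contained illustration of that idea (the bit layout and constants below are assumptions for the sketch, not ART's actual StateAndFlags encoding):

#include <atomic>
#include <cstdint>

// Assumed layout for illustration: low bits hold flags, high byte holds state.
constexpr uint32_t kSuspendRequestFlag = 1u << 0;
constexpr uint32_t kStateShift = 24;
constexpr uint32_t kRunnable = 0;  // hypothetical state encoding

std::atomic<uint32_t> state_and_flags{kRunnable << kStateShift};

bool IsSuspendedSketch() {
  // A single load yields both the state and the flags, so the two checks are
  // mutually consistent even with a relaxed memory order.
  uint32_t word = state_and_flags.load(std::memory_order_relaxed);
  return (word >> kStateShift) != kRunnable &&
         (word & kSuspendRequestFlag) != 0;
}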
@@ -339,32 +331,20 @@ class Thread {
return tls32_.define_class_counter;
}
- // Increment suspend count and optionally install at most one suspend barrier.
- // Should not be invoked on another thread while the target's flip_function is not null.
+ // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
+ // release thread_suspend_count_lock_ internally.
ALWAYS_INLINE
- void IncrementSuspendCount(Thread* self,
- AtomicInteger* suspend_all_barrier,
- WrappedSuspend1Barrier* suspend1_barrier,
- SuspendReason reason)
+ bool ModifySuspendCount(Thread* self,
+ int delta,
+ AtomicInteger* suspend_barrier,
+ SuspendReason reason)
+ WARN_UNUSED
REQUIRES(Locks::thread_suspend_count_lock_);
- // The same, but default reason to kInternal, and barriers to nullptr.
- ALWAYS_INLINE void IncrementSuspendCount(Thread* self)
- REQUIRES(Locks::thread_suspend_count_lock_);
-
- ALWAYS_INLINE void DecrementSuspendCount(Thread* self,
- SuspendReason reason = SuspendReason::kInternal)
- REQUIRES(Locks::thread_suspend_count_lock_);
-
- private:
- NO_RETURN static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread);
-
- public:
// Requests a checkpoint closure to run on another thread. The closure will be run when the
// thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
// originating from a compiler generated suspend point check. This returns true if the closure
- // was added and will (eventually) be executed. It returns false if the thread was
- // either suspended or in the event of a spurious WeakCAS failure.
+ // was added and will (eventually) be executed. It returns false otherwise.
//
// Since multiple closures can be queued and some closures can delay other threads from running,
// no closure should attempt to suspend another thread while running.
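The contract above (publish a closure, have the target run it at its next suspend check, return false when it cannot be queued) can be shown with a small toy model. This is only a sketch of the shape of the mechanism, not ART's RequestCheckpoint implementation, and ToyThread is an invented type:

#include <atomic>
#include <functional>

// Toy model of the checkpoint hand-off: a requester publishes a closure; the
// target runs it at its next suspend check and clears the request.
struct ToyThread {
  std::atomic<std::function<void()>*> checkpoint{nullptr};

  // Requester side: succeeds only if no checkpoint is already pending.
  bool RequestCheckpoint(std::function<void()>* fn) {
    std::function<void()>* expected = nullptr;
    return checkpoint.compare_exchange_strong(expected, fn,
                                              std::memory_order_acq_rel);
  }

  // Target side: called from an explicit or compiler-generated suspend check.
  void CheckSuspend() {
    if (auto* fn = checkpoint.exchange(nullptr, std::memory_order_acq_rel)) {
      (*fn)();  // Runs while still "runnable"; must not suspend other threads.
    }
  }
};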
@@ -394,14 +374,8 @@ class Thread {
bool RequestEmptyCheckpoint()
REQUIRES(Locks::thread_suspend_count_lock_);
- Closure* GetFlipFunction() {
- return tlsPtr_.flip_function.load(std::memory_order_relaxed);
- }
-
// Set the flip function. This is done with all threads suspended, except for the calling thread.
- void SetFlipFunction(Closure* function)
- REQUIRES(Locks::thread_suspend_count_lock_)
- REQUIRES(Locks::thread_list_lock_);
+ void SetFlipFunction(Closure* function);
// Ensure that thread flip function started running. If no other thread is executing
// it, the calling thread shall run the flip function and then notify other threads
@@ -1242,27 +1216,8 @@ class Thread {
tlsPtr_.held_mutexes[level] = mutex;
}
- // Possibly check that no mutexes at level kMonitorLock or above are subsequently acquired.
- // Only invoked by the thread itself.
- void DisallowPreMonitorMutexes() {
- if (kIsDebugBuild) {
- CHECK(this == Thread::Current());
- CHECK(GetHeldMutex(kMonitorLock) == nullptr);
- // Pretend we hold a kMonitorLock level mutex to detect disallowed mutex
- // acquisitions by checkpoint Run() methods. We don't normally register or thus check
- // kMonitorLock level mutexes, but this is an exception.
- static Mutex dummy_mutex("checkpoint dummy mutex", kMonitorLock);
- SetHeldMutex(kMonitorLock, &dummy_mutex);
- }
- }
-
- // Undo the effect of the previous call. Again only invoked by the thread itself.
- void AllowPreMonitorMutexes() {
- if (kIsDebugBuild) {
- CHECK(GetHeldMutex(kMonitorLock) != nullptr);
- SetHeldMutex(kMonitorLock, nullptr);
- }
- }
+ void ClearSuspendBarrier(AtomicInteger* target)
+ REQUIRES(Locks::thread_suspend_count_lock_);
bool ReadFlag(ThreadFlag flag) const {
return GetStateAndFlags(std::memory_order_relaxed).IsFlagSet(flag);
@@ -1510,7 +1465,6 @@ class Thread {
private:
explicit Thread(bool daemon);
- // A runnable thread should only be deleted from the thread itself.
~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
void Destroy();
@@ -1538,8 +1492,7 @@ class Thread {
// Avoid use, callers should use SetState.
// Used only by `Thread` destructor and stack trace collection in semi-space GC (currently
- // disabled by `kStoreStackTraces = false`). May not be called on a runnable thread other
- // than Thread::Current().
+ // disabled by `kStoreStackTraces = false`).
// NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in
// different states, so clang cannot perform the thread safety analysis.
ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS {
@@ -1548,13 +1501,12 @@ class Thread {
if (old_state == new_state) {
// Nothing to do.
} else if (old_state == ThreadState::kRunnable) {
- DCHECK_EQ(this, Thread::Current());
// Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
// case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
// attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
TransitionToSuspendedAndRunCheckpoints(new_state);
// Since we transitioned to a suspended state, check the pass barrier requests.
- CheckActiveSuspendBarriers();
+ PassActiveSuspendBarriers();
} else {
while (true) {
StateAndFlags new_state_and_flags = old_state_and_flags;
@@ -1627,26 +1579,9 @@ class Thread {
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Call PassActiveSuspendBarriers() if there are active barriers. Only called on current thread.
- ALWAYS_INLINE void CheckActiveSuspendBarriers()
+ ALWAYS_INLINE void PassActiveSuspendBarriers()
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
- // Decrement all "suspend barriers" for the current thread, notifying threads that requested our
- // suspension. Only called on current thread.
- bool PassActiveSuspendBarriers()
- REQUIRES(!Locks::thread_suspend_count_lock_);
-
- // Add an entry to active_suspend1_barriers.
- ALWAYS_INLINE void AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
- REQUIRES(Locks::thread_suspend_count_lock_);
-
- // Remove last-added entry from active_suspend1_barriers.
- // Only makes sense if we're still holding thread_suspend_count_lock_ since insertion.
- ALWAYS_INLINE void RemoveFirstSuspend1Barrier()
- REQUIRES(Locks::thread_suspend_count_lock_);
-
- ALWAYS_INLINE bool HasActiveSuspendBarrier() REQUIRES(Locks::thread_suspend_count_lock_);
-
// Registers the current thread as the jit sensitive thread. Should be called just once.
static void SetJitSensitiveThread() {
if (jit_sensitive_thread_ == nullptr) {
@@ -1661,6 +1596,13 @@ class Thread {
is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}
+ bool ModifySuspendCountInternal(Thread* self,
+ int delta,
+ AtomicInteger* suspend_barrier,
+ SuspendReason reason)
+ WARN_UNUSED
+ REQUIRES(Locks::thread_suspend_count_lock_);
+
// Runs a single checkpoint function. If there are no more pending checkpoint functions it will
// clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
// the kCheckpointRequest flag is cleared.
@@ -1669,6 +1611,9 @@ class Thread {
REQUIRES_SHARED(Locks::mutator_lock_);
void RunEmptyCheckpoint();
+ bool PassActiveSuspendBarriers(Thread* self)
+ REQUIRES(!Locks::thread_suspend_count_lock_);
+
// Install the protected region for implicit stack checks.
void InstallImplicitProtection();
@@ -1951,47 +1896,45 @@ class Thread {
} tls64_;
struct PACKED(sizeof(void*)) tls_ptr_sized_values {
- tls_ptr_sized_values() : card_table(nullptr),
- exception(nullptr),
- stack_end(nullptr),
- managed_stack(),
- suspend_trigger(nullptr),
- jni_env(nullptr),
- tmp_jni_env(nullptr),
- self(nullptr),
- opeer(nullptr),
- jpeer(nullptr),
- stack_begin(nullptr),
- stack_size(0),
- deps_or_stack_trace_sample(),
- wait_next(nullptr),
- monitor_enter_object(nullptr),
- top_handle_scope(nullptr),
- class_loader_override(nullptr),
- long_jump_context(nullptr),
- instrumentation_stack(nullptr),
- stacked_shadow_frame_record(nullptr),
- deoptimization_context_stack(nullptr),
- frame_id_to_shadow_frame(nullptr),
- name(nullptr),
- pthread_self(0),
- last_no_thread_suspension_cause(nullptr),
- checkpoint_function(nullptr),
- active_suspendall_barrier(nullptr),
- active_suspend1_barriers(nullptr),
- thread_local_pos(nullptr),
- thread_local_end(nullptr),
- thread_local_start(nullptr),
- thread_local_limit(nullptr),
- thread_local_objects(0),
- thread_local_alloc_stack_top(nullptr),
- thread_local_alloc_stack_end(nullptr),
- mutator_lock(nullptr),
- flip_function(nullptr),
- method_verifier(nullptr),
- thread_local_mark_stack(nullptr),
- async_exception(nullptr),
- top_reflective_handle_scope(nullptr) {
+ tls_ptr_sized_values() : card_table(nullptr),
+ exception(nullptr),
+ stack_end(nullptr),
+ managed_stack(),
+ suspend_trigger(nullptr),
+ jni_env(nullptr),
+ tmp_jni_env(nullptr),
+ self(nullptr),
+ opeer(nullptr),
+ jpeer(nullptr),
+ stack_begin(nullptr),
+ stack_size(0),
+ deps_or_stack_trace_sample(),
+ wait_next(nullptr),
+ monitor_enter_object(nullptr),
+ top_handle_scope(nullptr),
+ class_loader_override(nullptr),
+ long_jump_context(nullptr),
+ instrumentation_stack(nullptr),
+ stacked_shadow_frame_record(nullptr),
+ deoptimization_context_stack(nullptr),
+ frame_id_to_shadow_frame(nullptr),
+ name(nullptr),
+ pthread_self(0),
+ last_no_thread_suspension_cause(nullptr),
+ checkpoint_function(nullptr),
+ thread_local_start(nullptr),
+ thread_local_pos(nullptr),
+ thread_local_end(nullptr),
+ thread_local_limit(nullptr),
+ thread_local_objects(0),
+ thread_local_alloc_stack_top(nullptr),
+ thread_local_alloc_stack_end(nullptr),
+ mutator_lock(nullptr),
+ flip_function(nullptr),
+ method_verifier(nullptr),
+ thread_local_mark_stack(nullptr),
+ async_exception(nullptr),
+ top_reflective_handle_scope(nullptr) {
std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
}
@@ -2101,30 +2044,20 @@ class Thread {
// requests another checkpoint, it goes to the checkpoint overflow list.
Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
- // After a thread observes a suspend request and enters a suspended state,
- // it notifies the requestor by arriving at a "suspend barrier". This consists of decrementing
- // the atomic integer representing the barrier. (This implementation was introduced in 2015 to
- // minimize cost. There may be other options.) These atomic integer barriers are always
- // stored on the requesting thread's stack. They are referenced from the target thread's
- // data structure in one of two ways; in either case the data structure referring to these
- // barriers is guarded by suspend_count_lock:
- // 1. A SuspendAll barrier is directly referenced from the target thread. Only one of these
- // can be active at a time:
- AtomicInteger* active_suspendall_barrier GUARDED_BY(Locks::thread_suspend_count_lock_);
- // 2. For individual thread suspensions, active barriers are embedded in a struct that is used
- // to link together all suspend requests for this thread. Unlike the SuspendAll case, each
- // barrier is referenced by a single target thread, and thus can appear only on a single list.
- // The struct as a whole is still stored on the requesting thread's stack.
- WrappedSuspend1Barrier* active_suspend1_barriers GUARDED_BY(Locks::thread_suspend_count_lock_);
+ // Pending barriers that require passing, or null if none are pending. Installation is guarded
+ // by Locks::thread_suspend_count_lock_.
+ // These work effectively as an art::Barrier, but are implemented directly with AtomicInteger
+ // and futex to avoid the additional cost of the mutex and condition variable used in
+ // art::Barrier.
+ AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
+
+ // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
+ uint8_t* thread_local_start;
// thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
- // Thread-local allocation pointer. Can be moved above the preceding two to correct alignment.
- uint8_t* thread_local_start;
-
// Thread local limit is how much we can expand the thread local buffer to, it is greater or
// equal to thread_local_end.
uint8_t* thread_local_limit;
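The counter-style barrier described in the comments in this hunk can be sketched in a self-contained way. Here C++20's std::atomic wait/notify stands in for the futex, and the type below is an illustration rather than ART's implementation:

#include <atomic>

// Sketch of the counter barrier: arriving threads decrement the count, and the
// requester waits until it reaches zero, without a mutex or condition variable.
struct SuspendBarrierSketch {
  explicit SuspendBarrierSketch(int pending) : count(pending) {}

  std::atomic<int> count;

  // Target-thread side: called once the thread has reached a suspended state.
  void Pass() {
    if (count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      count.notify_all();  // Last arrival wakes the requester.
    }
  }

  // Requester side: the barrier typically lives on the requesting thread's stack.
  void Wait() {
    int remaining = count.load(std::memory_order_acquire);
    while (remaining != 0) {
      count.wait(remaining);  // Blocks until the value changes.
      remaining = count.load(std::memory_order_acquire);
    }
  }
};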
@@ -2151,8 +2084,7 @@ class Thread {
BaseMutex* held_mutexes[kLockLevelCount];
// The function used for thread flip.
- // Set only while holding Locks::thread_suspend_count_lock_ . May be cleared while being read.
- std::atomic<Closure*> flip_function;
+ Closure* flip_function;
// Current method verifier, used for root marking.
verifier::MethodVerifier* method_verifier;