Diffstat (limited to 'runtime/thread.h')
-rw-r--r--  runtime/thread.h  276
1 file changed, 178 insertions, 98 deletions
diff --git a/runtime/thread.h b/runtime/thread.h
index dd8b061b95..5350330daf 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -38,6 +38,7 @@
#include "handle.h"
#include "handle_scope.h"
#include "interpreter/interpreter_cache.h"
+#include "interpreter/shadow_frame.h"
#include "javaheapprof/javaheapsampler.h"
#include "jvalue.h"
#include "managed_stack.h"
@@ -48,7 +49,9 @@
#include "runtime_stats.h"
#include "thread_state.h"
-class BacktraceMap;
+namespace unwindstack {
+class AndroidLocalUnwinder;
+} // namespace unwindstack
namespace art {
@@ -188,7 +191,7 @@ enum class WeakRefAccessState : int32_t {
// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
-static constexpr size_t kSharedMethodHotnessThreshold = 0xffff;
+static constexpr size_t kSharedMethodHotnessThreshold = 0x1fff;
// Thread's stack layout for implicit stack overflow checks:
//
@@ -229,8 +232,11 @@ class Thread {
// Attaches the calling native thread to the runtime, returning the new native peer.
// Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
- static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
- bool create_peer);
+ static Thread* Attach(const char* thread_name,
+ bool as_daemon,
+ jobject thread_group,
+ bool create_peer,
+ bool should_run_callbacks);
// Attaches the calling native thread to the runtime, returning the new native peer.
static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
@@ -242,6 +248,9 @@ class Thread {
// TODO: mark as PURE so the compiler may coalesce and remove?
static Thread* Current();
+ // Get the thread from the JNI environment.
+ static Thread* ForEnv(JNIEnv* env);
+
// On a runnable thread, check for pending thread suspension request and handle if pending.
void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -267,16 +276,28 @@ class Thread {
// Dumps a one-line summary of thread state (used for operator<<).
void ShortDump(std::ostream& os) const;
+ // Order of threads for ANRs (ANRs can be trimmed, so we print important ones first).
+ enum class DumpOrder : uint8_t {
+ kMain, // Always print the main thread first (there might not be one).
+ kBlocked, // Then print all threads that are blocked waiting on a lock.
+ kLocked, // Then print all threads that already hold some lock.
+ kDefault, // Print all other threads, which might not be interesting for an ANR.
+ };
+
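
The DumpOrder value returned by Dump() lets the ANR reporter sort threads so that the most relevant ones survive trimming. A minimal sketch of how a caller might sort collected dumps by that value; ThreadDumpEntry and SortForAnr are illustrative names, not part of this patch:

    #include <algorithm>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Hypothetical holder for one thread's dump, keyed by the DumpOrder returned by Thread::Dump().
    struct ThreadDumpEntry {
      uint8_t order;     // static_cast<uint8_t>(Thread::DumpOrder); lower values print first.
      std::string text;  // The per-thread dump text.
    };

    // Sort so kMain < kBlocked < kLocked < kDefault, keeping relative order within each class.
    void SortForAnr(std::vector<ThreadDumpEntry>& entries) {
      std::stable_sort(entries.begin(), entries.end(),
                       [](const ThreadDumpEntry& a, const ThreadDumpEntry& b) {
                         return a.order < b.order;
                       });
    }
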
// Dumps the detailed thread state and the thread stack (used for SIGQUIT).
- void Dump(std::ostream& os,
- bool dump_native_stack = true,
- BacktraceMap* backtrace_map = nullptr,
- bool force_dump_stack = false) const
+ DumpOrder Dump(std::ostream& os,
+ bool dump_native_stack = true,
+ bool force_dump_stack = false) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ DumpOrder Dump(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack = true,
+ bool force_dump_stack = false) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpJavaStack(std::ostream& os,
- bool check_suspended = true,
- bool dump_locks = true) const
+ DumpOrder DumpJavaStack(std::ostream& os,
+ bool check_suspended = true,
+ bool dump_locks = true) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
@@ -373,14 +394,23 @@ class Thread {
void WaitForFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
return tlsPtr_.thread_local_mark_stack;
}
void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
tlsPtr_.thread_local_mark_stack = stack;
}
+ uint8_t* GetThreadLocalGcBuffer() {
+ DCHECK(gUseUserfaultfd);
+ return tlsPtr_.thread_local_gc_buffer;
+ }
+ void SetThreadLocalGcBuffer(uint8_t* buf) {
+ DCHECK(gUseUserfaultfd);
+ tlsPtr_.thread_local_gc_buffer = buf;
+ }
+
// Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of
// mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
void FullSuspendCheck(bool implicit = false)
@@ -546,8 +576,12 @@ class Thread {
// that needs to be dealt with, false otherwise.
bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
- // Find catch block and perform long jump to appropriate exception handle
- NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Find catch block and perform long jump to the appropriate exception handler. When
+ // is_method_exit_exception is true, the exception was thrown by the method exit callback and we
+ // should not send a method unwind event for the method on top of the stack, since the method
+ // exit callback was already called.
+ NO_RETURN void QuickDeliverException(bool is_method_exit_exception = false)
+ REQUIRES_SHARED(Locks::mutator_lock_);
Context* GetLongJumpContext();
void ReleaseLongJumpContext(Context* context) {
@@ -573,8 +607,8 @@ class Thread {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
}
- void SetTopOfStackTagged(ArtMethod** top_method) {
- tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
+ void SetTopOfStackGenericJniTagged(ArtMethod** top_method) {
+ tlsPtr_.managed_stack.SetTopQuickFrameGenericJniTagged(top_method);
}
void SetTopOfShadowStack(ShadowFrame* top) {
@@ -708,6 +742,16 @@ class Thread {
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static jint InternalStackTraceToStackFrameInfoArray(
+ const ScopedObjectAccessAlreadyRunnable& soa,
+ jlong mode, // See java.lang.StackStreamFactory for the mode flags
+ jobject internal,
+ jint startLevel,
+ jint batchSize,
+ jint startIndex,
+ jobjectArray output_array) // java.lang.StackFrameInfo[]
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -715,6 +759,9 @@ class Thread {
return tlsPtr_.frame_id_to_shadow_frame != nullptr;
}
+ // This is done by the GC using a checkpoint (or in a stop-the-world pause).
+ void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -739,6 +786,13 @@ class Thread {
}
template<PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> TidOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
+ }
+
+ template<PointerSize pointer_size>
static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
@@ -766,6 +820,13 @@ class Thread {
OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
}
+ template <PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> DeoptCheckRequiredOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, is_deopt_check_required));
+ }
+
static constexpr size_t IsGcMarkingSize() {
return sizeof(tls32_.is_gc_marking);
}
@@ -955,6 +1016,10 @@ class Thread {
// Is the given obj in one of this thread's JNI transition frames?
bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Convert a global (or weak global) jobject into an Object*
+ ObjPtr<mirror::Object> DecodeGlobalJObject(jobject obj) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1011,33 +1076,34 @@ class Thread {
}
bool GetIsGcMarking() const {
- CHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
return tls32_.is_gc_marking;
}
void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
+ bool IsDeoptCheckRequired() const { return tls32_.is_deopt_check_required; }
+
+ void SetDeoptCheckRequired(bool flag) { tls32_.is_deopt_check_required = flag; }
+
bool GetWeakRefAccessEnabled() const; // Only safe for current thread.
void SetWeakRefAccessEnabled(bool enabled) {
- CHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
WeakRefAccessState new_state = enabled ?
WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
}
uint32_t GetDisableThreadFlipCount() const {
- CHECK(kUseReadBarrier);
return tls32_.disable_thread_flip_count;
}
void IncrementDisableThreadFlipCount() {
- CHECK(kUseReadBarrier);
++tls32_.disable_thread_flip_count;
}
void DecrementDisableThreadFlipCount() {
- CHECK(kUseReadBarrier);
DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
--tls32_.disable_thread_flip_count;
}
@@ -1091,7 +1157,8 @@ class Thread {
void AssertHasDeoptimizationContext()
REQUIRES_SHARED(Locks::mutator_lock_);
void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
- ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
+ ShadowFrame* PopStackedShadowFrame();
+ ShadowFrame* MaybePopDeoptimizedStackedShadowFrame();
// For debugger, find the shadow frame that corresponds to a frame id.
// Or return null if there is none.
@@ -1112,22 +1179,6 @@ class Thread {
void RemoveDebuggerShadowFrameMapping(size_t frame_id)
REQUIRES_SHARED(Locks::mutator_lock_);
- // While getting this map requires shared the mutator lock, manipulating it
- // should actually follow these rules:
- // (1) The owner of this map (the thread) can change it with its mutator lock.
- // (2) Other threads can read this map when the owner is suspended and they
- // hold the mutator lock.
- // (3) Other threads can change this map when owning the mutator lock exclusively.
- //
- // The reason why (3) needs the mutator lock exclusively (and not just having
- // the owner suspended) is that we don't want other threads to concurrently read the map.
- //
- // TODO: Add a class abstraction to express these rules.
- std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack()
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return tlsPtr_.instrumentation_stack;
- }
-
std::vector<ArtMethod*>* GetStackTraceSample() const {
DCHECK(!IsAotCompiler());
return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
@@ -1152,6 +1203,22 @@ class Thread {
tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
}
+ uintptr_t* GetMethodTraceBuffer() { return tlsPtr_.method_trace_buffer; }
+
+ size_t* GetMethodTraceIndexPtr() { return &tlsPtr_.method_trace_buffer_index; }
+
+ uintptr_t* SetMethodTraceBuffer(uintptr_t* buffer) {
+ return tlsPtr_.method_trace_buffer = buffer;
+ }
+
+ void ResetMethodTraceBuffer() {
+ if (tlsPtr_.method_trace_buffer != nullptr) {
+ delete[] tlsPtr_.method_trace_buffer;
+ }
+ tlsPtr_.method_trace_buffer = nullptr;
+ tlsPtr_.method_trace_buffer_index = 0;
+ }
+
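
A rough sketch of how a tracer might use the per-thread trace buffer accessors above. The buffer size and the overflow policy are assumptions made for illustration; only the accessor names come from this patch:

    // Hypothetical capacity for the per-thread trace buffer (not defined in this header).
    constexpr size_t kMethodTraceBufferSlots = 4096;

    // Record one entry into the calling thread's trace buffer, allocating it lazily.
    void RecordTraceEntry(Thread* self, uintptr_t entry) {
      uintptr_t* buffer = self->GetMethodTraceBuffer();
      if (buffer == nullptr) {
        buffer = self->SetMethodTraceBuffer(new uintptr_t[kMethodTraceBufferSlots]);
      }
      size_t* index = self->GetMethodTraceIndexPtr();
      if (*index < kMethodTraceBufferSlots) {
        buffer[(*index)++] = entry;
      }
      // A real tracer would flush and reset the buffer on overflow rather than drop entries.
    }
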
uint64_t GetTraceClockBase() const {
return tls64_.trace_clock_base;
}
@@ -1206,6 +1273,10 @@ class Thread {
DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
}
+ // Called from the concurrent mark-compact GC to slide the TLAB pointers backwards
+ // to adjust to post-compact addresses.
+ void AdjustTlab(size_t slide_bytes);
+
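
AdjustTlab() only has to move the thread's bump-pointer bounds, since the mark-compact GC slides the TLAB contents backwards as a block. A standalone illustration of that pointer slide, with made-up field names:

    #include <cstddef>
    #include <cstdint>

    // Illustrative bump-pointer TLAB bounds; ART keeps the real ones in tlsPtr_.
    struct TlabBounds {
      uint8_t* start;
      uint8_t* pos;
      uint8_t* end;
      uint8_t* limit;

      // Slide every bound back by `slide_bytes` to match the post-compaction addresses.
      void Adjust(size_t slide_bytes) {
        start -= slide_bytes;
        pos -= slide_bytes;
        end -= slide_bytes;
        limit -= slide_bytes;
      }
    };
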
// Doesn't check that there is room.
mirror::Object* AllocTlab(size_t bytes);
void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
@@ -1298,11 +1369,12 @@ class Thread {
bool IncrementMakeVisiblyInitializedCounter() {
tls32_.make_visibly_initialized_counter += 1u;
- return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
- }
-
- void ClearMakeVisiblyInitializedCounter() {
- tls32_.make_visibly_initialized_counter = 0u;
+ DCHECK_LE(tls32_.make_visibly_initialized_counter, kMakeVisiblyInitializedCounterTriggerCount);
+ if (tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount) {
+ tls32_.make_visibly_initialized_counter = 0u;
+ return true;
+ }
+ return false;
}
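
With the rewrite above, the counter resets itself when it reaches the trigger, so the separate ClearMakeVisiblyInitializedCounter() is no longer needed. The same self-resetting trigger pattern in isolation, with an assumed trigger value:

    #include <cstdint>

    // Returns true once every kTrigger increments; kTrigger is an assumed value here,
    // ART uses kMakeVisiblyInitializedCounterTriggerCount.
    class TriggerCounter {
     public:
      bool Increment() {
        counter_ += 1u;
        if (counter_ == kTrigger) {
          counter_ = 0u;  // Reset in place so callers no longer need a separate Clear().
          return true;
        }
        return false;
      }

     private:
      static constexpr uint32_t kTrigger = 128;
      uint32_t counter_ = 0u;
    };
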
void PushVerifier(verifier::MethodVerifier* verifier);
@@ -1347,10 +1419,9 @@ class Thread {
// Set to the read barrier marking entrypoints to be non-null.
void SetReadBarrierEntrypoints();
- static jobject CreateCompileTimePeer(JNIEnv* env,
- const char* name,
- bool as_daemon,
- jobject thread_group)
+ ObjPtr<mirror::Object> CreateCompileTimePeer(const char* name,
+ bool as_daemon,
+ jobject thread_group)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
@@ -1413,7 +1484,7 @@ class Thread {
private:
explicit Thread(bool daemon);
~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
- void Destroy();
+ void Destroy(bool should_run_callbacks);
// Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
// observed to be set at the same time by instrumentation.
@@ -1424,16 +1495,16 @@ class Thread {
template <typename PeerAction>
static Thread* Attach(const char* thread_name,
bool as_daemon,
- PeerAction p);
+ PeerAction p,
+ bool should_run_callbacks);
void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
template<bool kTransactionActive>
- static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
- ObjPtr<mirror::Object> peer,
- jboolean thread_is_daemon,
- jobject thread_group,
- jobject thread_name,
+ static void InitPeer(ObjPtr<mirror::Object> peer,
+ bool as_daemon,
+ ObjPtr<mirror::Object> thread_group,
+ ObjPtr<mirror::String> thread_name,
jint thread_priority)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1479,10 +1550,14 @@ class Thread {
void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpStack(std::ostream& os,
- bool dump_native_stack = true,
- BacktraceMap* backtrace_map = nullptr,
- bool force_dump_stack = false) const
+ DumpOrder DumpStack(std::ostream& os,
+ bool dump_native_stack = true,
+ bool force_dump_stack = false) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ DumpOrder DumpStack(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack = true,
+ bool force_dump_stack = false) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Out-of-line conveniences for debugging in gdb.
@@ -1490,12 +1565,13 @@ class Thread {
// Like Thread::Dump(std::cerr).
void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
+ // A wrapper around CreateCallback, used when the userfaultfd GC is in use so that the GC
+ // can be identified by its stack trace.
+ static NO_INLINE void* CreateCallbackWithUffdGc(void* arg);
static void* CreateCallback(void* arg);
- void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
- REQUIRES_SHARED(Locks::mutator_lock_);
- void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ void HandleUncaughtExceptions() REQUIRES_SHARED(Locks::mutator_lock_);
+ void RemoveFromThreadGroup() REQUIRES_SHARED(Locks::mutator_lock_);
// Initialize a thread.
//
@@ -1563,9 +1639,6 @@ class Thread {
template <bool kPrecise>
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
- static void SweepInterpreterCaches(IsMarkedVisitor* visitor)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
static bool IsAotCompiler();
void ReleaseLongJumpContextInternal();
@@ -1712,6 +1785,7 @@ class Thread {
thread_exit_check_count(0),
is_transitioning_to_runnable(false),
is_gc_marking(false),
+ is_deopt_check_required(false),
weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
disable_thread_flip_count(0),
user_code_suspend_count(0),
@@ -1766,6 +1840,12 @@ class Thread {
// GC roots.
bool32_t is_gc_marking;
+ // True if we need to check for deoptimization when returning from runtime functions. This is
+ // required only when a class is redefined, to prevent executing code that has field offsets
+ // embedded in it. For non-debuggable apps redefinition is not allowed, so this flag should
+ // always be false.
+ bool32_t is_deopt_check_required;
+
// Thread "interrupted" status; stays raised until queried or thrown.
Atomic<bool32_t> interrupted;
@@ -1853,19 +1933,18 @@ class Thread {
top_handle_scope(nullptr),
class_loader_override(nullptr),
long_jump_context(nullptr),
- instrumentation_stack(nullptr),
stacked_shadow_frame_record(nullptr),
deoptimization_context_stack(nullptr),
frame_id_to_shadow_frame(nullptr),
name(nullptr),
pthread_self(0),
last_no_thread_suspension_cause(nullptr),
- checkpoint_function(nullptr),
thread_local_start(nullptr),
thread_local_pos(nullptr),
thread_local_end(nullptr),
thread_local_limit(nullptr),
thread_local_objects(0),
+ checkpoint_function(nullptr),
thread_local_alloc_stack_top(nullptr),
thread_local_alloc_stack_end(nullptr),
mutator_lock(nullptr),
@@ -1873,7 +1952,9 @@ class Thread {
method_verifier(nullptr),
thread_local_mark_stack(nullptr),
async_exception(nullptr),
- top_reflective_handle_scope(nullptr) {
+ top_reflective_handle_scope(nullptr),
+ method_trace_buffer(nullptr),
+ method_trace_buffer_index(0) {
std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
}
@@ -1946,14 +2027,6 @@ class Thread {
// Thread local, lazily allocated, long jump context. Used to deliver exceptions.
Context* long_jump_context;
- // Additional stack used by method instrumentation to store method and return pc values.
- // Stored as a pointer since std::map is not PACKED.
- // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
- // ordered iteration on the keys (which are stack addresses).
- // Also see Thread::GetInstrumentationStack for the requirements on
- // manipulating and reading this map.
- std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;
-
// For gc purpose, a shadow frame record stack that keeps track of:
// 1) shadow frames under construction.
// 2) deoptimization shadow frames.
@@ -1979,10 +2052,6 @@ class Thread {
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
- // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone\
- // requests another checkpoint, it goes to the checkpoint overflow list.
- Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
-
// Pending barriers that require passing or NULL if non-pending. Installation guarded by
// Locks::thread_suspend_count_lock_.
// They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
@@ -2003,6 +2072,10 @@ class Thread {
size_t thread_local_objects;
+ // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
+ // requests another checkpoint, it goes to the checkpoint overflow list.
+ Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
+
// Entrypoint function pointers.
// TODO: move this to more of a global offset table model to avoid per-thread duplication.
JniEntryPoints jni_entrypoints;
@@ -2028,14 +2101,24 @@ class Thread {
// Current method verifier, used for root marking.
verifier::MethodVerifier* method_verifier;
- // Thread-local mark stack for the concurrent copying collector.
- gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
+ union {
+ // Thread-local mark stack for the concurrent copying collector.
+ gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
+ // Thread-local page-sized buffer for userfaultfd GC.
+ uint8_t* thread_local_gc_buffer;
+ };
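
The union above lets the concurrent-copying mark stack and the userfaultfd GC buffer share a single TLS slot, since only one of those collectors is active in a given runtime. A minimal standalone version of the same space-saving pattern, with illustrative names:

    #include <cassert>
    #include <cstdint>

    // Only one GC flavor is active per runtime, so both pointers can share storage.
    struct GcThreadLocalSlot {
      bool use_read_barrier;  // Selects which union member is meaningful.
      union {
        void* mark_stack;     // Used by the concurrent copying collector.
        uint8_t* gc_buffer;   // Used by the userfaultfd mark-compact collector.
      };

      uint8_t* GetGcBuffer() const {
        assert(!use_read_barrier);  // Mirrors the DCHECK(gUseUserfaultfd) in the real accessor.
        return gc_buffer;
      }
    };
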
// The pending async-exception or null.
mirror::Throwable* async_exception;
// Top of the linked-list for reflective-handle scopes or null if none.
BaseReflectiveHandleScope* top_reflective_handle_scope;
+
+ // Pointer to a thread-local buffer for method tracing.
+ uintptr_t* method_trace_buffer;
+
+ // The index of the next free entry in method_trace_buffer.
+ size_t method_trace_buffer_index;
} tlsPtr_;
// Small thread-local cache to be used from the interpreter.
@@ -2068,8 +2151,10 @@ class Thread {
SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
GUARDED_BY(Locks::custom_tls_lock_);
-#ifndef __BIONIC__
- __attribute__((tls_model("initial-exec")))
+#if !defined(__BIONIC__)
+#if !defined(ANDROID_HOST_MUSL)
+ __attribute__((tls_model("initial-exec")))
+#endif
static thread_local Thread* self_tls_;
#endif
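
The extra guard above keeps the initial-exec TLS model off musl hosts, where dlopen-ing a library that uses initial-exec TLS can fail once the static TLS reserve is exhausted. A minimal example of the attribute as it is applied here; the variable is illustrative:

    // Initial-exec TLS gives faster thread-local access, but constrains how the
    // containing library can be loaded.
    #if !defined(__BIONIC__) && !defined(ANDROID_HOST_MUSL)
    __attribute__((tls_model("initial-exec")))
    #endif
    static thread_local int tls_example = 0;
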
@@ -2152,17 +2237,18 @@ class ScopedAllowThreadSuspension {
class ScopedStackedShadowFramePusher {
public:
- ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
- : self_(self), type_(type) {
- self_->PushStackedShadowFrame(sf, type);
+ ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf) : self_(self), sf_(sf) {
+ DCHECK_EQ(sf->GetLink(), nullptr);
+ self_->PushStackedShadowFrame(sf, StackedShadowFrameType::kShadowFrameUnderConstruction);
}
~ScopedStackedShadowFramePusher() {
- self_->PopStackedShadowFrame(type_);
+ ShadowFrame* sf = self_->PopStackedShadowFrame();
+ DCHECK_EQ(sf, sf_);
}
private:
Thread* const self_;
- const StackedShadowFrameType type_;
+ ShadowFrame* const sf_;
DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};
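
With the frame type now fixed to kShadowFrameUnderConstruction and the destructor checking that the popped frame matches the pushed one, the guard is used as a plain RAII scope. A hedged usage sketch; BuildFrame() is illustrative, not from this patch:

    // Push a shadow frame for the duration of a scope; the destructor pops it and
    // DCHECKs that the popped frame is the one that was pushed.
    void BuildFrame(Thread* self, ShadowFrame* frame) {
      ScopedStackedShadowFramePusher pusher(self, frame);  // DCHECKs frame->GetLink() == nullptr.
      // ... populate the frame while it is visible to the GC as "under construction" ...
    }
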
@@ -2186,16 +2272,10 @@ class ScopedTransitioningToRunnable : public ValueObject {
explicit ScopedTransitioningToRunnable(Thread* self)
: self_(self) {
DCHECK_EQ(self, Thread::Current());
- if (kUseReadBarrier) {
- self_->SetIsTransitioningToRunnable(true);
- }
+ self_->SetIsTransitioningToRunnable(true);
}
- ~ScopedTransitioningToRunnable() {
- if (kUseReadBarrier) {
- self_->SetIsTransitioningToRunnable(false);
- }
- }
+ ~ScopedTransitioningToRunnable() { self_->SetIsTransitioningToRunnable(false); }
private:
Thread* const self_;