Diffstat (limited to 'runtime/thread.h')
-rw-r--r--  runtime/thread.h | 44 ++++++++++++++++++++++++++++++--------------
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/runtime/thread.h b/runtime/thread.h
index 1085a563c4..7eb57e914c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -171,6 +171,20 @@ enum class DeoptimizationMethodType {
   kDefault  // dex pc may or may not advance depending on other conditions.
 };
 
+// For the CC collector, normal weak reference access can be disabled on a per-thread basis
+// while references are being processed. After finishing, the reference processor asynchronously
+// sets the per-thread flags back to kEnabled with release memory ordering semantics. Each
+// mutator thread should check its flag with acquire semantics before assuming that it is
+// enabled. However, that is often too expensive, so the reading thread sets it to
+// kVisiblyEnabled after it has seen the flag in the kEnabled state. The Reference.get()
+// intrinsic can thus read the flag in relaxed mode, and reread (by resorting to the slow path)
+// with acquire semantics if it sees a value of kEnabled rather than kVisiblyEnabled.
+enum class WeakRefAccessState : int32_t {
+  kVisiblyEnabled = 0,  // Enabled, and previously read with acquire load by this thread.
+  kEnabled,
+  kDisabled
+};
+
 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
 
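For context, GetWeakRefAccessEnabled() becomes an out-of-line definition below (its body lives in
thread.cc and is not part of this diff). What follows is a minimal standalone sketch of the
reader-side protocol described in the comment above, written as a free function over an explicit
flag rather than ART's Thread member; the function name and shape are illustrative, not ART's
actual code.

#include <atomic>
#include <cstdint>

enum class WeakRefAccessState : int32_t {
  kVisiblyEnabled = 0,
  kEnabled,
  kDisabled
};

// Sketch of the owning thread's check. The common case costs one relaxed
// load; the acquire reload happens only when the flag was recently
// re-enabled (or is still disabled).
bool WeakRefAccessEnabledSketch(std::atomic<WeakRefAccessState>& flag) {
  WeakRefAccessState state = flag.load(std::memory_order_relaxed);
  if (state == WeakRefAccessState::kVisiblyEnabled) {
    return true;  // This thread already observed re-enablement with acquire.
  }
  // Reread with acquire: if the reference processor published kEnabled with
  // a release store, everything it wrote beforehand is now visible here.
  state = flag.load(std::memory_order_acquire);
  if (state == WeakRefAccessState::kEnabled) {
    // Only the owning thread performs this upgrade, so a relaxed store is
    // enough; it merely records that the acquire load already happened.
    flag.store(WeakRefAccessState::kVisiblyEnabled, std::memory_order_relaxed);
    return true;
  }
  return false;  // kDisabled: the caller must fall back to the slow path.
}

The payoff of the extra kVisiblyEnabled state is that the Reference.get() intrinsic pays the
acquire cost at most once per disable/enable cycle per thread, rather than on every call.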
@@ -994,14 +1008,13 @@ class Thread {
 
   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
 
-  bool GetWeakRefAccessEnabled() const {
-    CHECK(kUseReadBarrier);
-    return tls32_.weak_ref_access_enabled;
-  }
+  bool GetWeakRefAccessEnabled() const;  // Only safe for current thread.
 
   void SetWeakRefAccessEnabled(bool enabled) {
     CHECK(kUseReadBarrier);
-    tls32_.weak_ref_access_enabled = enabled;
+    WeakRefAccessState new_state = enabled ?
+        WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
+    tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
   }
 
   uint32_t GetDisableThreadFlipCount() const {
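The release store in SetWeakRefAccessEnabled() is what the acquire reload in the sketch above
pairs with. Below is a hedged sketch of the re-enabling side, reusing the WeakRefAccessState enum
from that sketch and assuming a hypothetical helper where the real reference processor would
iterate over ART's thread list.

#include <atomic>
#include <vector>

// Illustrative only: re-enable weak ref access for every mutator thread.
// Once a mutator's acquire load observes kEnabled, all writes made during
// reference processing are guaranteed visible to that mutator.
void ReenableWeakRefAccessSketch(
    const std::vector<std::atomic<WeakRefAccessState>*>& per_thread_flags) {
  for (std::atomic<WeakRefAccessState>* flag : per_thread_flags) {
    flag->store(WeakRefAccessState::kEnabled, std::memory_order_release);
  }
}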
@@ -1672,7 +1685,7 @@ class Thread {
           thread_exit_check_count(0),
           is_transitioning_to_runnable(false),
           is_gc_marking(false),
-          weak_ref_access_enabled(true),
+          weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
           disable_thread_flip_count(0),
           user_code_suspend_count(0),
           force_interpreter_count(0),
@@ -1728,14 +1741,17 @@ class Thread {
 
   AtomicInteger park_state_;
 
-  // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
-  // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
-  // processing of the CC collector only. This is thread local so that we can enable/disable weak
-  // ref access by using a checkpoint and avoid a race around the time weak ref access gets
-  // disabled and concurrent reference processing begins (if weak ref access is disabled during a
-  // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
-  // ReferenceProcessor::EnableSlowPath().
-  bool32_t weak_ref_access_enabled;
+  // Determines whether the thread is allowed to directly access a weak ref
+  // (Reference::GetReferent() and system weaks) and to potentially mark an object alive/gray.
+  // This is used for concurrent reference processing by the CC collector only. It is thread
+  // local so that we can enable/disable weak ref access with a checkpoint and avoid a race
+  // around the time weak ref access gets disabled and concurrent reference processing begins
+  // (if weak ref access is disabled during a pause, this is not an issue). Other collectors use
+  // Runtime::DisallowNewSystemWeaks() and ReferenceProcessor::EnableSlowPath(). Can be read
+  // concurrently by GetReferent() and set concurrently (by iterating over threads). Readers
+  // may change it from kEnabled to kVisiblyEnabled; no other concurrent access is possible
+  // while that happens.
+  mutable std::atomic<WeakRefAccessState> weak_ref_access_enabled;
 
   // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
   // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI