Always access Thread state and flags as a 32-bit location.
Rewrite accesses to Thread's state and flags to use 32-bit
atomic operations. Avoid `volatile` accesses that prevent
compiler optimizations.
Change `ThreadState` and `ThreadFlag` to `enum class`es.
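
For reference, a minimal C++ sketch of the idea: the state and the
flag bits share one 32-bit word that is read and written with plain
std::atomic operations, and the scoped enums force qualified
enumerators at call sites. The bit layout, enumerator values and
memory orderings below are illustrative assumptions, not the actual
ART implementation.

  #include <atomic>
  #include <cstdint>

  // Scoped enums: call sites must spell ThreadState::kRunnable,
  // not a bare kRunnable.
  enum class ThreadState : uint32_t { kRunnable = 0u, kNative, kSuspended };
  enum class ThreadFlag : uint32_t {
    kSuspendRequest = 1u << 0,
    kCheckpointRequest = 1u << 1,
  };

  // Minimal sketch, not the real art::Thread.
  class Thread {
   public:
    ThreadState GetState() const {
      // One 32-bit atomic load observes state and flags together;
      // no volatile access is needed to get a fresh value.
      uint32_t bits = state_and_flags_.load(std::memory_order_relaxed);
      return static_cast<ThreadState>(bits >> kStateShift);
    }

    bool IsFlagSet(ThreadFlag flag) const {
      uint32_t bits = state_and_flags_.load(std::memory_order_relaxed);
      return (bits & static_cast<uint32_t>(flag)) != 0u;
    }

    void AtomicSetFlag(ThreadFlag flag) {
      // A read-modify-write on the same 32-bit location keeps the
      // flag update atomic with respect to concurrent state changes.
      state_and_flags_.fetch_or(static_cast<uint32_t>(flag),
                                std::memory_order_seq_cst);
    }

   private:
    // Assumed split: low 16 bits hold flags, high 16 bits the state.
    static constexpr uint32_t kStateShift = 16u;
    std::atomic<uint32_t> state_and_flags_{
        static_cast<uint32_t>(ThreadState::kNative) << kStateShift};
  };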
Golem results for art-opt-cc (higher is better):
linux-ia32                        before   after
NativeDowncallStaticNormal        28.162   35.323 (+25.43%)
NativeDowncallStaticNormal6       26.447   32.951 (+24.59%)
NativeDowncallStaticNormalRefs6
NativeDowncallVirtualNormal       27.972   35.027 (+25.22%)
NativeDowncallVirtualNormal6      26.096   32.131 (+23.13%)
NativeDowncallVirtualNormalRefs6  25.922   31.873 (+22.95%)
linux-x64                         before   after
NativeDowncallStaticNormal        26.987   34.380 (+27.40%)
NativeDowncallStaticNormal6       25.424   31.096 (+22.31%)
NativeDowncallStaticNormalRefs6   25.086   30.602 (+21.99%)
NativeDowncallVirtualNormal       26.812   33.234 (+23.95%)
NativeDowncallVirtualNormal6      25.086   30.617 (+22.05%)
NativeDowncallVirtualNormalRefs6  25.086   30.602 (+21.99%)
linux-armv7                       before   after
NativeDowncallStaticNormal        7.2394   7.9523 (+9.848%)
NativeDowncallStaticNormal6       6.8527   7.4888 (+9.283%)
NativeDowncallStaticNormalRefs6   6.3976   6.9444 (+8.547%)
NativeDowncallVirtualNormal       7.2081   7.9130 (+9.779%)
NativeDowncallVirtualNormal6      6.8527   7.4888 (+9.283%)
NativeDowncallVirtualNormalRefs6  6.3168   6.8527 (+8.483%)
linux-armv8                       before   after
NativeDowncallStaticNormal        7.0389   7.5973 (+7.933%)
NativeDowncallStaticNormal6       6.8527   7.3783 (+7.670%)
NativeDowncallStaticNormalRefs6   6.2924   6.8226 (+8.427%)
NativeDowncallVirtualNormal       6.8527   7.3783 (+7.670%)
NativeDowncallVirtualNormal6      6.5604   7.0423 (+7.344%)
NativeDowncallVirtualNormalRefs6  6.1408   6.5329 (+6.386%)
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: run-gtests.sh
Test: testrunner.py --target --optimizing --interpreter
Bug: 172332525
Bug: 143299880
Change-Id: Ib55d457ad8f5d9e1159b681dfd279d1f9cfb2af7
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 45562c4..5410bb0 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2550,7 +2550,7 @@
}
{
// Handle wrapper deals with klass moving.
- ScopedThreadSuspension sts(self, kSuspended);
+ ScopedThreadSuspension sts(self, ThreadState::kSuspended);
if (index < kNumYieldIterations) {
sched_yield();
} else {
@@ -2935,7 +2935,7 @@
soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get()));
ScopedLocalRef<jobject> result(soa.Env(), nullptr);
{
- ScopedThreadStateChange tsc(self, kNative);
+ ScopedThreadStateChange tsc(self, ThreadState::kNative);
ScopedLocalRef<jobject> class_name_object(
soa.Env(), soa.Env()->NewStringUTF(class_name_string.c_str()));
if (class_name_object.get() == nullptr) {
@@ -3228,7 +3228,7 @@
// We must be in the kRunnable state to prevent instrumentation from
// suspending all threads to update entrypoints while we are doing it
// for this class.
- DCHECK_EQ(self->GetState(), kRunnable);
+ DCHECK_EQ(self->GetState(), ThreadState::kRunnable);
Runtime::Current()->GetInstrumentation()->InstallStubsForClass(h_new_class.Get());
}
@@ -7439,7 +7439,7 @@
if (methods != old_methods && old_methods != nullptr) {
// Need to make sure the GC is not running since it could be scanning the methods we are
// about to overwrite.
- ScopedThreadStateChange tsc(self_, kSuspended);
+ ScopedThreadStateChange tsc(self_, ThreadState::kSuspended);
gc::ScopedGCCriticalSection gcs(self_,
gc::kGcCauseClassLinker,
gc::kCollectorTypeClassLinker);