Always access Thread state and flags as a 32-bit location.

Rewrite accesses to the Thread's state and flags to use 32-bit
atomic operations. Avoid `volatile` accesses, which prevent
compiler optimizations.
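
A minimal sketch of the access pattern (the layout and the helper
names `GetStateAndFlags`, `ReadFlag`, `AtomicSetFlag` here are
illustrative, not the exact ART API): state and flags share one
32-bit word read and written with `std::atomic` operations instead
of through a `volatile` field.

    #include <atomic>
    #include <cstdint>

    // Sketch: flags in the low bits, state in the high bits of a
    // single 32-bit word (this exact layout is an assumption).
    class Thread {
     public:
      uint32_t GetStateAndFlags() const {
        // One relaxed 32-bit load; unlike a volatile read, the
        // compiler may combine or hoist it where that is safe.
        return state_and_flags_.load(std::memory_order_relaxed);
      }

      bool ReadFlag(uint32_t flag_bit) const {
        return (GetStateAndFlags() & flag_bit) != 0u;
      }

      void AtomicSetFlag(uint32_t flag_bit) {
        // Read-modify-write on the same 32-bit location, so state
        // and flags can never be observed out of sync.
        state_and_flags_.fetch_or(flag_bit, std::memory_order_seq_cst);
      }

     private:
      std::atomic<uint32_t> state_and_flags_{0u};
    };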

Change `ThreadState` and `ThreadFlag` to `enum class`es.
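
A minimal sketch of what the `enum class` change means at call
sites (the enumerator values below are illustrative, not ART's
actual ones); the diff below shows the resulting qualification,
e.g. `kSuspended` becoming `ThreadState::kSuspended`:

    #include <cstdint>

    // Scoped enums: enumerators no longer leak into the enclosing
    // scope and no longer convert implicitly to integers.
    enum class ThreadState : uint8_t { kTerminated, kRunnable, kSuspended };
    enum class ThreadFlag : uint32_t { kSuspendRequest = 1u << 0 };

    int main() {
      ThreadState state = ThreadState::kSuspended;  // was: kSuspended
      // uint32_t raw = state;  // no longer compiles without a cast
      uint32_t raw = static_cast<uint32_t>(state);
      return raw == 2u ? 0 : 1;
    }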

Golem results for art-opt-cc (higher is better):
linux-ia32                       before after
NativeDowncallStaticNormal       28.162 35.323 (+25.43%)
NativeDowncallStaticNormal6      26.447 32.951 (+24.59%)
NativeDowncallStaticNormalRefs6
NativeDowncallVirtualNormal      27.972 35.027 (+25.22%)
NativeDowncallVirtualNormal6     26.096 32.131 (+23.13%)
NativeDowncallVirtualNormalRefs6 25.922 31.873 (+22.95%)
linux-x64                        before after
NativeDowncallStaticNormal       26.987 34.380 (+27.40%)
NativeDowncallStaticNormal6      25.424 31.096 (+22.31%)
NativeDowncallStaticNormalRefs6  25.086 30.602 (+21.99%)
NativeDowncallVirtualNormal      26.812 33.234 (+23.95%)
NativeDowncallVirtualNormal6     25.086 30.617 (+22.05%)
NativeDowncallVirtualNormalRefs6 25.086 30.602 (+21.99%)
linux-armv7                      before after
NativeDowncallStaticNormal       7.2394 7.9523 (+9.848%)
NativeDowncallStaticNormal6      6.8527 7.4888 (+9.283%)
NativeDowncallStaticNormalRefs6  6.3976 6.9444 (+8.547%)
NativeDowncallVirtualNormal      7.2081 7.9130 (+9.779%)
NativeDowncallVirtualNormal6     6.8527 7.4888 (+9.283%)
NativeDowncallVirtualNormalRefs6 6.3168 6.8527 (+8.483%)
linux-armv8                      before after
NativeDowncallStaticNormal       7.0389 7.5973 (+7.933%)
NativeDowncallStaticNormal6      6.8527 7.3783 (+7.670%)
NativeDowncallStaticNormalRefs6  6.2924 6.8226 (+8.427%)
NativeDowncallVirtualNormal      6.8527 7.3783 (+7.670%)
NativeDowncallVirtualNormal6     6.5604 7.0423 (+7.344%)
NativeDowncallVirtualNormalRefs6 6.1408 6.5329 (+6.386%)

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: run-gtests.sh
Test: testrunner.py --target --optimizing --interpreter
Bug: 172332525
Bug: 143299880
Change-Id: Ib55d457ad8f5d9e1159b681dfd279d1f9cfb2af7
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3fcb10a..5cf08f9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -568,7 +568,7 @@
   if (IsWeakAccessEnabled(self)) {
     return;
   }
-  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
+  ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
   MutexLock mu(self, *Locks::jit_lock_);
   while (!IsWeakAccessEnabled(self)) {
     inline_cache_cond_.Wait(self);
@@ -625,7 +625,7 @@
   while (collection_in_progress_) {
     Locks::jit_lock_->Unlock(self);
     {
-      ScopedThreadSuspension sts(self, kSuspended);
+      ScopedThreadSuspension sts(self, ThreadState::kSuspended);
       MutexLock mu(self, *Locks::jit_lock_);
       WaitForPotentialCollectionToComplete(self);
     }
@@ -943,7 +943,7 @@
   while (true) {
     bool at_max_capacity = false;
     {
-      ScopedThreadSuspension sts(self, kSuspended);
+      ScopedThreadSuspension sts(self, ThreadState::kSuspended);
       MutexLock mu(self, *Locks::jit_lock_);
       WaitForPotentialCollectionToComplete(self);
       ScopedCodeCacheWrite ccw(*region);
@@ -1069,7 +1069,7 @@
   threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
   // Now that we have run our checkpoint, move to a suspended state and wait
   // for other threads to run the checkpoint.
-  ScopedThreadSuspension sts(self, kSuspended);
+  ScopedThreadSuspension sts(self, ThreadState::kSuspended);
   if (threads_running_checkpoint != 0) {
     barrier.Increment(self, threads_running_checkpoint);
   }
@@ -1100,7 +1100,7 @@
   ScopedTrace trace(__FUNCTION__);
   // Wait for an existing collection, or let everyone know we are starting one.
   {
-    ScopedThreadSuspension sts(self, kSuspended);
+    ScopedThreadSuspension sts(self, ThreadState::kSuspended);
     MutexLock mu(self, *Locks::jit_lock_);
     if (!garbage_collect_code_) {
       private_region_.IncreaseCodeCacheCapacity();