Merge "Revert^2 "Compiler changes for bitstring based type checks.""
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6836f75..4093833 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1646,7 +1646,7 @@
     self->AssertNoPendingException();
     CHECK_GT(work_units, 0U);
 
-    index_.StoreRelaxed(begin);
+    index_.store(begin, std::memory_order_relaxed);
     for (size_t i = 0; i < work_units; ++i) {
       thread_pool_->AddTask(self, new ForAllClosureLambda<Fn>(this, end, fn));
     }
@@ -1664,7 +1664,7 @@
   }
 
   size_t NextIndex() {
-    return index_.FetchAndAddSequentiallyConsistent(1);
+    return index_.fetch_add(1, std::memory_order_seq_cst);
   }
 
  private:
@@ -2408,6 +2408,7 @@
             // The boot image case doesn't need to recursively initialize the dependencies with
             // special logic since the class linker already does this.
             can_init_static_fields =
+                ClassLinker::kAppImageMayContainStrings &&
                 !soa.Self()->IsExceptionPending() &&
                 is_superclass_initialized &&
                 NoClinitInDependency(klass, soa.Self(), &class_loader);
@@ -2928,7 +2929,8 @@
                                                               /*expected*/ nullptr,
                                                               compiled_method);
   CHECK(result == MethodTable::kInsertResultSuccess);
-  non_relative_linker_patch_count_.FetchAndAddRelaxed(non_relative_linker_patch_count);
+  non_relative_linker_patch_count_.fetch_add(non_relative_linker_patch_count,
+                                             std::memory_order_relaxed);
   DCHECK(GetCompiledMethod(method_ref) != nullptr) << method_ref.PrettyMethod();
 }
 
@@ -3039,7 +3041,7 @@
 }
 
 size_t CompilerDriver::GetNonRelativeLinkerPatchCount() const {
-  return non_relative_linker_patch_count_.LoadRelaxed();
+  return non_relative_linker_patch_count_.load(std::memory_order_relaxed);
 }
 
 void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
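The compiler_driver.cc hunks above follow a tree-wide mechanical migration: ART's Atomic<T> convenience wrappers (LoadRelaxed, StoreRelaxed, FetchAndAdd*) are replaced by the underlying std::atomic<T> member functions with explicit std::memory_order arguments. A minimal standalone sketch of the before/after shape, using plain std::atomic and a hypothetical counter standing in for non_relative_linker_patch_count_:

#include <atomic>
#include <cstddef>

// Hypothetical counter mirroring the usage above.
std::atomic<std::size_t> patch_count{0};

void Record(std::size_t n) {
  // Old spelling: patch_count.FetchAndAddRelaxed(n);
  patch_count.fetch_add(n, std::memory_order_relaxed);
}

std::size_t Read() {
  // Old spelling: patch_count.LoadRelaxed();
  return patch_count.load(std::memory_order_relaxed);
}
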
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index f4760d6..2e31d35 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -214,6 +214,11 @@
     DCHECK(target_block != nullptr);
   }
 
+  // Bail if the instruction can throw and we are about to move into a catch block.
+  if (instruction->CanThrow() && target_block->GetTryCatchInformation() != nullptr) {
+    return nullptr;
+  }
+
   // Find insertion position. No need to filter anymore, as we have found a
   // target block.
   HInstruction* insert_pos = nullptr;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index fa1d96b..676fe6b 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -859,7 +859,7 @@
 static HInstruction* NewIntegralAbs(ArenaAllocator* allocator,
                                     HInstruction* x,
                                     HInstruction* cursor) {
-  DataType::Type type = x->GetType();
+  DataType::Type type = DataType::Kind(x->GetType());
   DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
   HAbs* abs = new (allocator) HAbs(type, x, cursor->GetDexPc());
   cursor->GetBlock()->InsertInstructionBefore(abs, cursor);
@@ -872,7 +872,7 @@
                                        HInstruction* y,
                                        HInstruction* cursor,
                                        bool is_min) {
-  DataType::Type type = x->GetType();
+  DataType::Type type = DataType::Kind(x->GetType());
   DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
   HBinaryOperation* minmax = nullptr;
   if (is_min) {
@@ -946,9 +946,9 @@
     DataType::Type t_type = true_value->GetType();
     DataType::Type f_type = false_value->GetType();
     // Here we have a <cmp> b ? true_value : false_value.
-    // Test if both values are same-typed int or long.
-    if (t_type == f_type &&
-        (t_type == DataType::Type::kInt32 || t_type == DataType::Type::kInt64)) {
+    // Test if both values are compatible integral types (resulting
+    // MIN/MAX/ABS type will be int or long, like the condition).
+    if (DataType::IsIntegralType(t_type) && DataType::Kind(t_type) == DataType::Kind(f_type)) {
       // Try to replace typical integral MIN/MAX/ABS constructs.
       if ((cmp == kCondLT || cmp == kCondLE || cmp == kCondGT || cmp == kCondGE) &&
           ((a == true_value && b == false_value) ||
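The simplifier hunks above switch MIN/MAX/ABS recognition from exact type equality to DataType::Kind() comparison: sub-word integral HIR values (bool, byte, char, short) are computed in 32-bit registers, so their kind canonicalizes to kInt32. A standalone sketch, with a hypothetical Kind() standing in for art::DataType::Kind(), of why a short-typed select arm now matches an int-typed one:

#include <cassert>

enum class Type { kBool, kInt8, kInt16, kUint16, kInt32, kInt64 };

// Hypothetical stand-in for art::DataType::Kind(): sub-word integral
// types canonicalize to kInt32, matching how the HIR computes them.
Type Kind(Type t) {
  switch (t) {
    case Type::kBool:
    case Type::kInt8:
    case Type::kInt16:
    case Type::kUint16:
      return Type::kInt32;
    default:
      return t;
  }
}

int main() {
  // A short-typed and an int-typed arm compare equal by kind, so the
  // MIN/MAX/ABS rewrite above is no longer rejected for mixed widths.
  assert(Kind(Type::kInt16) == Kind(Type::kInt32));
  // Long still only matches long.
  assert(Kind(Type::kInt64) != Kind(Type::kInt32));
  return 0;
}
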
diff --git a/compiler/utils/atomic_dex_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h
index 7977e82..4bd323d 100644
--- a/compiler/utils/atomic_dex_ref_map-inl.h
+++ b/compiler/utils/atomic_dex_ref_map-inl.h
@@ -70,7 +70,7 @@
   if (array == nullptr) {
     return false;
   }
-  *out = (*array)[ref.index].LoadRelaxed();
+  *out = (*array)[ref.index].load(std::memory_order_relaxed);
   return true;
 }
 
@@ -81,8 +81,8 @@
   if (array == nullptr) {
     return false;
   }
-  *out = (*array)[ref.index].LoadRelaxed();
-  (*array)[ref.index].StoreSequentiallyConsistent(nullptr);
+  *out = (*array)[ref.index].load(std::memory_order_relaxed);
+  (*array)[ref.index].store(nullptr, std::memory_order_seq_cst);
   return true;
 }
 
@@ -121,7 +121,7 @@
     const DexFile* dex_file = pair.first;
     const ElementArray& elements = pair.second;
     for (size_t i = 0; i < elements.size(); ++i) {
-      visitor(DexFileReference(dex_file, i), elements[i].LoadRelaxed());
+      visitor(DexFileReference(dex_file, i), elements[i].load(std::memory_order_relaxed));
     }
   }
 }
@@ -130,7 +130,7 @@
 inline void AtomicDexRefMap<DexFileReferenceType, Value>::ClearEntries() {
   for (auto& it : arrays_) {
     for (auto& element : it.second) {
-      element.StoreRelaxed(nullptr);
+      element.store(nullptr, std::memory_order_relaxed);
     }
   }
 }
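AtomicDexRefMap keeps one fixed-size array of atomic slots per dex file, so Get, Remove, and Visit all reduce to single loads and stores on a slot, as converted above. A simplified single-array model of that structure (the names are illustrative, not ART's):

#include <atomic>
#include <cstddef>
#include <vector>

// One slot per dex-file index; slots hold pointers owned elsewhere.
// Readers use relaxed loads; Remove publishes the nullptr with seq_cst,
// mirroring the Remove() hunk above.
template <typename T>
class AtomicSlotMap {
 public:
  explicit AtomicSlotMap(std::size_t n) : slots_(n) {
    for (auto& slot : slots_) {
      slot.store(nullptr, std::memory_order_relaxed);
    }
  }

  bool Get(std::size_t i, T** out) const {
    if (i >= slots_.size()) {
      return false;
    }
    *out = slots_[i].load(std::memory_order_relaxed);
    return true;
  }

  bool Remove(std::size_t i, T** out) {
    if (i >= slots_.size()) {
      return false;
    }
    *out = slots_[i].load(std::memory_order_relaxed);
    slots_[i].store(nullptr, std::memory_order_seq_cst);
    return true;
  }

 private:
  std::vector<std::atomic<T*>> slots_;
};
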
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9b37017..6950b93 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1609,11 +1609,9 @@
         // Unzip or copy dex files straight to the oat file.
         std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
         std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
-        // No need to verify the dex file for:
-        // 1) Dexlayout since it does the verification. It also may not pass the verification since
-        // we don't update the dex checksum.
-        // 2) when we have a vdex file, which means it was already verified.
-        const bool verify = !DoDexLayoutOptimizations() && (input_vdex_file_ == nullptr);
+        // No need to verify the dex file when we have a vdex file, which means it was already
+        // verified.
+        const bool verify = (input_vdex_file_ == nullptr);
         if (!oat_writers_[i]->WriteAndOpenDexFiles(
             vdex_files_[i].get(),
             rodata_.back(),
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 5590c8b..0cd39ac 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -2013,4 +2013,84 @@
   ASSERT_EQ(vdex_unquickened->FlushCloseOrErase(), 0) << "Could not flush and close";
 }
 
+// Test that compact dex generation with invalid dex files doesn't crash dex2oat. b/75970654
+TEST_F(Dex2oatTest, CompactDexInvalidSource) {
+  ScratchFile invalid_dex;
+  {
+    FILE* file = fdopen(invalid_dex.GetFd(), "w+b");
+    ZipWriter writer(file);
+    writer.StartEntry("classes.dex", ZipWriter::kAlign32);
+    DexFile::Header header = {};
+    StandardDexFile::WriteMagic(header.magic_);
+    StandardDexFile::WriteCurrentVersion(header.magic_);
+    header.file_size_ = 4 * KB;
+    header.data_size_ = 4 * KB;
+    header.data_off_ = 10 * MB;
+    header.map_off_ = 10 * MB;
+    header.class_defs_off_ = 10 * MB;
+    header.class_defs_size_ = 10000;
+    ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
+    writer.FinishEntry();
+    writer.Finish();
+    ASSERT_EQ(invalid_dex.GetFile()->Flush(), 0);
+  }
+  const std::string dex_location = invalid_dex.GetFilename();
+  const std::string odex_location = GetOdexDir() + "/output.odex";
+  std::string error_msg;
+  int status = GenerateOdexForTestWithStatus(
+      {dex_location},
+      odex_location,
+      CompilerFilter::kQuicken,
+      &error_msg,
+      { "--compact-dex-level=fast" });
+  ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
+}
+
+// Test that dex2oat with a CompactDex file in the APK fails.
+TEST_F(Dex2oatTest, CompactDexInZip) {
+  CompactDexFile::Header header = {};
+  CompactDexFile::WriteMagic(header.magic_);
+  CompactDexFile::WriteCurrentVersion(header.magic_);
+  header.file_size_ = sizeof(CompactDexFile::Header);
+  header.data_off_ = 10 * MB;
+  header.map_off_ = 10 * MB;
+  header.class_defs_off_ = 10 * MB;
+  header.class_defs_size_ = 10000;
+  // Create a zip containing the invalid dex.
+  ScratchFile invalid_dex_zip;
+  {
+    FILE* file = fdopen(invalid_dex_zip.GetFd(), "w+b");
+    ZipWriter writer(file);
+    writer.StartEntry("classes.dex", ZipWriter::kCompress);
+    ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
+    writer.FinishEntry();
+    writer.Finish();
+    ASSERT_EQ(invalid_dex_zip.GetFile()->Flush(), 0);
+  }
+  // Create the dex file directly.
+  ScratchFile invalid_dex;
+  {
+    ASSERT_GE(invalid_dex.GetFile()->WriteFully(&header, sizeof(header)), 0);
+    ASSERT_EQ(invalid_dex.GetFile()->Flush(), 0);
+  }
+  std::string error_msg;
+  int status = 0u;
+
+  status = GenerateOdexForTestWithStatus(
+      { invalid_dex_zip.GetFilename() },
+      GetOdexDir() + "/output_apk.odex",
+      CompilerFilter::kQuicken,
+      &error_msg,
+      { "--compact-dex-level=fast" });
+  ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
+
+  status = GenerateOdexForTestWithStatus(
+      { invalid_dex.GetFilename() },
+      GetOdexDir() + "/output.odex",
+      CompilerFilter::kQuicken,
+      &error_msg,
+      { "--compact-dex-level=fast" });
+  ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
+}
+
 }  // namespace art
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index a424145..17da789 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -83,9 +83,9 @@
   if (kEnableTrackingAllocator) {
     os << "Dumping native memory usage\n";
     for (size_t i = 0; i < kAllocatorTagCount; ++i) {
-      uint64_t bytes_used = g_bytes_used[i].LoadRelaxed();
+      uint64_t bytes_used = g_bytes_used[i].load(std::memory_order_relaxed);
       uint64_t max_bytes_used = g_max_bytes_used[i];
-      uint64_t total_bytes_used = g_total_bytes_used[i].LoadRelaxed();
+      uint64_t total_bytes_used = g_total_bytes_used[i].load(std::memory_order_relaxed);
       if (total_bytes_used != 0) {
         os << static_cast<AllocatorTag>(i) << " active=" << bytes_used << " max="
            << max_bytes_used << " total=" << total_bytes_used << "\n";
diff --git a/libartbase/base/allocator.h b/libartbase/base/allocator.h
index d92fe19..7ddbacf 100644
--- a/libartbase/base/allocator.h
+++ b/libartbase/base/allocator.h
@@ -84,15 +84,15 @@
 void Dump(std::ostream& os);
 
 inline void RegisterAllocation(AllocatorTag tag, size_t bytes) {
-  g_total_bytes_used[tag].FetchAndAddSequentiallyConsistent(bytes);
-  size_t new_bytes = g_bytes_used[tag].FetchAndAddSequentiallyConsistent(bytes) + bytes;
+  g_total_bytes_used[tag].fetch_add(bytes, std::memory_order_seq_cst);
+  size_t new_bytes = g_bytes_used[tag].fetch_add(bytes, std::memory_order_seq_cst) + bytes;
   if (g_max_bytes_used[tag] < new_bytes) {
     g_max_bytes_used[tag] = new_bytes;
   }
 }
 
 inline void RegisterFree(AllocatorTag tag, size_t bytes) {
-  g_bytes_used[tag].FetchAndSubSequentiallyConsistent(bytes);
+  g_bytes_used[tag].fetch_sub(bytes, std::memory_order_seq_cst);
 }
 
 }  // namespace TrackedAllocators
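RegisterAllocation above pairs two seq_cst fetch_adds with a non-atomic high-watermark update; two racing threads can both pass the comparison and one maximum can be lost, which the tracking code evidently tolerates since it only feeds the Dump() diagnostics. A self-contained sketch of the same pattern:

#include <atomic>
#include <cstddef>

std::atomic<std::size_t> g_bytes_used{0};
std::atomic<std::size_t> g_total_bytes_used{0};
std::size_t g_max_bytes_used = 0;  // Plain variable: the update below races.

void RegisterAllocation(std::size_t bytes) {
  g_total_bytes_used.fetch_add(bytes, std::memory_order_seq_cst);
  std::size_t new_bytes =
      g_bytes_used.fetch_add(bytes, std::memory_order_seq_cst) + bytes;
  // Racy max, as in the tracking allocator above: a concurrent larger
  // value may overwrite a smaller one or be lost entirely.
  if (g_max_bytes_used < new_bytes) {
    g_max_bytes_used = new_bytes;
  }
}

void RegisterFree(std::size_t bytes) {
  g_bytes_used.fetch_sub(bytes, std::memory_order_seq_cst);
}
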
diff --git a/libartbase/base/atomic.h b/libartbase/base/atomic.h
index fd34cc6..f736667 100644
--- a/libartbase/base/atomic.h
+++ b/libartbase/base/atomic.h
@@ -35,94 +35,28 @@
 
   explicit Atomic<T>(T value) : std::atomic<T>(value) { }
 
-  // Load from memory without ordering or synchronization constraints.
-  T LoadRelaxed() const {
-    return this->load(std::memory_order_relaxed);
-  }
-
-  // Load from memory with acquire ordering.
-  T LoadAcquire() const {
-    return this->load(std::memory_order_acquire);
-  }
-
-  // Word tearing allowed, but may race.
-  // TODO: Optimize?
-  // There has been some discussion of eventually disallowing word
-  // tearing for Java data loads.
+  // Load data from an atomic variable with Java data memory order semantics.
+  //
+  // Promises memory access semantics of ordinary Java data.
+  // Does not order other memory accesses.
+  // Long and double accesses may be performed 32 bits at a time.
+  // There are no "cache coherence" guarantees; e.g. loads from the same location may be reordered.
+  // In contrast to normal C++ accesses, racing accesses are allowed.
   T LoadJavaData() const {
     return this->load(std::memory_order_relaxed);
   }
 
-  // Load from memory with a total ordering.
-  // Corresponds exactly to a Java volatile load.
-  T LoadSequentiallyConsistent() const {
-    return this->load(std::memory_order_seq_cst);
-  }
-
-  // Store to memory without ordering or synchronization constraints.
-  void StoreRelaxed(T desired_value) {
-    this->store(desired_value, std::memory_order_relaxed);
-  }
-
-  // Word tearing allowed, but may race.
+  // Store data in an atomic variable with Java data memory ordering semantics.
+  //
+  // Promises memory access semantics of ordinary Java data.
+  // Does not order other memory accesses.
+  // Long and double accesses may be performed 32 bits at a time.
+  // There are no "cache coherence" guarantees; e.g. loads from the same location may be reordered.
+  // In contrast to normal C++ accesses, racing accesses are allowed.
   void StoreJavaData(T desired_value) {
     this->store(desired_value, std::memory_order_relaxed);
   }
 
-  // Store to memory with release ordering.
-  void StoreRelease(T desired_value) {
-    this->store(desired_value, std::memory_order_release);
-  }
-
-  // Store to memory with a total ordering.
-  void StoreSequentiallyConsistent(T desired_value) {
-    this->store(desired_value, std::memory_order_seq_cst);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeRelaxed(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_relaxed);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeSequentiallyConsistent(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_seq_cst);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeAcquire(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_acquire);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeRelease(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_release);
-  }
-
-  // Atomically replace the value with desired_value if it matches the expected_value.
-  // Participates in total ordering of atomic operations. Returns true on success, false otherwise.
-  // If the value does not match, updates the expected_value argument with the value that was
-  // atomically read for the failed comparison.
-  bool CompareAndExchangeStrongSequentiallyConsistent(T* expected_value, T desired_value) {
-    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_seq_cst);
-  }
-
-  // Atomically replace the value with desired_value if it matches the expected_value.
-  // Participates in total ordering of atomic operations. Returns true on success, false otherwise.
-  // If the value does not match, updates the expected_value argument with the value that was
-  // atomically read for the failed comparison.
-  bool CompareAndExchangeStrongAcquire(T* expected_value, T desired_value) {
-    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_acquire);
-  }
-
-  // Atomically replace the value with desired_value if it matches the expected_value.
-  // Participates in total ordering of atomic operations. Returns true on success, false otherwise.
-  // If the value does not match, updates the expected_value argument with the value that was
-  // atomically read for the failed comparison.
-  bool CompareAndExchangeStrongRelease(T* expected_value, T desired_value) {
-    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_release);
-  }
-
   // Atomically replace the value with desired_value if it matches the expected_value.
   // Participates in total ordering of atomic operations.
   bool CompareAndSetStrongSequentiallyConsistent(T expected_value, T desired_value) {
@@ -166,66 +100,8 @@
     return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
   }
 
-  T FetchAndAddSequentiallyConsistent(const T value) {
-    return this->fetch_add(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndAddRelaxed(const T value) {
-    return this->fetch_add(value, std::memory_order_relaxed);  // Return old_value.
-  }
-
-  T FetchAndAddAcquire(const T value) {
-    return this->fetch_add(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndAddRelease(const T value) {
-    return this->fetch_add(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndSubSequentiallyConsistent(const T value) {
-    return this->fetch_sub(value, std::memory_order_seq_cst);  // Return old value.
-  }
-
-  T FetchAndSubRelaxed(const T value) {
-    return this->fetch_sub(value, std::memory_order_relaxed);  // Return old value.
-  }
-
-  T FetchAndBitwiseAndSequentiallyConsistent(const T value) {
-    return this->fetch_and(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndBitwiseAndAcquire(const T value) {
-    return this->fetch_and(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndBitwiseAndRelease(const T value) {
-    return this->fetch_and(value, std::memory_order_release);  // Return old_value.
-  }
-
-  T FetchAndBitwiseOrSequentiallyConsistent(const T value) {
-    return this->fetch_or(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndBitwiseOrAcquire(const T value) {
-    return this->fetch_or(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndBitwiseOrRelease(const T value) {
-    return this->fetch_or(value, std::memory_order_release);  // Return old_value.
-  }
-
-  T FetchAndBitwiseXorSequentiallyConsistent(const T value) {
-    return this->fetch_xor(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndBitwiseXorAcquire(const T value) {
-    return this->fetch_xor(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndBitwiseXorRelease(const T value) {
-    return this->fetch_xor(value, std::memory_order_release);  // Return old_value.
-  }
-
+  // Returns the address of the current atomic variable. This is only used by futex() which is
+  // declared to take a volatile address (see base/mutex-inl.h).
   volatile T* Address() {
     return reinterpret_cast<T*>(this);
   }
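The retained LoadJavaData/StoreJavaData comments describe Java plain-data semantics: racing accesses are allowed and well-defined (a racing non-atomic C++ access would be undefined behavior), but no surrounding memory accesses are ordered. A small model of such a field using a relaxed std::atomic, which is what the wrapper does internally; the 64-bit tearing allowance mentioned in the comments is ART-specific and not modeled here:

#include <atomic>
#include <thread>

// A hypothetical "Java plain field": races are legal and well-defined,
// but nothing orders the surrounding memory accesses.
std::atomic<int> java_field{0};

int LoadJavaData() { return java_field.load(std::memory_order_relaxed); }
void StoreJavaData(int v) { java_field.store(v, std::memory_order_relaxed); }

int main() {
  // Racing store and load: fine here, undefined behavior for a plain int.
  std::thread writer([] { StoreJavaData(42); });
  int seen = LoadJavaData();  // May observe either 0 or 42.
  writer.join();
  (void)seen;  // Either value is a valid outcome.
  return 0;
}
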
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 758a2f0..1e0f5ac 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -348,6 +348,8 @@
                                       location_checksum,
                                       oat_dex_file,
                                       std::move(container)));
+    // Disable verification for CompactDex input.
+    verify = false;
   } else {
     *error_msg = "Invalid or truncated dex file";
   }
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 07b1529..de67871 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -940,9 +940,6 @@
   }
   art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
   art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
-  art::gc::ScopedGCCriticalSection gcs(art::Thread::Current(),
-                                       art::gc::kGcCauseInstrumentation,
-                                       art::gc::kCollectorTypeInstrumentation);
   art::ScopedSuspendAll ssa("jvmti method tracing installation");
   if (enable) {
     instr->AddListener(listener, new_events);
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 83d64ef..bf2e6cd 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -42,6 +42,7 @@
 #include "dex/dex_file_types.h"
 #include "dex/modifiers.h"
 #include "events-inl.h"
+#include "gc_root-inl.h"
 #include "jit/jit.h"
 #include "jni_internal.h"
 #include "mirror/class-inl.h"
@@ -546,13 +547,12 @@
 
 class CommonLocalVariableClosure : public art::Closure {
  public:
-  CommonLocalVariableClosure(art::Thread* caller,
-                             jint depth,
-                             jint slot)
-      : result_(ERR(INTERNAL)), caller_(caller), depth_(depth), slot_(slot) {}
+  CommonLocalVariableClosure(jint depth, jint slot)
+      : result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
 
   void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
+    art::ScopedAssertNoThreadSuspension sants("CommonLocalVariableClosure::Run");
     std::unique_ptr<art::Context> context(art::Context::Create());
     FindFrameAtDepthVisitor visitor(self, context.get(), depth_);
     visitor.WalkStack();
@@ -597,17 +597,17 @@
     }
   }
 
-  jvmtiError GetResult() const {
+  virtual jvmtiError GetResult() {
     return result_;
   }
 
  protected:
   virtual jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      REQUIRES(art::Locks::mutator_lock_) = 0;
+      REQUIRES_SHARED(art::Locks::mutator_lock_) = 0;
   virtual jvmtiError GetTypeError(art::ArtMethod* method,
                                   art::Primitive::Type type,
                                   const std::string& descriptor)
-      REQUIRES(art::Locks::mutator_lock_)  = 0;
+      REQUIRES_SHARED(art::Locks::mutator_lock_)  = 0;
 
   jvmtiError GetSlotType(art::ArtMethod* method,
                          uint32_t dex_pc,
@@ -674,25 +674,35 @@
   }
 
   jvmtiError result_;
-  art::Thread* caller_;
   jint depth_;
   jint slot_;
 };
 
 class GetLocalVariableClosure : public CommonLocalVariableClosure {
  public:
-  GetLocalVariableClosure(art::Thread* caller,
-                          jint depth,
+  GetLocalVariableClosure(jint depth,
                           jint slot,
                           art::Primitive::Type type,
                           jvalue* val)
-      : CommonLocalVariableClosure(caller, depth, slot), type_(type), val_(val) {}
+      : CommonLocalVariableClosure(depth, slot),
+        type_(type),
+        val_(val),
+        obj_val_(nullptr) {}
+
+  virtual jvmtiError GetResult() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (result_ == OK && type_ == art::Primitive::kPrimNot) {
+      val_->l = obj_val_.IsNull()
+          ? nullptr
+          : art::Thread::Current()->GetJniEnv()->AddLocalReference<jobject>(obj_val_.Read());
+    }
+    return CommonLocalVariableClosure::GetResult();
+  }
 
  protected:
   jvmtiError GetTypeError(art::ArtMethod* method ATTRIBUTE_UNUSED,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimByte:
       case art::Primitive::kPrimChar:
@@ -712,7 +722,7 @@
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -722,8 +732,8 @@
                              &ptr_val)) {
           return ERR(OPAQUE_FRAME);
         }
-        art::ObjPtr<art::mirror::Object> obj(reinterpret_cast<art::mirror::Object*>(ptr_val));
-        val_->l = obj.IsNull() ? nullptr : caller_->GetJniEnv()->AddLocalReference<jobject>(obj);
+        obj_val_ = art::GcRoot<art::mirror::Object>(
+            reinterpret_cast<art::mirror::Object*>(ptr_val));
         break;
       }
       case art::Primitive::kPrimInt:
@@ -760,6 +770,7 @@
  private:
   art::Primitive::Type type_;
   jvalue* val_;
+  art::GcRoot<art::mirror::Object> obj_val_;
 };
 
 jvmtiError MethodUtil::GetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -782,9 +793,12 @@
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
-  GetLocalVariableClosure c(self, depth, slot, type, val);
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  art::ScopedAssertNoThreadSuspension sants("Performing GetLocalVariable");
+  GetLocalVariableClosure c(depth, slot, type, val);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.  We
+  // need to avoid suspending as we wait for the checkpoint to occur since we are (potentially)
+  // transferring a GcRoot across threads.
+  if (!target->RequestSynchronousCheckpoint(&c, art::ThreadState::kRunnable)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
     return c.GetResult();
@@ -798,13 +812,13 @@
                           jint slot,
                           art::Primitive::Type type,
                           jvalue val)
-      : CommonLocalVariableClosure(caller, depth, slot), type_(type), val_(val) {}
+      : CommonLocalVariableClosure(depth, slot), caller_(caller), type_(type), val_(val) {}
 
  protected:
   jvmtiError GetTypeError(art::ArtMethod* method,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimNot: {
         if (type_ != art::Primitive::kPrimNot) {
@@ -840,7 +854,7 @@
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -887,6 +901,7 @@
   }
 
  private:
+  art::Thread* caller_;
   art::Primitive::Type type_;
   jvalue val_;
 };
@@ -913,7 +928,7 @@
   }
   SetLocalVariableClosure c(self, depth, slot, type, val);
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  if (!target->RequestSynchronousCheckpoint(&c)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
     return c.GetResult();
@@ -922,13 +937,13 @@
 
 class GetLocalInstanceClosure : public art::Closure {
  public:
-  GetLocalInstanceClosure(art::Thread* caller, jint depth, jobject* val)
+  explicit GetLocalInstanceClosure(jint depth)
       : result_(ERR(INTERNAL)),
-        caller_(caller),
         depth_(depth),
-        val_(val) {}
+        val_(nullptr) {}
 
   void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+    art::ScopedAssertNoThreadSuspension sants("GetLocalInstanceClosure::Run");
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     std::unique_ptr<art::Context> context(art::Context::Create());
     FindFrameAtDepthVisitor visitor(self, context.get(), depth_);
@@ -939,19 +954,22 @@
       return;
     }
     result_ = OK;
-    art::ObjPtr<art::mirror::Object> obj = visitor.GetThisObject();
-    *val_ = obj.IsNull() ? nullptr : caller_->GetJniEnv()->AddLocalReference<jobject>(obj);
+    val_ = art::GcRoot<art::mirror::Object>(visitor.GetThisObject());
   }
 
-  jvmtiError GetResult() const {
+  jvmtiError GetResult(jobject* data_out) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (result_ == OK) {
+      *data_out = val_.IsNull()
+          ? nullptr
+          : art::Thread::Current()->GetJniEnv()->AddLocalReference<jobject>(val_.Read());
+    }
     return result_;
   }
 
  private:
   jvmtiError result_;
-  art::Thread* caller_;
   jint depth_;
-  jobject* val_;
+  art::GcRoot<art::mirror::Object> val_;
 };
 
 jvmtiError MethodUtil::GetLocalInstance(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -970,12 +988,15 @@
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
-  GetLocalInstanceClosure c(self, depth, data);
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  art::ScopedAssertNoThreadSuspension sants("Performing GetLocalInstance");
+  GetLocalInstanceClosure c(depth);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.  We
+  // need to avoid suspending as we wait for the checkpoint to occur since we are (potentially)
+  // transferring a GcRoot across threads.
+  if (!target->RequestSynchronousCheckpoint(&c, art::ThreadState::kRunnable)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
-    return c.GetResult();
+    return c.GetResult(data);
   }
 }
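All of the jvmti closures above follow the same reshaped hand-off: the closure runs on the target thread via a synchronous checkpoint and stashes the object in a GcRoot member rather than creating a JNI local reference with the caller's env, and the requesting thread converts the root to a local reference afterwards, with ScopedAssertNoThreadSuspension guarding the window in which the root crosses threads. A rough standalone model of that hand-off, with std::thread standing in for the checkpoint machinery and a plain pointer for the GcRoot:

#include <thread>

struct Object { int value; };  // Stand-in for art::mirror::Object.

struct GetLocalClosure {
  Object* root = nullptr;  // Stand-in for the GcRoot member.

  // Runs on the *target* thread (the checkpoint); only stashes the root.
  void Run(Object* target_local) { root = target_local; }

  // Runs on the *requesting* thread after the checkpoint returns; in ART
  // this is where AddLocalReference<jobject>() is called.
  Object* GetResult() const { return root; }
};

int main() {
  Object local{42};
  GetLocalClosure c;
  // Models Thread::RequestSynchronousCheckpoint(&c, kRunnable): the
  // requester blocks, and in ART must not suspend, until Run() completes.
  std::thread target([&] { c.Run(&local); });
  target.join();
  Object* result = c.GetResult();
  return result->value == 42 ? 0 : 1;
}
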
 
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 94408ba..1cfc64a 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -37,6 +37,7 @@
 #include <mutex>
 
 #include "art_jvmti.h"
+#include "gc_root-inl.h"
 #include "monitor.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
@@ -351,19 +352,17 @@
   }
   struct GetContendedMonitorClosure : public art::Closure {
    public:
-    explicit GetContendedMonitorClosure(art::Thread* current, jobject* out)
-        : result_thread_(current), out_(out) {}
+    GetContendedMonitorClosure() : out_(nullptr) {}
 
     void Run(art::Thread* target_thread) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      art::ScopedAssertNoThreadSuspension sants("GetContendedMonitorClosure::Run");
       switch (target_thread->GetState()) {
         // These three we are actually currently waiting on a monitor and have sent the appropriate
         // events (if anyone is listening).
         case art::kBlocked:
         case art::kTimedWaiting:
         case art::kWaiting: {
-          art::mirror::Object* mon = art::Monitor::GetContendedMonitor(target_thread);
-          *out_ = (mon == nullptr) ? nullptr
-                                   : result_thread_->GetJniEnv()->AddLocalReference<jobject>(mon);
+          out_ = art::GcRoot<art::mirror::Object>(art::Monitor::GetContendedMonitor(target_thread));
           return;
         }
         case art::kTerminated:
@@ -390,22 +389,30 @@
         case art::kStarting:
         case art::kNative:
         case art::kSuspended: {
-          // We aren't currently (explicitly) waiting for a monitor anything so just return null.
-          *out_ = nullptr;
+          // We aren't currently (explicitly) waiting for a monitor so just return null.
           return;
         }
       }
     }
 
+    jobject GetResult() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      return out_.IsNull()
+          ? nullptr
+          : art::Thread::Current()->GetJniEnv()->AddLocalReference<jobject>(out_.Read());
+    }
+
    private:
-    art::Thread* result_thread_;
-    jobject* out_;
+    art::GcRoot<art::mirror::Object> out_;
   };
-  GetContendedMonitorClosure closure(self, monitor);
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &closure)) {
+  art::ScopedAssertNoThreadSuspension sants("Performing GetCurrentContendedMonitor");
+  GetContendedMonitorClosure closure;
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.  We
+  // need to avoid suspending as we wait for the checkpoint to occur since we are (potentially)
+  // transferring a GcRoot across threads.
+  if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
     return ERR(THREAD_NOT_ALIVE);
   }
+  *monitor = closure.GetResult();
   return OK;
 }
 
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 373944f..41a649b 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -258,7 +258,7 @@
                                        static_cast<size_t>(start_depth),
                                        static_cast<size_t>(max_frame_count));
     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-    if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+    if (!thread->RequestSynchronousCheckpoint(&closure)) {
       return ERR(THREAD_NOT_ALIVE);
     }
     *count_ptr = static_cast<jint>(closure.index);
@@ -269,7 +269,7 @@
   } else {
     GetStackTraceVectorClosure closure(0, 0);
     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-    if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+    if (!thread->RequestSynchronousCheckpoint(&closure)) {
       return ERR(THREAD_NOT_ALIVE);
     }
 
@@ -484,7 +484,7 @@
     *stack_info_ptr = nullptr;
     return ERR(NONE);
   }
-  if (stack_info_ptr == nullptr || stack_info_ptr == nullptr) {
+  if (thread_list == nullptr || stack_info_ptr == nullptr) {
     return ERR(NULL_POINTER);
   }
 
@@ -713,7 +713,7 @@
 
   GetFrameCountClosure closure;
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+  if (!thread->RequestSynchronousCheckpoint(&closure)) {
     return ERR(THREAD_NOT_ALIVE);
   }
 
@@ -803,7 +803,7 @@
 
   GetLocationClosure closure(static_cast<size_t>(depth));
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+  if (!thread->RequestSynchronousCheckpoint(&closure)) {
     return ERR(THREAD_NOT_ALIVE);
   }
 
@@ -882,8 +882,8 @@
 template<typename Fn>
 struct MonitorInfoClosure : public art::Closure {
  public:
-  MonitorInfoClosure(art::ScopedObjectAccess& soa, Fn handle_results)
-      : soa_(soa), err_(OK), handle_results_(handle_results) {}
+  explicit MonitorInfoClosure(Fn handle_results)
+      : err_(OK), handle_results_(handle_results) {}
 
   void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
@@ -893,7 +893,7 @@
     // Find any other monitors, including ones acquired in native code.
     art::RootInfo root_info(art::kRootVMInternal);
     target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
-    err_ = handle_results_(soa_, visitor);
+    err_ = handle_results_(visitor);
   }
 
   jvmtiError GetError() {
@@ -901,17 +901,18 @@
   }
 
  private:
-  art::ScopedObjectAccess& soa_;
   jvmtiError err_;
   Fn handle_results_;
 };
 
 
 template <typename Fn>
-static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
+static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
+                                            jthread thread,
+                                            Fn handle_results)
+    REQUIRES_SHARED(art::Locks::mutator_lock_) {
   art::Thread* self = art::Thread::Current();
-  art::ScopedObjectAccess soa(self);
-  MonitorInfoClosure<Fn> closure(soa, handle_results);
+  MonitorInfoClosure<Fn> closure(handle_results);
   bool called_method = false;
   {
     art::Locks::thread_list_lock_->ExclusiveLock(self);
@@ -924,7 +925,7 @@
     if (target != self) {
       called_method = true;
       // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-      if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &closure)) {
+      if (!target->RequestSynchronousCheckpoint(&closure)) {
         return ERR(THREAD_NOT_ALIVE);
       }
     } else {
@@ -948,47 +949,64 @@
   if (info_cnt == nullptr || info_ptr == nullptr) {
     return ERR(NULL_POINTER);
   }
-  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * visitor.monitors.size();
-    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
-    if (err != OK) {
-      return err;
-    }
-    *info_cnt = visitor.monitors.size();
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  std::vector<art::GcRoot<art::mirror::Object>> mons;
+  std::vector<uint32_t> depths;
+  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
     for (size_t i = 0; i < visitor.monitors.size(); i++) {
-      (*info_ptr)[i] = {
-        soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get()),
-        visitor.stack_depths[i]
-      };
+      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
+      depths.push_back(visitor.stack_depths[i]);
     }
     return OK;
   };
-  return GetOwnedMonitorInfoCommon(thread, handle_fun);
+  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
+  if (err != OK) {
+    return err;
+  }
+  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
+  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
+  if (err != OK) {
+    return err;
+  }
+  *info_cnt = mons.size();
+  for (uint32_t i = 0; i < mons.size(); i++) {
+    (*info_ptr)[i] = {
+      soa.AddLocalReference<jobject>(mons[i].Read()),
+      static_cast<jint>(depths[i])
+    };
+  }
+  return err;
 }
 
 jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                           jthread thread,
                                           jint* owned_monitor_count_ptr,
                                           jobject** owned_monitors_ptr) {
-  if (owned_monitors_ptr == nullptr || owned_monitors_ptr == nullptr) {
+  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
     return ERR(NULL_POINTER);
   }
-  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    auto nbytes = sizeof(jobject) * visitor.monitors.size();
-    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
-    if (err != OK) {
-      return err;
-    }
-    *owned_monitor_count_ptr = visitor.monitors.size();
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  std::vector<art::GcRoot<art::mirror::Object>> mons;
+  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
     for (size_t i = 0; i < visitor.monitors.size(); i++) {
-      (*owned_monitors_ptr)[i] =
-          soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get());
+      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
     }
     return OK;
   };
-  return GetOwnedMonitorInfoCommon(thread, handle_fun);
+  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
+  if (err != OK) {
+    return err;
+  }
+  auto nbytes = sizeof(jobject) * mons.size();
+  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
+  if (err != OK) {
+    return err;
+  }
+  *owned_monitor_count_ptr = mons.size();
+  for (uint32_t i = 0; i < mons.size(); i++) {
+    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
+  }
+  return err;
 }
 
 jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 555c5a7..414139c 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -1077,7 +1077,7 @@
   };
   StopThreadClosure c(exc);
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  if (target->RequestSynchronousCheckpoint(&c)) {
     return OK;
   } else {
     // Something went wrong, probably the thread died.
@@ -1100,29 +1100,4 @@
   return OK;
 }
 
-class GcCriticalSectionClosure : public art::Closure {
- public:
-  explicit GcCriticalSectionClosure(art::Closure* wrapped) : wrapped_(wrapped) {}
-
-  void Run(art::Thread* self) OVERRIDE {
-    if (art::kIsDebugBuild) {
-      art::Locks::thread_list_lock_->AssertNotHeld(art::Thread::Current());
-    }
-    // This might block as it waits for any in-progress GCs to finish but this is fine since we
-    // released the Thread-list-lock prior to calling this in RequestSynchronousCheckpoint.
-    art::gc::ScopedGCCriticalSection sgccs(art::Thread::Current(),
-                                           art::gc::kGcCauseDebugger,
-                                           art::gc::kCollectorTypeDebugger);
-    wrapped_->Run(self);
-  }
-
- private:
-  art::Closure* wrapped_;
-};
-
-bool ThreadUtil::RequestGCSafeSynchronousCheckpoint(art::Thread* thr, art::Closure* function) {
-  GcCriticalSectionClosure gccsc(function);
-  return thr->RequestSynchronousCheckpoint(&gccsc);
-}
-
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index 341bffe..c6b6af1 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -134,16 +134,6 @@
     REQUIRES(!art::Locks::user_code_suspension_lock_,
              !art::Locks::thread_suspend_count_lock_);
 
-  // This will request a synchronous checkpoint in such a way as to prevent gc races if a local
-  // variable is taken from one thread's stack and placed in the stack of another thread.
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
-  // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
-  // execute the checkpoint for us if it is Runnable.
-  static bool RequestGCSafeSynchronousCheckpoint(art::Thread* thr, art::Closure* function)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      RELEASE(art::Locks::thread_list_lock_)
-      REQUIRES(!art::Locks::thread_suspend_count_lock_);
-
  private:
   // We need to make sure only one thread tries to suspend threads at a time so we can get the
   // 'suspend-only-once' behavior the spec requires. Internally, ART considers suspension to be a
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 4329a5a..8d3cf45 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -31,6 +31,9 @@
       condition_("GC barrier condition", lock_) {
 }
 
+template void Barrier::Increment<Barrier::kAllowHoldingLocks>(Thread* self, int delta);
+template void Barrier::Increment<Barrier::kDisallowHoldingLocks>(Thread* self, int delta);
+
 void Barrier::Pass(Thread* self) {
   MutexLock mu(self, lock_);
   SetCountLocked(self, count_ - 1);
@@ -45,6 +48,7 @@
   SetCountLocked(self, count);
 }
 
+template <Barrier::LockHandling locks>
 void Barrier::Increment(Thread* self, int delta) {
   MutexLock mu(self, lock_);
   SetCountLocked(self, count_ + delta);
@@ -57,7 +61,11 @@
   // be decremented to zero and a Broadcast will be made on the
   // condition variable, thus waking this up.
   while (count_ != 0) {
-    condition_.Wait(self);
+    if (locks == kAllowHoldingLocks) {
+      condition_.WaitHoldingLocks(self);
+    } else {
+      condition_.Wait(self);
+    }
   }
 }
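Barrier::Increment is now templated on a LockHandling policy so that callers holding locks can wait without tripping the held-locks checks (condition_.WaitHoldingLocks versus condition_.Wait). A self-contained sketch of the same shape with std::condition_variable, where the two instantiations would differ only in that debug check:

#include <condition_variable>
#include <mutex>

enum LockHandling { kAllowHoldingLocks, kDisallowHoldingLocks };

class Barrier {
 public:
  explicit Barrier(int count) : count_(count) {}

  // Decrement the count and wake waiters when it reaches zero.
  void Pass() {
    std::lock_guard<std::mutex> lk(lock_);
    if (--count_ == 0) {
      cv_.notify_all();
    }
  }

  // Increment the count by delta and wait until it drops back to zero.
  // In ART the policy picks WaitHoldingLocks vs Wait; the wait itself
  // is the same, so this sketch shares one body.
  template <LockHandling locks = kDisallowHoldingLocks>
  void Increment(int delta) {
    std::unique_lock<std::mutex> lk(lock_);
    count_ += delta;
    while (count_ != 0) {
      cv_.wait(lk);
    }
  }

 private:
  std::mutex lock_;
  std::condition_variable cv_;
  int count_;
};

// Explicit instantiations, mirroring the two added to barrier.cc above.
template void Barrier::Increment<kAllowHoldingLocks>(int);
template void Barrier::Increment<kDisallowHoldingLocks>(int);
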
 
diff --git a/runtime/barrier.h b/runtime/barrier.h
index d7c4661..8a38c4c 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -35,6 +35,11 @@
 // TODO: Maybe give this a better name.
 class Barrier {
  public:
+  enum LockHandling {
+    kAllowHoldingLocks,
+    kDisallowHoldingLocks,
+  };
+
   explicit Barrier(int count);
   virtual ~Barrier();
 
@@ -50,7 +55,9 @@
   // If these calls are made in that situation, the offending thread is likely to go back
   // to sleep, resulting in a deadlock.
 
-  // Increment the count by delta, wait on condition if count is non zero.
+  // Increment the count by delta, wait on condition if count is non-zero.  If LockHandling is
+  // kAllowHoldingLocks, we will not check that all locks are released when waiting.
+  template <Barrier::LockHandling locks = kDisallowHoldingLocks>
   void Increment(Thread* self, int delta) REQUIRES(!lock_);
 
   // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 04bb6ba..88075ba 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -69,18 +69,18 @@
     thread_pool.AddTask(self, new CheckWaitTask(&barrier, &count1, &count2));
   }
   thread_pool.StartWorkers(self);
-  while (count1.LoadRelaxed() != num_threads) {
+  while (count1.load(std::memory_order_relaxed) != num_threads) {
     timeout_barrier.Increment(self, 1, 100);  // sleep 100 msecs
   }
   // Count 2 should still be zero since no thread should have gone past the barrier.
-  EXPECT_EQ(0, count2.LoadRelaxed());
+  EXPECT_EQ(0, count2.load(std::memory_order_relaxed));
   // Perform one additional Wait(), allowing pool threads to proceed.
   barrier.Wait(self);
   // Wait for all the threads to finish.
   thread_pool.Wait(self, true, false);
   // Both counts should be equal to num_threads now.
-  EXPECT_EQ(count1.LoadRelaxed(), num_threads);
-  EXPECT_EQ(count2.LoadRelaxed(), num_threads);
+  EXPECT_EQ(count1.load(std::memory_order_relaxed), num_threads);
+  EXPECT_EQ(count2.load(std::memory_order_relaxed), num_threads);
   timeout_barrier.Init(self, 0);  // Reset to zero for destruction.
 }
 
@@ -124,7 +124,7 @@
   // Wait for all the tasks to complete using the barrier.
   barrier.Increment(self, expected_total_tasks);
   // The total number of completed tasks should be equal to expected_total_tasks.
-  EXPECT_EQ(count.LoadRelaxed(), expected_total_tasks);
+  EXPECT_EQ(count.load(std::memory_order_relaxed), expected_total_tasks);
 }
 
 }  // namespace art
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index d6dbab4..dfa14b9 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -161,7 +161,7 @@
 #if ART_USE_FUTEXES
   bool done = false;
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (LIKELY(cur_state >= 0)) {
       // Add as an extra reader.
       done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
@@ -185,7 +185,7 @@
 #if ART_USE_FUTEXES
   bool done = false;
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (LIKELY(cur_state > 0)) {
       // Reduce state by 1 and impose lock release load/store ordering.
      // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
@@ -193,8 +193,8 @@
       // a status bit into the state on contention.
       done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
       if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
-        if (num_pending_writers_.LoadRelaxed() > 0 ||
-            num_pending_readers_.LoadRelaxed() > 0) {
+        if (num_pending_writers_.load(std::memory_order_relaxed) > 0 ||
+            num_pending_readers_.load(std::memory_order_relaxed) > 0) {
           // Wake any exclusive waiters as there are now no readers.
           futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
         }
@@ -221,7 +221,7 @@
 }
 
 inline pid_t Mutex::GetExclusiveOwnerTid() const {
-  return exclusive_owner_.LoadRelaxed();
+  return exclusive_owner_.load(std::memory_order_relaxed);
 }
 
 inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
@@ -248,16 +248,16 @@
 
 inline pid_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
 #if ART_USE_FUTEXES
-  int32_t state = state_.LoadRelaxed();
+  int32_t state = state_.load(std::memory_order_relaxed);
   if (state == 0) {
     return 0;  // No owner.
   } else if (state > 0) {
     return -1;  // Shared.
   } else {
-    return exclusive_owner_.LoadRelaxed();
+    return exclusive_owner_.load(std::memory_order_relaxed);
   }
 #else
-  return exclusive_owner_.LoadRelaxed();
+  return exclusive_owner_.load(std::memory_order_relaxed);
 #endif
 }
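With the wrappers gone, the mutex fast paths spell out a common idiom: read the state with a relaxed load, then attempt a weak compare-and-set with acquire ordering, looping because a weak CAS may fail spuriously; only the successful CAS needs to order the critical section. A standalone sketch of the reader-lock fast path (it spins where ART would block on a futex, and the real unlock path uses a seq_cst CAS so it can also wake waiters):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> state{0};  // >= 0: reader count; -1: writer holds it.

void SharedLock() {
  bool done = false;
  do {
    int32_t cur_state = state.load(std::memory_order_relaxed);
    if (cur_state >= 0) {
      // Add an extra reader; acquire ordering makes the critical section
      // happen-after this CAS. A weak CAS may fail spuriously.
      done = state.compare_exchange_weak(cur_state, cur_state + 1,
                                         std::memory_order_acquire);
    }
    // Otherwise a writer holds the lock; ART parks on a futex here.
  } while (!done);
}

void SharedUnlock() {
  // Simplified: release ordering only. ReaderWriterMutex above uses a
  // seq_cst CAS so the last reader can also wake pending writers.
  state.fetch_sub(1, std::memory_order_release);
}
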
 
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index a1f30b6..73b4641 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -128,15 +128,15 @@
  public:
   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
-         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(0, mutex);
+         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
          ++i) {
       BackOff(i);
     }
   }
 
   ~ScopedAllMutexesLock() {
-    DCHECK_EQ(gAllMutexData->all_mutexes_guard.LoadRelaxed(), mutex_);
-    gAllMutexData->all_mutexes_guard.StoreRelease(0);
+    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
+    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
   }
 
  private:
@@ -147,15 +147,17 @@
  public:
   explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
-         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(0, mutex);
+         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
+                                                                                     mutex);
          ++i) {
       BackOff(i);
     }
   }
 
   ~ScopedExpectedMutexesOnWeakRefAccessLock() {
-    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.LoadRelaxed(), mutex_);
-    Locks::expected_mutexes_on_weak_ref_access_guard_.StoreRelease(0);
+    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
+              mutex_);
+    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
   }
 
  private:
@@ -293,7 +295,7 @@
 void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
   if (kLogLockContentions) {
     // Atomically add value to wait_time.
-    wait_time.FetchAndAddSequentiallyConsistent(value);
+    wait_time.fetch_add(value, std::memory_order_seq_cst);
   }
 }
 
@@ -306,19 +308,19 @@
     data->AddToWaitTime(nano_time_blocked);
     ContentionLogEntry* log = data->contention_log;
     // This code is intentionally racy as it is only used for diagnostics.
-    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
+    int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
     if (log[slot].blocked_tid == blocked_tid &&
         log[slot].owner_tid == blocked_tid) {
       ++log[slot].count;
     } else {
       uint32_t new_slot;
       do {
-        slot = data->cur_content_log_entry.LoadRelaxed();
+        slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
         new_slot = (slot + 1) % kContentionLogSize;
       } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
       log[new_slot].blocked_tid = blocked_tid;
       log[new_slot].owner_tid = owner_tid;
-      log[new_slot].count.StoreRelaxed(1);
+      log[new_slot].count.store(1, std::memory_order_relaxed);
     }
   }
 }
@@ -327,8 +329,8 @@
   if (kLogLockContentions) {
     const ContentionLogData* data = contention_log_data_;
     const ContentionLogEntry* log = data->contention_log;
-    uint64_t wait_time = data->wait_time.LoadRelaxed();
-    uint32_t contention_count = data->contention_count.LoadRelaxed();
+    uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
+    uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
     if (contention_count == 0) {
       os << "never contended";
     } else {
@@ -340,7 +342,7 @@
       for (size_t i = 0; i < kContentionLogSize; ++i) {
         uint64_t blocked_tid = log[i].blocked_tid;
         uint64_t owner_tid = log[i].owner_tid;
-        uint32_t count = log[i].count.LoadRelaxed();
+        uint32_t count = log[i].count.load(std::memory_order_relaxed);
         if (count > 0) {
           auto it = most_common_blocked.find(blocked_tid);
           if (it != most_common_blocked.end()) {
@@ -386,8 +388,8 @@
 Mutex::Mutex(const char* name, LockLevel level, bool recursive)
     : BaseMutex(name, level), exclusive_owner_(0), recursive_(recursive), recursion_count_(0) {
 #if ART_USE_FUTEXES
-  DCHECK_EQ(0, state_.LoadRelaxed());
-  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
+  DCHECK_EQ(0, state_.load(std::memory_order_relaxed));
+  DCHECK_EQ(0, num_contenders_.load(std::memory_order_relaxed));
 #else
   CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
 #endif
@@ -402,7 +404,7 @@
 Mutex::~Mutex() {
   bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
 #if ART_USE_FUTEXES
-  if (state_.LoadRelaxed() != 0) {
+  if (state_.load(std::memory_order_relaxed) != 0) {
     LOG(safe_to_call_abort ? FATAL : WARNING)
         << "destroying mutex with owner: " << GetExclusiveOwnerTid();
   } else {
@@ -410,7 +412,7 @@
       LOG(safe_to_call_abort ? FATAL : WARNING)
           << "unexpectedly found an owner on unlocked mutex " << name_;
     }
-    if (num_contenders_.LoadSequentiallyConsistent() != 0) {
+    if (num_contenders_.load(std::memory_order_seq_cst) != 0) {
       LOG(safe_to_call_abort ? FATAL : WARNING)
           << "unexpectedly found a contender on mutex " << name_;
     }
@@ -436,7 +438,7 @@
 #if ART_USE_FUTEXES
     bool done = false;
     do {
-      int32_t cur_state = state_.LoadRelaxed();
+      int32_t cur_state = state_.load(std::memory_order_relaxed);
       if (LIKELY(cur_state == 0)) {
         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
         done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
@@ -457,12 +459,12 @@
         num_contenders_--;
       }
     } while (!done);
-    DCHECK_EQ(state_.LoadRelaxed(), 1);
+    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
 #else
     CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
 #endif
     DCHECK_EQ(GetExclusiveOwnerTid(), 0);
-    exclusive_owner_.StoreRelaxed(SafeGetTid(self));
+    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
     RegisterAsLocked(self);
   }
   recursion_count_++;
@@ -482,7 +484,7 @@
 #if ART_USE_FUTEXES
     bool done = false;
     do {
-      int32_t cur_state = state_.LoadRelaxed();
+      int32_t cur_state = state_.load(std::memory_order_relaxed);
       if (cur_state == 0) {
         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
         done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
@@ -490,7 +492,7 @@
         return false;
       }
     } while (!done);
-    DCHECK_EQ(state_.LoadRelaxed(), 1);
+    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
 #else
     int result = pthread_mutex_trylock(&mutex_);
     if (result == EBUSY) {
@@ -502,7 +504,7 @@
     }
 #endif
     DCHECK_EQ(GetExclusiveOwnerTid(), 0);
-    exclusive_owner_.StoreRelaxed(SafeGetTid(self));
+    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
     RegisterAsLocked(self);
   }
   recursion_count_++;
@@ -539,10 +541,10 @@
 #if ART_USE_FUTEXES
     bool done = false;
     do {
-      int32_t cur_state = state_.LoadRelaxed();
+      int32_t cur_state = state_.load(std::memory_order_relaxed);
       if (LIKELY(cur_state == 1)) {
         // We're no longer the owner.
-        exclusive_owner_.StoreRelaxed(0);
+        exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
         // Change state to 0 and impose load/store ordering appropriate for lock release.
         // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
         // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
@@ -550,7 +552,7 @@
         done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, 0 /* new state */);
         if (LIKELY(done)) {  // Spurious fail?
           // Wake a contender.
-          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
+          if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
             futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
           }
         }
@@ -569,7 +571,7 @@
       }
     } while (!done);
 #else
-    exclusive_owner_.StoreRelaxed(0);
+    exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
     CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
 #endif
   }
@@ -593,7 +595,7 @@
 #if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
   DCHECK(should_respond_to_empty_checkpoint_request_);
-  if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
+  if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
     futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
   }
 #else
@@ -610,15 +612,15 @@
 #if !ART_USE_FUTEXES
   CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
 #endif
-  exclusive_owner_.StoreRelaxed(0);
+  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
 }
 
 ReaderWriterMutex::~ReaderWriterMutex() {
 #if ART_USE_FUTEXES
-  CHECK_EQ(state_.LoadRelaxed(), 0);
+  CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
   CHECK_EQ(GetExclusiveOwnerTid(), 0);
-  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
-  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
+  CHECK_EQ(num_pending_readers_.load(std::memory_order_relaxed), 0);
+  CHECK_EQ(num_pending_writers_.load(std::memory_order_relaxed), 0);
 #else
   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
   // may still be using locks.
@@ -637,7 +639,7 @@
 #if ART_USE_FUTEXES
   bool done = false;
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (LIKELY(cur_state == 0)) {
       // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
    done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
@@ -658,12 +660,12 @@
       --num_pending_writers_;
     }
   } while (!done);
-  DCHECK_EQ(state_.LoadRelaxed(), -1);
+  DCHECK_EQ(state_.load(std::memory_order_relaxed), -1);
 #else
   CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
 #endif
   DCHECK_EQ(GetExclusiveOwnerTid(), 0);
-  exclusive_owner_.StoreRelaxed(SafeGetTid(self));
+  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
   RegisterAsLocked(self);
   AssertExclusiveHeld(self);
 }
@@ -676,10 +678,10 @@
 #if ART_USE_FUTEXES
   bool done = false;
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (LIKELY(cur_state == -1)) {
       // We're no longer the owner.
-      exclusive_owner_.StoreRelaxed(0);
+      exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
       // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
       // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
@@ -687,8 +689,8 @@
      done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
       if (LIKELY(done)) {  // Weak CAS may fail spuriously.
         // Wake any waiters.
-        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
-                     num_pending_writers_.LoadRelaxed() > 0)) {
+        if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 ||
+                     num_pending_writers_.load(std::memory_order_relaxed) > 0)) {
           futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
         }
       }
@@ -697,7 +699,7 @@
     }
   } while (!done);
 #else
-  exclusive_owner_.StoreRelaxed(0);
+  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
   CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
 #endif
 }
@@ -710,7 +712,7 @@
   timespec end_abs_ts;
   InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (cur_state == 0) {
       // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
       done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
@@ -753,7 +755,7 @@
     PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
   }
 #endif
-  exclusive_owner_.StoreRelaxed(SafeGetTid(self));
+  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
   RegisterAsLocked(self);
   AssertSharedHeld(self);
   return true;
@@ -782,7 +784,7 @@
 #if ART_USE_FUTEXES
   bool done = false;
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (cur_state >= 0) {
       // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
       done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
@@ -822,9 +824,9 @@
       << " level=" << static_cast<int>(level_)
       << " owner=" << GetExclusiveOwnerTid()
 #if ART_USE_FUTEXES
-      << " state=" << state_.LoadSequentiallyConsistent()
-      << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent()
-      << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent()
+      << " state=" << state_.load(std::memory_order_seq_cst)
+      << " num_pending_writers=" << num_pending_writers_.load(std::memory_order_seq_cst)
+      << " num_pending_readers=" << num_pending_readers_.load(std::memory_order_seq_cst)
 #endif
       << " ";
   DumpContention(os);
@@ -844,8 +846,8 @@
 #if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
   DCHECK(should_respond_to_empty_checkpoint_request_);
-  if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
-               num_pending_writers_.LoadRelaxed() > 0)) {
+  if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 ||
+               num_pending_writers_.load(std::memory_order_relaxed) > 0)) {
     futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
   }
 #else
@@ -856,7 +858,7 @@
 ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
     : name_(name), guard_(guard) {
 #if ART_USE_FUTEXES
-  DCHECK_EQ(0, sequence_.LoadRelaxed());
+  DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed));
   num_waiters_ = 0;
 #else
   pthread_condattr_t cond_attrs;
@@ -899,7 +901,7 @@
     sequence_++;  // Indicate the broadcast occurred.
     bool done = false;
     do {
-      int32_t cur_sequence = sequence_.LoadRelaxed();
+      int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
      // Requeue waiters onto the mutex. The waiter keeps the mutex's contender count raised,
      // ensuring that mutex unlocks will awaken the requeued waiter thread.
       done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
@@ -948,7 +950,7 @@
   // Ensure the Mutex is contended so that requeued threads are awoken.
   guard_.num_contenders_++;
   guard_.recursion_count_ = 1;
-  int32_t cur_sequence = sequence_.LoadRelaxed();
+  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
   guard_.ExclusiveUnlock(self);
   if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
     // Futex failed, check it is an expected error.
@@ -974,14 +976,14 @@
   CHECK_GE(num_waiters_, 0);
   num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
-  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
+  CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0);
   guard_.num_contenders_--;
 #else
   pid_t old_owner = guard_.GetExclusiveOwnerTid();
-  guard_.exclusive_owner_.StoreRelaxed(0);
+  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
   guard_.recursion_count_ = 0;
   CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
-  guard_.exclusive_owner_.StoreRelaxed(old_owner);
+  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
 #endif
   guard_.recursion_count_ = old_recursion_count;
 }
@@ -999,7 +1001,7 @@
   // Ensure the Mutex is contended so that requeued threads are awoken.
   guard_.num_contenders_++;
   guard_.recursion_count_ = 1;
-  int32_t cur_sequence = sequence_.LoadRelaxed();
+  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
   guard_.ExclusiveUnlock(self);
   if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
     if (errno == ETIMEDOUT) {
@@ -1015,7 +1017,7 @@
   CHECK_GE(num_waiters_, 0);
   num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
-  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
+  CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0);
   guard_.num_contenders_--;
 #else
 #if !defined(__APPLE__)
@@ -1024,7 +1026,7 @@
   int clock = CLOCK_REALTIME;
 #endif
   pid_t old_owner = guard_.GetExclusiveOwnerTid();
-  guard_.exclusive_owner_.StoreRelaxed(0);
+  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
   guard_.recursion_count_ = 0;
   timespec ts;
   InitTimeSpec(true, clock, ms, ns, &ts);
@@ -1035,7 +1037,7 @@
     errno = rc;
     PLOG(FATAL) << "TimedWait failed for " << name_;
   }
-  guard_.exclusive_owner_.StoreRelaxed(old_owner);
+  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
 #endif
   guard_.recursion_count_ = old_recursion_count;
   return timed_out;
@@ -1254,12 +1256,13 @@
 }
 
 void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
-  safe_to_call_abort_callback.StoreRelease(safe_to_call_abort_cb);
+  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
 }
 
 // Helper to allow checking shutdown while ignoring locking requirements.
 bool Locks::IsSafeToCallAbortRacy() {
-  Locks::ClientCallback* safe_to_call_abort_cb = safe_to_call_abort_callback.LoadAcquire();
+  Locks::ClientCallback* safe_to_call_abort_cb =
+      safe_to_call_abort_callback.load(std::memory_order_acquire);
   return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
 }
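
The futex fast path above is easier to read outside the diff. Below is a minimal sketch of the acquire/release protocol, assuming Linux futexes and a lock-free std::atomic<int32_t>; it is illustrative only, since ART's Mutex additionally tracks the owner tid, recursion, and a contender count, and only issues FUTEX_WAKE when contenders are recorded.

    #include <atomic>
    #include <cstdint>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Sketch only: relies on std::atomic<int32_t> being address-compatible with
    // int32_t for the futex syscall, which holds on Linux in practice.
    class SketchMutex {
     public:
      void Lock() {
        int32_t cur = 0;
        // Acquire ordering on success makes the critical section visible.
        while (!state_.compare_exchange_weak(cur, 1, std::memory_order_acquire,
                                             std::memory_order_relaxed)) {
          if (cur != 0) {
            // Contended: sleep until state_ stops being 1.
            syscall(SYS_futex, reinterpret_cast<int32_t*>(&state_),
                    FUTEX_WAIT_PRIVATE, 1, nullptr, nullptr, 0);
          }
          cur = 0;  // Retry the CAS expecting the unlocked state.
        }
      }

      void Unlock() {
        // Release pairs with the acquire above.
        state_.store(0, std::memory_order_release);
        // Wake one sleeper; ART gates this on num_contenders_ > 0.
        syscall(SYS_futex, reinterpret_cast<int32_t*>(&state_),
                FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
      }

     private:
      std::atomic<int32_t> state_{0};  // 0 = unlocked, 1 = locked.
    };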
 
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 4376617..b0eb23d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -224,7 +224,7 @@
  public:
   bool HasEverContended() const {
     if (kLogLockContentions) {
-      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
+      return contention_log_data_->contention_count.load(std::memory_order_seq_cst) > 0;
     }
     return false;
   }
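
For reference, nearly all of the atomics churn in this change is the same mechanical rename from ART's Atomic<T> helpers to std::atomic member functions with explicit std::memory_order arguments. A sketch of the correspondence on a free-standing counter (the variable is illustrative):

    #include <atomic>
    #include <cstdint>

    std::atomic<int32_t> counter{0};

    void MappingSketch() {
      // LoadRelaxed()                 -> load(std::memory_order_relaxed)
      int32_t v = counter.load(std::memory_order_relaxed);
      // LoadSequentiallyConsistent()  -> load(std::memory_order_seq_cst)
      v = counter.load(std::memory_order_seq_cst);
      // StoreRelaxed(x)               -> store(x, std::memory_order_relaxed)
      counter.store(v, std::memory_order_relaxed);
      // StoreRelease(x)               -> store(x, std::memory_order_release)
      counter.store(v, std::memory_order_release);
      // FetchAndAddRelaxed(n)         -> fetch_add(n, std::memory_order_relaxed)
      counter.fetch_add(1, std::memory_order_relaxed);
      // FetchAndSubSequentiallyConsistent(n) -> fetch_sub(n); seq_cst is the default.
      counter.fetch_sub(1);
      // ExchangeRelaxed(x)            -> exchange(x, std::memory_order_relaxed)
      counter.exchange(0, std::memory_order_relaxed);
    }

A handful of call sites in heap.cc rely on the seq_cst default instead of spelling it out; the semantics are unchanged either way.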
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8a29ff3..8b64b8d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1340,7 +1340,7 @@
       }
     }
   }
-  {
+  if (ClassLinker::kAppImageMayContainStrings) {
    // Fixup of all the literal strings happens in app images, where they are supposed to be interned.
     ScopedTrace timing("Fixup String Intern in image and dex_cache");
     const auto& image_header = space->GetImageHeader();
@@ -5863,14 +5863,6 @@
       // smaller as we go on.
       uint32_t hash_index = hash_table.FindAndRemove(&super_method_name_comparator);
       if (hash_index != hash_table.GetNotFoundIndex()) {
-        // Run a check whether we are going to override a method which is hidden
-        // to `klass`, but ignore the result as we only warn at the moment.
-        // We cannot do this test earlier because we need to establish that
-        // a method is being overridden first. ShouldBlockAccessToMember would
-        // print bogus warnings otherwise.
-        hiddenapi::ShouldBlockAccessToMember(
-            super_method, klass->GetClassLoader(), hiddenapi::kOverride);
-
         ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(
             hash_index, image_pointer_size_);
         if (super_method->IsFinal()) {
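
Both the string-fixup block above and the can_init_static_fields condition in compiler_driver.cc are now guarded by kAppImageMayContainStrings, so with the flag at false the compiler folds the guarded work away. A minimal sketch of the pattern, assuming only the constant added below in class_linker.h:

    // Sketch of compile-time gating with a constexpr flag: the condition is
    // constant-folded, so the guarded block is dead code while the flag is
    // false and can be re-enabled by flipping a single constant.
    struct GatingSketch {
      static constexpr bool kAppImageMayContainStrings = false;

      void FixupStringsIfEnabled() {
        if (kAppImageMayContainStrings) {
          // Intern-table fixup of app-image strings would run here.
        }
      }
    };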
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d05e78f..2f6b754 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -152,6 +152,8 @@
     kClassRootsMax,
   };
 
+  static constexpr bool kAppImageMayContainStrings = false;
+
   explicit ClassLinker(InternTable* intern_table);
   virtual ~ClassLinker();
 
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index c59e2e8..5da5470 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -88,7 +88,7 @@
 
 template<ReadBarrierOption kReadBarrierOption>
 inline mirror::Class* ClassTable::TableSlot::Read() const {
-  const uint32_t before = data_.LoadRelaxed();
+  const uint32_t before = data_.load(std::memory_order_relaxed);
   ObjPtr<mirror::Class> const before_ptr(ExtractPtr(before));
   ObjPtr<mirror::Class> const after_ptr(
       GcRoot<mirror::Class>(before_ptr).Read<kReadBarrierOption>());
@@ -102,7 +102,7 @@
 
 template<typename Visitor>
 inline void ClassTable::TableSlot::VisitRoot(const Visitor& visitor) const {
-  const uint32_t before = data_.LoadRelaxed();
+  const uint32_t before = data_.load(std::memory_order_relaxed);
   ObjPtr<mirror::Class> before_ptr(ExtractPtr(before));
   GcRoot<mirror::Class> root(before_ptr);
   visitor.VisitRoot(root.AddressWithoutBarrier());
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 3e90fe2..0b08041 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -53,14 +53,14 @@
    public:
     TableSlot() : data_(0u) {}
 
-    TableSlot(const TableSlot& copy) : data_(copy.data_.LoadRelaxed()) {}
+    TableSlot(const TableSlot& copy) : data_(copy.data_.load(std::memory_order_relaxed)) {}
 
     explicit TableSlot(ObjPtr<mirror::Class> klass);
 
     TableSlot(ObjPtr<mirror::Class> klass, uint32_t descriptor_hash);
 
     TableSlot& operator=(const TableSlot& copy) {
-      data_.StoreRelaxed(copy.data_.LoadRelaxed());
+      data_.store(copy.data_.load(std::memory_order_relaxed), std::memory_order_relaxed);
       return *this;
     }
 
@@ -69,7 +69,7 @@
     }
 
     uint32_t Hash() const {
-      return MaskHash(data_.LoadRelaxed());
+      return MaskHash(data_.load(std::memory_order_relaxed));
     }
 
     static uint32_t MaskHash(uint32_t hash) {
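
TableSlot spells out its copy constructor and assignment operator because std::atomic deletes both; the value has to round-trip through explicit loads and stores. A self-contained sketch of the idiom (SlotSketch is illustrative), with relaxed ordering as in the code above since a slot is not published mid-copy:

    #include <atomic>
    #include <cstdint>

    class SlotSketch {
     public:
      SlotSketch() : data_(0u) {}
      // std::atomic is not copyable, so copy via an explicit relaxed load.
      SlotSketch(const SlotSketch& other)
          : data_(other.data_.load(std::memory_order_relaxed)) {}
      SlotSketch& operator=(const SlotSketch& other) {
        data_.store(other.data_.load(std::memory_order_relaxed),
                    std::memory_order_relaxed);
        return *this;
      }

     private:
      std::atomic<uint32_t> data_;
    };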
diff --git a/runtime/dex/art_dex_file_loader.cc b/runtime/dex/art_dex_file_loader.cc
index c456764..9802c69 100644
--- a/runtime/dex/art_dex_file_loader.cc
+++ b/runtime/dex/art_dex_file_loader.cc
@@ -205,6 +205,12 @@
                                                  error_msg,
                                                  std::make_unique<MemMapContainer>(std::move(map)),
                                                  /*verify_result*/ nullptr);
+  // Opening CompactDex is only supported from vdex files.
+  if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
+    *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
+                              location.c_str());
+    return nullptr;
+  }
   return dex_file;
 }
 
@@ -329,6 +335,12 @@
                                                  std::make_unique<MemMapContainer>(std::move(map)),
                                                  /*verify_result*/ nullptr);
 
+  // Opening CompactDex is only supported from vdex files.
+  if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
+    *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
+                              location.c_str());
+    return nullptr;
+  }
   return dex_file;
 }
 
@@ -397,6 +409,11 @@
                                                  error_msg,
                                                  std::make_unique<MemMapContainer>(std::move(map)),
                                                  &verify_result);
+  // Opening CompactDex is only supported from vdex files.
+  if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
+    *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
+                              location.c_str());
+    return nullptr;
+  }
   if (dex_file == nullptr) {
     if (verify_result == VerifyResult::kVerifyNotAttempted) {
       *error_code = ZipOpenErrorCode::kDexFileError;
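
The same reject-after-open check now appears in all three Open paths of this file. A condensed sketch of the shared shape, using the types already visible above (OpenRejectingCompactDex is a hypothetical helper; the real loader inlines the check per overload):

    // Hypothetical helper illustrating the pattern: open generically, then
    // refuse CompactDex outside of vdex files with a uniform error message.
    std::unique_ptr<const DexFile> OpenRejectingCompactDex(
        std::unique_ptr<const DexFile> dex_file,
        const std::string& location,
        std::string* error_msg) {
      if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
        *error_msg = StringPrintf(
            "Opening CompactDex file '%s' is only supported from vdex files",
            location.c_str());
        return nullptr;
      }
      return dex_file;
    }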
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7a0850d..2284100 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2338,10 +2338,6 @@
   ArtMethod* called = *sp;
   DCHECK(called->IsNative()) << called->PrettyMethod(true);
   Runtime* runtime = Runtime::Current();
-  jit::Jit* jit = runtime->GetJit();
-  if (jit != nullptr) {
-    jit->AddSamples(self, called, 1u, /*with_backedges*/ false);
-  }
   uint32_t shorty_len = 0;
   const char* shorty = called->GetShorty(&shorty_len);
   bool critical_native = called->IsCriticalNative();
@@ -2367,6 +2363,12 @@
 
   self->VerifyStack();
 
+  // We can now walk the stack if needed by JIT GC from MethodEntered() for JIT-on-first-use.
+  jit::Jit* jit = runtime->GetJit();
+  if (jit != nullptr) {
+    jit->MethodEntered(self, called);
+  }
+
   uint32_t cookie;
   uint32_t* sp32;
   // Skip calling JniMethodStart for @CriticalNative.
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 6b103bf..7a4bd87 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -74,8 +74,8 @@
   void Reset() {
     DCHECK(mem_map_.get() != nullptr);
     DCHECK(begin_ != nullptr);
-    front_index_.StoreRelaxed(0);
-    back_index_.StoreRelaxed(0);
+    front_index_.store(0, std::memory_order_relaxed);
+    back_index_.store(0, std::memory_order_relaxed);
     debug_is_sorted_ = true;
     mem_map_->MadviseDontNeedAndZero();
   }
@@ -103,7 +103,7 @@
     int32_t index;
     int32_t new_index;
     do {
-      index = back_index_.LoadRelaxed();
+      index = back_index_.load(std::memory_order_relaxed);
       new_index = index + num_slots;
       if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
         // Stack overflow.
@@ -134,31 +134,32 @@
     if (kIsDebugBuild) {
       debug_is_sorted_ = false;
     }
-    const int32_t index = back_index_.LoadRelaxed();
+    const int32_t index = back_index_.load(std::memory_order_relaxed);
     DCHECK_LT(static_cast<size_t>(index), growth_limit_);
-    back_index_.StoreRelaxed(index + 1);
+    back_index_.store(index + 1, std::memory_order_relaxed);
     begin_[index].Assign(value);
   }
 
   T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
+    DCHECK_GT(back_index_.load(std::memory_order_relaxed),
+              front_index_.load(std::memory_order_relaxed));
    // Decrement the back index non-atomically.
-    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
-    return begin_[back_index_.LoadRelaxed()].AsMirrorPtr();
+    back_index_.store(back_index_.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
+    return begin_[back_index_.load(std::memory_order_relaxed)].AsMirrorPtr();
   }
 
   // Take an item from the front of the stack.
   T PopFront() {
-    int32_t index = front_index_.LoadRelaxed();
-    DCHECK_LT(index, back_index_.LoadRelaxed());
-    front_index_.StoreRelaxed(index + 1);
+    int32_t index = front_index_.load(std::memory_order_relaxed);
+    DCHECK_LT(index, back_index_.load(std::memory_order_relaxed));
+    front_index_.store(index + 1, std::memory_order_relaxed);
     return begin_[index];
   }
 
   // Pop a number of elements.
   void PopBackCount(int32_t n) {
     DCHECK_GE(Size(), static_cast<size_t>(n));
-    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - n);
+    back_index_.store(back_index_.load(std::memory_order_relaxed) - n, std::memory_order_relaxed);
   }
 
   bool IsEmpty() const {
@@ -170,15 +171,17 @@
   }
 
   size_t Size() const {
-    DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
-    return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
+    DCHECK_LE(front_index_.load(std::memory_order_relaxed),
+              back_index_.load(std::memory_order_relaxed));
+    return
+        back_index_.load(std::memory_order_relaxed) - front_index_.load(std::memory_order_relaxed);
   }
 
   StackReference<T>* Begin() const {
-    return begin_ + front_index_.LoadRelaxed();
+    return begin_ + front_index_.load(std::memory_order_relaxed);
   }
   StackReference<T>* End() const {
-    return begin_ + back_index_.LoadRelaxed();
+    return begin_ + back_index_.load(std::memory_order_relaxed);
   }
 
   size_t Capacity() const {
@@ -193,11 +196,11 @@
   }
 
   void Sort() {
-    int32_t start_back_index = back_index_.LoadRelaxed();
-    int32_t start_front_index = front_index_.LoadRelaxed();
+    int32_t start_back_index = back_index_.load(std::memory_order_relaxed);
+    int32_t start_front_index = front_index_.load(std::memory_order_relaxed);
     std::sort(Begin(), End(), ObjectComparator());
-    CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
-    CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
+    CHECK_EQ(start_back_index, back_index_.load(std::memory_order_relaxed));
+    CHECK_EQ(start_front_index, front_index_.load(std::memory_order_relaxed));
     if (kIsDebugBuild) {
       debug_is_sorted_ = true;
     }
@@ -236,7 +239,7 @@
     }
     int32_t index;
     do {
-      index = back_index_.LoadRelaxed();
+      index = back_index_.load(std::memory_order_relaxed);
       if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
         // Stack overflow.
         return false;
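
The reservation loop above is a standard lock-free bump pointer: relaxed loads are sufficient because the CAS itself serializes competing pushers, and a failed CAS simply retries with a freshly observed index. A standalone sketch (the function name and signature are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Reserve num_slots entries at the back of the stack; on success *out_start
    // is the first reserved index and slots [*out_start, *out_start + num_slots)
    // belong to the caller.
    bool AtomicBumpBack(std::atomic<int32_t>& back_index,
                        int32_t num_slots,
                        size_t growth_limit,
                        int32_t* out_start) {
      int32_t index;
      int32_t new_index;
      do {
        index = back_index.load(std::memory_order_relaxed);
        new_index = index + num_slots;
        if (static_cast<size_t>(new_index) >= growth_limit) {
          return false;  // Stack overflow: caller must resize or fall back.
        }
      } while (!back_index.compare_exchange_weak(index, new_index,
                                                 std::memory_order_relaxed));
      *out_start = index;
      return true;
    }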
diff --git a/runtime/gc/accounting/bitmap-inl.h b/runtime/gc/accounting/bitmap-inl.h
index a71b212..a4273e5 100644
--- a/runtime/gc/accounting/bitmap-inl.h
+++ b/runtime/gc/accounting/bitmap-inl.h
@@ -37,7 +37,7 @@
   auto* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[word_index]);
   uintptr_t old_word;
   do {
-    old_word = atomic_entry->LoadRelaxed();
+    old_word = atomic_entry->load(std::memory_order_relaxed);
     // Fast path: The bit is already set.
     if ((old_word & word_mask) != 0) {
       DCHECK(TestBit(bit_index));
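
SetBit above is the classic atomic test-and-set on a word-sized bitmap entry: bail out on the fast path if the bit is already set, otherwise CAS the widened word back in. A standalone sketch with relaxed ordering as in the surrounding code (AtomicSetBit is illustrative):

    #include <atomic>
    #include <cstdint>

    // Returns the previous value of the bit selected by mask.
    bool AtomicSetBit(std::atomic<uintptr_t>& word, uintptr_t mask) {
      uintptr_t old_word;
      do {
        old_word = word.load(std::memory_order_relaxed);
        if ((old_word & mask) != 0) {
          return true;  // Fast path: the bit is already set.
        }
      } while (!word.compare_exchange_weak(old_word, old_word | mask,
                                           std::memory_order_relaxed));
      return false;
    }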
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 14f5d0e..d9c0418 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -43,7 +43,7 @@
   Atomic<uintptr_t>* word_atomic = reinterpret_cast<Atomic<uintptr_t>*>(address);
 
  // The containing word, with the byte we are trying to CAS cleared.
-  const uintptr_t cur_word = word_atomic->LoadRelaxed() &
+  const uintptr_t cur_word = word_atomic->load(std::memory_order_relaxed) &
       ~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
   const uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
   const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
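
The card table emulates a byte-wide CAS with a word-wide one: clear the target byte lane, then splice the expected and desired bytes back into the word. A sketch under the same assumptions as the code above, namely that the caller has already located the containing word and computed shift_in_bits (CasByteInWord is illustrative):

    #include <atomic>
    #include <cstdint>

    bool CasByteInWord(std::atomic<uintptr_t>* word_atomic,
                       unsigned shift_in_bits,
                       uint8_t old_value,
                       uint8_t new_value) {
      // The containing word, with the target byte lane cleared out.
      const uintptr_t cur_word = word_atomic->load(std::memory_order_relaxed) &
          ~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
      uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
      const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
      // Fails if any byte in the word changed concurrently; the caller retries.
      return word_atomic->compare_exchange_strong(old_word, new_word,
                                                  std::memory_order_relaxed);
    }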
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 384e3c2..d460e00 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -41,7 +41,7 @@
   DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
   uintptr_t old_word;
   do {
-    old_word = atomic_entry->LoadRelaxed();
+    old_word = atomic_entry->load(std::memory_order_relaxed);
     // Fast path: The bit is already set.
     if ((old_word & mask) != 0) {
       DCHECK(Test(obj));
@@ -59,7 +59,8 @@
   DCHECK(bitmap_begin_ != nullptr);
   DCHECK_GE(addr, heap_begin_);
   const uintptr_t offset = addr - heap_begin_;
-  return (bitmap_begin_[OffsetToIndex(offset)].LoadRelaxed() & OffsetToMask(offset)) != 0;
+  size_t index = OffsetToIndex(offset);
+  return (bitmap_begin_[index].load(std::memory_order_relaxed) & OffsetToMask(offset)) != 0;
 }
 
 template<size_t kAlignment>
@@ -119,7 +120,7 @@
 
     // Traverse the middle, full part.
     for (size_t i = index_start + 1; i < index_end; ++i) {
-      uintptr_t w = bitmap_begin_[i].LoadRelaxed();
+      uintptr_t w = bitmap_begin_[i].load(std::memory_order_relaxed);
       if (w != 0) {
         const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
         // Iterate on the bits set in word `w`, from the least to the most significant bit.
@@ -168,7 +169,7 @@
   uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
   Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
   for (uintptr_t i = 0; i <= end; ++i) {
-    uintptr_t w = bitmap_begin[i].LoadRelaxed();
+    uintptr_t w = bitmap_begin[i].load(std::memory_order_relaxed);
     if (w != 0) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       do {
@@ -192,7 +193,7 @@
   const uintptr_t mask = OffsetToMask(offset);
   DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
   Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
-  uintptr_t old_word = atomic_entry->LoadRelaxed();
+  uintptr_t old_word = atomic_entry->load(std::memory_order_relaxed);
   if (kSetBit) {
    // Check the bit before setting the word in case we are trying to mark a read-only bitmap
    // like an image space bitmap. This bitmap is mapped as read-only and will fault if we
@@ -200,10 +201,10 @@
     // occur if we check before setting the bit. This also prevents dirty pages that would
    // occur if the bitmap was read-write and we did not check the bit.
     if ((old_word & mask) == 0) {
-      atomic_entry->StoreRelaxed(old_word | mask);
+      atomic_entry->store(old_word | mask, std::memory_order_relaxed);
     }
   } else {
-    atomic_entry->StoreRelaxed(old_word & ~mask);
+    atomic_entry->store(old_word & ~mask, std::memory_order_relaxed);
   }
   DCHECK_EQ(Test(obj), kSetBit);
   return (old_word & mask) != 0;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 0247564..d84288f 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -145,7 +145,7 @@
   Atomic<uintptr_t>* const src = source_bitmap->Begin();
   Atomic<uintptr_t>* const dest = Begin();
   for (size_t i = 0; i < count; ++i) {
-    dest[i].StoreRelaxed(src[i].LoadRelaxed());
+    dest[i].store(src[i].load(std::memory_order_relaxed), std::memory_order_relaxed);
   }
 }
 
@@ -184,7 +184,8 @@
   Atomic<uintptr_t>* live = live_bitmap.bitmap_begin_;
   Atomic<uintptr_t>* mark = mark_bitmap.bitmap_begin_;
   for (size_t i = start; i <= end; i++) {
-    uintptr_t garbage = live[i].LoadRelaxed() & ~mark[i].LoadRelaxed();
+    uintptr_t garbage =
+        live[i].load(std::memory_order_relaxed) & ~mark[i].load(std::memory_order_relaxed);
     if (UNLIKELY(garbage != 0)) {
       uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
       do {
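
Per word, the sweep computes garbage = live & ~mark, i.e. objects that were live last cycle but are unmarked in this one, then visits the set bits from least to most significant. A sketch of that inner walk (WalkGarbageWord and its parameters are illustrative; alignment stands in for the bitmap's bytes-per-bit granularity):

    #include <cstddef>
    #include <cstdint>

    template <typename Visitor>
    void WalkGarbageWord(uintptr_t garbage, uintptr_t ptr_base, size_t alignment,
                         Visitor&& visit) {
      while (garbage != 0) {
        const unsigned shift = __builtin_ctzll(garbage);  // Lowest set bit.
        visit(ptr_base + shift * alignment);              // Dead object address.
        garbage ^= static_cast<uintptr_t>(1) << shift;    // Clear and continue.
      }
    }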
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 56983be..6e345fb 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -78,13 +78,13 @@
     if (kIsDebugBuild) {
       if (Thread::Current() == thread_running_gc_) {
         DCHECK(!kGrayImmuneObject ||
-               updated_all_immune_objects_.LoadRelaxed() ||
+               updated_all_immune_objects_.load(std::memory_order_relaxed) ||
                gc_grays_immune_objects_);
       } else {
         DCHECK(kGrayImmuneObject);
       }
     }
-    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
+    if (!kGrayImmuneObject || updated_all_immune_objects_.load(std::memory_order_relaxed)) {
       return ref;
     }
     // This may or may not succeed, which is ok because the object may already be gray.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b10c504..bb5167f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -291,14 +291,14 @@
   rb_mark_bit_stack_full_ = false;
   mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
   if (measure_read_barrier_slow_path_) {
-    rb_slow_path_ns_.StoreRelaxed(0);
-    rb_slow_path_count_.StoreRelaxed(0);
-    rb_slow_path_count_gc_.StoreRelaxed(0);
+    rb_slow_path_ns_.store(0, std::memory_order_relaxed);
+    rb_slow_path_count_.store(0, std::memory_order_relaxed);
+    rb_slow_path_count_gc_.store(0, std::memory_order_relaxed);
   }
 
   immune_spaces_.Reset();
-  bytes_moved_.StoreRelaxed(0);
-  objects_moved_.StoreRelaxed(0);
+  bytes_moved_.store(0, std::memory_order_relaxed);
+  objects_moved_.store(0, std::memory_order_relaxed);
   GcCause gc_cause = GetCurrentIteration()->GetGcCause();
   if (gc_cause == kGcCauseExplicit ||
       gc_cause == kGcCauseCollectorTransition ||
@@ -308,7 +308,7 @@
     force_evacuate_all_ = false;
   }
   if (kUseBakerReadBarrier) {
-    updated_all_immune_objects_.StoreRelaxed(false);
+    updated_all_immune_objects_.store(false, std::memory_order_relaxed);
     // GC may gray immune objects in the thread flip.
     gc_grays_immune_objects_ = true;
     if (kIsDebugBuild) {
@@ -350,7 +350,7 @@
         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
         reinterpret_cast<Atomic<size_t>*>(
             &concurrent_copying_->from_space_num_objects_at_first_pause_)->
-                FetchAndAddSequentiallyConsistent(thread_local_objects);
+                fetch_add(thread_local_objects, std::memory_order_seq_cst);
       } else {
         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
       }
@@ -430,7 +430,8 @@
       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
     }
     cc->is_marking_ = true;
-    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
+    cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal,
+                               std::memory_order_relaxed);
     if (kIsDebugBuild) {
       cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
     }
@@ -728,7 +729,7 @@
   }
   // Since all of the objects that may point to other spaces are gray, we can avoid all the read
   // barriers in the immune spaces.
-  updated_all_immune_objects_.StoreRelaxed(true);
+  updated_all_immune_objects_.store(true, std::memory_order_relaxed);
 }
 
 void ConcurrentCopying::SwapStacks() {
@@ -816,7 +817,7 @@
   if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing mutators
    // to access immune objects without graying them first.
-    updated_all_immune_objects_.StoreRelease(true);
+    updated_all_immune_objects_.store(true, std::memory_order_release);
     // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
     // the above loop because we would incorrectly disable the read barrier by whitening an object
     // which may point to an unscanned, white object, breaking the to-space invariant.
@@ -1018,8 +1019,8 @@
     heap_->rb_table_->ClearAll();
     DCHECK(heap_->rb_table_->IsAllCleared());
   }
-  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
-  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
+  is_mark_stack_push_disallowed_.store(1, std::memory_order_seq_cst);
+  mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst);
 }
 
 void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
@@ -1069,11 +1070,11 @@
 }
 
 void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
-  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
+  CHECK_EQ(is_mark_stack_push_disallowed_.load(std::memory_order_relaxed), 0)
       << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
   Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
   CHECK(thread_running_gc_ != nullptr);
-  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
+  MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
   if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
     if (LIKELY(self == thread_running_gc_)) {
       // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
@@ -1412,7 +1413,7 @@
   CHECK(self == thread_running_gc_);
   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   size_t count = 0;
-  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
+  MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
   if (mark_stack_mode == kMarkStackModeThreadLocal) {
     // Process the thread-local mark stacks and the GC mark stack.
     count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
@@ -1597,10 +1598,10 @@
   CHECK(thread_running_gc_ != nullptr);
   CHECK_EQ(self, thread_running_gc_);
   CHECK(self->GetThreadLocalMarkStack() == nullptr);
-  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
+  MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
            static_cast<uint32_t>(kMarkStackModeThreadLocal));
-  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
+  mark_stack_mode_.store(kMarkStackModeShared, std::memory_order_relaxed);
   DisableWeakRefAccessCallback dwrac(this);
   // Process the thread local mark stacks one last time after switching to the shared mark stack
   // mode and disable weak ref accesses.
@@ -1615,10 +1616,10 @@
   CHECK(thread_running_gc_ != nullptr);
   CHECK_EQ(self, thread_running_gc_);
   CHECK(self->GetThreadLocalMarkStack() == nullptr);
-  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
+  MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
            static_cast<uint32_t>(kMarkStackModeShared));
-  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
+  mark_stack_mode_.store(kMarkStackModeGcExclusive, std::memory_order_relaxed);
   QuasiAtomic::ThreadFenceForConstructor();
   if (kVerboseMode) {
     LOG(INFO) << "Switched to GC exclusive mark stack mode";
@@ -1630,7 +1631,7 @@
   CHECK(thread_running_gc_ != nullptr);
   CHECK_EQ(self, thread_running_gc_);
   CHECK(self->GetThreadLocalMarkStack() == nullptr);
-  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
+  MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
   if (mark_stack_mode == kMarkStackModeThreadLocal) {
     // Thread-local mark stack mode.
     RevokeThreadLocalMarkStacks(false, nullptr);
@@ -1738,9 +1739,9 @@
     }
     IssueEmptyCheckpoint();
     // Disable the check.
-    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
+    is_mark_stack_push_disallowed_.store(0, std::memory_order_seq_cst);
     if (kUseBakerReadBarrier) {
-      updated_all_immune_objects_.StoreSequentiallyConsistent(false);
+      updated_all_immune_objects_.store(false, std::memory_order_seq_cst);
     }
     CheckEmptyMarkStack();
   }
@@ -1753,10 +1754,10 @@
     const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
     const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
     const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
-    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
-    cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
-    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
-    cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
+    uint64_t to_bytes = bytes_moved_.load(std::memory_order_seq_cst);
+    cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed);
+    uint64_t to_objects = objects_moved_.load(std::memory_order_seq_cst);
+    cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed);
     if (kEnableFromSpaceAccountingCheck) {
       CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
       CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
@@ -1787,12 +1788,12 @@
                 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                 << " to_space size=" << region_space_->ToSpaceSize();
       LOG(INFO) << "(before) num_bytes_allocated="
-                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
+                << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst);
     }
     RecordFree(ObjectBytePair(freed_objects, freed_bytes));
     if (kVerboseMode) {
       LOG(INFO) << "(after) num_bytes_allocated="
-                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
+                << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst);
     }
   }
 
@@ -2042,7 +2043,7 @@
       if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
         return;
       }
-      bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
+      bool updated_all_immune_objects = updated_all_immune_objects_.load(std::memory_order_seq_cst);
       CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
           << "Unmarked immune space ref. obj=" << obj << " rb_state="
           << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
@@ -2165,7 +2166,7 @@
     mirror::Object* expected_ref = ref;
     mirror::Object* new_ref = to_ref;
     do {
-      if (expected_ref != addr->LoadRelaxed()) {
+      if (expected_ref != addr->load(std::memory_order_relaxed)) {
         // It was updated by the mutator.
         break;
       }
@@ -2184,7 +2185,7 @@
     auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
     // If the cas fails, then it was updated by the mutator.
     do {
-      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
+      if (ref != addr->load(std::memory_order_relaxed).AsMirrorPtr()) {
         // It was updated by the mutator.
         break;
       }
@@ -2378,8 +2379,9 @@
       fall_back_to_non_moving = true;
       if (kVerboseMode) {
         LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
-                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
-                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
+                  << to_space_bytes_skipped_.load(std::memory_order_seq_cst)
+                  << " skipped_objects="
+                  << to_space_objects_skipped_.load(std::memory_order_seq_cst);
       }
       to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
@@ -2431,9 +2433,9 @@
           region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
         } else {
           // Record the lost copy for later reuse.
-          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
-          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
-          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
+          heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
+          to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
+          to_space_objects_skipped_.fetch_add(1, std::memory_order_seq_cst);
           MutexLock mu(Thread::Current(), skipped_blocks_lock_);
           skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                     reinterpret_cast<uint8_t*>(to_ref)));
@@ -2477,8 +2479,8 @@
     bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word);
     if (LIKELY(success)) {
       // The CAS succeeded.
-      objects_moved_.FetchAndAddRelaxed(1);
-      bytes_moved_.FetchAndAddRelaxed(region_space_alloc_size);
+      objects_moved_.fetch_add(1, std::memory_order_relaxed);
+      bytes_moved_.fetch_add(region_space_alloc_size, std::memory_order_relaxed);
       if (LIKELY(!fall_back_to_non_moving)) {
         DCHECK(region_space_->IsInToSpace(to_ref));
       } else {
@@ -2704,9 +2706,10 @@
   }
   if (measure_read_barrier_slow_path_) {
     MutexLock mu(self, rb_slow_path_histogram_lock_);
-    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
-    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
-    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
+    rb_slow_path_time_histogram_.AdjustAndAddValue(
+        rb_slow_path_ns_.load(std::memory_order_relaxed));
+    rb_slow_path_count_total_ += rb_slow_path_count_.load(std::memory_order_relaxed);
+    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.load(std::memory_order_relaxed);
   }
 }
 
@@ -2760,15 +2763,15 @@
 
 mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
   if (Thread::Current() != thread_running_gc_) {
-    rb_slow_path_count_.FetchAndAddRelaxed(1u);
+    rb_slow_path_count_.fetch_add(1u, std::memory_order_relaxed);
   } else {
-    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
+    rb_slow_path_count_gc_.fetch_add(1u, std::memory_order_relaxed);
   }
   ScopedTrace tr(__FUNCTION__);
   const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
   mirror::Object* ret = Mark(from_ref);
   if (measure_read_barrier_slow_path_) {
-    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
+    rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
   }
   return ret;
 }
@@ -2787,8 +2790,10 @@
   if (rb_slow_path_count_gc_total_ > 0) {
     os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
   }
-  os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n";
-  os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
+  os << "Cumulative bytes moved "
+     << cumulative_bytes_moved_.load(std::memory_order_relaxed) << "\n";
+  os << "Cumulative objects moved "
+     << cumulative_objects_moved_.load(std::memory_order_relaxed) << "\n";
 
   os << "Peak regions allocated "
      << region_space_->GetMaxPeakNumNonFreeRegions() << " ("
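
Several relaxed-load-then-CAS sites in this file implement a "mutator wins" rule: the GC installs the to-space reference only while the field still holds the from-space value, and backs off as soon as a re-read shows the mutator got there first. A sketch of that fixup (FixupField and Object are illustrative stand-ins for the GcRoot/CompressedReference variants above):

    #include <atomic>

    struct Object;  // Stand-in for mirror::Object.

    void FixupField(std::atomic<Object*>* addr, Object* from_ref, Object* to_ref) {
      Object* expected = from_ref;
      do {
        if (addr->load(std::memory_order_relaxed) != from_ref) {
          return;  // Updated by the mutator; its value takes precedence.
        }
        expected = from_ref;  // A failed CAS overwrote expected; reset it.
      } while (!addr->compare_exchange_weak(expected, to_ref,
                                            std::memory_order_relaxed));
    }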
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 9ab965e..2335964 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -116,21 +116,21 @@
   mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_spaces_.Reset();
-  no_reference_class_count_.StoreRelaxed(0);
-  normal_count_.StoreRelaxed(0);
-  class_count_.StoreRelaxed(0);
-  object_array_count_.StoreRelaxed(0);
-  other_count_.StoreRelaxed(0);
-  reference_count_.StoreRelaxed(0);
-  large_object_test_.StoreRelaxed(0);
-  large_object_mark_.StoreRelaxed(0);
-  overhead_time_ .StoreRelaxed(0);
-  work_chunks_created_.StoreRelaxed(0);
-  work_chunks_deleted_.StoreRelaxed(0);
-  mark_null_count_.StoreRelaxed(0);
-  mark_immune_count_.StoreRelaxed(0);
-  mark_fastpath_count_.StoreRelaxed(0);
-  mark_slowpath_count_.StoreRelaxed(0);
+  no_reference_class_count_.store(0, std::memory_order_relaxed);
+  normal_count_.store(0, std::memory_order_relaxed);
+  class_count_.store(0, std::memory_order_relaxed);
+  object_array_count_.store(0, std::memory_order_relaxed);
+  other_count_.store(0, std::memory_order_relaxed);
+  reference_count_.store(0, std::memory_order_relaxed);
+  large_object_test_.store(0, std::memory_order_relaxed);
+  large_object_mark_.store(0, std::memory_order_relaxed);
+  overhead_time_.store(0, std::memory_order_relaxed);
+  work_chunks_created_.store(0, std::memory_order_relaxed);
+  work_chunks_deleted_.store(0, std::memory_order_relaxed);
+  mark_null_count_.store(0, std::memory_order_relaxed);
+  mark_immune_count_.store(0, std::memory_order_relaxed);
+  mark_fastpath_count_.store(0, std::memory_order_relaxed);
+  mark_slowpath_count_.store(0, std::memory_order_relaxed);
   {
     // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -724,7 +724,7 @@
         if (kUseFinger) {
           std::atomic_thread_fence(std::memory_order_seq_cst);
           if (reinterpret_cast<uintptr_t>(ref) >=
-              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
+              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.load(std::memory_order_relaxed))) {
             return;
           }
         }
@@ -1046,7 +1046,7 @@
           // This function does not handle heap end increasing, so we must use the space end.
           uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
           uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
-          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
+          atomic_finger_.store(AtomicInteger::MaxValue(), std::memory_order_relaxed);
 
           // Create a few worker tasks.
           const size_t n = thread_count * 2;
@@ -1405,8 +1405,8 @@
   thread_pool->Wait(self, true, true);
   thread_pool->StopWorkers(self);
   mark_stack_->Reset();
-  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
-           work_chunks_deleted_.LoadSequentiallyConsistent())
+  CHECK_EQ(work_chunks_created_.load(std::memory_order_seq_cst),
+           work_chunks_deleted_.load(std::memory_order_seq_cst))
       << " some of the work chunks were leaked";
 }
 
@@ -1462,28 +1462,32 @@
   if (kCountScannedTypes) {
     VLOG(gc)
         << "MarkSweep scanned"
-        << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
-        << " normal objects=" << normal_count_.LoadRelaxed()
-        << " classes=" << class_count_.LoadRelaxed()
-        << " object arrays=" << object_array_count_.LoadRelaxed()
-        << " references=" << reference_count_.LoadRelaxed()
-        << " other=" << other_count_.LoadRelaxed();
+        << " no reference objects=" << no_reference_class_count_.load(std::memory_order_relaxed)
+        << " normal objects=" << normal_count_.load(std::memory_order_relaxed)
+        << " classes=" << class_count_.load(std::memory_order_relaxed)
+        << " object arrays=" << object_array_count_.load(std::memory_order_relaxed)
+        << " references=" << reference_count_.load(std::memory_order_relaxed)
+        << " other=" << other_count_.load(std::memory_order_relaxed);
   }
   if (kCountTasks) {
-    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
+    VLOG(gc)
+        << "Total number of work chunks allocated: "
+        << work_chunks_created_.load(std::memory_order_relaxed);
   }
   if (kMeasureOverhead) {
-    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
+    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.load(std::memory_order_relaxed));
   }
   if (kProfileLargeObjects) {
-    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
-        << " marked " << large_object_mark_.LoadRelaxed();
+    VLOG(gc)
+        << "Large objects tested " << large_object_test_.load(std::memory_order_relaxed)
+        << " marked " << large_object_mark_.load(std::memory_order_relaxed);
   }
   if (kCountMarkedObjects) {
-    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
-        << " immune=" <<  mark_immune_count_.LoadRelaxed()
-        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
-        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
+    VLOG(gc)
+        << "Marked: null=" << mark_null_count_.load(std::memory_order_relaxed)
+        << " immune=" <<  mark_immune_count_.load(std::memory_order_relaxed)
+        << " fastpath=" << mark_fastpath_count_.load(std::memory_order_relaxed)
+        << " slowpath=" << mark_slowpath_count_.load(std::memory_order_relaxed);
   }
   CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
   mark_stack_->Reset();
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 41ee183..948d233 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -156,7 +156,7 @@
     pre_fence_visitor(obj, usable_size);
     QuasiAtomic::ThreadFenceForConstructor();
     size_t num_bytes_allocated_before =
-        num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated);
+        num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
     new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
     if (bytes_tl_bulk_allocated > 0) {
       // Only trace when we get an increase in the number of bytes allocated. This happens when
@@ -187,7 +187,7 @@
       DCHECK(allocation_records_ != nullptr);
       allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
     }
-    AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent();
+    AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
     if (l != nullptr) {
       // Same as above. We assume that a listener that was once stored will never be deleted.
       // Otherwise we'd have to perform this under a lock.
@@ -393,7 +393,7 @@
 inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                             size_t alloc_size,
                                             bool grow) {
-  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
+  size_t new_footprint = num_bytes_allocated_.load(std::memory_order_seq_cst) + alloc_size;
   if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
     if (UNLIKELY(new_footprint > growth_limit_)) {
       return true;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a725ec4..52afb38 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -549,7 +549,7 @@
     AddRememberedSet(non_moving_space_rem_set);
   }
   // TODO: Count objects in the image space here?
-  num_bytes_allocated_.StoreRelaxed(0);
+  num_bytes_allocated_.store(0, std::memory_order_relaxed);
   mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                     kDefaultMarkStackSize));
   const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
@@ -1053,7 +1053,8 @@
   }
 
   os << "Registered native bytes allocated: "
-     << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
+     << (old_native_bytes_allocated_.load(std::memory_order_relaxed) +
+         new_native_bytes_allocated_.load(std::memory_order_relaxed))
      << "\n";
 
   BaseMutex::DumpAll(os);
@@ -1120,11 +1121,7 @@
 ALWAYS_INLINE
 static inline AllocationListener* GetAndOverwriteAllocationListener(
     Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
-  AllocationListener* old;
-  do {
-    old = storage->LoadSequentiallyConsistent();
-  } while (!storage->CompareAndSetStrongSequentiallyConsistent(old, new_value));
-  return old;
+  return storage->exchange(new_value);
 }
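
The simplification above is worth calling out: a CAS loop that swaps in a new value unconditionally is exactly std::atomic::exchange, and both forms default to seq_cst ordering and return the previous value. A sketch of the equivalence (function names are illustrative):

    #include <atomic>

    template <typename T>
    T* SwapViaCasLoop(std::atomic<T*>* storage, T* new_value) {
      T* old;
      do {
        old = storage->load();
      } while (!storage->compare_exchange_strong(old, new_value));
      return old;
    }

    template <typename T>
    T* SwapViaExchange(std::atomic<T*>* storage, T* new_value) {
      return storage->exchange(new_value);  // One RMW instead of load + CAS loop.
    }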
 
 Heap::~Heap() {
@@ -1142,12 +1139,11 @@
   delete thread_flip_lock_;
   delete pending_task_lock_;
   delete backtrace_lock_;
-  if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
-    LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
-        << " total=" << seen_backtrace_count_.LoadRelaxed() +
-            unique_backtrace_count_.LoadRelaxed();
+  uint64_t unique_count = unique_backtrace_count_.load(std::memory_order_relaxed);
+  uint64_t seen_count = seen_backtrace_count_.load(std::memory_order_relaxed);
+  if (unique_count != 0 || seen_count != 0) {
+    LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
   }
-
   VLOG(heap) << "Finished ~Heap()";
 }
 
@@ -1493,7 +1489,7 @@
   }
 
   // Ignore early dawn of the universe verifications.
-  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
+  if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) {
     return;
   }
   CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
@@ -1525,9 +1521,10 @@
  // Use signed comparison since freed bytes can be negative when background compaction-to-foreground
  // transitions occur. This is caused by moving objects from a bump pointer space to a free list
  // backed space, which typically increases the memory footprint due to padding and binning.
-  DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
+  DCHECK_LE(freed_bytes,
+            static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
  // Note: This relies on two's complement for handling negative freed_bytes.
-  num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
+  num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes));
   if (Runtime::Current()->HasStatsEnabled()) {
     RuntimeStats* thread_stats = Thread::Current()->GetStats();
     thread_stats->freed_objects += freed_objects;
@@ -1544,10 +1541,10 @@
   // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
  // If there's a concurrent revoke, it is ok not to reset num_bytes_freed_revoke_
  // all the way to zero, as the remainder will be subtracted at the next GC.
-  size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
-  CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
+  size_t bytes_freed = num_bytes_freed_revoke_.load();
+  CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed),
            bytes_freed) << "num_bytes_freed_revoke_ underflow";
-  CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
+  CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed),
            bytes_freed) << "num_bytes_allocated_ underflow";
   GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
 }
@@ -1703,13 +1700,13 @@
          // Always print that we ran homogeneous space compaction since this can cause jank.
           VLOG(heap) << "Ran heap homogeneous space compaction, "
                     << " requested defragmentation "
-                    << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                    << count_requested_homogeneous_space_compaction_.load()
                     << " performed defragmentation "
-                    << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                    << count_performed_homogeneous_space_compaction_.load()
                     << " ignored homogeneous space compaction "
-                    << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                    << count_ignored_homogeneous_space_compaction_.load()
                     << " delayed count = "
-                    << count_delayed_oom_.LoadSequentiallyConsistent();
+                    << count_delayed_oom_.load();
         }
         break;
       }
@@ -1972,7 +1969,7 @@
   VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
              << " -> " << static_cast<int>(collector_type);
   uint64_t start_time = NanoTime();
-  uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
+  uint32_t before_allocated = num_bytes_allocated_.load();
   Runtime* const runtime = Runtime::Current();
   Thread* const self = Thread::Current();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
@@ -2110,7 +2107,7 @@
     ScopedObjectAccess soa(self);
     soa.Vm()->UnloadNativeLibraries();
   }
-  int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
+  int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst);
   int32_t delta_allocated = before_allocated - after_allocated;
   std::string saved_str;
   if (delta_allocated >= 0) {
@@ -2559,7 +2556,9 @@
     // Move all bytes from new_native_bytes_allocated_ to
     // old_native_bytes_allocated_ now that GC has been triggered, resetting
     // new_native_bytes_allocated_ to zero in the process.
-    old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+    old_native_bytes_allocated_.fetch_add(
+        new_native_bytes_allocated_.exchange(0, std::memory_order_relaxed),
+        std::memory_order_relaxed);
   }
 
   DCHECK_LT(gc_type, collector::kGcTypeMax);
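The exchange/fetch_add pair above atomically drains one counter into another: exchange(0, relaxed) takes whatever the "new" counter currently holds in a single step, and fetch_add deposits it into the "old" counter; registrations racing with the exchange simply land in the new counter and are drained at the next GC. A hedged sketch of the idiom with illustrative names, not the runtime's actual counters:

#include <atomic>
#include <cstddef>

std::atomic<size_t> new_bytes{0};
std::atomic<size_t> old_bytes{0};

void DrainNewIntoOld() {
  // exchange(0) takes the entire current value atomically; concurrent
  // additions after this point accumulate in new_bytes for the next drain.
  size_t taken = new_bytes.exchange(0, std::memory_order_relaxed);
  old_bytes.fetch_add(taken, std::memory_order_relaxed);
}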
@@ -2759,7 +2758,7 @@
       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
 
   size_t GetFailureCount() const {
-    return fail_count_->LoadSequentiallyConsistent();
+    return fail_count_->load(std::memory_order_seq_cst);
   }
 
   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
@@ -2811,7 +2810,7 @@
       // Verify that the reference is live.
       return true;
     }
-    if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
+    if (fail_count_->fetch_add(1, std::memory_order_seq_cst) == 0) {
       // Print the message only on the first failure to prevent spam.
       LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
     }
@@ -2924,7 +2923,7 @@
   }
 
   size_t GetFailureCount() const {
-    return fail_count_->LoadSequentiallyConsistent();
+    return fail_count_->load(std::memory_order_seq_cst);
   }
 
  private:
@@ -3605,7 +3604,7 @@
 }
 
 void Heap::ClearConcurrentGCRequest() {
-  concurrent_gc_pending_.StoreRelaxed(false);
+  concurrent_gc_pending_.store(false, std::memory_order_relaxed);
 }
 
 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
@@ -3732,8 +3731,9 @@
   if (rosalloc_space_ != nullptr) {
     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
     if (freed_bytes_revoke > 0U) {
-      num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
-      CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+      num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst);
+      CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
+               num_bytes_freed_revoke_.load(std::memory_order_relaxed));
     }
   }
   if (bump_pointer_space_ != nullptr) {
@@ -3748,8 +3748,9 @@
   if (rosalloc_space_ != nullptr) {
     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
     if (freed_bytes_revoke > 0U) {
-      num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
-      CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+      num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst);
+      CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
+               num_bytes_freed_revoke_.load(std::memory_order_relaxed));
     }
   }
 }
@@ -3758,8 +3759,9 @@
   if (rosalloc_space_ != nullptr) {
     size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
     if (freed_bytes_revoke > 0U) {
-      num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
-      CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+      num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst);
+      CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
+               num_bytes_freed_revoke_.load(std::memory_order_relaxed));
     }
   }
   if (bump_pointer_space_ != nullptr) {
@@ -3771,7 +3773,7 @@
 }
 
 bool Heap::IsGCRequestPending() const {
-  return concurrent_gc_pending_.LoadRelaxed();
+  return concurrent_gc_pending_.load(std::memory_order_relaxed);
 }
 
 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
@@ -3781,7 +3783,7 @@
 }
 
 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
-  size_t old_value = new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
+  size_t old_value = new_native_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
 
   if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
              !IsGCRequestPending()) {
@@ -3803,12 +3805,12 @@
   size_t allocated;
   size_t new_freed_bytes;
   do {
-    allocated = new_native_bytes_allocated_.LoadRelaxed();
+    allocated = new_native_bytes_allocated_.load(std::memory_order_relaxed);
     new_freed_bytes = std::min(allocated, bytes);
   } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
                                                                    allocated - new_freed_bytes));
   if (new_freed_bytes < bytes) {
-    old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
+    old_native_bytes_allocated_.fetch_sub(bytes - new_freed_bytes, std::memory_order_relaxed);
   }
 }
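The loop above is a classic weak-CAS retry: recompute min(allocated, bytes) against the freshly observed value and retry until the subtraction lands without a racing update (CompareAndSetWeakRelaxed is assumed here to correspond to compare_exchange_weak with relaxed ordering). A standalone sketch of the shape:

#include <algorithm>
#include <atomic>
#include <cstddef>

std::atomic<size_t> allocated_bytes{0};

// Subtracts up to 'bytes' from allocated_bytes and returns how much was
// actually subtracted; the caller can charge the remainder elsewhere.
size_t SubtractUpTo(size_t bytes) {
  size_t current = allocated_bytes.load(std::memory_order_relaxed);
  size_t to_free;
  do {
    to_free = std::min(current, bytes);
    // On failure, compare_exchange_weak reloads 'current' for the next pass.
  } while (!allocated_bytes.compare_exchange_weak(current,
                                                  current - to_free,
                                                  std::memory_order_relaxed));
  return to_free;
}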
 
@@ -3942,9 +3944,9 @@
       StackHandleScope<1> hs(self);
       auto h = hs.NewHandleWrapper(obj);
       CollectGarbage(/* clear_soft_references */ false);
-      unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
+      unique_backtrace_count_.fetch_add(1, std::memory_order_seq_cst);
     } else {
-      seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
+      seen_backtrace_count_.fetch_add(1, std::memory_order_seq_cst);
     }
   }
 }
@@ -4020,11 +4022,11 @@
 }
 
 void Heap::SetGcPauseListener(GcPauseListener* l) {
-  gc_pause_listener_.StoreRelaxed(l);
+  gc_pause_listener_.store(l, std::memory_order_relaxed);
 }
 
 void Heap::RemoveGcPauseListener() {
-  gc_pause_listener_.StoreRelaxed(nullptr);
+  gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
 }
 
 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 021fe58..9af57d1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -496,7 +496,7 @@
 
   // Returns the number of bytes currently allocated.
   size_t GetBytesAllocated() const {
-    return num_bytes_allocated_.LoadSequentiallyConsistent();
+    return num_bytes_allocated_.load(std::memory_order_seq_cst);
   }
 
   // Returns the number of objects currently allocated.
@@ -546,7 +546,7 @@
   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
   size_t GetFreeMemory() const {
-    size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
+    size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst);
     size_t total_memory = GetTotalMemory();
     // Make sure we don't get a negative number.
     return total_memory - std::min(total_memory, byte_allocated);
@@ -775,11 +775,11 @@
   // Allocation tracking support
   // Callers to this function use double-checked locking to ensure safety on allocation_records_
   bool IsAllocTrackingEnabled() const {
-    return alloc_tracking_enabled_.LoadRelaxed();
+    return alloc_tracking_enabled_.load(std::memory_order_relaxed);
   }
 
   void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
-    alloc_tracking_enabled_.StoreRelaxed(enabled);
+    alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed);
   }
 
   AllocRecordObjectMap* GetAllocationRecords() const
@@ -825,7 +825,7 @@
   void SetGcPauseListener(GcPauseListener* l);
   // Get the currently installed gc pause listener, or null.
   GcPauseListener* GetGcPauseListener() {
-    return gc_pause_listener_.LoadAcquire();
+    return gc_pause_listener_.load(std::memory_order_acquire);
   }
   // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
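The relaxed load in IsAllocTrackingEnabled above is the lock-free half of the double-checked locking its comment describes: readers test the flag without a lock, and only take alloc_tracker_lock_ (which guards the store via SetAllocTrackingEnabled) before touching allocation_records_. A minimal sketch of that shape, with stand-in names and std::mutex in place of the runtime's lock:

#include <atomic>
#include <mutex>

std::atomic<bool> tracking_enabled{false};  // Stand-in for alloc_tracking_enabled_.
std::mutex tracker_lock;                    // Stand-in for Locks::alloc_tracker_lock_.

void RecordAllocationIfEnabled() {
  if (!tracking_enabled.load(std::memory_order_relaxed)) {
    return;  // Fast path: no lock taken when tracking is off.
  }
  std::lock_guard<std::mutex> guard(tracker_lock);
  if (!tracking_enabled.load(std::memory_order_relaxed)) {
    return;  // Re-check under the lock: tracking may have been disabled.
  }
  // ... append to the allocation records, still holding tracker_lock ...
}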
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 9ebb131..4c58549 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -46,16 +46,18 @@
                                                            size_t* bytes_tl_bulk_allocated) {
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   num_bytes = RoundUp(num_bytes, kAlignment);
-  uint8_t* end = end_.LoadRelaxed();
+  uint8_t* end = end_.load(std::memory_order_relaxed);
   if (end + num_bytes > growth_end_) {
     return nullptr;
   }
   mirror::Object* obj = reinterpret_cast<mirror::Object*>(end);
-  end_.StoreRelaxed(end + num_bytes);
+  end_.store(end + num_bytes, std::memory_order_relaxed);
   *bytes_allocated = num_bytes;
   // Use the CAS-free versions as an optimization.
-  objects_allocated_.StoreRelaxed(objects_allocated_.LoadRelaxed() + 1);
-  bytes_allocated_.StoreRelaxed(bytes_allocated_.LoadRelaxed() + num_bytes);
+  objects_allocated_.store(objects_allocated_.load(std::memory_order_relaxed) + 1,
+                           std::memory_order_relaxed);
+  bytes_allocated_.store(bytes_allocated_.load(std::memory_order_relaxed) + num_bytes,
+                         std::memory_order_relaxed);
   if (UNLIKELY(usable_size != nullptr)) {
     *usable_size = num_bytes;
   }
@@ -68,7 +70,7 @@
   uint8_t* old_end;
   uint8_t* new_end;
   do {
-    old_end = end_.LoadRelaxed();
+    old_end = end_.load(std::memory_order_relaxed);
     new_end = old_end + num_bytes;
     // If there is no more room in the region, we are out of memory.
     if (UNLIKELY(new_end > growth_end_)) {
@@ -81,8 +83,8 @@
 inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
   mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
   if (ret != nullptr) {
-    objects_allocated_.FetchAndAddSequentiallyConsistent(1);
-    bytes_allocated_.FetchAndAddSequentiallyConsistent(num_bytes);
+    objects_allocated_.fetch_add(1, std::memory_order_seq_cst);
+    bytes_allocated_.fetch_add(num_bytes, std::memory_order_seq_cst);
   }
   return ret;
 }
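The CAS loop in AllocNonvirtualWithoutAccounting above claims bytes by racing on end_: each thread proposes old_end + num_bytes and retries if another thread moved the pointer first. A minimal lock-free sketch of that shape, an illustration rather than the runtime's actual class:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative lock-free bump-pointer region; 'end' races forward via CAS.
struct BumpRegion {
  std::atomic<uint8_t*> end{nullptr};
  uint8_t* growth_end = nullptr;

  void* Alloc(size_t num_bytes) {
    uint8_t* old_end = end.load(std::memory_order_relaxed);
    uint8_t* new_end = nullptr;
    do {
      new_end = old_end + num_bytes;
      if (new_end > growth_end) {
        return nullptr;  // No room left in this region.
      }
      // A failed CAS reloads old_end, so the retry sees the new end.
    } while (!end.compare_exchange_weak(old_end, new_end,
                                        std::memory_order_relaxed));
    return old_end;  // First byte of the claimed range.
  }
};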
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index ce0e0f3..e95da01 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -72,8 +72,8 @@
   // Reset the end of the space back to the beginning; we move the end forward as we allocate
   // objects.
   SetEnd(Begin());
-  objects_allocated_.StoreRelaxed(0);
-  bytes_allocated_.StoreRelaxed(0);
+  objects_allocated_.store(0, std::memory_order_relaxed);
+  bytes_allocated_.store(0, std::memory_order_relaxed);
   growth_end_ = Limit();
   {
     MutexLock mu(Thread::Current(), block_lock_);
@@ -160,7 +160,7 @@
 
 uint64_t BumpPointerSpace::GetBytesAllocated() {
   // Start out with the pre-determined amount (blocks which are not being allocated into).
-  uint64_t total = static_cast<uint64_t>(bytes_allocated_.LoadRelaxed());
+  uint64_t total = static_cast<uint64_t>(bytes_allocated_.load(std::memory_order_relaxed));
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
   MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -178,7 +178,7 @@
 
 uint64_t BumpPointerSpace::GetObjectsAllocated() {
   // Start out with the pre-determined amount (blocks which are not being allocated into).
-  uint64_t total = static_cast<uint64_t>(objects_allocated_.LoadRelaxed());
+  uint64_t total = static_cast<uint64_t>(objects_allocated_.load(std::memory_order_relaxed));
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
   MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -195,8 +195,8 @@
 }
 
 void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
-  objects_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalObjectsAllocated());
-  bytes_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalBytesAllocated());
+  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_seq_cst);
+  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_seq_cst);
   thread->SetTlab(nullptr, nullptr, nullptr);
 }
 
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 7b43362..5ba13ca 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -155,8 +155,8 @@
 
   // Record objects / bytes freed.
   void RecordFree(int32_t objects, int32_t bytes) {
-    objects_allocated_.FetchAndSubSequentiallyConsistent(objects);
-    bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
+    objects_allocated_.fetch_sub(objects, std::memory_order_seq_cst);
+    bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c100bc0..e2154b8 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -672,7 +672,7 @@
     // Loaded the map; use the image header from the file now in case we patch it with
     // RelocateInPlace.
     image_header = reinterpret_cast<ImageHeader*>(map->Begin());
-    const uint32_t bitmap_index = ImageSpace::bitmap_index_.FetchAndAddSequentiallyConsistent(1);
+    const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst);
     std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
                                          image_filename,
                                          bitmap_index));
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 410931c..7072a7e 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -100,13 +100,13 @@
   uint8_t* old_top;
   uint8_t* new_top;
   do {
-    old_top = top_.LoadRelaxed();
+    old_top = top_.load(std::memory_order_relaxed);
     new_top = old_top + num_bytes;
     if (UNLIKELY(new_top > end_)) {
       return nullptr;
     }
   } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top));
-  objects_allocated_.FetchAndAddRelaxed(1);
+  objects_allocated_.fetch_add(1, std::memory_order_relaxed);
   DCHECK_LE(Top(), end_);
   DCHECK_LT(old_top, end_);
   DCHECK_LE(new_top, end_);
@@ -365,11 +365,11 @@
 inline size_t RegionSpace::Region::ObjectsAllocated() const {
   if (IsLarge()) {
     DCHECK_LT(begin_ + kRegionSize, Top());
-    DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
+    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
     return 1;
   } else if (IsLargeTail()) {
     DCHECK_EQ(begin_, Top());
-    DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
+    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
     return 0;
   } else {
     DCHECK(IsAllocated()) << "state=" << state_;
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 8d94c86..5ea434a 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -489,7 +489,7 @@
 void RegionSpace::RecordAlloc(mirror::Object* ref) {
   CHECK(ref != nullptr);
   Region* r = RefToRegion(ref);
-  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
+  r->objects_allocated_.fetch_add(1, std::memory_order_seq_cst);
 }
 
 bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
@@ -589,10 +589,10 @@
 }
 
 void RegionSpace::Region::Clear(bool zero_and_release_pages) {
-  top_.StoreRelaxed(begin_);
+  top_.store(begin_, std::memory_order_relaxed);
   state_ = RegionState::kRegionStateFree;
   type_ = RegionType::kRegionTypeNone;
-  objects_allocated_.StoreRelaxed(0);
+  objects_allocated_.store(0, std::memory_order_relaxed);
   alloc_time_ = 0;
   live_bytes_ = static_cast<size_t>(-1);
   if (zero_and_release_pages) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index d63257d..6a1371a 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -300,11 +300,11 @@
     void Init(size_t idx, uint8_t* begin, uint8_t* end) {
       idx_ = idx;
       begin_ = begin;
-      top_.StoreRelaxed(begin);
+      top_.store(begin, std::memory_order_relaxed);
       end_ = end;
       state_ = RegionState::kRegionStateFree;
       type_ = RegionType::kRegionTypeNone;
-      objects_allocated_.StoreRelaxed(0);
+      objects_allocated_.store(0, std::memory_order_relaxed);
       alloc_time_ = 0;
       live_bytes_ = static_cast<size_t>(-1);
       is_newly_allocated_ = false;
@@ -334,7 +334,7 @@
       if (is_free) {
         DCHECK(IsInNoSpace());
         DCHECK_EQ(begin_, Top());
-        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
+        DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
       }
       return is_free;
     }
@@ -461,11 +461,11 @@
     }
 
     ALWAYS_INLINE uint8_t* Top() const {
-      return top_.LoadRelaxed();
+      return top_.load(std::memory_order_relaxed);
     }
 
     void SetTop(uint8_t* new_top) {
-      top_.StoreRelaxed(new_top);
+      top_.store(new_top, std::memory_order_relaxed);
     }
 
     uint8_t* End() const {
@@ -480,10 +480,10 @@
 
     void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
       DCHECK(IsAllocated());
-      DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
+      DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
       DCHECK_EQ(Top(), end_);
-      objects_allocated_.StoreRelaxed(num_objects);
-      top_.StoreRelaxed(begin_ + num_bytes);
+      objects_allocated_.store(num_objects, std::memory_order_relaxed);
+      top_.store(begin_ + num_bytes, std::memory_order_relaxed);
       DCHECK_LE(Top(), end_);
     }
 
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 7af19fa..bc3ab48 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -272,7 +272,7 @@
 
   // Current address at which the space ends, which may vary as the space is filled.
   uint8_t* End() const {
-    return end_.LoadRelaxed();
+    return end_.load(std::memory_order_relaxed);
   }
 
   // The end of the address range covered by the space.
@@ -283,7 +283,7 @@
   // Change the end of the space. Be careful with use since changing the end of a space to an
   // invalid value may break the GC.
   void SetEnd(uint8_t* end) {
-    end_.StoreRelaxed(end);
+    end_.store(end, std::memory_order_relaxed);
   }
 
   void SetLimit(uint8_t* limit) {
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index cde155f..8c73ef9 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -122,7 +122,7 @@
     // Need to mark the card since this will update the mod-union table next GC cycle.
     card_table->MarkCard(ptrs[i]);
   }
-  zygote_space->objects_allocated_.FetchAndSubSequentiallyConsistent(num_ptrs);
+  zygote_space->objects_allocated_.fetch_sub(num_ptrs, std::memory_order_seq_cst);
 }
 
 }  // namespace space
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 0823101..10c1398 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -67,7 +67,7 @@
   }
 
   uint64_t GetObjectsAllocated() {
-    return objects_allocated_.LoadSequentiallyConsistent();
+    return objects_allocated_.load(std::memory_order_seq_cst);
   }
 
   void Clear() OVERRIDE;
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 77b40e4..38581ce 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -37,7 +37,7 @@
     if (max_recursion_ > 0) {
       task_processor_->AddTask(self,
                                new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
-      counter_->FetchAndAddSequentiallyConsistent(1U);
+      counter_->fetch_add(1U, std::memory_order_seq_cst);
     }
   }
 
@@ -54,7 +54,7 @@
   }
   virtual void Run(Thread* self) OVERRIDE {
     task_processor_->RunAllTasks(self);
-    done_running_->StoreSequentiallyConsistent(true);
+    done_running_->store(true, std::memory_order_seq_cst);
   }
 
  private:
@@ -76,7 +76,7 @@
   thread_pool.StartWorkers(self);
   ASSERT_FALSE(done_running);
   // Wait until all the tasks are done, but since we didn't interrupt, done_running should be false.
-  while (counter.LoadSequentiallyConsistent() != kRecursion) {
+  while (counter.load(std::memory_order_seq_cst) != kRecursion) {
     usleep(10);
   }
   ASSERT_FALSE(done_running);
@@ -84,11 +84,11 @@
   thread_pool.Wait(self, true, false);
   // After the interrupt and wait, the WorkUntilDoneTask should have terminated and
   // set done_running_ to true.
-  ASSERT_TRUE(done_running.LoadSequentiallyConsistent());
+  ASSERT_TRUE(done_running.load(std::memory_order_seq_cst));
 
   // Test that we finish remaining tasks before returning from RunTasksUntilInterrupted.
-  counter.StoreSequentiallyConsistent(0);
-  done_running.StoreSequentiallyConsistent(false);
+  counter.store(0, std::memory_order_seq_cst);
+  done_running.store(false, std::memory_order_seq_cst);
   // Self-interrupt before any of the other tasks run, but since we added them we should keep on
   // working until all the tasks are completed.
   task_processor.Stop(self);
@@ -96,8 +96,8 @@
   thread_pool.AddTask(self, new WorkUntilDoneTask(&task_processor, &done_running));
   thread_pool.StartWorkers(self);
   thread_pool.Wait(self, true, false);
-  ASSERT_TRUE(done_running.LoadSequentiallyConsistent());
-  ASSERT_EQ(counter.LoadSequentiallyConsistent(), kRecursion);
+  ASSERT_TRUE(done_running.load(std::memory_order_seq_cst));
+  ASSERT_EQ(counter.load(std::memory_order_seq_cst), kRecursion);
 }
 
 class TestOrderTask : public HeapTask {
@@ -137,10 +137,10 @@
   Atomic<bool> done_running(false);
   // Add a task which will wait until interrupted to the thread pool.
   thread_pool.AddTask(self, new WorkUntilDoneTask(&task_processor, &done_running));
-  ASSERT_FALSE(done_running.LoadSequentiallyConsistent());
+  ASSERT_FALSE(done_running.load(std::memory_order_seq_cst));
   thread_pool.StartWorkers(self);
   thread_pool.Wait(self, true, false);
-  ASSERT_TRUE(done_running.LoadSequentiallyConsistent());
+  ASSERT_TRUE(done_running.load(std::memory_order_seq_cst));
   ASSERT_EQ(counter, kNumTasks);
 }
 
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index f2ea2fd..5c6b4b5 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -27,6 +27,23 @@
 namespace art {
 namespace hiddenapi {
 
+// Hidden API enforcement policy
+// This must be kept in sync with ApplicationInfo.ApiEnforcementPolicy in
+// frameworks/base/core/java/android/content/pm/ApplicationInfo.java
+enum class EnforcementPolicy {
+  kNoChecks             = 0,
+  kAllLists             = 1,  // ban anything but whitelist
+  kDarkGreyAndBlackList = 2,  // ban dark grey & blacklist
+  kBlacklistOnly        = 3,  // ban blacklist violations only
+  kMax = kBlacklistOnly,
+};
+
+inline EnforcementPolicy EnforcementPolicyFromInt(int api_policy_int) {
+  DCHECK_GE(api_policy_int, 0);
+  DCHECK_LE(api_policy_int, static_cast<int>(EnforcementPolicy::kMax));
+  return static_cast<EnforcementPolicy>(api_policy_int);
+}
+
 enum Action {
   kAllow,
   kAllowButWarn,
@@ -38,7 +55,6 @@
   kReflection,
   kJNI,
   kLinking,
-  kOverride,
 };
 
 inline std::ostream& operator<<(std::ostream& os, AccessMethod value) {
@@ -52,23 +68,42 @@
     case kLinking:
       os << "linking";
       break;
-    case kOverride:
-      os << "override";
-      break;
   }
   return os;
 }
 
+static constexpr bool EnumsEqual(EnforcementPolicy policy, HiddenApiAccessFlags::ApiList apiList) {
+  return static_cast<int>(policy) == static_cast<int>(apiList);
+}
+
 inline Action GetMemberAction(uint32_t access_flags) {
-  switch (HiddenApiAccessFlags::DecodeFromRuntime(access_flags)) {
-    case HiddenApiAccessFlags::kWhitelist:
-      return kAllow;
-    case HiddenApiAccessFlags::kLightGreylist:
-      return kAllowButWarn;
-    case HiddenApiAccessFlags::kDarkGreylist:
-      return kAllowButWarnAndToast;
-    case HiddenApiAccessFlags::kBlacklist:
-      return kDeny;
+  EnforcementPolicy policy = Runtime::Current()->GetHiddenApiEnforcementPolicy();
+  if (policy == EnforcementPolicy::kNoChecks) {
+    // Exit early. Nothing to enforce.
+    return kAllow;
+  }
+
+  HiddenApiAccessFlags::ApiList api_list = HiddenApiAccessFlags::DecodeFromRuntime(access_flags);
+  if (api_list == HiddenApiAccessFlags::kWhitelist) {
+    return kAllow;
+  }
+  // The logic below relies on equality of values in the enums EnforcementPolicy and
+  // HiddenApiAccessFlags::ApiList, and their ordering. Assert that this is as expected.
+  static_assert(
+      EnumsEqual(EnforcementPolicy::kAllLists, HiddenApiAccessFlags::kLightGreylist) &&
+      EnumsEqual(EnforcementPolicy::kDarkGreyAndBlackList, HiddenApiAccessFlags::kDarkGreylist) &&
+      EnumsEqual(EnforcementPolicy::kBlacklistOnly, HiddenApiAccessFlags::kBlacklist),
+      "Mismatch between EnforcementPolicy and ApiList enums");
+  static_assert(
+      EnforcementPolicy::kAllLists < EnforcementPolicy::kDarkGreyAndBlackList &&
+      EnforcementPolicy::kDarkGreyAndBlackList < EnforcementPolicy::kBlacklistOnly,
+      "EnforcementPolicy values ordering not correct");
+  if (static_cast<int>(policy) > static_cast<int>(api_list)) {
+    return api_list == HiddenApiAccessFlags::kDarkGreylist
+        ? kAllowButWarnAndToast
+        : kAllowButWarn;
+  } else {
+    return kDeny;
   }
 }
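The static_asserts above pin down why a single integer comparison suffices: each EnforcementPolicy value bans every ApiList value at or above the matching position, so policy > api_list means the list falls below the banned range and is merely warned about. A standalone sketch with stand-in enums mirroring the asserted correspondence, not the runtime's headers:

// Stand-in enums; values mirror the correspondence asserted above.
enum class Policy { kNoChecks = 0, kAllLists = 1, kDarkGreyAndBlackList = 2, kBlacklistOnly = 3 };
enum class ApiList { kWhitelist = 0, kLightGreylist = 1, kDarkGreylist = 2, kBlacklist = 3 };
enum class Outcome { kAllowButWarn, kAllowButWarnAndToast, kDeny };

// Assumes the whitelist case was handled earlier, as in GetMemberAction.
Outcome Decide(Policy policy, ApiList list) {
  // policy > list means 'list' sits below the banned range for this policy,
  // so the access is allowed with a warning rather than denied.
  if (static_cast<int>(policy) > static_cast<int>(list)) {
    return list == ApiList::kDarkGreylist ? Outcome::kAllowButWarnAndToast
                                          : Outcome::kAllowButWarn;
  }
  return Outcome::kDeny;
}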
 
@@ -107,12 +142,6 @@
                                       AccessMethod access_method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(member != nullptr);
-  Runtime* runtime = Runtime::Current();
-
-  if (!runtime->AreHiddenApiChecksEnabled()) {
-    // Exit early. Nothing to enforce.
-    return false;
-  }
 
   Action action = GetMemberAction(member->GetAccessFlags());
   if (action == kAllow) {
@@ -133,14 +162,16 @@
   // We do this regardless of whether we block the access or not.
   WarnAboutMemberAccess(member, access_method);
 
-  // Block access if on blacklist.
   if (action == kDeny) {
+    // Block access
     return true;
   }
 
   // Allow access to this member but print a warning.
   DCHECK(action == kAllowButWarn || action == kAllowButWarnAndToast);
 
+  Runtime* runtime = Runtime::Current();
+
   // Depending on a runtime flag, we might move the member into the whitelist and
   // skip the warning the next time the member is accessed.
   if (runtime->ShouldDedupeHiddenApiWarnings()) {
@@ -150,7 +181,7 @@
 
   // If this action requires a UI warning, set the appropriate flag.
   if (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag()) {
-    Runtime::Current()->SetPendingHiddenApiWarning(true);
+    runtime->SetPendingHiddenApiWarning(true);
   }
 
   return false;
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index da4c4b2..8fe68bd 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -736,14 +736,14 @@
   // mutator lock exclusively held so that we don't have any threads in the middle of
   // DecodeWeakGlobal.
   Locks::mutator_lock_->AssertExclusiveHeld(self);
-  allow_accessing_weak_globals_.StoreSequentiallyConsistent(false);
+  allow_accessing_weak_globals_.store(false, std::memory_order_seq_cst);
 }
 
 void JavaVMExt::AllowNewWeakGlobals() {
   CHECK(!kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
-  allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
+  allow_accessing_weak_globals_.store(true, std::memory_order_seq_cst);
   weak_globals_add_condition_.Broadcast(self);
 }
 
@@ -770,7 +770,7 @@
   DCHECK(self != nullptr);
   return kUseReadBarrier ?
       self->GetWeakRefAccessEnabled() :
-      allow_accessing_weak_globals_.LoadSequentiallyConsistent();
+      allow_accessing_weak_globals_.load(std::memory_order_seq_cst);
 }
 
 ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
@@ -809,7 +809,7 @@
   }
   // self can be null during a runtime shutdown. ~Runtime()->~ClassLinker()->DecodeWeakGlobal().
   if (!kUseReadBarrier) {
-    DCHECK(allow_accessing_weak_globals_.LoadSequentiallyConsistent());
+    DCHECK(allow_accessing_weak_globals_.load(std::memory_order_seq_cst));
   }
   return weak_globals_.SynchronizedGet(ref);
 }
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 291a983..1e61ba0 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1625,7 +1625,7 @@
      * so waitForDebugger() doesn't return if we stall for a bit here.
      */
     Dbg::GoActive();
-    last_activity_time_ms_.StoreSequentiallyConsistent(0);
+    last_activity_time_ms_.store(0, std::memory_order_seq_cst);
   }
 
   /*
@@ -1703,7 +1703,7 @@
    * the initial setup.  Only update if this is a non-DDMS packet.
    */
   if (request->GetCommandSet() != kJDWPDdmCmdSet) {
-    last_activity_time_ms_.StoreSequentiallyConsistent(MilliTime());
+    last_activity_time_ms_.store(MilliTime(), std::memory_order_seq_cst);
   }
 
   return replyLength;
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 557b032..447e3bf 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -729,7 +729,7 @@
     return -1;
   }
 
-  int64_t last = last_activity_time_ms_.LoadSequentiallyConsistent();
+  int64_t last = last_activity_time_ms_.load(std::memory_order_seq_cst);
 
   /* initializing or in the middle of something? */
   if (last == 0) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 23cf071..813430f 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -718,10 +718,11 @@
   Runtime* runtime = Runtime::Current();
   if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
     ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-    DCHECK(!np_method->IsNative());
     if (np_method->IsCompilable()) {
-      // The compiler requires a ProfilingInfo object.
-      ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
+      if (!np_method->IsNative()) {
+        // The compiler requires a ProfilingInfo object for non-native methods.
+        ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
+      }
       JitCompileTask compile_task(method, JitCompileTask::kCompile);
       compile_task.Run(thread);
     }
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b2d58da..1c4b93e 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -623,7 +623,7 @@
 bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
   return kUseReadBarrier
       ? self->GetWeakRefAccessEnabled()
-      : is_weak_access_enabled_.LoadSequentiallyConsistent();
+      : is_weak_access_enabled_.load(std::memory_order_seq_cst);
 }
 
 void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
@@ -645,13 +645,13 @@
 
 void JitCodeCache::AllowInlineCacheAccess() {
   DCHECK(!kUseReadBarrier);
-  is_weak_access_enabled_.StoreSequentiallyConsistent(true);
+  is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
   BroadcastForInlineCacheAccess();
 }
 
 void JitCodeCache::DisallowInlineCacheAccess() {
   DCHECK(!kUseReadBarrier);
-  is_weak_access_enabled_.StoreSequentiallyConsistent(false);
+  is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
 }
 
 void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
@@ -820,7 +820,7 @@
       // code.
       GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
     }
-    last_update_time_ns_.StoreRelease(NanoTime());
+    last_update_time_ns_.store(NanoTime(), std::memory_order_release);
     VLOG(jit)
         << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
         << ArtMethod::PrettyMethod(method) << "@" << method
@@ -1647,7 +1647,7 @@
 }
 
 uint64_t JitCodeCache::GetLastUpdateTimeNs() const {
-  return last_update_time_ns_.LoadAcquire();
+  return last_update_time_ns_.load(std::memory_order_acquire);
 }
 
 bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
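These two hunks preserve the release/acquire pairing of the original StoreRelease/LoadAcquire calls: the release store in the commit path publishes all prior writes (the newly added code), and an acquire load of last_update_time_ns_ that observes the new timestamp also observes those writes. A simplified sketch of the pairing with illustrative names:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> last_update_ns{0};  // Illustrative name.
uint64_t published_payload = 0;           // Written before the release store.

void Publish(uint64_t now_ns) {
  published_payload = now_ns;  // Plain write, made visible by...
  last_update_ns.store(now_ns, std::memory_order_release);  // ...this store.
}

uint64_t ReadTimestamp() {
  // A reader that sees the publisher's timestamp here is also guaranteed to
  // see published_payload's new value.
  return last_update_ns.load(std::memory_order_acquire);
}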
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 3ffedca..7a4876c 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -154,7 +154,7 @@
   GcRoot<mirror::CallSite>& target = GetResolvedCallSites()[call_site_idx];
   Atomic<GcRoot<mirror::CallSite>>& ref =
       reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
-  return ref.LoadSequentiallyConsistent().Read();
+  return ref.load(std::memory_order_seq_cst).Read();
 }
 
 inline CallSite* DexCache::SetResolvedCallSite(uint32_t call_site_idx, CallSite* call_site) {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 55dd514..c7561f4 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -673,7 +673,7 @@
 inline kSize Object::GetFieldAcquire(MemberOffset field_offset) {
   const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
   const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
-  return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadAcquire();
+  return reinterpret_cast<const Atomic<kSize>*>(addr)->load(std::memory_order_acquire);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -956,7 +956,7 @@
   uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-  bool success = atomic_addr->CompareAndExchangeStrongSequentiallyConsistent(&old_ref, new_ref);
+  bool success = atomic_addr->compare_exchange_strong(old_ref, new_ref, std::memory_order_seq_cst);
   ObjPtr<Object> witness_value(PtrCompression<kPoisonHeapReferences, Object>::Decompress(old_ref));
   if (kIsDebugBuild) {
     // Ensure caller has done read barrier on the reference field so it's in the to-space.
@@ -986,7 +986,7 @@
   uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-  uint32_t old_ref = atomic_addr->ExchangeSequentiallyConsistent(new_ref);
+  uint32_t old_ref = atomic_addr->exchange(new_ref, std::memory_order_seq_cst);
   ObjPtr<Object> old_value(PtrCompression<kPoisonHeapReferences, Object>::Decompress(old_ref));
   if (kIsDebugBuild) {
     // Ensure caller has done read barrier on the reference field so it's in the to-space.
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index f274cfc..0e03e37 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -87,16 +87,18 @@
     DCHECK_ALIGNED(dst_bytes, sizeof(uintptr_t));
     // Use word-sized copies to begin.
     while (num_bytes >= sizeof(uintptr_t)) {
-      reinterpret_cast<Atomic<uintptr_t>*>(dst_bytes)->StoreRelaxed(
-          reinterpret_cast<Atomic<uintptr_t>*>(src_bytes)->LoadRelaxed());
+      reinterpret_cast<Atomic<uintptr_t>*>(dst_bytes)->store(
+          reinterpret_cast<Atomic<uintptr_t>*>(src_bytes)->load(std::memory_order_relaxed),
+          std::memory_order_relaxed);
       src_bytes += sizeof(uintptr_t);
       dst_bytes += sizeof(uintptr_t);
       num_bytes -= sizeof(uintptr_t);
     }
     // Copy possible 32-bit word.
     if (sizeof(uintptr_t) != sizeof(uint32_t) && num_bytes >= sizeof(uint32_t)) {
-      reinterpret_cast<Atomic<uint32_t>*>(dst_bytes)->StoreRelaxed(
-          reinterpret_cast<Atomic<uint32_t>*>(src_bytes)->LoadRelaxed());
+      reinterpret_cast<Atomic<uint32_t>*>(dst_bytes)->store(
+          reinterpret_cast<Atomic<uint32_t>*>(src_bytes)->load(std::memory_order_relaxed),
+          std::memory_order_relaxed);
       src_bytes += sizeof(uint32_t);
       dst_bytes += sizeof(uint32_t);
       num_bytes -= sizeof(uint32_t);
@@ -104,8 +106,9 @@
     // Copy remaining bytes; avoid going past the end of num_bytes since there may be a redzone
     // there.
     while (num_bytes > 0) {
-      reinterpret_cast<Atomic<uint8_t>*>(dst_bytes)->StoreRelaxed(
-          reinterpret_cast<Atomic<uint8_t>*>(src_bytes)->LoadRelaxed());
+      reinterpret_cast<Atomic<uint8_t>*>(dst_bytes)->store(
+          reinterpret_cast<Atomic<uint8_t>*>(src_bytes)->load(std::memory_order_relaxed),
+          std::memory_order_relaxed);
       src_bytes += sizeof(uint8_t);
       dst_bytes += sizeof(uint8_t);
       num_bytes -= sizeof(uint8_t);
@@ -173,7 +176,7 @@
 uint32_t Object::GenerateIdentityHashCode() {
   uint32_t expected_value, new_value;
   do {
-    expected_value = hash_code_seed.LoadRelaxed();
+    expected_value = hash_code_seed.load(std::memory_order_relaxed);
     new_value = expected_value * 1103515245 + 12345;
   } while (!hash_code_seed.CompareAndSetWeakRelaxed(expected_value, new_value) ||
       (expected_value & LockWord::kHashMask) == 0);
@@ -181,7 +184,7 @@
 }
 
 void Object::SetHashCodeSeed(uint32_t new_seed) {
-  hash_code_seed.StoreRelaxed(new_seed);
+  hash_code_seed.store(new_seed, std::memory_order_relaxed);
 }
 
 int32_t Object::IdentityHashCode() {
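GenerateIdentityHashCode above advances a shared seed with a linear congruential step (x' = x * 1103515245 + 12345) under a relaxed weak CAS, retrying both when the CAS loses a race and when the drawn value has no bits inside the hash mask, since zero is reserved. A sketch of the loop, with an illustrative mask constant standing in for LockWord::kHashMask:

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> hash_seed{1};
constexpr uint32_t kHashMask = 0x0FFFFFFF;  // Illustrative; not LockWord::kHashMask.

uint32_t NextIdentityHash() {
  uint32_t expected;
  uint32_t next;
  do {
    expected = hash_seed.load(std::memory_order_relaxed);
    next = expected * 1103515245u + 12345u;  // Classic LCG step.
    // Retry on a lost CAS race, and also when the drawn value has no bits
    // inside the mask (zero means "no hash installed").
  } while (!hash_seed.compare_exchange_weak(expected, next, std::memory_order_relaxed) ||
           (expected & kHashMask) == 0);
  return expected & kHashMask;
}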
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 95f82cb..d00c90b 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -730,7 +730,7 @@
     uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
     kSize* addr = reinterpret_cast<kSize*>(raw_addr);
     if (kIsVolatile) {
-      reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
+      reinterpret_cast<Atomic<kSize>*>(addr)->store(new_value, std::memory_order_seq_cst);
     } else {
       reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
     }
@@ -742,7 +742,7 @@
     const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
     const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
     if (kIsVolatile) {
-      return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
+      return reinterpret_cast<const Atomic<kSize>*>(addr)->load(std::memory_order_seq_cst);
     } else {
       return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
     }
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index cf1f85d..356fef0 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -110,13 +110,13 @@
   template <bool kIsVolatile = false>
   MirrorType* AsMirrorPtr() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return Compression::Decompress(
-        kIsVolatile ? reference_.LoadSequentiallyConsistent() : reference_.LoadJavaData());
+        kIsVolatile ? reference_.load(std::memory_order_seq_cst) : reference_.LoadJavaData());
   }
 
   template <bool kIsVolatile = false>
   void Assign(MirrorType* other) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsVolatile) {
-      reference_.StoreSequentiallyConsistent(Compression::Compress(other));
+      reference_.store(Compression::Compress(other), std::memory_order_seq_cst);
     } else {
       reference_.StoreJavaData(Compression::Compress(other));
     }
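The kIsVolatile template flag above selects between a sequentially consistent access, for Java volatile semantics, and a plain access otherwise; since the flag is a compile-time constant, the untaken branch folds away. A reduced sketch, where the relaxed load is only an assumed stand-in for whatever LoadJavaData() does:

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> reference{0};  // Stand-in for the compressed reference.

// kIsVolatile is a compile-time constant, so the untaken branch folds away.
template <bool kIsVolatile>
uint32_t LoadRef() {
  if (kIsVolatile) {
    return reference.load(std::memory_order_seq_cst);  // Java volatile read.
  } else {
    return reference.load(std::memory_order_relaxed);  // Assumed plain path.
  }
}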
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 2a938da..e110763 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -140,7 +140,7 @@
     }
   }
   DCHECK(HasHashCode());
-  return hash_code_.LoadRelaxed();
+  return hash_code_.load(std::memory_order_relaxed);
 }
 
 bool Monitor::Install(Thread* self) {
@@ -155,7 +155,7 @@
       break;
     }
     case LockWord::kHashCode: {
-      CHECK_EQ(hash_code_.LoadRelaxed(), static_cast<int32_t>(lw.GetHashCode()));
+      CHECK_EQ(hash_code_.load(std::memory_order_relaxed), static_cast<int32_t>(lw.GetHashCode()));
       break;
     }
     case LockWord::kFatLocked: {
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 384ebbe..6b7604e 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -130,7 +130,7 @@
   bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
 
   bool HasHashCode() const {
-    return hash_code_.LoadRelaxed() != 0;
+    return hash_code_.load(std::memory_order_relaxed) != 0;
   }
 
   MonitorId GetMonitorId() const {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index fc94266..3692a30 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -89,17 +89,27 @@
 
 static void VMDebug_startMethodTracingDdmsImpl(JNIEnv*, jclass, jint bufferSize, jint flags,
                                                jboolean samplingEnabled, jint intervalUs) {
-  Trace::Start("[DDMS]", -1, bufferSize, flags, Trace::TraceOutputMode::kDDMS,
-               samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
-               intervalUs);
+  Trace::StartDDMS(bufferSize,
+                   flags,
+                   samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
+                   intervalUs);
 }
 
-static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename,
-                                         jint javaFd, jint bufferSize, jint flags,
-                                         jboolean samplingEnabled, jint intervalUs,
+static void VMDebug_startMethodTracingFd(JNIEnv* env,
+                                         jclass,
+                                         jstring javaTraceFilename ATTRIBUTE_UNUSED,
+                                         jint javaFd,
+                                         jint bufferSize,
+                                         jint flags,
+                                         jboolean samplingEnabled,
+                                         jint intervalUs,
                                          jboolean streamingOutput) {
   int originalFd = javaFd;
   if (originalFd < 0) {
+    ScopedObjectAccess soa(env);
+    soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
+                                   "Trace fd is invalid: %d",
+                                   originalFd);
     return;
   }
 
@@ -107,18 +117,20 @@
   if (fd < 0) {
     ScopedObjectAccess soa(env);
     soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
-                                   "dup(%d) failed: %s", originalFd, strerror(errno));
+                                   "dup(%d) failed: %s",
+                                   originalFd,
+                                   strerror(errno));
     return;
   }
 
-  ScopedUtfChars traceFilename(env, javaTraceFilename);
-  if (traceFilename.c_str() == nullptr) {
-    return;
-  }
+  // Ignore the traceFilename.
   Trace::TraceOutputMode outputMode = streamingOutput
                                           ? Trace::TraceOutputMode::kStreaming
                                           : Trace::TraceOutputMode::kFile;
-  Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, outputMode,
+  Trace::Start(fd,
+               bufferSize,
+               flags,
+               outputMode,
                samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
                intervalUs);
 }
@@ -130,7 +142,10 @@
   if (traceFilename.c_str() == nullptr) {
     return;
   }
-  Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, Trace::TraceOutputMode::kFile,
+  Trace::Start(traceFilename.c_str(),
+               bufferSize,
+               flags,
+               Trace::TraceOutputMode::kFile,
                samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
                intervalUs);
 }
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 8913569..d9a5096 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -162,19 +162,24 @@
 
 // Must match values in com.android.internal.os.Zygote.
 enum {
-  DEBUG_ENABLE_JDWP               = 1,
-  DEBUG_ENABLE_CHECKJNI           = 1 << 1,
-  DEBUG_ENABLE_ASSERT             = 1 << 2,
-  DEBUG_ENABLE_SAFEMODE           = 1 << 3,
-  DEBUG_ENABLE_JNI_LOGGING        = 1 << 4,
-  DEBUG_GENERATE_DEBUG_INFO       = 1 << 5,
-  DEBUG_ALWAYS_JIT                = 1 << 6,
-  DEBUG_NATIVE_DEBUGGABLE         = 1 << 7,
-  DEBUG_JAVA_DEBUGGABLE           = 1 << 8,
-  DISABLE_VERIFIER                = 1 << 9,
-  ONLY_USE_SYSTEM_OAT_FILES       = 1 << 10,
-  ENABLE_HIDDEN_API_CHECKS        = 1 << 11,
-  DEBUG_GENERATE_MINI_DEBUG_INFO  = 1 << 12,
+  DEBUG_ENABLE_JDWP                  = 1,
+  DEBUG_ENABLE_CHECKJNI              = 1 << 1,
+  DEBUG_ENABLE_ASSERT                = 1 << 2,
+  DEBUG_ENABLE_SAFEMODE              = 1 << 3,
+  DEBUG_ENABLE_JNI_LOGGING           = 1 << 4,
+  DEBUG_GENERATE_DEBUG_INFO          = 1 << 5,
+  DEBUG_ALWAYS_JIT                   = 1 << 6,
+  DEBUG_NATIVE_DEBUGGABLE            = 1 << 7,
+  DEBUG_JAVA_DEBUGGABLE              = 1 << 8,
+  DISABLE_VERIFIER                   = 1 << 9,
+  ONLY_USE_SYSTEM_OAT_FILES          = 1 << 10,
+  DEBUG_GENERATE_MINI_DEBUG_INFO     = 1 << 11,
+  HIDDEN_API_ENFORCEMENT_POLICY_MASK = (1 << 12)
+                                     | (1 << 13),
+
+  // Bits to shift (flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) by to get a value
+  // corresponding to hiddenapi::EnforcementPolicy.
+  API_ENFORCEMENT_POLICY_SHIFT = CTZ(HIDDEN_API_ENFORCEMENT_POLICY_MASK),
 };
 
 static uint32_t EnableDebugFeatures(uint32_t runtime_flags) {
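API_ENFORCEMENT_POLICY_SHIFT above derives the shift from the mask itself: counting the trailing zeros of HIDDEN_API_ENFORCEMENT_POLICY_MASK gives the bit position of the field's lowest bit, so no hand-maintained shift constant can drift out of sync. A sketch using the GCC/Clang builtin in place of the runtime's CTZ helper:

#include <cstdint>

constexpr uint32_t kPolicyMask = (1u << 12) | (1u << 13);
// __builtin_ctz (GCC/Clang) counts trailing zeros: here it yields 12.
constexpr int kPolicyShift = __builtin_ctz(kPolicyMask);

int DecodePolicy(uint32_t runtime_flags) {
  return static_cast<int>((runtime_flags & kPolicyMask) >> kPolicyShift);
}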
@@ -285,7 +290,8 @@
   // Our system thread ID, etc, has changed so reset Thread state.
   thread->InitAfterFork();
   runtime_flags = EnableDebugFeatures(runtime_flags);
-  bool do_hidden_api_checks = false;
+  hiddenapi::EnforcementPolicy api_enforcement_policy = hiddenapi::EnforcementPolicy::kNoChecks;
+  bool dedupe_hidden_api_warnings = true;
 
   if ((runtime_flags & DISABLE_VERIFIER) != 0) {
     Runtime::Current()->DisableVerifier();
@@ -297,10 +303,9 @@
     runtime_flags &= ~ONLY_USE_SYSTEM_OAT_FILES;
   }
 
-  if ((runtime_flags & ENABLE_HIDDEN_API_CHECKS) != 0) {
-    do_hidden_api_checks = true;
-    runtime_flags &= ~ENABLE_HIDDEN_API_CHECKS;
-  }
+  api_enforcement_policy = hiddenapi::EnforcementPolicyFromInt(
+      (runtime_flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) >> API_ENFORCEMENT_POLICY_SHIFT);
+  runtime_flags &= ~HIDDEN_API_ENFORCEMENT_POLICY_MASK;
 
   if (runtime_flags != 0) {
     LOG(ERROR) << StringPrintf("Unknown bits set in runtime_flags: %#x", runtime_flags);
@@ -338,7 +343,6 @@
 
       std::string trace_file = StringPrintf("/data/misc/trace/%s.trace.bin", proc_name.c_str());
       Trace::Start(trace_file.c_str(),
-                   -1,
                    buffer_size,
                    0,   // TODO: Expose flags.
                    output_mode,
@@ -351,11 +355,13 @@
     }
   }
 
+  bool do_hidden_api_checks = api_enforcement_policy != hiddenapi::EnforcementPolicy::kNoChecks;
   DCHECK(!(is_system_server && do_hidden_api_checks))
-      << "SystemServer should be forked with ENABLE_HIDDEN_API_CHECKS";
+      << "SystemServer should be forked with EnforcementPolicy::kDisable";
   DCHECK(!(is_zygote && do_hidden_api_checks))
-      << "Child zygote processes should be forked with ENABLE_HIDDEN_API_CHECKS";
-  Runtime::Current()->SetHiddenApiChecksEnabled(do_hidden_api_checks);
+      << "Child zygote processes should be forked with EnforcementPolicy::kDisable";
+  Runtime::Current()->SetHiddenApiEnforcementPolicy(api_enforcement_policy);
+  Runtime::Current()->SetDedupeHiddenApiWarnings(dedupe_hidden_api_warnings);
 
   // Clear the hidden API warning flag, in case it was set.
   Runtime::Current()->SetPendingHiddenApiWarning(false);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 25d5037..fc61c95 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -89,8 +89,8 @@
 // access hidden APIs. This can be *very* expensive. Never call this in a loop.
 ALWAYS_INLINE static bool ShouldEnforceHiddenApi(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  return Runtime::Current()->AreHiddenApiChecksEnabled() &&
-         !IsCallerInBootClassPath(self);
+  hiddenapi::EnforcementPolicy policy = Runtime::Current()->GetHiddenApiEnforcementPolicy();
+  return policy != hiddenapi::EnforcementPolicy::kNoChecks && !IsCallerInBootClassPath(self);
 }
 
 // Returns true if the first non-ClassClass caller up the stack should not be
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 58f6c04..5035ba0 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -130,7 +130,7 @@
         ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
         // Update the field atomically. This may fail if the mutator updates before us, but it's ok.
         if (ref != old_ref) {
-          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
+          Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root);
           atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
         }
       }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7d9d342..9a626ba 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -267,7 +267,7 @@
       oat_file_manager_(nullptr),
       is_low_memory_mode_(false),
       safe_mode_(false),
-      do_hidden_api_checks_(false),
+      hidden_api_policy_(hiddenapi::EnforcementPolicy::kNoChecks),
       pending_hidden_api_warning_(false),
       dedupe_hidden_api_warnings_(true),
       always_set_hidden_api_warning_flag_(false),
@@ -839,7 +839,6 @@
   if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
     ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
     Trace::Start(trace_config_->trace_file.c_str(),
-                 -1,
                  static_cast<int>(trace_config_->trace_file_size),
                  0,
                  trace_config_->trace_output_mode,
@@ -1196,9 +1195,14 @@
   // by default and we only enable them if:
   // (a) runtime was started with a flag that enables the checks, or
   // (b) Zygote forked a new process that is not exempt (see ZygoteHooks).
-  do_hidden_api_checks_ = runtime_options.Exists(Opt::HiddenApiChecks);
-  DCHECK(!is_zygote_ || !do_hidden_api_checks_)
-      << "Zygote should not be started with hidden API checks";
+  bool do_hidden_api_checks = runtime_options.Exists(Opt::HiddenApiChecks);
+  DCHECK(!is_zygote_ || !do_hidden_api_checks);
+  // TODO: Pass the actual enforcement policy in, rather than just a single bit.
+  // As is, we're encoding some logic here about which specific policy to use, which would be better
+  // controlled by the framework.
+  hidden_api_policy_ = do_hidden_api_checks
+      ? hiddenapi::EnforcementPolicy::kBlacklistOnly
+      : hiddenapi::EnforcementPolicy::kNoChecks;
 
   no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
   force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index c7f650e..dba31b2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -49,6 +49,10 @@
 class Heap;
 }  // namespace gc
 
+namespace hiddenapi {
+enum class EnforcementPolicy;
+}  // namespace hiddenapi
+
 namespace jit {
 class Jit;
 class JitOptions;
@@ -520,12 +524,12 @@
   bool IsVerificationEnabled() const;
   bool IsVerificationSoftFail() const;
 
-  void SetHiddenApiChecksEnabled(bool value) {
-    do_hidden_api_checks_ = value;
+  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
+    hidden_api_policy_ = policy;
   }
 
-  bool AreHiddenApiChecksEnabled() const {
-    return do_hidden_api_checks_;
+  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
+    return hidden_api_policy_;
   }
 
   void SetPendingHiddenApiWarning(bool value) {
@@ -990,7 +994,7 @@
   bool safe_mode_;
 
   // The hidden API enforcement policy, i.e. which level of access checks should be performed.
-  bool do_hidden_api_checks_;
+  hiddenapi::EnforcementPolicy hidden_api_policy_;
 
   // Whether the application has used an API which is not restricted but we
   // should issue a warning about it.
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 2f6f50e..e34f32e 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -251,6 +251,7 @@
       union StateAndFlags new_state_and_flags;
       new_state_and_flags.as_int = old_state_and_flags.as_int;
       new_state_and_flags.as_struct.state = kRunnable;
+
       // CAS the value with a memory barrier.
       if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
                                                  old_state_and_flags.as_int,
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 5b03c2d..b13d8ec 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1280,7 +1280,7 @@
     AtomicClearFlag(kSuspendRequest);
   } else {
     // Two bits might be set simultaneously.
-    tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseOrSequentiallyConsistent(flags);
+    tls32_.state_and_flags.as_atomic_int.fetch_or(flags, std::memory_order_seq_cst);
     TriggerSuspend();
   }
   return true;
@@ -1318,7 +1318,7 @@
     if (pending_threads != nullptr) {
       bool done = false;
       do {
-        int32_t cur_val = pending_threads->LoadRelaxed();
+        int32_t cur_val = pending_threads->load(std::memory_order_relaxed);
         CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
         // Reduce value by 1.
         done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1);
@@ -1438,8 +1438,12 @@
     barrier_.Pass(self);
   }
 
-  void Wait(Thread* self) {
-    barrier_.Increment(self, 1);
+  void Wait(Thread* self, ThreadState suspend_state) {
+    if (suspend_state != ThreadState::kRunnable) {
+      barrier_.Increment<Barrier::kDisallowHoldingLocks>(self, 1);
+    } else {
+      barrier_.Increment<Barrier::kAllowHoldingLocks>(self, 1);
+    }
   }
 
  private:
@@ -1448,7 +1452,7 @@
 };
 
 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-bool Thread::RequestSynchronousCheckpoint(Closure* function) {
+bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState suspend_state) {
   Thread* self = Thread::Current();
   if (this == Thread::Current()) {
     Locks::thread_list_lock_->AssertExclusiveHeld(self);
@@ -1496,8 +1500,8 @@
         // Relinquish the thread-list lock. We should not wait holding any locks. We cannot
         // reacquire it since we don't know if 'this' hasn't been deleted yet.
         Locks::thread_list_lock_->ExclusiveUnlock(self);
-        ScopedThreadSuspension sts(self, ThreadState::kWaiting);
-        barrier_closure.Wait(self);
+        ScopedThreadStateChange sts(self, suspend_state);
+        barrier_closure.Wait(self, suspend_state);
         return true;
       }
       // Fall-through.
@@ -1521,7 +1525,7 @@
       // that we can call ModifySuspendCount without racing against ThreadList::Unregister.
       ScopedThreadListLockUnlock stllu(self);
       {
-        ScopedThreadSuspension sts(self, ThreadState::kWaiting);
+        ScopedThreadStateChange sts(self, suspend_state);
         while (GetState() == ThreadState::kRunnable) {
           // We became runnable again. Wait till the suspend triggered in ModifySuspendCount
           // moves us to suspended.
@@ -1558,7 +1562,7 @@
   Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
   Closure* func;
   do {
-    func = atomic_func->LoadRelaxed();
+    func = atomic_func->load(std::memory_order_relaxed);
     if (func == nullptr) {
       return nullptr;
     }
@@ -1570,7 +1574,7 @@
 void Thread::SetFlipFunction(Closure* function) {
   CHECK(function != nullptr);
   Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
-  atomic_func->StoreSequentiallyConsistent(function);
+  atomic_func->store(function, std::memory_order_seq_cst);
 }
 
 void Thread::FullSuspendCheck() {
@@ -2102,7 +2106,7 @@
                 "art::Thread has a size which is not a multiple of 4.");
   tls32_.state_and_flags.as_struct.flags = 0;
   tls32_.state_and_flags.as_struct.state = kNative;
-  tls32_.interrupted.StoreRelaxed(false);
+  tls32_.interrupted.store(false, std::memory_order_relaxed);
   memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
   std::fill(tlsPtr_.rosalloc_runs,
             tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
@@ -2397,24 +2401,24 @@
 bool Thread::Interrupted() {
   DCHECK_EQ(Thread::Current(), this);
   // No other thread can concurrently reset the interrupted flag.
-  bool interrupted = tls32_.interrupted.LoadSequentiallyConsistent();
+  bool interrupted = tls32_.interrupted.load(std::memory_order_seq_cst);
   if (interrupted) {
-    tls32_.interrupted.StoreSequentiallyConsistent(false);
+    tls32_.interrupted.store(false, std::memory_order_seq_cst);
   }
   return interrupted;
 }
 
 // Implements java.lang.Thread.isInterrupted.
 bool Thread::IsInterrupted() {
-  return tls32_.interrupted.LoadSequentiallyConsistent();
+  return tls32_.interrupted.load(std::memory_order_seq_cst);
 }
 
 void Thread::Interrupt(Thread* self) {
   MutexLock mu(self, *wait_mutex_);
-  if (tls32_.interrupted.LoadSequentiallyConsistent()) {
+  if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
     return;
   }
-  tls32_.interrupted.StoreSequentiallyConsistent(true);
+  tls32_.interrupted.store(true, std::memory_order_seq_cst);
   NotifyLocked(self);
 }
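
For reference, the barrier-passing loop at the top of this file has the following standalone shape. This is a sketch against plain std::atomic with no ART types assumed; it illustrates the relaxed-load plus weak compare-exchange idiom that the converted code keeps:

#include <atomic>
#include <cassert>
#include <cstdint>

// Decrement a pending-threads counter by one, tolerating concurrent updates.
void PassBarrier(std::atomic<int32_t>* pending_threads) {
  bool done = false;
  do {
    int32_t cur_val = pending_threads->load(std::memory_order_relaxed);
    assert(cur_val > 0);  // A thread only passes a barrier it is counted in.
    // compare_exchange_weak may fail spuriously, hence the retry loop.
    done = pending_threads->compare_exchange_weak(cur_val, cur_val - 1,
                                                  std::memory_order_relaxed);
  } while (!done);
}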
 
diff --git a/runtime/thread.h b/runtime/thread.h
index 6549fc1..22b77ee 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -263,16 +263,31 @@
       WARN_UNUSED
       REQUIRES(Locks::thread_suspend_count_lock_);
 
+  // Requests a checkpoint closure to run on another thread. The closure will be run when the
+  // thread gets suspended. Returns true if the closure was queued and will (eventually) be
+  // executed, and false otherwise.
+  //
+  // Since multiple closures can be queued and some closures can delay other threads from
+  // running, no closure should attempt to suspend another thread while it runs.
+  // TODO: Add a debug option that verifies this.
   bool RequestCheckpoint(Closure* function)
       REQUIRES(Locks::thread_suspend_count_lock_);
 
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
-  // execute the checkpoint for us if it is Runnable.
-  bool RequestSynchronousCheckpoint(Closure* function)
+  // execute the checkpoint for us if it is Runnable. The suspend_state is the state the calling
+  // thread will enter while it waits for the checkpoint to run.
+  // NB: Passing ThreadState::kRunnable may cause the current thread to wait on a condition
+  // variable while holding the mutator_lock_. Callers should ensure that this will not cause any
+  // problems for the closure or the rest of the system.
+  // NB: Since multiple closures can be queued and some closures can delay other threads from
+  // running, no closure should attempt to suspend another thread while it runs.
+  bool RequestSynchronousCheckpoint(Closure* function,
+                                    ThreadState suspend_state = ThreadState::kWaiting)
       REQUIRES_SHARED(Locks::mutator_lock_)
       RELEASE(Locks::thread_list_lock_)
       REQUIRES(!Locks::thread_suspend_count_lock_);
+
   bool RequestEmptyCheckpoint()
       REQUIRES(Locks::thread_suspend_count_lock_);
 
@@ -541,7 +556,7 @@
   bool IsInterrupted();
   void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
   void SetInterrupted(bool i) {
-    tls32_.interrupted.StoreSequentiallyConsistent(i);
+    tls32_.interrupted.store(i, std::memory_order_seq_cst);
   }
   void Notify() REQUIRES(!*wait_mutex_);
 
@@ -1095,11 +1110,11 @@
   }
 
   void AtomicSetFlag(ThreadFlag flag) {
-    tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseOrSequentiallyConsistent(flag);
+    tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst);
   }
 
   void AtomicClearFlag(ThreadFlag flag) {
-    tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseAndSequentiallyConsistent(-1 ^ flag);
+    tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
   }
 
   void ResetQuickAllocEntryPointsForThread(bool is_marking);
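
The `-1 ^ flag` in AtomicClearFlag is simply `~flag` for a 32-bit int. In plain std::atomic terms, the two helpers reduce to the following sketch; the ThreadFlag bit assignments here are illustrative stand-ins, not ART's real values:

#include <atomic>
#include <cstdint>

enum ThreadFlag : int32_t {   // Illustrative bit assignments only.
  kSuspendRequest = 1 << 0,
  kCheckpointRequest = 1 << 1,
};

std::atomic<int32_t> state_and_flags{0};

void AtomicSetFlag(ThreadFlag flag) {
  state_and_flags.fetch_or(flag, std::memory_order_seq_cst);
}

void AtomicClearFlag(ThreadFlag flag) {
  state_and_flags.fetch_and(~flag, std::memory_order_seq_cst);  // ~flag == -1 ^ flag
}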
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 8095ef5..44af867 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -732,7 +732,7 @@
     if (reason == SuspendReason::kForDebugger) {
       ++debug_suspend_all_count_;
     }
-    pending_threads.StoreRelaxed(list_.size() - num_ignored);
+    pending_threads.store(list_.size() - num_ignored, std::memory_order_relaxed);
     // Increment everybody's suspend count (except those that should be ignored).
     for (const auto& thread : list_) {
       if (thread == ignore1 || thread == ignore2) {
@@ -748,7 +748,7 @@
       if (thread->IsSuspended()) {
         // Only clear the counter for the current thread.
         thread->ClearSuspendBarrier(&pending_threads);
-        pending_threads.FetchAndSubSequentiallyConsistent(1);
+        pending_threads.fetch_sub(1, std::memory_order_seq_cst);
       }
     }
   }
@@ -761,7 +761,7 @@
 #endif
   const uint64_t start_time = NanoTime();
   while (true) {
-    int32_t cur_val = pending_threads.LoadRelaxed();
+    int32_t cur_val = pending_threads.load(std::memory_order_relaxed);
     if (LIKELY(cur_val > 0)) {
 #if ART_USE_FUTEXES
       if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
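
The futex loop above re-reads the counter with a relaxed load and blocks until the value changes. The same shape can be written portably with C++20's std::atomic::wait; this is an analogue for illustration, not what the ART code (which predates C++20 here) actually uses:

#include <atomic>
#include <cstdint>

// Block until every pending thread has passed its suspend barrier.
void WaitForBarrier(std::atomic<int32_t>& pending_threads) {
  while (true) {
    int32_t cur_val = pending_threads.load(std::memory_order_relaxed);
    if (cur_val <= 0) {
      return;  // Everybody passed.
    }
    // Like FUTEX_WAIT: sleeps until the value is observed to differ from cur_val.
    pending_threads.wait(cur_val, std::memory_order_relaxed);
  }
}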
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 895a108..d784200 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -71,7 +71,7 @@
   // Wait for tasks to complete.
   thread_pool.Wait(self, true, false);
   // Make sure that we finished all the work.
-  EXPECT_EQ(num_tasks, count.LoadSequentiallyConsistent());
+  EXPECT_EQ(num_tasks, count.load(std::memory_order_seq_cst));
 }
 
 TEST_F(ThreadPoolTest, StopStart) {
@@ -84,7 +84,7 @@
   }
   usleep(200);
   // Check that no threads started prematurely.
-  EXPECT_EQ(0, count.LoadSequentiallyConsistent());
+  EXPECT_EQ(0, count.load(std::memory_order_seq_cst));
   // Signal the threads to start processing tasks.
   thread_pool.StartWorkers(self);
   usleep(200);
@@ -93,7 +93,7 @@
   thread_pool.AddTask(self, new CountTask(&bad_count));
   usleep(200);
   // Ensure that the task added after the workers were stopped doesn't get run.
-  EXPECT_EQ(0, bad_count.LoadSequentiallyConsistent());
+  EXPECT_EQ(0, bad_count.load(std::memory_order_seq_cst));
   // Allow tasks to finish up and delete themselves.
   thread_pool.StartWorkers(self);
   thread_pool.Wait(self, false, false);
@@ -157,7 +157,7 @@
   thread_pool.AddTask(self, new TreeTask(&thread_pool, &count, depth));
   thread_pool.StartWorkers(self);
   thread_pool.Wait(self, true, false);
-  EXPECT_EQ((1 << depth) - 1, count.LoadSequentiallyConsistent());
+  EXPECT_EQ((1 << depth) - 1, count.load(std::memory_order_seq_cst));
 }
 
 class PeerTask : public Task {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 0f321b6..bea510a 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -319,8 +319,74 @@
   return nullptr;
 }
 
-void Trace::Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags,
-                  TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) {
+void Trace::Start(const char* trace_filename,
+                  size_t buffer_size,
+                  int flags,
+                  TraceOutputMode output_mode,
+                  TraceMode trace_mode,
+                  int interval_us) {
+  std::unique_ptr<File> file(OS::CreateEmptyFileWriteOnly(trace_filename));
+  if (file == nullptr) {
+    std::string msg = android::base::StringPrintf("Unable to open trace file '%s'", trace_filename);
+    PLOG(ERROR) << msg;
+    ScopedObjectAccess soa(Thread::Current());
+    Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str());
+    return;
+  }
+  Start(std::move(file), buffer_size, flags, output_mode, trace_mode, interval_us);
+}
+
+void Trace::Start(int trace_fd,
+                  size_t buffer_size,
+                  int flags,
+                  TraceOutputMode output_mode,
+                  TraceMode trace_mode,
+                  int interval_us) {
+  if (trace_fd < 0) {
+    std::string msg = android::base::StringPrintf("Unable to start tracing with invalid fd %d",
+                                                  trace_fd);
+    LOG(ERROR) << msg;
+    ScopedObjectAccess soa(Thread::Current());
+    Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str());
+    return;
+  }
+  std::unique_ptr<File> file(new File(trace_fd, "tracefile"));
+  Start(std::move(file), buffer_size, flags, output_mode, trace_mode, interval_us);
+}
+
+void Trace::StartDDMS(size_t buffer_size,
+                      int flags,
+                      TraceMode trace_mode,
+                      int interval_us) {
+  Start(std::unique_ptr<File>(),
+        buffer_size,
+        flags,
+        TraceOutputMode::kDDMS,
+        trace_mode,
+        interval_us);
+}
+
+void Trace::Start(std::unique_ptr<File>&& trace_file_in,
+                  size_t buffer_size,
+                  int flags,
+                  TraceOutputMode output_mode,
+                  TraceMode trace_mode,
+                  int interval_us) {
+  // We own trace_file now and are responsible for closing it. To cover error paths, use a
+  // unique_ptr with a custom deleter that closes the file on the way out (unless ownership has
+  // already been passed to a Trace instance).
+  auto deleter = [](File* file) {
+    if (file != nullptr) {
+      file->MarkUnchecked();  // Don't deal with flushing requirements.
+      int result ATTRIBUTE_UNUSED = file->Close();
+      delete file;
+    }
+  };
+  std::unique_ptr<File, decltype(deleter)> trace_file(trace_file_in.release(), deleter);
+  if (trace_file != nullptr) {
+    trace_file->DisableAutoClose();
+  }
+
   Thread* self = Thread::Current();
   {
     MutexLock mu(self, *Locks::trace_lock_);
@@ -338,23 +404,6 @@
     return;
   }
 
-  // Open trace file if not going directly to ddms.
-  std::unique_ptr<File> trace_file;
-  if (output_mode != TraceOutputMode::kDDMS) {
-    if (trace_fd < 0) {
-      trace_file.reset(OS::CreateEmptyFileWriteOnly(trace_filename));
-    } else {
-      trace_file.reset(new File(trace_fd, "tracefile"));
-      trace_file->DisableAutoClose();
-    }
-    if (trace_file.get() == nullptr) {
-      PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'";
-      ScopedObjectAccess soa(self);
-      ThrowRuntimeException("Unable to open trace file '%s'", trace_filename);
-      return;
-    }
-  }
-
   Runtime* runtime = Runtime::Current();
 
   // Enable count of allocs if specified in the flags.
@@ -372,8 +421,7 @@
       LOG(ERROR) << "Trace already in progress, ignoring this request";
     } else {
       enable_stats = (flags & kTraceCountAllocs) != 0;
-      the_trace_ = new Trace(trace_file.release(), trace_filename, buffer_size, flags, output_mode,
-                             trace_mode);
+      the_trace_ = new Trace(trace_file.release(), buffer_size, flags, output_mode, trace_mode);
       if (trace_mode == TraceMode::kSampling) {
         CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
                                             reinterpret_cast<void*>(interval_us)),
@@ -595,8 +643,11 @@
 
 static constexpr size_t kMinBufSize = 18U;  // Trace header is up to 18B.
 
-Trace::Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags,
-             TraceOutputMode output_mode, TraceMode trace_mode)
+Trace::Trace(File* trace_file,
+             size_t buffer_size,
+             int flags,
+             TraceOutputMode output_mode,
+             TraceMode trace_mode)
     : trace_file_(trace_file),
       buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()),
       flags_(flags), trace_output_mode_(output_mode), trace_mode_(trace_mode),
@@ -605,6 +656,8 @@
       start_time_(MicroTime()), clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0),
       overflow_(false), interval_us_(0), streaming_lock_(nullptr),
       unique_methods_lock_(new Mutex("unique methods lock", kTracingUniqueMethodsLock)) {
+  CHECK(trace_file != nullptr || output_mode == TraceOutputMode::kDDMS);
+
   uint16_t trace_version = GetTraceVersion(clock_source_);
   if (output_mode == TraceOutputMode::kStreaming) {
     trace_version |= 0xF0U;
@@ -622,10 +675,9 @@
   static_assert(18 <= kMinBufSize, "Minimum buffer size not large enough for trace header");
 
   // Update current offset.
-  cur_offset_.StoreRelaxed(kTraceHeaderLength);
+  cur_offset_.store(kTraceHeaderLength, std::memory_order_relaxed);
 
   if (output_mode == TraceOutputMode::kStreaming) {
-    streaming_file_name_ = trace_name;
     streaming_lock_ = new Mutex("tracing lock", LockLevel::kTracingStreamingLock);
     seen_threads_.reset(new ThreadIDBitSet());
   }
@@ -665,7 +717,7 @@
     // Clean up.
     STLDeleteValues(&seen_methods_);
   } else {
-    final_offset = cur_offset_.LoadRelaxed();
+    final_offset = cur_offset_.load(std::memory_order_relaxed);
     GetVisitedMethods(final_offset, &visited_methods);
   }
 
@@ -892,7 +944,7 @@
 }
 
 void Trace::WriteToBuf(const uint8_t* src, size_t src_size) {
-  int32_t old_offset = cur_offset_.LoadRelaxed();
+  int32_t old_offset = cur_offset_.load(std::memory_order_relaxed);
   int32_t new_offset = old_offset + static_cast<int32_t>(src_size);
   if (dchecked_integral_cast<size_t>(new_offset) > buffer_size_) {
     // Flush buffer.
@@ -905,24 +957,24 @@
       if (!trace_file_->WriteFully(src, src_size)) {
         PLOG(WARNING) << "Failed to stream a tracing event.";
       }
-      cur_offset_.StoreRelease(0);  // Buffer is empty now.
+      cur_offset_.store(0, std::memory_order_release);  // Buffer is empty now.
       return;
     }
 
     old_offset = 0;
     new_offset = static_cast<int32_t>(src_size);
   }
-  cur_offset_.StoreRelease(new_offset);
+  cur_offset_.store(new_offset, std::memory_order_release);
   // Fill in data.
   memcpy(buf_.get() + old_offset, src, src_size);
 }
 
 void Trace::FlushBuf() {
-  int32_t offset = cur_offset_.LoadRelaxed();
+  int32_t offset = cur_offset_.load(std::memory_order_relaxed);
   if (!trace_file_->WriteFully(buf_.get(), offset)) {
     PLOG(WARNING) << "Failed to flush the remaining data in streaming mode.";
   }
-  cur_offset_.StoreRelease(0);
+  cur_offset_.store(0, std::memory_order_release);
 }
 
 void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
@@ -938,7 +990,7 @@
   // We do a busy loop here trying to acquire the next offset.
   if (trace_output_mode_ != TraceOutputMode::kStreaming) {
     do {
-      old_offset = cur_offset_.LoadRelaxed();
+      old_offset = cur_offset_.load(std::memory_order_relaxed);
       new_offset = old_offset + GetRecordSize(clock_source_);
       if (static_cast<size_t>(new_offset) > buffer_size_) {
         overflow_ = true;
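
The scoped-close pattern that the new Trace::Start uses generalizes to any C-style handle. A minimal sketch with <cstdio>, where release() is what hands the file off to a new owner and disarms the deleter:

#include <cstdio>
#include <memory>

using FilePtr = std::unique_ptr<std::FILE, int (*)(std::FILE*)>;

FilePtr OpenTraceFile(const char* name) {
  // The deleter runs on every early return, so error paths cannot leak the file.
  return FilePtr(std::fopen(name, "wb"), &std::fclose);
}

// Transferring ownership disarms the deleter:
//   std::FILE* raw = file.release();  // The new owner must now fclose(raw).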
diff --git a/runtime/trace.h b/runtime/trace.h
index 86b8d00..7171f75 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -33,6 +33,10 @@
 #include "globals.h"
 #include "instrumentation.h"
 
+namespace unix_file {
+class FdFile;
+}  // namespace unix_file
+
 namespace art {
 
 class ArtField;
@@ -115,10 +119,37 @@
 
   static void SetDefaultClockSource(TraceClockSource clock_source);
 
-  static void Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags,
-                    TraceOutputMode output_mode, TraceMode trace_mode, int interval_us)
+  static void Start(const char* trace_filename,
+                    size_t buffer_size,
+                    int flags,
+                    TraceOutputMode output_mode,
+                    TraceMode trace_mode,
+                    int interval_us)
       REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
                !Locks::trace_lock_);
+  static void Start(int trace_fd,
+                    size_t buffer_size,
+                    int flags,
+                    TraceOutputMode output_mode,
+                    TraceMode trace_mode,
+                    int interval_us)
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+               !Locks::trace_lock_);
+  static void Start(std::unique_ptr<unix_file::FdFile>&& file,
+                    size_t buffer_size,
+                    int flags,
+                    TraceOutputMode output_mode,
+                    TraceMode trace_mode,
+                    int interval_us)
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+               !Locks::trace_lock_);
+  static void StartDDMS(size_t buffer_size,
+                        int flags,
+                        TraceMode trace_mode,
+                        int interval_us)
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+               !Locks::trace_lock_);
+
   static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_);
   static void Resume() REQUIRES(!Locks::trace_lock_);
 
@@ -212,8 +243,11 @@
   static bool IsTracingEnabled() REQUIRES(!Locks::trace_lock_);
 
  private:
-  Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags,
-        TraceOutputMode output_mode, TraceMode trace_mode);
+  Trace(File* trace_file,
+        size_t buffer_size,
+        int flags,
+        TraceOutputMode output_mode,
+        TraceMode trace_mode);
 
   // The sampling interval in microseconds is passed as an argument.
   static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_);
@@ -318,7 +352,6 @@
   int interval_us_;
 
   // Streaming mode data.
-  std::string streaming_file_name_;
   Mutex* streaming_lock_;
   std::map<const DexFile*, DexIndexBitSet*> seen_methods_;
   std::unique_ptr<ThreadIDBitSet> seen_threads_;
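
With the split above, callers pick an overload instead of passing placeholder filename/fd arguments. A hedged usage sketch, assuming trace.h is included; kFile and kMethodTracing are assumed enumerator names not shown in this change, and the sizes and intervals are arbitrary:

constexpr size_t kBufSize = 8 * 1024 * 1024;

// File-name based tracing: the overload opens (and on error closes) the file itself.
Trace::Start("/data/local/tmp/trace.out", kBufSize, /*flags=*/0,
             Trace::TraceOutputMode::kFile, Trace::TraceMode::kMethodTracing,
             /*interval_us=*/0);

// DDMS tracing no longer needs placeholder file arguments at all.
Trace::StartDDMS(kBufSize, /*flags=*/0, Trace::TraceMode::kSampling,
                 /*interval_us=*/10000);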
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b07001e..cee7176 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2765,47 +2765,61 @@
       break;
 
     case Instruction::IGET_BOOLEAN:
+    case Instruction::IGET_BOOLEAN_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Boolean(), true, false);
       break;
     case Instruction::IGET_BYTE:
+    case Instruction::IGET_BYTE_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Byte(), true, false);
       break;
     case Instruction::IGET_CHAR:
+    case Instruction::IGET_CHAR_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Char(), true, false);
       break;
     case Instruction::IGET_SHORT:
+    case Instruction::IGET_SHORT_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Short(), true, false);
       break;
     case Instruction::IGET:
+    case Instruction::IGET_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true, false);
       break;
     case Instruction::IGET_WIDE:
+    case Instruction::IGET_WIDE_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.LongLo(), true, false);
       break;
     case Instruction::IGET_OBJECT:
+    case Instruction::IGET_OBJECT_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false,
                                                     false);
       break;
 
     case Instruction::IPUT_BOOLEAN:
+    case Instruction::IPUT_BOOLEAN_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Boolean(), true, false);
       break;
     case Instruction::IPUT_BYTE:
+    case Instruction::IPUT_BYTE_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Byte(), true, false);
       break;
     case Instruction::IPUT_CHAR:
+    case Instruction::IPUT_CHAR_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Char(), true, false);
       break;
     case Instruction::IPUT_SHORT:
+    case Instruction::IPUT_SHORT_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Short(), true, false);
       break;
     case Instruction::IPUT:
+    case Instruction::IPUT_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true, false);
       break;
     case Instruction::IPUT_WIDE:
+    case Instruction::IPUT_WIDE_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.LongLo(), true, false);
       break;
     case Instruction::IPUT_OBJECT:
+    case Instruction::IPUT_OBJECT_QUICK:
       VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.JavaLangObject(false), false,
                                                     false);
       break;
@@ -2859,9 +2873,12 @@
     case Instruction::INVOKE_VIRTUAL:
     case Instruction::INVOKE_VIRTUAL_RANGE:
     case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_SUPER_RANGE: {
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_VIRTUAL_QUICK:
+    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
       bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE ||
-                       inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
+                       inst->Opcode() == Instruction::INVOKE_SUPER_RANGE ||
+                       inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
       bool is_super = (inst->Opcode() == Instruction::INVOKE_SUPER ||
                        inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
       MethodType type = is_super ? METHOD_SUPER : METHOD_VIRTUAL;
@@ -2881,7 +2898,7 @@
         }
       }
       if (return_type == nullptr) {
-        uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+        uint32_t method_idx = GetMethodIdxOfInvoke(inst);
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         dex::TypeIndex return_type_idx =
             dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -3368,67 +3385,6 @@
         }
       }
       break;
-    // Note: the following instructions encode offsets derived from class linking.
-    // As such they use Class*/Field*/Executable* as these offsets only have
-    // meaning if the class linking and resolution were successful.
-    case Instruction::IGET_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true);
-      break;
-    case Instruction::IGET_WIDE_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.LongLo(), true);
-      break;
-    case Instruction::IGET_OBJECT_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false);
-      break;
-    case Instruction::IGET_BOOLEAN_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Boolean(), true);
-      break;
-    case Instruction::IGET_BYTE_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Byte(), true);
-      break;
-    case Instruction::IGET_CHAR_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Char(), true);
-      break;
-    case Instruction::IGET_SHORT_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Short(), true);
-      break;
-    case Instruction::IPUT_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true);
-      break;
-    case Instruction::IPUT_BOOLEAN_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Boolean(), true);
-      break;
-    case Instruction::IPUT_BYTE_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Byte(), true);
-      break;
-    case Instruction::IPUT_CHAR_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Char(), true);
-      break;
-    case Instruction::IPUT_SHORT_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Short(), true);
-      break;
-    case Instruction::IPUT_WIDE_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.LongLo(), true);
-      break;
-    case Instruction::IPUT_OBJECT_QUICK:
-      VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.JavaLangObject(false), false);
-      break;
-    case Instruction::INVOKE_VIRTUAL_QUICK:
-    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
-      bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
-      ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
-      if (called_method != nullptr) {
-        const char* descriptor = called_method->GetReturnTypeDescriptor();
-        const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
-        if (!return_type.IsLowHalf()) {
-          work_line_->SetResultRegisterType(this, return_type);
-        } else {
-          work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
-        }
-        just_set_result = true;
-      }
-      break;
-    }
 
     /* These should never appear during verification. */
     case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
@@ -3995,7 +3951,7 @@
         }
       } else {
         // Check whether the name of the called method is "<init>"
-        const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+        const uint32_t method_idx = GetMethodIdxOfInvoke(inst);
         if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "<init>") != 0) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
           return nullptr;
@@ -4017,7 +3973,7 @@
         res_method_class = &FromClass(klass->GetDescriptor(&temp), klass,
                                       klass->CannotBeAssignedFromOtherTypes());
       } else {
-        const uint32_t method_idx = inst->VRegB();
+        const uint32_t method_idx = GetMethodIdxOfInvoke(inst);
         const dex::TypeIndex class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
         res_method_class = &reg_types_.FromDescriptor(
             GetClassLoader(),
@@ -4108,7 +4064,7 @@
   // As the method may not have been resolved, make this static check against what we expect.
   // The main reason for this code block is to fail hard when we find an illegal use, e.g.,
   // wrong number of arguments or wrong primitive types, even if the method could not be resolved.
-  const uint32_t method_idx = inst->VRegB();
+  const uint32_t method_idx = GetMethodIdxOfInvoke(inst);
   DexFileParameterIterator it(*dex_file_,
                               dex_file_->GetProtoId(dex_file_->GetMethodId(method_idx).proto_idx_));
   VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, nullptr);
@@ -4181,7 +4137,7 @@
     const Instruction* inst, MethodType method_type, bool is_range) {
   // Resolve the method. This could be an abstract or concrete method depending on what sort of call
   // we're making.
-  const uint32_t method_idx = inst->VRegB();
+  const uint32_t method_idx = GetMethodIdxOfInvoke(inst);
   ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
   if (res_method == nullptr) {  // error or class is unresolved
     // Check what we can statically.
@@ -4334,122 +4290,34 @@
   return true;
 }
 
-ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst, bool is_range) {
-  if (is_range) {
-    DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
-  } else {
-    DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_QUICK);
+uint16_t MethodVerifier::GetMethodIdxOfInvoke(const Instruction* inst) {
+  switch (inst->Opcode()) {
+    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+    case Instruction::INVOKE_VIRTUAL_QUICK: {
+      DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
+          << dex_file_->PrettyMethod(dex_method_idx_, true) << "@" << work_insn_idx_;
+      DCHECK(method_being_verified_ != nullptr);
+      uint16_t method_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
+      CHECK_NE(method_idx, DexFile::kDexNoIndex16);
+      return method_idx;
+    }
+    default: {
+      return inst->VRegB();
+    }
   }
-
-  DCHECK(method_being_verified_ != nullptr);
-  uint16_t method_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
-  CHECK_NE(method_idx, DexFile::kDexNoIndex16);
-  return ResolveMethodAndCheckAccess(method_idx, METHOD_VIRTUAL);
 }
 
-ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) {
-  DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
-      << dex_file_->PrettyMethod(dex_method_idx_, true) << "@" << work_insn_idx_;
-
-  ArtMethod* res_method = GetQuickInvokedMethod(inst, is_range);
-  if (res_method == nullptr) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
-    return nullptr;
-  }
-  if (FailOrAbort(!res_method->IsDirect(),
-                  "Quick-invoked method is direct at ",
-                  work_insn_idx_)) {
-    return nullptr;
-  }
-  if (FailOrAbort(!res_method->IsStatic(),
-                  "Quick-invoked method is static at ",
-                  work_insn_idx_)) {
-    return nullptr;
-  }
-
-  // We use vAA as our expected arg count, rather than res_method->insSize, because we need to
-  // match the call to the signature. Also, we might be calling through an abstract method
-  // definition (which doesn't have register count values).
-  const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst);
-  if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
-    return nullptr;
-  }
-  const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
-  /* caught by static verifier */
-  DCHECK(is_range || expected_args <= 5);
-  if (expected_args > code_item_accessor_.OutsSize()) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args
-        << ") exceeds outsSize (" << code_item_accessor_.OutsSize() << ")";
-    return nullptr;
-  }
-
-  /*
-   * Check the "this" argument, which must be an instance of the class that declared the method.
-   * For an interface class, we don't do the full interface merge (see JoinClass), so we can't do a
-   * rigorous check here (which is okay since we have to do it at runtime).
-   */
-  // Note: given an uninitialized type, this should always fail. Constructors aren't virtual.
-  if (actual_arg_type.IsUninitializedTypes() && !res_method->IsConstructor()) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
-    return nullptr;
-  }
-  if (!actual_arg_type.IsZeroOrNull()) {
-    mirror::Class* klass = res_method->GetDeclaringClass();
-    std::string temp;
-    const RegType& res_method_class =
-        FromClass(klass->GetDescriptor(&temp), klass, klass->CannotBeAssignedFromOtherTypes());
-    if (!res_method_class.IsAssignableFrom(actual_arg_type, this)) {
-      Fail(actual_arg_type.IsUninitializedTypes()    // Just overcautious - should have never
-               ? VERIFY_ERROR_BAD_CLASS_HARD         // quickened this.
-               : actual_arg_type.IsUnresolvedTypes()
-                     ? VERIFY_ERROR_NO_CLASS
-                     : VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type
-          << "' not instance of '" << res_method_class << "'";
-      return nullptr;
-    }
-  }
-  /*
-   * Process the target method's signature. This signature may or may not
-   * have been verified, so we can't assume it's properly formed.
-   */
-  const DexFile::TypeList* params = res_method->GetParameterTypeList();
-  size_t params_size = params == nullptr ? 0 : params->Size();
-  uint32_t arg[5];
-  if (!is_range) {
-    inst->GetVarArgs(arg);
-  }
-  size_t actual_args = 1;
-  for (size_t param_index = 0; param_index < params_size; param_index++) {
-    if (actual_args >= expected_args) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invalid call to '"
-                                        << res_method->PrettyMethod()
-                                        << "'. Expected " << expected_args
-                                         << " arguments, processing argument " << actual_args
-                                        << " (where longs/doubles count twice).";
-      return nullptr;
-    }
-    const char* descriptor =
-        res_method->GetTypeDescriptorFromTypeIdx(params->GetTypeItem(param_index).type_idx_);
-    if (descriptor == nullptr) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of "
-                                        << res_method->PrettyMethod()
-                                        << " missing signature component";
-      return nullptr;
-    }
-    const RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
-    uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
-    if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) {
-      return res_method;
-    }
-    actual_args = reg_type.IsLongOrDoubleTypes() ? actual_args + 2 : actual_args + 1;
-  }
-  if (actual_args != expected_args) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of "
-                                      << res_method->PrettyMethod() << " expected "
-                                      << expected_args << " arguments, found " << actual_args;
-    return nullptr;
+uint16_t MethodVerifier::GetFieldIdxOfFieldAccess(const Instruction* inst, bool is_static) {
+  if (is_static) {
+    return inst->VRegB_21c();
+  } else if (inst->IsQuickened()) {
+    DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
+    DCHECK(method_being_verified_ != nullptr);
+    uint16_t field_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
+    CHECK_NE(field_idx, DexFile::kDexNoIndex16);
+    return field_idx;
   } else {
-    return res_method;
+    return inst->VRegC_22c();
   }
 }
 
@@ -4819,7 +4687,7 @@
 template <MethodVerifier::FieldAccessType kAccType>
 void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
                                          bool is_primitive, bool is_static) {
-  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+  uint32_t field_idx = GetFieldIdxOfFieldAccess(inst, is_static);
   ArtField* field;
   if (is_static) {
     field = GetStaticField(field_idx);
@@ -4972,151 +4840,6 @@
   }
 }
 
-ArtField* MethodVerifier::GetQuickAccessedField() {
-  DCHECK(method_being_verified_ != nullptr);
-  uint16_t field_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
-  CHECK_NE(field_idx, DexFile::kDexNoIndex16);
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  ArtField* field = class_linker->ResolveFieldJLS(field_idx, dex_cache_, class_loader_);
-  if (field == nullptr) {
-    DCHECK(self_->IsExceptionPending());
-    self_->ClearException();
-  }
-  return field;
-}
-
-template <MethodVerifier::FieldAccessType kAccType>
-void MethodVerifier::VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type,
-                                            bool is_primitive) {
-  DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
-
-  ArtField* field = GetQuickAccessedField();
-  if (field == nullptr) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
-    return;
-  }
-
-  // For an IPUT_QUICK, we now test for final flag of the field.
-  if (kAccType == FieldAccessType::kAccPut) {
-    if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
-      Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << field->PrettyField()
-                                      << " from other class " << GetDeclaringClass();
-      return;
-    }
-  }
-
-  // Get the field type.
-  const RegType* field_type;
-  {
-    ObjPtr<mirror::Class> field_type_class =
-        can_load_classes_ ? field->ResolveType() : field->LookupResolvedType();
-
-    if (field_type_class != nullptr) {
-      field_type = &FromClass(field->GetTypeDescriptor(),
-                              field_type_class.Ptr(),
-                              field_type_class->CannotBeAssignedFromOtherTypes());
-    } else {
-      Thread* self = Thread::Current();
-      DCHECK(!can_load_classes_ || self->IsExceptionPending());
-      self->ClearException();
-      field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
-                                              field->GetTypeDescriptor(),
-                                              false);
-    }
-    if (field_type == nullptr) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
-      return;
-    }
-  }
-
-  const uint32_t vregA = inst->VRegA_22c();
-  static_assert(kAccType == FieldAccessType::kAccPut || kAccType == FieldAccessType::kAccGet,
-                "Unexpected third access type");
-  if (kAccType == FieldAccessType::kAccPut) {
-    if (is_primitive) {
-      // Primitive field assignability rules are weaker than regular assignability rules
-      bool instruction_compatible;
-      bool value_compatible;
-      const RegType& value_type = work_line_->GetRegisterType(this, vregA);
-      if (field_type->IsIntegralTypes()) {
-        instruction_compatible = insn_type.IsIntegralTypes();
-        value_compatible = value_type.IsIntegralTypes();
-      } else if (field_type->IsFloat()) {
-        instruction_compatible = insn_type.IsInteger();  // no [is]put-float, so expect [is]put-int
-        value_compatible = value_type.IsFloatTypes();
-      } else if (field_type->IsLong()) {
-        instruction_compatible = insn_type.IsLong();
-        value_compatible = value_type.IsLongTypes();
-      } else if (field_type->IsDouble()) {
-        instruction_compatible = insn_type.IsLong();  // no [is]put-double, so expect [is]put-long
-        value_compatible = value_type.IsDoubleTypes();
-      } else {
-        instruction_compatible = false;  // reference field with primitive store
-        value_compatible = false;  // unused
-      }
-      if (!instruction_compatible) {
-        // This is a global failure rather than a class change failure as the instructions and
-        // the descriptors for the type should have been consistent within the same file at
-        // compile time
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << ArtField::PrettyField(field)
-                                          << " to be of type '" << insn_type
-                                          << "' but found type '" << *field_type
-                                          << "' in put";
-        return;
-      }
-      if (!value_compatible) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
-            << " of type " << value_type
-            << " but expected " << *field_type
-            << " for store to " << ArtField::PrettyField(field) << " in put";
-        return;
-      }
-    } else {
-      if (!insn_type.IsAssignableFrom(*field_type, this)) {
-        Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << ArtField::PrettyField(field)
-                                          << " to be compatible with type '" << insn_type
-                                          << "' but found type '" << *field_type
-                                          << "' in put-object";
-        return;
-      }
-      work_line_->VerifyRegisterType(this, vregA, *field_type);
-    }
-  } else if (kAccType == FieldAccessType::kAccGet) {
-    if (is_primitive) {
-      if (field_type->Equals(insn_type) ||
-          (field_type->IsFloat() && insn_type.IsIntegralTypes()) ||
-          (field_type->IsDouble() && insn_type.IsLongTypes())) {
-        // expected that read is of the correct primitive type or that int reads are reading
-        // floats or long reads are reading doubles
-      } else {
-        // This is a global failure rather than a class change failure as the instructions and
-        // the descriptors for the type should have been consistent within the same file at
-        // compile time
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << ArtField::PrettyField(field)
-                                          << " to be of type '" << insn_type
-                                          << "' but found type '" << *field_type << "' in Get";
-        return;
-      }
-    } else {
-      if (!insn_type.IsAssignableFrom(*field_type, this)) {
-        Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << ArtField::PrettyField(field)
-                                          << " to be compatible with type '" << insn_type
-                                          << "' but found type '" << *field_type
-                                          << "' in get-object";
-        work_line_->SetRegisterType<LockOp::kClear>(this, vregA, reg_types_.Conflict());
-        return;
-      }
-    }
-    if (!field_type->IsLowHalf()) {
-      work_line_->SetRegisterType<LockOp::kClear>(this, vregA, *field_type);
-    } else {
-      work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
-    }
-  } else {
-    LOG(FATAL) << "Unexpected case.";
-  }
-}
-
 bool MethodVerifier::CheckNotMoveException(const uint16_t* insns, int insn_idx) {
   if ((insns[insn_idx] & 0xff) == Instruction::MOVE_EXCEPTION) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid use of move-exception";
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 9237a8b..531d3da 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -209,12 +209,12 @@
 
   const RegType& ResolveCheckedClass(dex::TypeIndex class_idx)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  // Returns the method of a quick invoke or null if it cannot be found.
-  ArtMethod* GetQuickInvokedMethod(const Instruction* inst, bool is_range)
+  // Returns the method index of an invoke instruction.
+  uint16_t GetMethodIdxOfInvoke(const Instruction* inst)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  // Returns the access field of a quick field access (iget/iput-quick) or null
-  // if it cannot be found.
-  ArtField* GetQuickAccessedField() REQUIRES_SHARED(Locks::mutator_lock_);
+  // Returns the field index of a field access instruction.
+  uint16_t GetFieldIdxOfFieldAccess(const Instruction* inst, bool is_static)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   uint32_t GetEncounteredFailureTypes() {
     return encountered_failure_types_;
@@ -575,10 +575,6 @@
                            bool is_primitive, bool is_static)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template <FieldAccessType kAccType>
-  void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   enum class CheckAccess {  // private.
     kYes,
     kNo,
@@ -642,9 +638,6 @@
                                                       ArtMethod* res_method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
-  REQUIRES_SHARED(Locks::mutator_lock_);
-
   /*
    * Verify the arguments present for a call site. Returns "true" if all is well, "false" otherwise.
    */
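
GetMethodIdxOfInvoke above centralizes the index lookup: quickened invokes recover the index from the quickening info, while everything else reads vB from the instruction. A standalone analogue of that dispatch, with stand-in types:

#include <cstdint>

enum class Opcode { kInvokeVirtual, kInvokeVirtualQuick };  // Stand-ins.

struct Instruction {
  Opcode opcode;
  uint16_t vreg_b;  // Method index for non-quickened invokes.
};

// quick_index stands in for GetIndexFromQuickening(work_insn_idx_).
uint16_t MethodIdxOfInvoke(const Instruction& inst, uint16_t quick_index) {
  switch (inst.opcode) {
    case Opcode::kInvokeVirtualQuick:
      return quick_index;  // The real code CHECKs this is not kDexNoIndex16.
    default:
      return inst.vreg_b;
  }
}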
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 67ea64b..bf36ccf 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -24,6 +24,7 @@
 #include <android-base/stringprintf.h>
 
 #include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "hidden_api.h"
 #include "jni_internal.h"
 #include "mirror/class.h"
 #include "mirror/throwable.h"
@@ -287,17 +288,17 @@
  public:
   explicit ScopedHiddenApiExemption(Runtime* runtime)
       : runtime_(runtime),
-        initially_enabled_(runtime_->AreHiddenApiChecksEnabled()) {
-    runtime_->SetHiddenApiChecksEnabled(false);
+        initial_policy_(runtime_->GetHiddenApiEnforcementPolicy()) {
+    runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kNoChecks);
   }
 
   ~ScopedHiddenApiExemption() {
-    runtime_->SetHiddenApiChecksEnabled(initially_enabled_);
+    runtime_->SetHiddenApiEnforcementPolicy(initial_policy_);
   }
 
  private:
   Runtime* runtime_;
-  const bool initially_enabled_;
+  const hiddenapi::EnforcementPolicy initial_policy_;
   DISALLOW_COPY_AND_ASSIGN(ScopedHiddenApiExemption);
 };
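
ScopedHiddenApiExemption is an instance of a general save/override/restore RAII shape. A generic sketch, with illustrative names:

#include <utility>

template <typename T>
class ScopedOverride {
 public:
  ScopedOverride(T* target, T temporary) : target_(target), saved_(*target) {
    *target_ = std::move(temporary);
  }
  ~ScopedOverride() { *target_ = std::move(saved_); }  // Restore on scope exit.

  ScopedOverride(const ScopedOverride&) = delete;
  ScopedOverride& operator=(const ScopedOverride&) = delete;

 private:
  T* target_;
  T saved_;  // Value to restore.
};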
 
diff --git a/test/121-modifiers/classes/A$B.class b/test/121-modifiers/classes/A$B.class
deleted file mode 100644
index bd7ebfe..0000000
--- a/test/121-modifiers/classes/A$B.class
+++ /dev/null
Binary files differ
diff --git a/test/121-modifiers/classes/A$C.class b/test/121-modifiers/classes/A$C.class
deleted file mode 100644
index 3ae872e..0000000
--- a/test/121-modifiers/classes/A$C.class
+++ /dev/null
Binary files differ
diff --git a/test/121-modifiers/classes/A.class b/test/121-modifiers/classes/A.class
deleted file mode 100644
index d89d029..0000000
--- a/test/121-modifiers/classes/A.class
+++ /dev/null
Binary files differ
diff --git a/test/121-modifiers/classes/Inf.class b/test/121-modifiers/classes/Inf.class
deleted file mode 100644
index e8dd680..0000000
--- a/test/121-modifiers/classes/Inf.class
+++ /dev/null
Binary files differ
diff --git a/test/121-modifiers/classes/Main.class b/test/121-modifiers/classes/Main.class
deleted file mode 100644
index e044074..0000000
--- a/test/121-modifiers/classes/Main.class
+++ /dev/null
Binary files differ
diff --git a/test/121-modifiers/classes/NonInf.class b/test/121-modifiers/classes/NonInf.class
deleted file mode 100644
index 0f1e826..0000000
--- a/test/121-modifiers/classes/NonInf.class
+++ /dev/null
Binary files differ
diff --git a/test/121-modifiers/info.txt b/test/121-modifiers/info.txt
index 335df53..7dba113 100644
--- a/test/121-modifiers/info.txt
+++ b/test/121-modifiers/info.txt
@@ -10,9 +10,9 @@
 javac Inf.java NonInf.java Main.java
 javac -cp asm.jar:asm-tree.jar:. Asm.java
 java -cp asm.jar:asm-tree.jar:. Asm
-mv Inf.out classes/Inf.class
-mv NonInf.out classes/NonInf.class
-mv Main.class A.class A\$B.class A\$C.class classes/
+mv Inf.out classes_tmp/Inf.class
+mv NonInf.out classes_tmp/NonInf.class
+mv Main.class A.class A\$B.class A\$C.class classes_tmp/
 dx --debug --dex --output=classes.dex classes
 baksmali disassemble classes.dex
 mv out/*.smali smali/
diff --git a/test/161-final-abstract-class/smali/Main.smali b/test/161-final-abstract-class/smali/Main.smali
new file mode 100644
index 0000000..588854c
--- /dev/null
+++ b/test/161-final-abstract-class/smali/Main.smali
@@ -0,0 +1,214 @@
+# Created with baksmali.
+
+# Java file for reference.
+
+# import java.lang.reflect.InvocationTargetException;
+# import java.lang.reflect.Method;
+#
+# public class Main {
+#     public static void main(String[] args) {
+#         try {
+#             // Make sure that the abstract final class is marked as erroneous.
+#             Class.forName("AbstractFinal");
+#             System.out.println("UNREACHABLE!");
+#         } catch (VerifyError expected) {
+#         } catch (Throwable t) {
+#             t.printStackTrace(System.out);
+#         }
+#         try {
+#             // Verification of TestClass.test() used to crash when processing
+#             // the final abstract (erroneous) class.
+#             Class<?> tc = Class.forName("TestClass");
+#             Method test = tc.getDeclaredMethod("test");
+#             test.invoke(null);
+#             System.out.println("UNREACHABLE!");
+#         } catch (InvocationTargetException ite) {
+#             if (ite.getCause() instanceof InstantiationError) {
+#                 System.out.println(
+#                     ite.getCause().getClass().getName() + ": " + ite.getCause().getMessage());
+#             } else {
+#                 ite.printStackTrace(System.out);
+#             }
+#         } catch (Throwable t) {
+#             t.printStackTrace(System.out);
+#         }
+#     }
+# }
+
+.class public LMain;
+.super Ljava/lang/Object;
+.source "Main.java"
+
+
+# direct methods
+.method public constructor <init>()V
+    .registers 1
+
+    .line 20
+    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+
+    return-void
+.end method
+
+.method public static main([Ljava/lang/String;)V
+    .registers 4
+
+    .line 24
+    :try_start_0
+    const-string p0, "AbstractFinal"
+
+    invoke-static {p0}, Ljava/lang/Class;->forName(Ljava/lang/String;)Ljava/lang/Class;
+
+    .line 25
+    sget-object p0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+    const-string v0, "UNREACHABLE!"
+
+    invoke-virtual {p0, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+    :try_end_c
+    .catch Ljava/lang/VerifyError; {:try_start_0 .. :try_end_c} :catch_14
+    .catch Ljava/lang/Throwable; {:try_start_0 .. :try_end_c} :catch_d
+
+    goto :goto_15
+
+    .line 27
+    :catch_d
+    move-exception p0
+
+    .line 28
+    sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+    invoke-virtual {p0, v0}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V
+
+    goto :goto_16
+
+    .line 26
+    :catch_14
+    move-exception p0
+
+    .line 29
+    :goto_15
+    nop
+
+    .line 33
+    :goto_16
+    :try_start_16
+    const-string p0, "TestClass"
+
+    invoke-static {p0}, Ljava/lang/Class;->forName(Ljava/lang/String;)Ljava/lang/Class;
+
+    move-result-object p0
+
+    .line 34
+    const-string v0, "test"
+
+    const/4 v1, 0x0
+
+    new-array v2, v1, [Ljava/lang/Class;
+
+    invoke-virtual {p0, v0, v2}, Ljava/lang/Class;->getDeclaredMethod(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;
+
+    move-result-object p0
+
+    .line 35
+    const/4 v0, 0x0
+
+    new-array v1, v1, [Ljava/lang/Object;
+
+    invoke-virtual {p0, v0, v1}, Ljava/lang/reflect/Method;->invoke(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;
+
+    .line 36
+    sget-object p0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+    const-string v0, "UNREACHABLE!"
+
+    invoke-virtual {p0, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+    :try_end_32
+    .catch Ljava/lang/reflect/InvocationTargetException; {:try_start_16 .. :try_end_32} :catch_3a
+    .catch Ljava/lang/Throwable; {:try_start_16 .. :try_end_32} :catch_33
+
+    goto :goto_76
+
+    .line 44
+    :catch_33
+    move-exception p0
+
+    .line 45
+    sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+    invoke-virtual {p0, v0}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V
+
+    goto :goto_77
+
+    .line 37
+    :catch_3a
+    move-exception p0
+
+    .line 38
+    invoke-virtual {p0}, Ljava/lang/reflect/InvocationTargetException;->getCause()Ljava/lang/Throwable;
+
+    move-result-object v0
+
+    instance-of v0, v0, Ljava/lang/InstantiationError;
+
+    if-eqz v0, :cond_71
+
+    .line 39
+    sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+    new-instance v1, Ljava/lang/StringBuilder;
+
+    invoke-direct {v1}, Ljava/lang/StringBuilder;-><init>()V
+
+    .line 40
+    invoke-virtual {p0}, Ljava/lang/reflect/InvocationTargetException;->getCause()Ljava/lang/Throwable;
+
+    move-result-object v2
+
+    invoke-virtual {v2}, Ljava/lang/Object;->getClass()Ljava/lang/Class;
+
+    move-result-object v2
+
+    invoke-virtual {v2}, Ljava/lang/Class;->getName()Ljava/lang/String;
+
+    move-result-object v2
+
+    invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+
+    const-string v2, ": "
+
+    invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+
+    invoke-virtual {p0}, Ljava/lang/reflect/InvocationTargetException;->getCause()Ljava/lang/Throwable;
+
+    move-result-object p0
+
+    invoke-virtual {p0}, Ljava/lang/Throwable;->getMessage()Ljava/lang/String;
+
+    move-result-object p0
+
+    invoke-virtual {v1, p0}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+
+    invoke-virtual {v1}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+
+    move-result-object p0
+
+    .line 39
+    invoke-virtual {v0, p0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+
+    goto :goto_76
+
+    .line 42
+    :cond_71
+    sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+    invoke-virtual {p0, v0}, Ljava/lang/reflect/InvocationTargetException;->printStackTrace(Ljava/io/PrintStream;)V
+
+    .line 46
+    :goto_76
+    nop
+
+    .line 47
+    :goto_77
+    return-void
+.end method
diff --git a/test/161-final-abstract-class/src/Main.java b/test/161-final-abstract-class/src/Main.java
deleted file mode 100644
index 2452490..0000000
--- a/test/161-final-abstract-class/src/Main.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
-public class Main {
-    public static void main(String[] args) {
-        try {
-            // Make sure that the abstract final class is marked as erroneous.
-            Class.forName("AbstractFinal");
-            System.out.println("UNREACHABLE!");
-        } catch (VerifyError expected) {
-        } catch (Throwable t) {
-            t.printStackTrace(System.out);
-        }
-        try {
-            // Verification of TestClass.test() used to crash when processing
-            // the final abstract (erroneous) class.
-            Class<?> tc = Class.forName("TestClass");
-            Method test = tc.getDeclaredMethod("test");
-            test.invoke(null);
-            System.out.println("UNREACHABLE!");
-        } catch (InvocationTargetException ite) {
-            if (ite.getCause() instanceof InstantiationError) {
-                System.out.println(
-                    ite.getCause().getClass().getName() + ": " + ite.getCause().getMessage());
-            } else {
-                ite.printStackTrace(System.out);
-            }
-        } catch (Throwable t) {
-            t.printStackTrace(System.out);
-        }
-    }
-}
diff --git a/test/1929-exception-catch-exception/expected.txt b/test/1929-exception-catch-exception/expected.txt
index bc5608a..a82b732 100644
--- a/test/1929-exception-catch-exception/expected.txt
+++ b/test/1929-exception-catch-exception/expected.txt
@@ -1,11 +1,11 @@
 Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$DoNothingHandler"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$DoNothingHandler"
@@ -17,71 +17,71 @@
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
 		public static void art.Test1929.throwCatchBaseTestException() @ line = 140
 		public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Doing nothing!
 Caught art.Test1929$TestException: "throwCatchBaseTestException"
 Test "art.Test1929$DoThrowCatchBaseTestException": No error caught with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$DoNothingHandler"
-main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161
-		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197
-		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157
+		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203
+		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$DoNothingHandler"
-main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException
+main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.throwCatchTestException() @ line = 207
-		public void art.Test1929$DoThrowCatchTestException.run() @ line = 216
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.throwCatchTestException() @ line = 216
+		public void art.Test1929$DoThrowCatchTestException.run() @ line = 225
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Doing nothing!
 Caught art.Test1929$TestException: "throwCatchTestException"
 Test "art.Test1929$DoThrowCatchTestException": No error caught with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$DoNothingHandler"
-main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179
-		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222
-		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175
+		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234
+		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$DoNothingHandler"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Caught error art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$DoNothingHandler"
 Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$ThrowCatchBase"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$ThrowCatchBase"
@@ -93,73 +93,73 @@
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
 		public static void art.Test1929.throwCatchBaseTestException() @ line = 140
 		public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Throwing BaseTestException and catching it!
 Caught art.Test1929$BaseTestException: "ThrowBaseHandler during throw from public static void art.Test1929.throwCatchBaseTestException() @ line = 140"
 Caught art.Test1929$TestException: "throwCatchBaseTestException"
 Test "art.Test1929$DoThrowCatchBaseTestException": No error caught with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowCatchBase"
-main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161
-		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197
-		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157
+		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203
+		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$ThrowCatchBase"
-main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException
+main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.throwCatchTestException() @ line = 207
-		public void art.Test1929$DoThrowCatchTestException.run() @ line = 216
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.throwCatchTestException() @ line = 216
+		public void art.Test1929$DoThrowCatchTestException.run() @ line = 225
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Throwing BaseTestException and catching it!
-Caught art.Test1929$BaseTestException: "ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 207"
+Caught art.Test1929$BaseTestException: "ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 216"
 Caught art.Test1929$TestException: "throwCatchTestException"
 Test "art.Test1929$DoThrowCatchTestException": No error caught with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowCatchBase"
-main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179
-		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222
-		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175
+		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234
+		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$ThrowCatchBase"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Caught error art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$ThrowCatchBase"
 Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler"
@@ -171,69 +171,69 @@
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
 		public static void art.Test1929.throwCatchBaseTestException() @ line = 140
 		public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Throwing BaseTestException!
 Test "art.Test1929$DoThrowCatchBaseTestException": Caught error art.Test1929$BaseTestException:"ThrowBaseHandler during throw from public static void art.Test1929.throwCatchBaseTestException() @ line = 140" with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler"
-main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161
-		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197
-		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157
+		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203
+		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler"
-main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException
+main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.throwCatchTestException() @ line = 207
-		public void art.Test1929$DoThrowCatchTestException.run() @ line = 216
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.throwCatchTestException() @ line = 216
+		public void art.Test1929$DoThrowCatchTestException.run() @ line = 225
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Throwing BaseTestException!
-Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$BaseTestException:"ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 207" with handler "art.Test1929$ThrowBaseTestExceptionHandler"
+Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$BaseTestException:"ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 216" with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler"
-main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179
-		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222
-		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175
+		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234
+		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Caught error art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler"
 Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
@@ -245,58 +245,58 @@
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
 		public static void art.Test1929.throwCatchBaseTestException() @ line = 140
 		public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Throwing TestExceptionNoRethrow!
 Test "art.Test1929$DoThrowCatchBaseTestException": Caught error art.Test1929$TestExceptionNoRethrow:"ThrowTestExceptionNoRethrowHandler during throw from public static void art.Test1929.throwCatchBaseTestException() @ line = 140" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
-main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161
-		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197
-		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157
+		public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203
+		public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
-main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException
+main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.throwCatchTestException() @ line = 207
-		public void art.Test1929$DoThrowCatchTestException.run() @ line = 216
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929.throwCatchTestException() @ line = 216
+		public void art.Test1929$DoThrowCatchTestException.run() @ line = 225
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 	Throwing TestExceptionNoRethrow!
-Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$TestExceptionNoRethrow:"ThrowTestExceptionNoRethrowHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 207" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
+Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$TestExceptionNoRethrow:"ThrowTestExceptionNoRethrowHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 216" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
-main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
+main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179
-		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222
-		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 280
+		public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175
+		public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234
+		public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 295
 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
-main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
+main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow
 	Current Stack:
 		private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1
 		public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61
 		private static void art.Test1929.PrintStack() @ line = 52
 		public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65
-		public static void art.Test1929.run() throws java.lang.Exception @ line = 283
+		public static void art.Test1929.run() throws java.lang.Exception @ line = 298
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Caught error art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler"
diff --git a/test/1929-exception-catch-exception/src/art/Test1929.java b/test/1929-exception-catch-exception/src/art/Test1929.java
index 07d2087..e2deb3f 100644
--- a/test/1929-exception-catch-exception/src/art/Test1929.java
+++ b/test/1929-exception-catch-exception/src/art/Test1929.java
@@ -152,49 +152,58 @@
   // dx/d8/jack all do an optimization around catch blocks that (while legal) breaks assumptions
   // this test relies on, so the actual implementation is hand-corrected smali. The plain Java
   // version does work on the RI, however.
-  public static final class Impl {
-    private Impl() {}
-    public static void throwCatchBaseTestExceptionTwiceImpl() {
-      try {
-        try {
-          throw new TestException("throwCatchBaseTestExceptionTwice");
-        } catch (BaseTestException t) {
-          System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
-          if (PRINT_FULL_EXCEPTION) {
-            t.printStackTrace(System.out);
-          }
-        }
-      } catch (BaseTestException t) {
-        System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
-        if (PRINT_FULL_EXCEPTION) {
-          t.printStackTrace(System.out);
-        }
-      }
-    }
 
-    public static void throwCatchTestExceptionTwiceImpl() {
-      try {
-        try {
-          throw new TestException("throwCatchTestExceptionTwice");
-        } catch (TestException t) {
-          System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
-          if (PRINT_FULL_EXCEPTION) {
-            t.printStackTrace(System.out);
-          }
-        }
-      } catch (TestException t) {
-        System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
-        if (PRINT_FULL_EXCEPTION) {
-          t.printStackTrace(System.out);
-        }
-      }
-    }
-  }
+  // For reference:
+
+  // public static final class Impl {
+  //   private Impl() {}
+  //   public static void throwCatchBaseTestExceptionTwiceImpl() {
+  //     try {
+  //       try {
+  //         throw new TestException("throwCatchBaseTestExceptionTwice");
+  //       } catch (BaseTestException t) {
+  //         System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
+  //         if (PRINT_FULL_EXCEPTION) {
+  //           t.printStackTrace(System.out);
+  //         }
+  //       }
+  //     } catch (BaseTestException t) {
+  //       System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
+  //       if (PRINT_FULL_EXCEPTION) {
+  //         t.printStackTrace(System.out);
+  //       }
+  //     }
+  //   }
+
+  //   public static void throwCatchTestExceptionTwiceImpl() {
+  //     try {
+  //       try {
+  //         throw new TestException("throwCatchTestExceptionTwice");
+  //       } catch (TestException t) {
+  //         System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
+  //         if (PRINT_FULL_EXCEPTION) {
+  //           t.printStackTrace(System.out);
+  //         }
+  //       }
+  //     } catch (TestException t) {
+  //       System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\"");
+  //       if (PRINT_FULL_EXCEPTION) {
+  //         t.printStackTrace(System.out);
+  //       }
+  //     }
+  //   }
+  // }
 
   public static void throwCatchBaseTestExceptionTwice() {
     // The implementation of this has to change slightly depending upon the runtime, due to
     // compiler optimizations present in DX/D8/Jack.
-    Impl.throwCatchBaseTestExceptionTwiceImpl();
+    try {
+      Class<?> Impl = Class.forName("art.Test1929$Impl");
+      Method m = Impl.getMethod("throwCatchBaseTestExceptionTwiceImpl");
+      m.invoke(null);
+    } catch (Exception e) {
+      e.printStackTrace(System.out);
+    }
   }
 
   public static class DoThrowCatchBaseTestExceptionTwice implements Runnable {
@@ -219,7 +228,13 @@
   public static void throwCatchTestExceptionTwice() {
     // The implementation of this has to change slightly depending upon the runtime, due to
     // compiler optimizations present in DX/D8/Jack.
-    Impl.throwCatchTestExceptionTwiceImpl();
+    try {
+      Class<?> Impl = Class.forName("art.Test1929$Impl");
+      Method m = Impl.getMethod("throwCatchTestExceptionTwiceImpl");
+      m.invoke(null);
+    } catch (Exception e) {
+      e.printStackTrace(System.out);
+    }
   }
 
   public static class DoThrowCatchTestExceptionTwice implements Runnable {
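A minimal sketch of the reflective indirection used above (class and method names are taken
from this patch; the helper itself is illustrative and not part of the test):

    import java.lang.reflect.Method;

    class ReflectiveCall {
      // Invokes a public static no-arg method on a class that javac never sees at
      // compile time, e.g. art.Test1929$Impl, which now exists only as checked-in smali.
      static void callImpl(String className, String methodName) {
        try {
          Class<?> impl = Class.forName(className);  // resolved only at runtime
          Method m = impl.getMethod(methodName);
          m.invoke(null);                            // static method, so null receiver
        } catch (Exception e) {
          e.printStackTrace(System.out);             // mirrors the test's error handling
        }
      }
    }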
diff --git a/test/1935-get-set-current-frame-jit/expected.txt b/test/1935-get-set-current-frame-jit/expected.txt
index fed993c..cdb8f6a 100644
--- a/test/1935-get-set-current-frame-jit/expected.txt
+++ b/test/1935-get-set-current-frame-jit/expected.txt
@@ -1,7 +1,7 @@
 JNI_OnLoad called
 From GetLocalInt(), value is 42
-isInterpreted? true
+isInOsrCode? false
 	Value is '42'
 Setting TARGET to 1337
-isInterpreted? true
+isInOsrCode? false
 	Value is '1337'
diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java
index eb0a637..714a98a 100644
--- a/test/1935-get-set-current-frame-jit/src/Main.java
+++ b/test/1935-get-set-current-frame-jit/src/Main.java
@@ -64,9 +64,9 @@
         Main.ensureJitCompiled(IntRunner.class, "run");
         i++;
       }
-      // We shouldn't be doing OSR since we are using JVMTI and the get/set local will push us to
-      // interpreter.
-      System.out.println("isInterpreted? " + Main.isInterpreted());
+      // We shouldn't be doing OSR since we are using JVMTI and the get/set local prevents OSR.
+      // The set local will also push us to the interpreter, but the get local may remain in
+      // compiled code.
+      System.out.println("isInOsrCode? " + (hasJit() && Main.isInOsrCode("run")));
       reportValue(TARGET);
     }
     public void waitForBusyLoopStart() { while (!inBusyLoop) {} }
@@ -159,4 +159,6 @@
 
   public static native void ensureJitCompiled(Class k, String f);
   public static native boolean isInterpreted();
+  public static native boolean isInOsrCode(String methodName);
+  public static native boolean hasJit();
 }
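The new assertion can be read as the following fragment, using only the natives declared in
this patch (a sketch of the reasoning, not additional test code):

    // The frame need not be interpreted; the test only requires that no
    // OSR-compiled code is running. On a runtime without a JIT, hasJit()
    // is false and the check short-circuits to false as well.
    boolean inOsrCode = hasJit() && Main.isInOsrCode("run");
    System.out.println("isInOsrCode? " + inOsrCode);  // expected: false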
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index effa37a..04c3fbf 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -16,6 +16,7 @@
 
 #include "class_linker.h"
 #include "dex/art_dex_file_loader.h"
+#include "hidden_api.h"
 #include "jni.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
@@ -27,7 +28,7 @@
 
 extern "C" JNIEXPORT void JNICALL Java_Main_init(JNIEnv*, jclass) {
   Runtime* runtime = Runtime::Current();
-  runtime->SetHiddenApiChecksEnabled(true);
+  runtime->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kBlacklistOnly);
   runtime->SetDedupeHiddenApiWarnings(false);
   runtime->AlwaysSetHiddenApiWarningFlag();
 }
diff --git a/test/674-hiddenapi/src-ex/ChildClass.java b/test/674-hiddenapi/src-ex/ChildClass.java
index 8cd237a..582e907 100644
--- a/test/674-hiddenapi/src-ex/ChildClass.java
+++ b/test/674-hiddenapi/src-ex/ChildClass.java
@@ -123,9 +123,6 @@
           // Check whether one can use an interface default method.
           String name = "method" + visibility.name() + "Default" + hiddenness.name();
           checkMethod(ParentInterface.class, name, /*isStatic*/ false, visibility, expected);
-
-          // Check whether one can override this method.
-          checkOverriding(suffix, isStatic, visibility, expected);
         }
 
         // Test whether static linking succeeds.
@@ -406,37 +403,6 @@
     }
   }
 
-  private static void checkOverriding(String suffix,
-                                      boolean isStatic,
-                                      Visibility visibility,
-                                      Behaviour behaviour) throws Exception {
-    if (isStatic || visibility == Visibility.Private) {
-      // Does not make sense to override a static or private method.
-      return;
-    }
-
-    // The classes are in the same package, but will be able to access each
-    // other only if loaded with the same class loader, here the boot class loader.
-    boolean canAccess = (visibility != Visibility.Package) || (isParentInBoot && isChildInBoot);
-    boolean setsWarning = false;  // warnings may be set during vtable linking
-
-    String methodName = "callMethod" + visibility.name() + suffix;
-
-    // Force the test class to link its vtable, which may cause warnings, before
-    // the actual test.
-    new OverrideClass().methodPublicWhitelist();
-
-    clearWarning();
-    if (Linking.canOverride(methodName) != canAccess) {
-      throw new RuntimeException("Expected to " + (canAccess ? "" : "not ") +
-          "be able to override " + methodName + "." +
-          "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
-    }
-    if (canAccess && hasPendingWarning() != setsWarning) {
-      throwWarningException(ParentClass.class, methodName, false, "static linking", setsWarning);
-    }
-  }
-
   private static void throwDiscoveryException(Class<?> klass, String name, boolean isField,
       String fn, boolean canAccess) {
     throw new RuntimeException("Expected " + (isField ? "field " : "method ") + klass.getName() +
diff --git a/test/674-hiddenapi/src-ex/Linking.java b/test/674-hiddenapi/src-ex/Linking.java
index b416250..a89b92b 100644
--- a/test/674-hiddenapi/src-ex/Linking.java
+++ b/test/674-hiddenapi/src-ex/Linking.java
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-import java.lang.reflect.Method;
 import java.lang.reflect.InvocationTargetException;
 
 public class Linking {
@@ -35,16 +34,6 @@
       }
     }
   }
-
-  public static boolean canOverride(String methodName) throws Exception {
-    // ParentClass returns only positive numbers, OverrideClass only negative.
-    // This way we can tell if OverrideClass managed to override the original
-    // method or not.
-    Method method = ParentClass.class.getDeclaredMethod(methodName);
-    int result1 = (int) method.invoke(new ParentClass());
-    int result2 = (int) method.invoke(new OverrideClass());
-    return (result1 > 0) && (result2 < 0);
-  }
 }
 
 // INSTANCE FIELD GET
diff --git a/test/674-hiddenapi/src-ex/OverrideClass.java b/test/674-hiddenapi/src-ex/OverrideClass.java
deleted file mode 100644
index 1f1f4d6..0000000
--- a/test/674-hiddenapi/src-ex/OverrideClass.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class OverrideClass extends ParentClass {
-
-  @Override public int methodPublicWhitelist() { return -411; }
-  @Override int methodPackageWhitelist() { return -412; }
-  @Override protected int methodProtectedWhitelist() { return -413; }
-
-  @Override public int methodPublicLightGreylist() { return -421; }
-  @Override int methodPackageLightGreylist() { return -422; }
-  @Override protected int methodProtectedLightGreylist() { return -423; }
-
-  @Override public int methodPublicDarkGreylist() { return -431; }
-  @Override int methodPackageDarkGreylist() { return -432; }
-  @Override protected int methodProtectedDarkGreylist() { return -433; }
-
-  @Override public int methodPublicBlacklist() { return -441; }
-  @Override int methodPackageBlacklist() { return -442; }
-  @Override protected int methodProtectedBlacklist() { return -443; }
-
-}
diff --git a/test/679-checker-minmax/src/Main.java b/test/679-checker-minmax/src/Main.java
index d016de6..38085bb 100644
--- a/test/679-checker-minmax/src/Main.java
+++ b/test/679-checker-minmax/src/Main.java
@@ -79,6 +79,51 @@
     return a >= b ? b : a;
   }
 
+  /// CHECK-START: int Main.min5(short, short) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:s\d+>>,<<Op2:s\d+>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.min5(short, short) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Min:i\d+>> Min
+  /// CHECK-DAG:              Return [<<Min>>]
+  //
+  /// CHECK-START: int Main.min5(short, short) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int min5(short a, short b) {
+    return a >= b ? b : a;
+  }
+
+  /// CHECK-START: int Main.min6(byte, byte) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:b\d+>>,<<Op2:b\d+>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.min6(byte, byte) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Min:i\d+>> Min
+  /// CHECK-DAG:              Return [<<Min>>]
+  //
+  /// CHECK-START: int Main.min6(byte, byte) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int min6(byte a, byte b) {
+    return a >= b ? b : a;
+  }
+
+  /// CHECK-START: long Main.min7(long, long) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:j\d+>>,<<Op2:j\d+>>]
+  /// CHECK-DAG: <<Sel:j\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: long Main.min7(long, long) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Min:j\d+>> Min
+  /// CHECK-DAG:              Return [<<Min>>]
+  //
+  /// CHECK-START: long Main.min7(long, long) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static long min7(long a, long b) {
+    return a >= b ? b : a;
+  }
+
   /// CHECK-START: int Main.max1(int, int) instruction_simplifier$after_inlining (before)
   /// CHECK-DAG: <<Cnd:z\d+>> GreaterThanOrEqual [<<Op1:i\d+>>,<<Op2:i\d+>>]
   /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>]
@@ -139,15 +184,66 @@
     return a >= b ? a : b;
   }
 
+  /// CHECK-START: int Main.max5(short, short) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:s\d+>>,<<Op2:s\d+>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op1>>,<<Op2>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.max5(short, short) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Max:i\d+>> Max
+  /// CHECK-DAG:              Return [<<Max>>]
+  //
+  /// CHECK-START: int Main.max5(short, short) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int max5(short a, short b) {
+    return a >= b ? a : b;
+  }
+
+  /// CHECK-START: int Main.max6(byte, byte) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:b\d+>>,<<Op2:b\d+>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op1>>,<<Op2>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.max6(byte, byte) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Max:i\d+>> Max
+  /// CHECK-DAG:              Return [<<Max>>]
+  //
+  /// CHECK-START: int Main.max6(byte, byte) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int max6(byte a, byte b) {
+    return a >= b ? a : b;
+  }
+
+  /// CHECK-START: long Main.max7(long, long) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:j\d+>>,<<Op2:j\d+>>]
+  /// CHECK-DAG: <<Sel:j\d+>> Select [<<Op1>>,<<Op2>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: long Main.max7(long, long) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Max:j\d+>> Max
+  /// CHECK-DAG:              Return [<<Max>>]
+  //
+  /// CHECK-START: long Main.max7(long, long) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static long max7(long a, long b) {
+    return a >= b ? a : b;
+  }
+
   public static void main(String[] args) {
     expectEquals(10, min1(10, 20));
     expectEquals(10, min2(10, 20));
     expectEquals(10, min3(10, 20));
     expectEquals(10, min4(10, 20));
+    expectEquals(10, min5((short) 10, (short) 20));
+    expectEquals(10, min6((byte) 10, (byte) 20));
+    expectEquals(10L, min7(10L, 20L));
     expectEquals(20, max1(10, 20));
     expectEquals(20, max2(10, 20));
     expectEquals(20, max3(10, 20));
     expectEquals(20, max4(10, 20));
+    expectEquals(20, max5((short) 10, (short) 20));
+    expectEquals(20, max6((byte) 10, (byte) 20));
+    expectEquals(20L, max7(10L, 20L));
     System.out.println("passed");
   }
 
@@ -156,4 +252,10 @@
       throw new Error("Expected: " + expected + ", found: " + result);
     }
   }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
 }
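For readers of the CHECK lines above, a brief key to the value-type letters (standard dex
shorty conventions, stated here as background rather than taken from the patch):

    // Checker type letters: z = boolean, b = byte, s = short, i = int, j = long.
    // Short and byte inputs live in 32-bit dex registers, so the resulting
    // Select/Min/Max values in the min5/min6/max5/max6 patterns are int (i)
    // typed; only the long variants (min7/max7) carry the j type.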
diff --git a/test/679-locks/expected.txt b/test/679-locks/expected.txt
new file mode 100644
index 0000000..85a20be
--- /dev/null
+++ b/test/679-locks/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+MyString
diff --git a/test/679-locks/info.txt b/test/679-locks/info.txt
new file mode 100644
index 0000000..7ada490
--- /dev/null
+++ b/test/679-locks/info.txt
@@ -0,0 +1,2 @@
+Ensure FindLocksAtDexPc is able to pass through quickened instructions related
+to unresolved classes.
diff --git a/test/679-locks/run b/test/679-locks/run
new file mode 100644
index 0000000..0cc87f3
--- /dev/null
+++ b/test/679-locks/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run without an app image to prevent the class NotLoaded from being loaded at startup.
+exec ${RUN} "${@}" --no-app-image
diff --git a/test/679-locks/src/Main.java b/test/679-locks/src/Main.java
new file mode 100644
index 0000000..fbc8c53
--- /dev/null
+++ b/test/679-locks/src/Main.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class NotLoaded {
+  public void foo() {}
+}
+
+public class Main {
+    public static void main(String[] args) throws Exception {
+        System.loadLibrary(args[0]);
+        TestSync.run();
+    }
+
+    public static void run() {
+        testVisitLocks();
+    }
+
+    static Object myStatic;
+
+    // Note: declared in 167-visit-locks.
+    public static native void testVisitLocks();
+}
+
+// 167-visit-locks/visit-locks.cc looks at the locks held in TestSync.run().
+class TestSync {
+  public static void run() {
+    Object o = Main.myStatic;
+    if (o != null) {
+      if (o instanceof NotLoaded) {
+        ((NotLoaded)o).foo();
+      }
+    }
+    synchronized ("MyString") {
+      Main.testVisitLocks();
+    }
+  }
+}
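The shape being exercised, restated as a single hypothetical method (the real code is split
across Main and TestSync above): an instanceof against a class that is never loaded sits in
the same method as a monitor-enter, and the lock inspection must walk past instructions that
may have been quickened against the unresolved class to reach the lock.

    static void shape(Object o) {
      if (o instanceof NotLoaded) {   // NotLoaded stays unresolved: myStatic is null
        ((NotLoaded) o).foo();        // and the test runs with --no-app-image
      }
      synchronized ("MyString") {     // the lock FindLocksAtDexPc should report
        Main.testVisitLocks();
      }
    }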
diff --git a/test/680-sink-regression/expected.txt b/test/680-sink-regression/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/680-sink-regression/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/680-sink-regression/info.txt b/test/680-sink-regression/info.txt
new file mode 100644
index 0000000..547e3b8
--- /dev/null
+++ b/test/680-sink-regression/info.txt
@@ -0,0 +1 @@
+Regression test for code sinking with exceptions (b/75971227).
diff --git a/test/680-sink-regression/src/Main.java b/test/680-sink-regression/src/Main.java
new file mode 100644
index 0000000..642c3ab
--- /dev/null
+++ b/test/680-sink-regression/src/Main.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.*;
+
+/**
+ * Regression test for b/75971227 (code sinking with exceptions).
+ */
+public class Main {
+
+  public static class N {
+    int x;
+  }
+
+  private int f;
+
+  public int doit(N n1) throws FileNotFoundException {
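+    // When n1 is null, reading n1.x throws NullPointerException before f is
+    // written or x is changed; the catch below swallows it and doit returns 1.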
+    int x = 1;
+    N n3 = new N();
+    try {
+      if (n1.x == 0) {
+        f = 11;
+        x = 3;
+      } else {
+        f = x;
+      }
+      throw new FileNotFoundException("n3" + n3.x);
+    } catch (NullPointerException e) {
+    }
+    return x;
+  }
+
+
+  public static void main(String[] args) {
+    N n = new N();
+    Main t = new Main();
+    int x = 0;
+
+    // Main 1, null pointer argument.
+    t.f = 0;
+    try {
+      x = t.doit(null);
+    } catch (FileNotFoundException e) {
+      x = -1;
+    }
+    if (x != 1 || t.f != 0) {
+      throw new Error("Main 1: x=" + x + " f=" + t.f);
+    }
+
+    // Main 2, n.x is 0.
+    n.x = 0;
+    try {
+      x = t.doit(n);
+    } catch (FileNotFoundException e) {
+      x = -1;
+    }
+    if (x != -1 || t.f != 11) {
+      throw new Error("Main 2: x=" + x + " f=" + t.f);
+    }
+
+    // Main 3, n.x is not 0.
+    n.x = 1;
+    try {
+      x = t.doit(n);
+    } catch (FileNotFoundException e) {
+      x = -1;
+    }
+    if (x != -1 || t.f != 1) {
+      throw new Error("Main 3: x=" + x + " f=" + t.f);
+    }
+
+    System.out.println("passed");
+  }
+}
diff --git a/test/681-checker-abs/expected.txt b/test/681-checker-abs/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/681-checker-abs/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/681-checker-abs/info.txt b/test/681-checker-abs/info.txt
new file mode 100644
index 0000000..d36e76e
--- /dev/null
+++ b/test/681-checker-abs/info.txt
@@ -0,0 +1 @@
+Functional tests on detecting abs.
diff --git a/test/681-checker-abs/src/Main.java b/test/681-checker-abs/src/Main.java
new file mode 100644
index 0000000..8064b1d
--- /dev/null
+++ b/test/681-checker-abs/src/Main.java
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for detecting abs.
+ */
+public class Main {
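+
+  // Each absN() below writes |a| as a compare-and-select; the CHECK blocks
+  // verify that instruction_simplifier$after_inlining rewrites the Select
+  // into a single Abs instruction.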
+
+  /// CHECK-START: int Main.abs1(int) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Cnd:z\d+>> GreaterThanOrEqual [<<Par>>,<<Zer>>]
+  /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Neg>>,<<Par>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.abs1(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.abs1(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int abs1(int a) {
+    return a < 0 ? -a : a;
+  }
+
+  /// CHECK-START: int Main.abs2(int) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Cnd:z\d+>> GreaterThan [<<Par>>,<<Zer>>]
+  /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Neg>>,<<Par>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.abs2(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.abs2(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int abs2(int a) {
+    return a <= 0 ? -a : a;
+  }
+
+  /// CHECK-START: int Main.abs3(int) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThanOrEqual [<<Par>>,<<Zer>>]
+  /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.abs3(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.abs3(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int abs3(int a) {
+    return a > 0 ? a : -a;
+  }
+
+  /// CHECK-START: int Main.abs4(int) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>]
+  /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.abs4(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.abs4(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int abs4(int a) {
+    return a >= 0 ? a : -a;
+  }
+
+  /// CHECK-START: int Main.abs5(short) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:s\d+>> ParameterValue
+  /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>]
+  /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.abs5(short) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:s\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.abs5(short) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int abs5(short a) {
+    return a >= 0 ? a : -a;
+  }
+
+  /// CHECK-START: int Main.abs6(byte) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:b\d+>> ParameterValue
+  /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>]
+  /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>]
+  /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: int Main.abs6(byte) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:b\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.abs6(byte) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static int abs6(byte a) {
+    return a >= 0 ? a : -a;
+  }
+
+  /// CHECK-START: long Main.abs7(long) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:j\d+>> ParameterValue
+  /// CHECK-DAG: <<Zer:j\d+>> LongConstant 0
+  /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>]
+  /// CHECK-DAG: <<Neg:j\d+>> Neg [<<Par>>]
+  /// CHECK-DAG: <<Sel:j\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>]
+  /// CHECK-DAG:              Return [<<Sel>>]
+  //
+  /// CHECK-START: long Main.abs7(long) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:j\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:j\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: long Main.abs7(long) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              Select
+  public static long abs7(long a) {
+    return a >= 0 ? a : -a;
+  }
+
+  public static void main(String[] args) {
+    expectEquals(10, abs1(-10));
+    expectEquals(20, abs1(20));
+    expectEquals(10, abs2(-10));
+    expectEquals(20, abs2(20));
+    expectEquals(10, abs3(-10));
+    expectEquals(20, abs3(20));
+    expectEquals(10, abs4(-10));
+    expectEquals(20, abs4(20));
+    expectEquals(10, abs5((short) -10));
+    expectEquals(20, abs5((short) 20));
+    expectEquals(10, abs6((byte) -10));
+    expectEquals(20, abs6((byte) 20));
+    expectEquals(10L, abs7(-10L));
+    expectEquals(20L, abs7(20L));
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index cf781d7..6633958 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -24,7 +24,6 @@
   $(HOST_OUT_EXECUTABLES)/hiddenapi \
   $(HOST_OUT_EXECUTABLES)/jasmin \
   $(HOST_OUT_EXECUTABLES)/smali \
-  $(HOST_OUT_EXECUTABLES)/dexmerger \
   $(HOST_OUT_JAVA_LIBRARIES)/desugar.jar
 
 # Add d8 dependency, if enabled.
@@ -103,7 +102,7 @@
 # Host executables.
 host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
 
-# Required for dx, jasmin, smali, dexmerger.
+# Required for dx, jasmin, smali.
 host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
 
 # Sync test files to the target, depends upon all things that must be pushed
diff --git a/test/etc/default-build b/test/etc/default-build
index 3e6577c..9de7294 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -341,8 +341,26 @@
     shift
   done
 
-  # Should have at least 1 dex_files_to_merge here, otherwise dxmerger will print the help.
-  ${DXMERGER} "$dst_file" "${dex_files_to_merge[@]}"
+  # Skip the merge when there is nothing to merge, i.e. the input equals the output.
+  if [[ "${#dex_files_to_merge[@]}" -eq "1" ]]; then
+    local single_input=${dex_files_to_merge[0]}
+    if [[ "$dst_file" != "$single_input" ]]; then
+      mv "$single_input" "$dst_file"
+      return
+    fi
+  fi
+
+  # We assume the dexer did all the API level checks and just merge away.
+  mkdir d8_merge_out
+  ${DXMERGER} --min-api 1000 --output ./d8_merge_out "${dex_files_to_merge[@]}"
+
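+  # d8 names its outputs classes.dex, classes2.dex, ...; a classes2.dex here
+  # means the merged result did not fit into a single dex file.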
+  if [[ -e "./d8_merge_out/classes2.dex" ]]; then
+    echo "Cannot merge all dex files into a single dex"
+    exit 1
+  fi
+
+  mv ./d8_merge_out/classes.dex "$dst_file"
+  rmdir d8_merge_out
 }
 
 function make_hiddenapi() {
diff --git a/test/knownfailures.json b/test/knownfailures.json
index a7e76d1..80b262d 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -954,11 +954,19 @@
     },
     {
         "tests": ["616-cha-unloading",
-                  "678-quickening"],
+                  "678-quickening",
+                  "679-locks"],
         "variant": "jvm",
         "description": ["Doesn't run on RI."]
     },
     {
+        "tests": ["121-modifiers",
+                  "1929-exception-catch-exception"],
+        "variant": "jvm",
+        "bug": "b/76399183",
+        "description": ["New failures to be investigated."]
+    },
+    {
         "tests": ["616-cha-unloading"],
         "variant": "trace",
         "description": ["Trace prevents class unloading."]
diff --git a/test/run-test b/test/run-test
index 260a65a..5b43b52 100755
--- a/test/run-test
+++ b/test/run-test
@@ -50,11 +50,18 @@
 export USE_DESUGAR="true"
 export SMALI_ARGS=""
 
+# If d8 was not set by the environment variable, assume it is in the path.
+if [ -z "$D8" ]; then
+  export D8="d8"
+fi
+
 # If dx was not set by the environment variable, assume it is in the path.
 if [ -z "$DX" ]; then
   export DX="dx"
 fi
 
+export DXMERGER="$D8"
+
 # If jasmin was not set by the environment variable, assume it is in the path.
 if [ -z "$JASMIN" ]; then
   export JASMIN="jasmin"
@@ -65,11 +72,6 @@
   export SMALI="smali"
 fi
 
-# If dexmerger was not set by the environment variable, assume it is in the path.
-if [ -z "$DXMERGER" ]; then
-  export DXMERGER="dexmerger"
-fi
-
 # If jack was not set by the environment variable, assume it is in the path.
 if [ -z "$JACK" ]; then
   export JACK="jack"
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 70efce5..5394991 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -136,9 +136,8 @@
                                     _get_build_var("HOST_OUT_EXECUTABLES"))
 
 # Set up default values for $JACK, $DX, $SMALI, etc to the $HOST_OUT_EXECUTABLES/$name path.
-for tool in ['jack', 'dx', 'smali', 'jasmin', 'dxmerger']:
-  binary = tool if tool != 'dxmerger' else 'dexmerger'
-  os.environ.setdefault(tool.upper(), HOST_OUT_EXECUTABLES + '/' + binary)
+for tool in ['jack', 'dx', 'smali', 'jasmin', 'd8']:
+  os.environ.setdefault(tool.upper(), HOST_OUT_EXECUTABLES + '/' + tool)
 
 ANDROID_JAVA_TOOLCHAIN = os.path.join(ANDROID_BUILD_TOP,
                                      _get_build_var('ANDROID_JAVA_TOOLCHAIN'))
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index a2215f9..734a600 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -114,6 +114,7 @@
 build = False
 gdb = False
 gdb_arg = ''
+runtime_option = []
 stop_testrunner = False
 dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
 run_all_configs = False
@@ -346,6 +347,10 @@
     if gdb_arg:
       options_all += ' --gdb-arg ' + gdb_arg
 
+  if runtime_option:
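+    # --runtime-option may be repeated (argparse action='append'); forward
+    # each occurrence as its own flag.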
+    for opt in runtime_option:
+      options_all += ' --runtime-option ' + opt
+
   if dex2oat_jobs != -1:
     options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)
 
@@ -921,6 +926,7 @@
   global build
   global gdb
   global gdb_arg
+  global runtime_option
   global timeout
   global dex2oat_jobs
   global run_all_configs
@@ -933,9 +939,9 @@
   global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
   global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
   global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
-  global_group.add_argument("--skip", action="append", dest="skips", default=[],
+  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                             help="Skip the given test in all circumstances.")
-  global_group.add_argument("--no-skips", dest="ignore_skips", action="store_true", default=False,
+  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                             help="""Don't skip any run-test configurations listed in
                             knownfailures.json.""")
   global_group.add_argument('--no-build-dependencies',
@@ -950,6 +956,10 @@
   global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
   global_group.add_argument('--gdb', action='store_true', dest='gdb')
   global_group.add_argument('--gdb-arg', dest='gdb_arg')
+  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
+                            help="""Pass an option to the runtime. Runtime options
+                            starting with a '-' must be separated by a '=', for
+                            example '--runtime-option=-Xjitthreshold:0'.""")
   global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                             help='Number of dex2oat jobs')
   global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
@@ -993,6 +1003,7 @@
     gdb = True
     if options['gdb_arg']:
       gdb_arg = options['gdb_arg']
+  runtime_option = options['runtime_option']
   timeout = options['timeout']
   if options['dex2oat_jobs']:
     dex2oat_jobs = options['dex2oat_jobs']
diff --git a/tools/ahat/Android.bp b/tools/ahat/Android.bp
new file mode 100644
index 0000000..dc9f098
--- /dev/null
+++ b/tools/ahat/Android.bp
@@ -0,0 +1,25 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+droiddoc_host {
+    name: "ahat-docs",
+    srcs: [
+        "src/main/**/*.java",
+    ],
+    custom_template: "droiddoc-templates-sdk",
+    args: "-stubpackages com.android.ahat:com.android.ahat.*",
+    api_tag_name: "AHAT",
+    api_filename: "ahat_api.txt",
+    removed_api_filename: "ahat_removed_api.txt",
+}
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index bf79751..ad33233 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -37,23 +37,10 @@
 
 include $(BUILD_HOST_JAVA_LIBRARY)
 AHAT_JAR := $(LOCAL_BUILT_MODULE)
-AHAT_API := $(intermediates.COMMON)/ahat_api.txt
-AHAT_REMOVED_API := $(intermediates.COMMON)/ahat_removed_api.txt
 
 # --- api check for ahat.jar ----------
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(call all-java-files-under, src/main)
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := JAVA_LIBRARIES
-LOCAL_MODULE := ahat
-LOCAL_DROIDDOC_OPTIONS := \
-  -stubpackages com.android.ahat:com.android.ahat.* \
-  -api $(AHAT_API) \
-  -removedApi $(AHAT_REMOVED_API)
-LOCAL_DROIDDOC_CUSTOM_TEMPLATE_DIR := external/doclava/res/assets/templates-sdk
-include $(BUILD_DROIDDOC)
-$(AHAT_API): $(full_target)
+AHAT_API := $(INTERNAL_PLATFORM_AHAT_API_FILE)
+AHAT_REMOVED_API := $(INTERNAL_PLATFORM_AHAT_REMOVED_API_FILE)
 
 $(eval $(call check-api, \
   ahat-check-api, \
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index cac441a..31ff682 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -15,7 +15,10 @@
 art_cc_binary {
     name: "veridex",
     host_supported: true,
-    srcs: ["veridex.cc"],
+    srcs: [
+        "resolver.cc",
+        "veridex.cc",
+    ],
     cflags: ["-Wall", "-Werror"],
     shared_libs: ["libdexfile", "libbase"],
     header_libs: [
diff --git a/tools/veridex/resolver.cc b/tools/veridex/resolver.cc
new file mode 100644
index 0000000..8297821
--- /dev/null
+++ b/tools/veridex/resolver.cc
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "resolver.h"
+
+#include "dex/dex_file-inl.h"
+#include "dex/primitive.h"
+#include "veridex.h"
+
+namespace art {
+
+void VeridexResolver::Run() {
+  size_t class_def_count = dex_file_.NumClassDefs();
+  for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) {
+    const DexFile::ClassDef& class_def = dex_file_.GetClassDef(class_def_index);
+    std::string name(dex_file_.StringByTypeIdx(class_def.class_idx_));
+    auto existing = type_map_.find(name);
+    if (existing != type_map_.end()) {
+      // Class already exists, cache it and move on.
+      type_infos_[class_def.class_idx_.index_] = *existing->second;
+      continue;
+    }
+    type_infos_[class_def.class_idx_.index_] = VeriClass(Primitive::Type::kPrimNot, 0, &class_def);
+    type_map_[name] = &(type_infos_[class_def.class_idx_.index_]);
+
+    const uint8_t* class_data = dex_file_.GetClassData(class_def);
+    if (class_data == nullptr) {
+      // Empty class.
+      continue;
+    }
+
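+    // A class_data_item lists its static fields, then instance fields, then
+    // methods, in that order, so these three loops consume the iterator
+    // sequentially.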
+    ClassDataItemIterator it(dex_file_, class_data);
+    for (; it.HasNextStaticField(); it.Next()) {
+      field_infos_[it.GetMemberIndex()] = it.DataPointer();
+    }
+    for (; it.HasNextInstanceField(); it.Next()) {
+      field_infos_[it.GetMemberIndex()] = it.DataPointer();
+    }
+    for (; it.HasNextMethod(); it.Next()) {
+      method_infos_[it.GetMemberIndex()] = it.DataPointer();
+    }
+  }
+}
+
+static bool HasSameNameAndSignature(const DexFile& dex_file,
+                                    const DexFile::MethodId& method_id,
+                                    const char* method_name,
+                                    const Signature& signature) {
+  return strcmp(method_name, dex_file.GetMethodName(method_id)) == 0 &&
+      dex_file.GetMethodSignature(method_id) == signature;
+}
+
+static bool HasSameNameAndType(const DexFile& dex_file,
+                               const DexFile::FieldId& field_id,
+                               const char* field_name,
+                               const char* field_type) {
+  return strcmp(field_name, dex_file.GetFieldName(field_id)) == 0 &&
+      strcmp(field_type, dex_file.GetFieldTypeDescriptor(field_id)) == 0;
+}
+
+VeriClass* VeridexResolver::GetVeriClass(dex::TypeIndex index) {
+  CHECK_LT(index.index_, dex_file_.NumTypeIds());
+  // Lookup in our local cache.
+  VeriClass* cls = &type_infos_[index.index_];
+  if (cls->IsUninitialized()) {
+    // Class is defined in another dex file. Lookup in the global cache.
+    std::string name(dex_file_.StringByTypeIdx(index));
+    auto existing = type_map_.find(name);
+    if (existing == type_map_.end()) {
+      // Class hasn't been defined, so check if it's an array class.
+      size_t last_array = name.find_last_of('[');
+      if (last_array == std::string::npos) {
+        // There is no such class.
+        return nullptr;
+      } else {
+        // Class is an array class. Check whether its innermost component type
+        // (which is not itself an array class) has been defined.
+        std::string klass_name = name.substr(last_array + 1);
+        existing = type_map_.find(klass_name);
+        if (existing == type_map_.end()) {
+          // There is no such class, so there is no such array.
+          return nullptr;
+        } else {
+          // Create the type, and cache it locally and globally.
+          type_infos_[index.index_] = VeriClass(
+              existing->second->GetKind(), last_array + 1, existing->second->GetClassDef());
+          cls = &(type_infos_[index.index_]);
+          type_map_[name] = cls;
+        }
+      }
+    } else {
+      // Cache the found class.
+      cls = existing->second;
+      type_infos_[index.index_] = *cls;
+    }
+  }
+  return cls;
+}
+
+VeridexResolver* VeridexResolver::GetResolverOf(const VeriClass& kls) const {
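+  // Find the resolver whose mapped dex file contains the class def pointer:
+  // lower_bound returns the first dex file starting at or above that address,
+  // so the entry just before it is the file the pointer lies in.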
+  auto resolver_it = dex_resolvers_.lower_bound(reinterpret_cast<uintptr_t>(kls.GetClassDef()));
+  --resolver_it;
+
+  // Check the class def pointer is indeed in the mapped dex file range.
+  const DexFile& dex_file = resolver_it->second->dex_file_;
+  CHECK_LT(reinterpret_cast<uintptr_t>(dex_file.Begin()),
+           reinterpret_cast<uintptr_t>(kls.GetClassDef()));
+  CHECK_GT(reinterpret_cast<uintptr_t>(dex_file.Begin()) + dex_file.Size(),
+           reinterpret_cast<uintptr_t>(kls.GetClassDef()));
+  return resolver_it->second;
+}
+
+VeriMethod VeridexResolver::LookupMethodIn(const VeriClass& kls,
+                                           const char* method_name,
+                                           const Signature& method_signature) {
+  if (kls.IsPrimitive()) {
+    // Primitive classes don't have methods.
+    return nullptr;
+  }
+  if (kls.IsArray()) {
+    // Array classes don't have methods, but inherit the ones in j.l.Object.
+    return LookupMethodIn(*VeriClass::object_, method_name, method_signature);
+  }
+  // Get the resolver where `kls` is from.
+  VeridexResolver* resolver = GetResolverOf(kls);
+
+  // Look at methods declared in `kls`.
+  const DexFile& other_dex_file = resolver->dex_file_;
+  const uint8_t* class_data = other_dex_file.GetClassData(*kls.GetClassDef());
+  if (class_data != nullptr) {
+    ClassDataItemIterator it(other_dex_file, class_data);
+    it.SkipAllFields();
+    for (; it.HasNextMethod(); it.Next()) {
+      const DexFile::MethodId& other_method_id = other_dex_file.GetMethodId(it.GetMemberIndex());
+      if (HasSameNameAndSignature(other_dex_file,
+                                  other_method_id,
+                                  method_name,
+                                  method_signature)) {
+        return it.DataPointer();
+      }
+    }
+  }
+
+  // Look at methods in `kls`'s super class hierarchy.
+  if (kls.GetClassDef()->superclass_idx_.IsValid()) {
+    VeriClass* super = resolver->GetVeriClass(kls.GetClassDef()->superclass_idx_);
+    if (super != nullptr) {
+      VeriMethod super_method = resolver->LookupMethodIn(*super, method_name, method_signature);
+      if (super_method != nullptr) {
+        return super_method;
+      }
+    }
+  }
+
+  // Look at methods in `kls`'s interface hierarchy.
+  const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
+  if (interfaces != nullptr) {
+    for (size_t i = 0; i < interfaces->Size(); i++) {
+      dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
+      VeriClass* itf = resolver->GetVeriClass(idx);
+      if (itf != nullptr) {
+        VeriMethod itf_method = resolver->LookupMethodIn(*itf, method_name, method_signature);
+        if (itf_method != nullptr) {
+          return itf_method;
+        }
+      }
+    }
+  }
+  return nullptr;
+}
+
+VeriField VeridexResolver::LookupFieldIn(const VeriClass& kls,
+                                         const char* field_name,
+                                         const char* field_type) {
+  if (kls.IsPrimitive()) {
+    // Primitive classes don't have fields.
+    return nullptr;
+  }
+  if (kls.IsArray()) {
+    // Array classes don't have fields.
+    return nullptr;
+  }
+  // Get the resolver where `kls` is from.
+  VeridexResolver* resolver = GetResolverOf(kls);
+
+  // Look at fields declared in `kls`.
+  const DexFile& other_dex_file = resolver->dex_file_;
+  const uint8_t* class_data = other_dex_file.GetClassData(*kls.GetClassDef());
+  if (class_data != nullptr) {
+    ClassDataItemIterator it(other_dex_file, class_data);
+    for (; it.HasNextStaticField() || it.HasNextInstanceField(); it.Next()) {
+      const DexFile::FieldId& other_field_id = other_dex_file.GetFieldId(it.GetMemberIndex());
+      if (HasSameNameAndType(other_dex_file,
+                             other_field_id,
+                             field_name,
+                             field_type)) {
+        return it.DataPointer();
+      }
+    }
+  }
+
+  // Look at fields in `kls`'s interface hierarchy.
+  const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
+  if (interfaces != nullptr) {
+    for (size_t i = 0; i < interfaces->Size(); i++) {
+      dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
+      VeriClass* itf = resolver->GetVeriClass(idx);
+      if (itf != nullptr) {
+        VeriField itf_field = resolver->LookupFieldIn(*itf, field_name, field_type);
+        if (itf_field != nullptr) {
+          return itf_field;
+        }
+      }
+    }
+  }
+
+  // Look at fields in `kls`'s super class hierarchy.
+  if (kls.GetClassDef()->superclass_idx_.IsValid()) {
+    VeriClass* super = resolver->GetVeriClass(kls.GetClassDef()->superclass_idx_);
+    if (super != nullptr) {
+      VeriField super_field = resolver->LookupFieldIn(*super, field_name, field_type);
+      if (super_field != nullptr) {
+        return super_field;
+      }
+    }
+  }
+  return nullptr;
+}
+
+VeriMethod VeridexResolver::GetMethod(uint32_t method_index) {
+  VeriMethod method_info = method_infos_[method_index];
+  if (method_info == nullptr) {
+    // Method is defined in another dex file.
+    const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_index);
+    VeriClass* kls = GetVeriClass(method_id.class_idx_);
+    if (kls == nullptr) {
+      return nullptr;
+    }
+    // Class found, now lookup the method in it.
+    method_info = LookupMethodIn(*kls,
+                                 dex_file_.GetMethodName(method_id),
+                                 dex_file_.GetMethodSignature(method_id));
+    method_infos_[method_index] = method_info;
+  }
+  return method_info;
+}
+
+VeriField VeridexResolver::GetField(uint32_t field_index) {
+  VeriField field_info = field_infos_[field_index];
+  if (field_info == nullptr) {
+    // Field is defined in another dex file.
+    const DexFile::FieldId& field_id = dex_file_.GetFieldId(field_index);
+    VeriClass* kls = GetVeriClass(field_id.class_idx_);
+    if (kls == nullptr) {
+      return nullptr;
+    }
+    // Class found, now lookup the field in it.
+    field_info = LookupFieldIn(*kls,
+                               dex_file_.GetFieldName(field_id),
+                               dex_file_.GetFieldTypeDescriptor(field_id));
+    field_infos_[field_index] = field_info;
+  }
+  return field_info;
+}
+
+void VeridexResolver::ResolveAll() {
+  for (uint32_t i = 0; i < dex_file_.NumTypeIds(); ++i) {
+    if (GetVeriClass(dex::TypeIndex(i)) == nullptr) {
+      LOG(WARNING) << "Unresolved " << dex_file_.PrettyType(dex::TypeIndex(i));
+    }
+  }
+
+  for (uint32_t i = 0; i < dex_file_.NumMethodIds(); ++i) {
+    if (GetMethod(i) == nullptr) {
+      LOG(WARNING) << "Unresolved: " << dex_file_.PrettyMethod(i);
+    }
+  }
+
+  for (uint32_t i = 0; i < dex_file_.NumFieldIds(); ++i) {
+    if (GetField(i) == nullptr) {
+      LOG(WARNING) << "Unresolved: " << dex_file_.PrettyField(i);
+    }
+  }
+}
+
+}  // namespace art
diff --git a/tools/veridex/resolver.h b/tools/veridex/resolver.h
new file mode 100644
index 0000000..ae94dad
--- /dev/null
+++ b/tools/veridex/resolver.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TOOLS_VERIDEX_RESOLVER_H_
+#define ART_TOOLS_VERIDEX_RESOLVER_H_
+
+#include "dex/dex_file.h"
+#include "veridex.h"
+
+namespace art {
+
+class VeridexResolver;
+
+/**
+ * Map from the start of a dex file (i.e. DexFile::Begin()) to
+ * its corresponding resolver.
+ */
+using DexResolverMap = std::map<uintptr_t, VeridexResolver*>;
+
+class VeridexResolver {
+ public:
+  VeridexResolver(const DexFile& dex_file,
+                  const DexResolverMap& dex_resolvers,
+                  TypeMap& type_map)
+      : dex_file_(dex_file),
+        type_map_(type_map),
+        dex_resolvers_(dex_resolvers),
+        type_infos_(dex_file.NumTypeIds(), VeriClass()),
+        method_infos_(dex_file.NumMethodIds(), nullptr),
+        field_infos_(dex_file.NumFieldIds(), nullptr) {}
+
+  // Run over the classes defined in this dex file and populate our
+  // local type cache.
+  void Run();
+
+  // Return the class declared at `index`.
+  VeriClass* GetVeriClass(dex::TypeIndex index);
+
+  // Return the method declared at `method_index`.
+  VeriMethod GetMethod(uint32_t method_index);
+
+  // Return the field declared at `field_index`.
+  VeriField GetField(uint32_t field_index);
+
+  // Do a JLS lookup in `kls` to find a method.
+  VeriMethod LookupMethodIn(const VeriClass& kls,
+                            const char* method_name,
+                            const Signature& method_signature);
+
+  // Do a JLS lookup in `kls` to find a field.
+  VeriField LookupFieldIn(const VeriClass& kls,
+                          const char* field_name,
+                          const char* field_type);
+
+  // Resolve all type_id/method_id/field_id entries.
+  void ResolveAll();
+
+ private:
+  // Return the resolver where `kls` is from.
+  VeridexResolver* GetResolverOf(const VeriClass& kls) const;
+
+  const DexFile& dex_file_;
+  TypeMap& type_map_;
+  const DexResolverMap& dex_resolvers_;
+  std::vector<VeriClass> type_infos_;
+  std::vector<VeriMethod> method_infos_;
+  std::vector<VeriField> field_infos_;
+};
+
+}  // namespace art
+
+#endif  // ART_TOOLS_VERIDEX_RESOLVER_H_
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index 9d0dd36..9287211 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -14,15 +14,40 @@
  * limitations under the License.
  */
 
+#include "veridex.h"
+
 #include <android-base/file.h>
 
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
+#include "resolver.h"
 
 #include <sstream>
 
 namespace art {
 
+static VeriClass z_(Primitive::Type::kPrimBoolean, 0, nullptr);
+static VeriClass b_(Primitive::Type::kPrimByte, 0, nullptr);
+static VeriClass c_(Primitive::Type::kPrimChar, 0, nullptr);
+static VeriClass s_(Primitive::Type::kPrimShort, 0, nullptr);
+static VeriClass i_(Primitive::Type::kPrimInt, 0, nullptr);
+static VeriClass f_(Primitive::Type::kPrimFloat, 0, nullptr);
+static VeriClass d_(Primitive::Type::kPrimDouble, 0, nullptr);
+static VeriClass j_(Primitive::Type::kPrimLong, 0, nullptr);
+static VeriClass v_(Primitive::Type::kPrimVoid, 0, nullptr);
+
+VeriClass* VeriClass::boolean_ = &z_;
+VeriClass* VeriClass::byte_ = &b_;
+VeriClass* VeriClass::char_ = &c_;
+VeriClass* VeriClass::short_ = &s_;
+VeriClass* VeriClass::integer_ = &i_;
+VeriClass* VeriClass::float_ = &f_;
+VeriClass* VeriClass::double_ = &d_;
+VeriClass* VeriClass::long_ = &j_;
+VeriClass* VeriClass::void_ = &v_;
+// Will be set after boot classpath has been resolved.
+VeriClass* VeriClass::object_ = nullptr;
+
 struct VeridexOptions {
   const char* dex_file = nullptr;
   const char* core_stubs = nullptr;
@@ -108,6 +133,39 @@
         return 1;
       }
     }
+
+    // Resolve classes/methods/fields defined in each dex file.
+
+    // Cache of types we've seen, for quick class name lookups.
+    TypeMap type_map;
+    // Add internally defined primitives.
+    type_map["Z"] = VeriClass::boolean_;
+    type_map["B"] = VeriClass::byte_;
+    type_map["S"] = VeriClass::short_;
+    type_map["C"] = VeriClass::char_;
+    type_map["I"] = VeriClass::integer_;
+    type_map["F"] = VeriClass::float_;
+    type_map["D"] = VeriClass::double_;
+    type_map["J"] = VeriClass::long_;
+    type_map["V"] = VeriClass::void_;
+
+    // Cache of resolvers, to easily map an address in memory to its VeridexResolver.
+    DexResolverMap resolver_map;
+
+    std::vector<std::unique_ptr<VeridexResolver>> boot_resolvers;
+    Resolve(boot_dex_files, resolver_map, type_map, &boot_resolvers);
+
+    // Now that boot classpath has been resolved, fill j.l.Object.
+    VeriClass::object_ = type_map["Ljava/lang/Object;"];
+
+    std::vector<std::unique_ptr<VeridexResolver>> app_resolvers;
+    Resolve(app_dex_files, resolver_map, type_map, &app_resolvers);
+
+    // Resolve all type_id/method_id/field_id of app dex files.
+    for (const std::unique_ptr<VeridexResolver>& resolver : app_resolvers) {
+      resolver->ResolveAll();
+    }
+
     return 0;
   }
 
@@ -142,6 +200,22 @@
 
     return true;
   }
+
+  static void Resolve(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
+                      DexResolverMap& resolver_map,
+                      TypeMap& type_map,
+                      std::vector<std::unique_ptr<VeridexResolver>>* resolvers) {
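+    // Create and register every resolver before running any of them: Run()
+    // fills the shared type_map, and GetResolverOf expects resolver_map to
+    // cover all dex files.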
+    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+      VeridexResolver* resolver =
+          new VeridexResolver(*dex_file, resolver_map, type_map);
+      resolvers->emplace_back(resolver);
+      resolver_map[reinterpret_cast<uintptr_t>(dex_file->Begin())] = resolver;
+    }
+
+    for (const std::unique_ptr<VeridexResolver>& resolver : *resolvers) {
+      resolver->Run();
+    }
+  }
 };
 
 }  // namespace art
diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h
new file mode 100644
index 0000000..0c928ab
--- /dev/null
+++ b/tools/veridex/veridex.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TOOLS_VERIDEX_VERIDEX_H_
+#define ART_TOOLS_VERIDEX_VERIDEX_H_
+
+#include <map>
+
+#include "dex/dex_file.h"
+#include "dex/primitive.h"
+
+namespace art {
+
+/**
+ * Abstraction for classes defined in dex files, either explicitly or
+ * implicitly (arrays and primitives).
+ */
+class VeriClass {
+ public:
+  VeriClass(const VeriClass& other) = default;
+  VeriClass() = default;
+  VeriClass(Primitive::Type k, uint8_t dims, const DexFile::ClassDef* cl)
+      : kind_(k), dimensions_(dims), class_def_(cl) {}
+
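+  // A value-initialized VeriClass (kPrimNot, zero dimensions, null class def)
+  // serves as the "not yet resolved" sentinel in the type caches.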
+  bool IsUninitialized() const {
+    return kind_ == Primitive::Type::kPrimNot && dimensions_ == 0 && class_def_ == nullptr;
+  }
+
+  bool IsPrimitive() const {
+    return kind_ != Primitive::Type::kPrimNot && dimensions_ == 0;
+  }
+
+  bool IsArray() const {
+    return dimensions_ != 0;
+  }
+
+  Primitive::Type GetKind() const { return kind_; }
+  uint8_t GetDimensions() const { return dimensions_; }
+  const DexFile::ClassDef* GetClassDef() const { return class_def_; }
+
+  static VeriClass* object_;
+  static VeriClass* boolean_;
+  static VeriClass* byte_;
+  static VeriClass* char_;
+  static VeriClass* short_;
+  static VeriClass* integer_;
+  static VeriClass* float_;
+  static VeriClass* double_;
+  static VeriClass* long_;
+  static VeriClass* void_;
+
+ private:
+  Primitive::Type kind_;
+  uint8_t dimensions_;
+  const DexFile::ClassDef* class_def_;
+};
+
+/**
+ * Abstraction for fields defined in dex files. Currently, that's a pointer into their
+ * `encoded_field` description.
+ */
+using VeriField = const uint8_t*;
+
+/**
+ * Abstraction for methods defined in dex files. Currently, that's a pointer into their
+ * `encoded_method` description.
+ */
+using VeriMethod = const uint8_t*;
+
+/**
+ * Map from name to VeriClass to quickly lookup classes.
+ */
+using TypeMap = std::map<std::string, VeriClass*>;
+
+}  // namespace art
+
+#endif  // ART_TOOLS_VERIDEX_VERIDEX_H_