Merge "Revert "ART: Fix breaking changes from recent VIXL update.""
diff --git a/Android.mk b/Android.mk
index 0c20973..e27c3db 100644
--- a/Android.mk
+++ b/Android.mk
@@ -341,7 +341,6 @@
     libart-compiler \
     libopenjdkjvm \
     libopenjdkjvmti \
-    patchoat \
     profman \
     libadbconnection \
 
@@ -367,7 +366,6 @@
     libopenjdkd \
     libopenjdkjvmd \
     libopenjdkjvmtid \
-    patchoatd \
     profmand \
     libadbconnectiond \
 
@@ -451,7 +449,7 @@
 # Also include libartbenchmark, we always include it when running golem.
 # libstdc++ is needed when building for ART_TARGET_LINUX.
 ART_TARGET_SHARED_LIBRARY_BENCHMARK := $(TARGET_OUT_SHARED_LIBRARIES)/libartbenchmark.so
-build-art-target-golem: dex2oat dalvikvm patchoat linker libstdc++ \
+build-art-target-golem: dex2oat dalvikvm linker libstdc++ \
                         $(TARGET_OUT_EXECUTABLES)/art \
                         $(TARGET_OUT)/etc/public.libraries.txt \
                         $(ART_TARGET_DEX_DEPENDENCIES) \
diff --git a/build/Android.bp b/build/Android.bp
index 3eb4aaf..19d54d5 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -27,6 +27,7 @@
     "performance-faster-string-find",
     "performance-for-range-copy",
     "performance-implicit-conversion-in-loop",
+    "performance-noexcept-move-constructor",
     "performance-unnecessary-copy-initialization",
     "performance-unnecessary-value-param",
     "misc-unused-using-decls",
@@ -42,6 +43,7 @@
         + ",performance-faster-string-find"
         + ",performance-for-range-copy"
         + ",performance-implicit-conversion-in-loop"
+        + ",performance-noexcept-move-constructor"
         + ",performance-unnecessary-copy-initialization"
         + ",performance-unnecessary-value-param"
         + ",misc-unused-using-decls"
@@ -55,9 +57,6 @@
     // We have lots of C-style variadic functions, and are OK with them. JNI ensures
     // that working around this warning would be extra-painful.
     "-cert-dcl50-cpp",
-    // No exceptions.
-    "-misc-noexcept-move-constructor",
-    "-performance-noexcept-move-constructor",
     // "Modernization" we don't agree with.
     "-modernize-use-auto",
     "-modernize-return-braced-init-list",
@@ -107,6 +106,10 @@
         "-Wunreachable-code-break",
         "-Wunreachable-code-return",
 
+        // Disable the warning for use of offsetof on non-standard-layout types.
+        // We use it to implement OFFSETOF_MEMBER - see macros.h.
+        "-Wno-invalid-offsetof",
+
         // Enable thread annotations for std::mutex, etc.
         "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
     ],
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 3247e54..96d3648 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -108,7 +108,6 @@
     dexoptanalyzer \
     imgdiag \
     oatdump \
-    patchoat \
     profman \
 
 ART_CORE_EXECUTABLES := \
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 20f20c9..b97feff 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -191,11 +191,11 @@
 ART_GTEST_oat_test_DEX_DEPS := Main
 ART_GTEST_oat_writer_test_DEX_DEPS := Main
 ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
-ART_GTEST_patchoat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
 ART_GTEST_proxy_test_DEX_DEPS := Interfaces
 ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
 ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex
 ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex
+ART_GTEST_profiling_info_test_DEX_DEPS := ProfileTestMultiDex
 ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY
 ART_GTEST_stub_test_DEX_DEPS := AllFields
 ART_GTEST_transaction_test_DEX_DEPS := Transaction
@@ -214,14 +214,12 @@
   $(HOST_CORE_IMAGE_optimizing_64) \
   $(HOST_CORE_IMAGE_optimizing_32) \
   $(HOST_CORE_IMAGE_interpreter_64) \
-  $(HOST_CORE_IMAGE_interpreter_32) \
-  patchoatd-host
+  $(HOST_CORE_IMAGE_interpreter_32)
 ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_optimizing_64) \
   $(TARGET_CORE_IMAGE_optimizing_32) \
   $(TARGET_CORE_IMAGE_interpreter_64) \
-  $(TARGET_CORE_IMAGE_interpreter_32) \
-  patchoatd-target
+  $(TARGET_CORE_IMAGE_interpreter_32)
 
 ART_GTEST_oat_file_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -348,11 +346,6 @@
 ART_GTEST_oatdump_app_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS) \
   dex2oatd-target
 
-ART_GTEST_patchoat_test_HOST_DEPS := \
-  $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
-ART_GTEST_patchoat_test_TARGET_DEPS := \
-  $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
-
 # Profile assistant tests requires profman utility.
 ART_GTEST_profile_assistant_test_HOST_DEPS := profmand-host
 ART_GTEST_profile_assistant_test_TARGET_DEPS := profmand-target
@@ -382,7 +375,6 @@
     art_libdexfile_tests \
     art_libprofile_tests \
     art_oatdump_tests \
-    art_patchoat_tests \
     art_profman_tests \
     art_runtime_tests \
     art_runtime_compiler_tests \
@@ -733,9 +725,6 @@
 ART_GTEST_dex2oat_image_test_HOST_DEPS :=
 ART_GTEST_dex2oat_image_test_TARGET_DEPS :=
 ART_GTEST_object_test_DEX_DEPS :=
-ART_GTEST_patchoat_test_DEX_DEPS :=
-ART_GTEST_patchoat_test_HOST_DEPS :=
-ART_GTEST_patchoat_test_TARGET_DEPS :=
 ART_GTEST_proxy_test_DEX_DEPS :=
 ART_GTEST_reflection_test_DEX_DEPS :=
 ART_GTEST_stub_test_DEX_DEPS :=
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 95d08b3..df84b25 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2647,7 +2647,7 @@
     LOG(INFO) << "[ProfileGuidedCompilation] " <<
         ((profile_compilation_info_ == nullptr)
             ? "null"
-            : profile_compilation_info_->DumpInfo(&dex_files));
+            : profile_compilation_info_->DumpInfo(dex_files));
   }
 
   dex_to_dex_compiler_.ClearState();
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 62e8e02..09376dd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -219,12 +219,6 @@
   jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
   jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
 
-  // Offsets into data structures
-  // TODO: if cross compiling these offsets are for the host not the target
-  const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
-  const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
-  const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
-
   // 1. Build the frame saving all callee saves, Method*, and PC return address.
   const size_t frame_size(main_jni_conv->FrameSize());  // Excludes outgoing args.
   ArrayRef<const ManagedRegister> callee_save_regs = main_jni_conv->CalleeSaveRegisters();
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d5149b3..17d9736 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2677,6 +2677,18 @@
   const Location first = locations->InAt(0);
   const Location out = locations->Out();
   const Location second = locations->InAt(1);
+
+  // In the unlucky case where the output of this instruction overlaps
+  // with an input of an "emitted-at-use-site" condition, and the output
+  // of this instruction is not one of its inputs, we need to fall back
+  // to branches instead of conditional ARM instructions.
+  bool output_overlaps_with_condition_inputs =
+      !IsBooleanValueOrMaterializedCondition(condition) &&
+      !out.Equals(first) &&
+      !out.Equals(second) &&
+      (condition->GetLocations()->InAt(0).Equals(out) ||
+       condition->GetLocations()->InAt(1).Equals(out));
+  DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
   Location src;
 
   if (condition->IsIntConstant()) {
@@ -2690,7 +2702,7 @@
     return;
   }
 
-  if (!DataType::IsFloatingPointType(type)) {
+  if (!DataType::IsFloatingPointType(type) && !output_overlaps_with_condition_inputs) {
     bool invert = false;
 
     if (out.Equals(second)) {
@@ -2762,6 +2774,7 @@
   vixl32::Label* false_target = nullptr;
   vixl32::Label* true_target = nullptr;
   vixl32::Label select_end;
+  vixl32::Label other_case;
   vixl32::Label* const target = codegen_->GetFinalLabel(select, &select_end);
 
   if (out.Equals(second)) {
@@ -2772,12 +2785,21 @@
     src = second;
 
     if (!out.Equals(first)) {
-      codegen_->MoveLocation(out, first, type);
+      if (output_overlaps_with_condition_inputs) {
+        false_target = &other_case;
+      } else {
+        codegen_->MoveLocation(out, first, type);
+      }
     }
   }
 
   GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
   codegen_->MoveLocation(out, src, type);
+  if (output_overlaps_with_condition_inputs) {
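+    // The fall-through path has already moved `src` into the output; jump over the other
+    // case, which moves the first input into the output instead.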
+    __ B(target);
+    __ Bind(&other_case);
+    codegen_->MoveLocation(out, first, type);
+  }
 
   if (select_end.IsReferenced()) {
     __ Bind(&select_end);
@@ -2876,31 +2898,16 @@
 void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
   LocationSummary* locations =
       new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
-  // Handle the long/FP comparisons made in instruction simplification.
-  switch (cond->InputAt(0)->GetType()) {
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
-      break;
-
-    default:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
-      if (!cond->IsEmittedAtUseSite()) {
-        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
+  const DataType::Type type = cond->InputAt(0)->GetType();
+  if (DataType::IsFloatingPointType(type)) {
+    locations->SetInAt(0, Location::RequiresFpuRegister());
+    locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
+  } else {
+    locations->SetInAt(0, Location::RequiresRegister());
+    locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+  }
+  if (!cond->IsEmittedAtUseSite()) {
+    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   }
 }
 
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 72b6748..b576f83 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1497,21 +1497,22 @@
   // to be visited once it is clear whether it has remaining uses.
   if (arg_this->IsNewInstance()) {
     ssa_builder_->AddUninitializedString(arg_this->AsNewInstance());
-    // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
-    for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
-      if ((*current_locals_)[vreg] == arg_this) {
-        (*current_locals_)[vreg] = invoke;
-      }
-    }
   } else {
     DCHECK(arg_this->IsPhi());
     // We can get a phi as input of a String.<init> if there is a loop between the
     // allocation and the String.<init> call. As we don't know which other phis might alias
-    // with `arg_this`, we keep a record of these phis and will analyze their inputs and
-    // uses once the inputs and users are populated (in ssa_builder.cc).
-    // Note: we only do this for phis, as it is a somewhat more expensive operation than
-    // what we're doing above when the input is the `HNewInstance`.
-    ssa_builder_->AddUninitializedStringPhi(arg_this->AsPhi(), invoke);
+    // with `arg_this`, we keep a record of those invocations so we can later replace
+    // the allocation with the invocation.
+    // Add the actual 'this' input so the analysis knows which instruction is the allocation.
+    // The input will be removed during the analysis.
+    invoke->AddInput(arg_this);
+    ssa_builder_->AddUninitializedStringPhi(invoke);
+  }
+  // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
+  for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
+    if ((*current_locals_)[vreg] == arg_this) {
+      (*current_locals_)[vreg] = invoke;
+    }
   }
   return true;
 }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index daf86fd..7921061 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -129,6 +129,7 @@
   kAnalysisInvalidBytecode,
   kAnalysisFailThrowCatchLoop,
   kAnalysisFailAmbiguousArrayOp,
+  kAnalysisFailIrreducibleLoopAndStringInit,
   kAnalysisSuccess,
 };
 
@@ -4537,8 +4538,7 @@
                 allocator,
                 number_of_arguments,
                 // There is potentially one extra argument for the HCurrentMethod node, and
-                // potentially one other if the clinit check is explicit, and potentially
-                // one other if the method is a string factory.
+                // potentially one other if the clinit check is explicit.
                 (NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) +
                     (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
                 return_type,
@@ -6140,6 +6140,9 @@
 
  private:
   static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
+  static constexpr size_t kNumberOfBoundsCheckPackedBits = kFlagIsStringCharAt + 1;
+  static_assert(kNumberOfBoundsCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+                "Too many packed fields.");
 };
 
 class HSuspendCheck final : public HExpression<0> {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 7d339cd..46754fe 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -847,6 +847,11 @@
                           MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
           break;
         }
+        case kAnalysisFailIrreducibleLoopAndStringInit: {
+          MaybeRecordStat(compilation_stats_.get(),
+                          MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
+          break;
+        }
         case kAnalysisSuccess:
           UNREACHABLE();
       }
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 9a26f2f..1f4f6d5 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -59,6 +59,7 @@
   kNotCompiledUnsupportedIsa,
   kNotCompiledVerificationError,
   kNotCompiledVerifyAtRuntime,
+  kNotCompiledIrreducibleLoopAndStringInit,
   kInlinedMonomorphicCall,
   kInlinedPolymorphicCall,
   kMonomorphicCall,
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index db96e41..16c23c8 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -16,6 +16,8 @@
 
 #include "ssa_builder.h"
 
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
 #include "data_type-inl.h"
 #include "dex/bytecode_utils.h"
 #include "mirror/class-inl.h"
@@ -415,97 +417,34 @@
   return true;
 }
 
-static bool HasAliasInEnvironments(HInstruction* instruction) {
-  HEnvironment* last_user = nullptr;
+bool SsaBuilder::HasAliasInEnvironments(HInstruction* instruction) {
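+  // An alias exists if two environment uses of the instruction share the same holder instruction.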
+  ScopedArenaHashSet<size_t> seen_users(
+      local_allocator_->Adapter(kArenaAllocGraphBuilder));
   for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
     DCHECK(use.GetUser() != nullptr);
-    // Note: The first comparison (== null) always fails.
-    if (use.GetUser() == last_user) {
+    size_t id = use.GetUser()->GetHolder()->GetId();
+    if (seen_users.find(id) != seen_users.end()) {
       return true;
     }
-    last_user = use.GetUser();
-  }
-
-  if (kIsDebugBuild) {
-    // Do a quadratic search to ensure same environment uses are next
-    // to each other.
-    const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
-    for (auto current = env_uses.begin(), end = env_uses.end(); current != end; ++current) {
-      auto next = current;
-      for (++next; next != end; ++next) {
-        DCHECK(next->GetUser() != current->GetUser());
-      }
-    }
+    seen_users.insert(id);
   }
   return false;
 }
 
-// Returns whether the analysis succeeded. If it did not, we are going to bail
-// to interpreter.
-// TODO(ngeoffray): Remove this workaround.
 bool SsaBuilder::ReplaceUninitializedStringPhis() {
-  ScopedArenaHashSet<HInstruction*> seen_instructions(
-      local_allocator_->Adapter(kArenaAllocGraphBuilder));
-  ScopedArenaVector<HInstruction*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
-
-  // Iterate over all inputs and uses of the phi, recursively, until all related instructions
-  // have been visited.
-  for (const auto& pair : uninitialized_string_phis_) {
-    HPhi* string_phi = pair.first;
-    HInvoke* invoke = pair.second;
-    worklist.push_back(string_phi);
-    HNewInstance* found_instance = nullptr;
-    do {
-      HInstruction* current = worklist.back();
-      worklist.pop_back();
-      if (seen_instructions.find(current) != seen_instructions.end()) {
-        continue;
-      }
-      seen_instructions.insert(current);
-      if (current->IsNewInstance()) {
-        // If it is the first time we see the allocation, replace its uses. We don't register
-        // it through `RemoveRedundantUninitializedStrings`, as that method makes assumption about
-        // aliasing and environment uses that don't hold when the string escapes to phis.
-        // Note that this also means we will keep the (useless) allocation.
-        if (found_instance == nullptr) {
-          found_instance = current->AsNewInstance();
-        } else {
-          if (found_instance != current) {
-            return false;
-          }
-        }
-      } else if (current->IsPhi()) {
-        // Push all inputs to the worklist. Those should be Phis or NewInstance.
-        for (HInstruction* input : current->GetInputs()) {
-          if (!input->IsPhi() && !input->IsNewInstance()) {
-            return false;
-          }
-          worklist.push_back(input);
-        }
-      } else {
-        // The verifier prevents any other DEX uses of the uninitialized string.
-        if (!current->IsEqual() && !current->IsNotEqual()) {
-          return false;
-        }
-        continue;
-      }
-      current->ReplaceUsesDominatedBy(invoke, invoke);
-      current->ReplaceEnvUsesDominatedBy(invoke, invoke);
-      // Push all users to the worklist. Now that we have replaced
-      // the uses dominated by the invokes, the remaining users should only
-      // be Phi, or Equal/NotEqual.
-      for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
-        HInstruction* user = use.GetUser();
-        if (!user->IsPhi() && !user->IsEqual() && !user->IsNotEqual()) {
-          return false;
-        }
-        worklist.push_back(user);
-      }
-    } while (!worklist.empty());
-    seen_instructions.clear();
-    if (found_instance == nullptr) {
+  for (HInvoke* invoke : uninitialized_string_phis_) {
+    HInstruction* str = invoke->InputAt(invoke->InputCount() - 1);
+    if (str->IsPhi()) {
+      // If, after redundant and dead phi elimination, a phi still feeds
+      // the invoke, then we must be compiling a method with irreducible loops. Just bail.
+      DCHECK(graph_->HasIrreducibleLoops());
       return false;
     }
+    DCHECK(str->IsNewInstance());
+    AddUninitializedString(str->AsNewInstance());
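+    // Replace uses of the allocation dominated by the StringFactory call with its result.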
+    str->ReplaceUsesDominatedBy(invoke, invoke);
+    str->ReplaceEnvUsesDominatedBy(invoke, invoke);
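+    // Remove the extra `this` input added during instruction building.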
+    invoke->RemoveInputAt(invoke->InputCount() - 1);
   }
   return true;
 }
@@ -522,8 +461,9 @@
     DCHECK(new_instance->IsStringAlloc());
 
     // Replace NewInstance of String with NullConstant if not used prior to
-    // calling StringFactory. In case of deoptimization, the interpreter is
-    // expected to skip null check on the `this` argument of the StringFactory call.
+    // calling StringFactory. We check for alias environments in case of deoptimization.
+    // The interpreter is expected to skip the null check on the `this` argument of the
+    // StringFactory call.
     if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) {
       new_instance->ReplaceWith(graph_->GetNullConstant());
       new_instance->GetBlock()->RemoveInstruction(new_instance);
@@ -558,13 +498,6 @@
 GraphAnalysisResult SsaBuilder::BuildSsa() {
   DCHECK(!graph_->IsInSsaForm());
 
-  // Replace Phis that feed in a String.<init>, as well as their aliases, with
-  // the actual String allocation invocation. We do this first, as the phis stored in
-  // the data structure might get removed from the graph in later stages during `BuildSsa`.
-  if (!ReplaceUninitializedStringPhis()) {
-    return kAnalysisSkipped;
-  }
-
   // Propagate types of phis. At this point, phis are typed void in the general
   // case, or float/double/reference if we created an equivalent phi. So we need
   // to propagate the types across phis to give them a correct type. If a type
@@ -623,6 +556,14 @@
   // input types.
   dead_phi_elimimation.EliminateDeadPhis();
 
+  // Replace Phis that feed into a String.<init> during instruction building. We
+  // run this after redundant and dead phi elimination to make sure the phi will
+  // have been replaced by the actual allocation. Only in the presence of an
+  // irreducible loop can a phi still be the input, in which case we bail.
+  if (!ReplaceUninitializedStringPhis()) {
+    return kAnalysisFailIrreducibleLoopAndStringInit;
+  }
+
   // HInstructionBuidler replaced uses of NewInstances of String with the
   // results of their corresponding StringFactory calls. Unless the String
   // objects are used before they are initialized, they can be replaced with
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index bae15ac..bb892c9 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -97,8 +97,8 @@
     }
   }
 
-  void AddUninitializedStringPhi(HPhi* phi, HInvoke* invoke) {
-    uninitialized_string_phis_.push_back(std::make_pair(phi, invoke));
+  void AddUninitializedStringPhi(HInvoke* invoke) {
+    uninitialized_string_phis_.push_back(invoke);
   }
 
  private:
@@ -124,6 +124,7 @@
 
   void RemoveRedundantUninitializedStrings();
   bool ReplaceUninitializedStringPhis();
+  bool HasAliasInEnvironments(HInstruction* instruction);
 
   HGraph* const graph_;
   Handle<mirror::ClassLoader> class_loader_;
@@ -137,7 +138,7 @@
   ScopedArenaVector<HArrayGet*> ambiguous_agets_;
   ScopedArenaVector<HArraySet*> ambiguous_asets_;
   ScopedArenaVector<HNewInstance*> uninitialized_strings_;
-  ScopedArenaVector<std::pair<HPhi*, HInvoke*>> uninitialized_string_phis_;
+  ScopedArenaVector<HInvoke*> uninitialized_string_phis_;
 
   DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
 };
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index a851cfa..78a8dd6 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -46,7 +46,7 @@
   LOG(ERROR) << "Copyright (C) 2016 The Android Open Source Project\n";
   LOG(ERROR) << kProgramName
              << ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]"
-                " [-s] [-t] [-v] [-w directory] dexfile...\n";
+                " [-s] [-t] [-u] [-v] [-w directory] dexfile...\n";
   LOG(ERROR) << " -a : display annotations";
   LOG(ERROR) << " -b : build dex_ir";
   LOG(ERROR) << " -c : verify checksum and exit";
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 76f57da..1f1011e 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -85,15 +85,15 @@
 
   // Load a single bit in the region. The bit at offset 0 is the least
   // significant bit in the first byte.
-  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
-  ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
+  ALWAYS_INLINE bool LoadBit(size_t bit_offset) const {
     DCHECK_LT(bit_offset, bit_size_);
-    size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
-    size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
-    return ((data_[index] >> shift) & 1) != 0;
+    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
+    return ((data[index] >> shift) & 1) != 0;
   }
 
-  ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) {
+  ALWAYS_INLINE void StoreBit(size_t bit_offset, bool value) {
     DCHECK_LT(bit_offset, bit_size_);
     uint8_t* data = reinterpret_cast<uint8_t*>(data_);
     size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 6dd2381..b65710b 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -73,11 +73,11 @@
   file_.reset(file);
 }
 
-ScratchFile::ScratchFile(ScratchFile&& other) {
+ScratchFile::ScratchFile(ScratchFile&& other) noexcept {
   *this = std::move(other);
 }
 
-ScratchFile& ScratchFile::operator=(ScratchFile&& other) {
+ScratchFile& ScratchFile::operator=(ScratchFile&& other) noexcept {
   if (GetFile() != other.GetFile()) {
     std::swap(filename_, other.filename_);
     std::swap(file_, other.file_);
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index d645fa1..32a2628 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -54,9 +54,9 @@
 
   ScratchFile(const ScratchFile& other, const char* suffix);
 
-  ScratchFile(ScratchFile&& other);
+  ScratchFile(ScratchFile&& other) noexcept;
 
-  ScratchFile& operator=(ScratchFile&& other);
+  ScratchFile& operator=(ScratchFile&& other) noexcept;
 
   explicit ScratchFile(File* file);
 
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index 315f4d2..323fa4e 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -42,8 +42,16 @@
   private: \
     void* operator new(size_t) = delete  // NOLINT
 
-#define OFFSETOF_MEMBER(t, f) \
-  (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u))  // NOLINT
+// offsetof is not defined by the spec for types with non-standard layout;
+// however, compilers implement it in practice.
+// (Note that reinterpret_cast is not valid in a constant expression.)
+//
+// An alternative approach would be something like:
+// #define OFFSETOF_HELPER(t, f) \
+//   (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u))
+// #define OFFSETOF_MEMBER(t, f) \
+//   (__builtin_constant_p(OFFSETOF_HELPER(t,f)) ? OFFSETOF_HELPER(t,f) : OFFSETOF_HELPER(t,f))
+#define OFFSETOF_MEMBER(t, f) offsetof(t, f)
 
 #define OFFSETOF_MEMBERPTR(t, f) \
   (reinterpret_cast<uintptr_t>(&(reinterpret_cast<t*>(16)->*f)) - static_cast<uintptr_t>(16))  // NOLINT
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 1bf553d..06a168d 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -585,7 +585,7 @@
                 redzone_size);
 }
 
-MemMap::MemMap(MemMap&& other)
+MemMap::MemMap(MemMap&& other) noexcept
     : MemMap() {
   swap(other);
 }
@@ -692,6 +692,24 @@
                           int tail_prot,
                           std::string* error_msg,
                           bool use_debug_name) {
+  return RemapAtEnd(new_end,
+                    tail_name,
+                    tail_prot,
+                    MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
+                    /* fd */ -1,
+                    /* offset */ 0,
+                    error_msg,
+                    use_debug_name);
+}
+
+MemMap MemMap::RemapAtEnd(uint8_t* new_end,
+                          const char* tail_name,
+                          int tail_prot,
+                          int flags,
+                          int fd,
+                          off_t offset,
+                          std::string* error_msg,
+                          bool use_debug_name) {
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -715,9 +733,6 @@
   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
   DCHECK_ALIGNED(tail_base_size, kPageSize);
 
-  unique_fd fd;
-  int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
-
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
   // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
   // removes old mappings for the overlapping region. This makes the operation atomic
@@ -726,13 +741,13 @@
                                                           tail_base_size,
                                                           tail_prot,
                                                           flags,
-                                                          fd.get(),
-                                                          0));
+                                                          fd,
+                                                          offset));
   if (actual == MAP_FAILED) {
     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
-    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
+    *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
-                              fd.get());
+                              fd);
     return Invalid();
   }
   // Update *this.
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 20eda32..4f92492 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -68,8 +68,8 @@
     return MemMap();
   }
 
-  MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
-  MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
+  MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
+  MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
     Reset();
     swap(other);
     return *this;
@@ -261,6 +261,16 @@
                     std::string* error_msg,
                     bool use_debug_name = true);
 
+  // Unmap the pages at the end and remap them as a new memory map backed by the given file.
+  MemMap RemapAtEnd(uint8_t* new_end,
+                    const char* tail_name,
+                    int tail_prot,
+                    int tail_flags,
+                    int fd,
+                    off_t offset,
+                    std::string* error_msg,
+                    bool use_debug_name = true);
+
   // Take ownership of pages at the beginning of the mapping. The mapping must be an
   // anonymous reservation mapping, owning entire pages. The `byte_count` must not
   // exceed the size of this reservation.
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index ab3d18f..bf143d4 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -455,6 +455,53 @@
 }
 #endif
 
+TEST_F(MemMapTest, RemapFileViewAtEnd) {
+  CommonInit();
+  std::string error_msg;
+  ScratchFile scratch_file;
+
+  // Create a scratch file 3 pages large.
+  constexpr size_t kMapSize = 3 * kPageSize;
+  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+  memset(data.get(), 1, kPageSize);
+  memset(&data[0], 0x55, kPageSize);
+  memset(&data[kPageSize], 0x5a, kPageSize);
+  memset(&data[2 * kPageSize], 0xaa, kPageSize);
+  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+
+  MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               scratch_file.GetFd(),
+                               /*start*/0,
+                               /*low_4gb*/true,
+                               scratch_file.GetFilename().c_str(),
+                               &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map.Size(), kMapSize);
+  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
+  ASSERT_EQ(data[0], *map.Begin());
+  ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
+  ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));
+
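+  // Remap the tail of the mapping as a file view at decreasing offsets and check that the
+  // tail contents match the file data at each offset.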
+  for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
+    MemMap tail = map.RemapAtEnd(map.Begin() + offset,
+                                 "bad_offset_map",
+                                 PROT_READ,
+                                 MAP_PRIVATE | MAP_FIXED,
+                                 scratch_file.GetFd(),
+                                 offset,
+                                 &error_msg);
+    ASSERT_TRUE(tail.IsValid()) << error_msg;
+    ASSERT_TRUE(error_msg.empty());
+    ASSERT_EQ(offset, map.Size());
+    ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
+    ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
+    ASSERT_EQ(data[offset], *tail.Begin());
+  }
+}
+
 TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
   // Some MIPS32 hardware (namely the Creator Ci20 development board)
   // cannot allocate in the 2GB-4GB region.
diff --git a/libartbase/base/scoped_arena_allocator.cc b/libartbase/base/scoped_arena_allocator.cc
index ab05c60..a54f350 100644
--- a/libartbase/base/scoped_arena_allocator.cc
+++ b/libartbase/base/scoped_arena_allocator.cc
@@ -106,7 +106,7 @@
   return ptr;
 }
 
-ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other)
+ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other) noexcept
     : DebugStackReference(std::move(other)),
       DebugStackRefCounter(),
       ArenaAllocatorStats(other),
diff --git a/libartbase/base/scoped_arena_allocator.h b/libartbase/base/scoped_arena_allocator.h
index 7eaec5e..52d0361 100644
--- a/libartbase/base/scoped_arena_allocator.h
+++ b/libartbase/base/scoped_arena_allocator.h
@@ -138,7 +138,7 @@
 class ScopedArenaAllocator
     : private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
  public:
-  ScopedArenaAllocator(ScopedArenaAllocator&& other);
+  ScopedArenaAllocator(ScopedArenaAllocator&& other) noexcept;
   explicit ScopedArenaAllocator(ArenaStack* arena_stack);
   ~ScopedArenaAllocator();
 
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index d715670..de60277 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -91,7 +91,7 @@
   }
 }
 
-FdFile::FdFile(FdFile&& other)
+FdFile::FdFile(FdFile&& other) noexcept
     : guard_state_(other.guard_state_),
       fd_(other.fd_),
       file_path_(std::move(other.file_path_)),
@@ -105,7 +105,7 @@
   other.fd_ = -1;
 }
 
-FdFile& FdFile::operator=(FdFile&& other) {
+FdFile& FdFile::operator=(FdFile&& other) noexcept {
   if (this == &other) {
     return *this;
   }
diff --git a/libartbase/base/unix_file/fd_file.h b/libartbase/base/unix_file/fd_file.h
index e362ed1..54a16a2 100644
--- a/libartbase/base/unix_file/fd_file.h
+++ b/libartbase/base/unix_file/fd_file.h
@@ -46,10 +46,10 @@
   FdFile(const std::string& path, int flags, mode_t mode, bool checkUsage);
 
   // Move constructor.
-  FdFile(FdFile&& other);
+  FdFile(FdFile&& other) noexcept;
 
   // Move assignment operator.
-  FdFile& operator=(FdFile&& other);
+  FdFile& operator=(FdFile&& other) noexcept;
 
   // Release the file descriptor. This will make further accesses to this FdFile invalid. Disables
   // all further state checking.
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index f5e08da..2ebde5e 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -1636,25 +1636,7 @@
   return total;
 }
 
-// Produce a non-owning vector from a vector.
-template<typename T>
-const std::vector<T*>* MakeNonOwningVector(const std::vector<std::unique_ptr<T>>* owning_vector) {
-  auto non_owning_vector = new std::vector<T*>();
-  for (auto& element : *owning_vector) {
-    non_owning_vector->push_back(element.get());
-  }
-  return non_owning_vector;
-}
-
-std::string ProfileCompilationInfo::DumpInfo(
-    const std::vector<std::unique_ptr<const DexFile>>* dex_files,
-    bool print_full_dex_location) const {
-  std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
-      MakeNonOwningVector(dex_files));
-  return DumpInfo(non_owning_dex_files.get(), print_full_dex_location);
-}
-
-std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
+std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>& dex_files,
                                              bool print_full_dex_location) const {
   std::ostringstream os;
   if (info_.empty()) {
@@ -1677,11 +1659,10 @@
     os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
     os << " [checksum=" << std::hex << dex_data->checksum << "]" << std::dec;
     const DexFile* dex_file = nullptr;
-    if (dex_files != nullptr) {
-      for (size_t i = 0; i < dex_files->size(); i++) {
-        if (dex_data->profile_key == (*dex_files)[i]->GetLocation()) {
-          dex_file = (*dex_files)[i];
-        }
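+    // Find the dex file matching both the profile key (location) and the checksum.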
+    for (const DexFile* current : dex_files) {
+      if (dex_data->profile_key == current->GetLocation() &&
+          dex_data->checksum == current->GetLocationChecksum()) {
+        dex_file = current;
       }
     }
     os << "\n\thot methods: ";
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 0dbf490..92fa098 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -377,12 +377,10 @@
                                                       uint16_t dex_method_index) const;
 
   // Dump all the loaded profile info into a string and returns it.
-  // If dex_files is not null then the method indices will be resolved to their
+  // If dex_files is not empty then the method indices will be resolved to their
   // names.
   // This is intended for testing and debugging.
-  std::string DumpInfo(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
-                       bool print_full_dex_location = true) const;
-  std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
+  std::string DumpInfo(const std::vector<const DexFile*>& dex_files,
                        bool print_full_dex_location = true) const;
 
   // Return the classes and methods for a given dex file through out args. The out args are the set
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 43d0b10..300a009 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -843,16 +843,6 @@
     return;
   }
 
-  // Call-back for when we get an invokevirtual or an invokeinterface.
-  void InvokeVirtualOrInterface(art::Thread* self ATTRIBUTE_UNUSED,
-                                art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                art::ArtMethod* caller ATTRIBUTE_UNUSED,
-                                uint32_t dex_pc ATTRIBUTE_UNUSED,
-                                art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
-    return;
-  }
-
  private:
   EventHandler* const event_handler_;
 };
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 1ed615b..f6113df 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -145,7 +145,7 @@
   FakeJvmtiDeleter() {}
 
   FakeJvmtiDeleter(FakeJvmtiDeleter&) = default;
-  FakeJvmtiDeleter(FakeJvmtiDeleter&&) = default;
+  FakeJvmtiDeleter(FakeJvmtiDeleter&&) noexcept = default;
   FakeJvmtiDeleter& operator=(const FakeJvmtiDeleter&) = default;
 
   template <typename U> void operator()(const U* ptr) const {
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index e2b98b3..220ad22 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -77,7 +77,7 @@
         start(start_),
         stop(stop_) {}
   GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
-  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;
+  GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;
 
   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::ArtMethod* m = GetMethod();
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 41ef6c2..b54c77d 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -818,37 +818,6 @@
   return ERR(NONE);
 }
 
-class ScopedSuspendByPeer {
- public:
-  explicit ScopedSuspendByPeer(jthread jtarget)
-      : thread_list_(art::Runtime::Current()->GetThreadList()),
-        timeout_(false),
-        target_(thread_list_->SuspendThreadByPeer(jtarget,
-                                                  /* suspend_thread */ true,
-                                                  art::SuspendReason::kInternal,
-                                                  &timeout_)) { }
-  ~ScopedSuspendByPeer() {
-    if (target_ != nullptr) {
-      if (!thread_list_->Resume(target_, art::SuspendReason::kInternal)) {
-        LOG(ERROR) << "Failed to resume " << target_ << "!";
-      }
-    }
-  }
-
-  art::Thread* GetTargetThread() const {
-    return target_;
-  }
-
-  bool TimedOut() const {
-    return timeout_;
-  }
-
- private:
-  art::ThreadList* thread_list_;
-  bool timeout_;
-  art::Thread* target_;
-};
-
 jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
                                     jthread target_jthread) {
   // Loop since we need to bail out and try again if we would end up getting suspended while holding
@@ -876,27 +845,29 @@
       if (!GetAliveNativeThread(target_jthread, soa, &target, &err)) {
         return err;
       }
+      art::ThreadState state = target->GetState();
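+      // A thread that has not finished starting cannot be suspended; report it as not alive.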
+      if (state == art::ThreadState::kStarting || target->IsStillStarting()) {
+        return ERR(THREAD_NOT_ALIVE);
+      } else {
+        art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+        if (target->GetUserCodeSuspendCount() != 0) {
+          return ERR(THREAD_SUSPENDED);
+        }
+      }
     }
-    // Get the actual thread in a suspended state so we can change the user-code suspend count.
-    ScopedSuspendByPeer ssbp(target_jthread);
-    if (ssbp.GetTargetThread() == nullptr && !ssbp.TimedOut()) {
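+    // Suspend the thread directly with a kForUserCode suspend reason; we may time out and retry.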
+    bool timeout = true;
+    art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
+        target_jthread,
+        /* request_suspension */ true,
+        art::SuspendReason::kForUserCode,
+        &timeout);
+    if (ret_target == nullptr && !timeout) {
       // TODO It would be good to get more information about why exactly the thread failed to
       // suspend.
       return ERR(INTERNAL);
-    } else if (!ssbp.TimedOut()) {
-      art::ThreadState state = ssbp.GetTargetThread()->GetState();
-      if (state == art::ThreadState::kStarting || ssbp.GetTargetThread()->IsStillStarting()) {
-        return ERR(THREAD_NOT_ALIVE);
-      }
-      // we didn't time out and got a result. Suspend the thread by usercode and return. It's
-      // already suspended internal so we don't need to do anything but increment the count.
-      art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
-      if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() != 0) {
-        return ERR(THREAD_SUSPENDED);
-      }
-      bool res = ssbp.GetTargetThread()->ModifySuspendCount(
-          self, +1, nullptr, art::SuspendReason::kForUserCode);
-      return res ? OK : ERR(INTERNAL);
+    } else if (!timeout) {
+      // We didn't time out and got a result, so the suspension succeeded.
+      return OK;
     }
     // We timed out. Just go around and try again.
   } while (true);
@@ -905,17 +876,6 @@
 
 jvmtiError ThreadUtil::SuspendSelf(art::Thread* self) {
   CHECK(self == art::Thread::Current());
-  if (!self->CanBeSuspendedByUserCode()) {
-    // TODO This is really undesirable. As far as I can tell this is can only come about because of
-    // class-loads in the jit-threads (through either VMObjectAlloc or the ClassLoad/ClassPrepare
-    // events that we send). It's unlikely that anyone would be suspending themselves there since
-    // it's almost guaranteed to cause a deadlock but it is technically allowed. Ideally we'd want
-    // to put a CHECK here (or in the event-dispatch code) that we are only in this situation when
-    // sending the GC callbacks but the jit causing events means we cannot do this.
-    LOG(WARNING) << "Attempt to self-suspend on a thread without suspension enabled. Thread is "
-                 << *self;
-    return ERR(INTERNAL);
-  }
   {
     art::MutexLock mu(self, *art::Locks::user_code_suspension_lock_);
     art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
@@ -963,6 +923,7 @@
     return ERR(NULL_POINTER);
   }
   art::Thread* self = art::Thread::Current();
+  art::Thread* target;
   // Retry until we know we won't get suspended by user code while resuming something.
   do {
     SuspendCheck(self);
@@ -973,37 +934,36 @@
       continue;
     }
     // From now on we know we cannot get suspended by user-code.
-    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
-    // have the 'suspend_lock' locked here.
-    art::ScopedObjectAccess soa(self);
-    if (thread == nullptr) {
-      // The thread is the current thread.
-      return ERR(THREAD_NOT_SUSPENDED);
-    } else if (!soa.Env()->IsInstanceOf(thread, art::WellKnownClasses::java_lang_Thread)) {
-      // Not a thread object.
-      return ERR(INVALID_THREAD);
-    } else if (self->GetPeer() == soa.Decode<art::mirror::Object>(thread)) {
-      // The thread is the current thread.
-      return ERR(THREAD_NOT_SUSPENDED);
+    {
+      // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+      // have the 'suspend_lock' locked here.
+      art::ScopedObjectAccess soa(self);
+      art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+      jvmtiError err = ERR(INTERNAL);
+      if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+        return err;
+      } else if (target == self) {
+        // The ScopedObjectAccess above would have paused us until we were no longer suspended,
+        // so we can just return THREAD_NOT_SUSPENDED. Unfortunately we cannot do any real DCHECKs
+        // about the current state since it's all concurrent.
+        return ERR(THREAD_NOT_SUSPENDED);
+      }
+      // The JVMTI spec requires us to return THREAD_NOT_SUSPENDED if it is alive but we really
+      // cannot tell why resume failed.
+      {
+        art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+        if (target->GetUserCodeSuspendCount() == 0) {
+          return ERR(THREAD_NOT_SUSPENDED);
+        }
+      }
     }
-    ScopedSuspendByPeer ssbp(thread);
-    if (ssbp.TimedOut()) {
-      // Unknown error. Couldn't suspend thread!
-      return ERR(INTERNAL);
-    } else if (ssbp.GetTargetThread() == nullptr) {
-      // Thread must not be alive.
-      return ERR(THREAD_NOT_ALIVE);
-    }
-    // We didn't time out and got a result. Check the thread is suspended by usercode, unsuspend it
-    // and return. It's already suspended internal so we don't need to do anything but decrement the
-    // count.
-    art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
-    if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() == 0) {
-      return ERR(THREAD_NOT_SUSPENDED);
-    } else if (!ssbp.GetTargetThread()->ModifySuspendCount(
-        self, -1, nullptr, art::SuspendReason::kForUserCode)) {
+    // It is okay that we don't hold the thread_list_lock here, since we know the thread cannot
+    // die while it is held suspended by a SuspendReason::kForUserCode suspend.
+    DCHECK(target != self);
+    if (!art::Runtime::Current()->GetThreadList()->Resume(target,
+                                                          art::SuspendReason::kForUserCode)) {
       // TODO Give a better error.
-      // This should not really be possible and is probably some race.
+      // This is most likely THREAD_NOT_SUSPENDED but we cannot really be sure.
       return ERR(INTERNAL);
     } else {
       return OK;
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
deleted file mode 100644
index 13c8f47..0000000
--- a/patchoat/Android.bp
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-cc_defaults {
-    name: "patchoat-defaults",
-    host_supported: true,
-    defaults: ["art_defaults"],
-    srcs: ["patchoat.cc"],
-    target: {
-        android: {
-            compile_multilib: "prefer32",
-        },
-    },
-    shared_libs: [
-        "libartbase",
-        "libbase",
-        "libcrypto", // For computing the digest of image file
-    ],
-}
-
-art_cc_binary {
-    name: "patchoat",
-    defaults: ["patchoat-defaults"],
-    shared_libs: [
-        "libart",
-    ],
-}
-
-art_cc_binary {
-    name: "patchoatd",
-    defaults: [
-        "art_debug_defaults",
-        "patchoat-defaults",
-    ],
-    shared_libs: [
-        "libartd",
-    ],
-}
-
-art_cc_test {
-    name: "art_patchoat_tests",
-    defaults: [
-        "art_gtest_defaults",
-    ],
-    srcs: [
-        "patchoat_test.cc",
-    ],
-    shared_libs: [
-        "libcrypto", // For computing the digest of image file
-    ],
-}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
deleted file mode 100644
index 5d38e8b..0000000
--- a/patchoat/patchoat.cc
+++ /dev/null
@@ -1,1324 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "patchoat.h"
-
-#include <openssl/sha.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "android-base/file.h"
-#include <android-base/parseint.h>
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "art_field-inl.h"
-#include "art_method-inl.h"
-#include "base/bit_memory_region.h"
-#include "base/dumpable.h"
-#include "base/file_utils.h"
-#include "base/leb128.h"
-#include "base/logging.h"  // For InitLogging.
-#include "base/mutex.h"
-#include "base/memory_region.h"
-#include "base/memory_tool.h"
-#include "base/os.h"
-#include "base/scoped_flock.h"
-#include "base/stringpiece.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/utils.h"
-#include "class_root.h"
-#include "elf_file.h"
-#include "elf_file_impl.h"
-#include "elf_utils.h"
-#include "gc/space/image_space.h"
-#include "image-inl.h"
-#include "intern_table.h"
-#include "mirror/dex_cache.h"
-#include "mirror/executable.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "mirror/object-refvisitor-inl.h"
-#include "mirror/reference.h"
-#include "noop_compiler_callbacks.h"
-#include "offsets.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-namespace {
-
-static const OatHeader* GetOatHeader(const ElfFile* elf_file) {
-  uint64_t off = 0;
-  if (!elf_file->GetSectionOffsetAndSize(".rodata", &off, nullptr)) {
-    return nullptr;
-  }
-
-  OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + off);
-  return oat_header;
-}
-
-static File* CreateOrOpen(const char* name) {
-  if (OS::FileExists(name)) {
-    return OS::OpenFileReadWrite(name);
-  } else {
-    std::unique_ptr<File> f(OS::CreateEmptyFile(name));
-    if (f.get() != nullptr) {
-      if (fchmod(f->Fd(), 0644) != 0) {
-        PLOG(ERROR) << "Unable to make " << name << " world readable";
-        unlink(name);
-        return nullptr;
-      }
-    }
-    return f.release();
-  }
-}
-
-// Either try to close the file (close=true), or erase it.
-static bool FinishFile(File* file, bool close) {
-  if (close) {
-    if (file->FlushCloseOrErase() != 0) {
-      PLOG(ERROR) << "Failed to flush and close file.";
-      return false;
-    }
-    return true;
-  } else {
-    file->Erase();
-    return false;
-  }
-}
-
-static bool SymlinkFile(const std::string& input_filename, const std::string& output_filename) {
-  if (input_filename == output_filename) {
-    // Input and output are the same, nothing to do.
-    return true;
-  }
-
-  // Unlink the original filename, since we are overwriting it.
-  unlink(output_filename.c_str());
-
-  // Create a symlink from the source file to the target path.
-  if (symlink(input_filename.c_str(), output_filename.c_str()) < 0) {
-    PLOG(ERROR) << "Failed to create symlink " << output_filename << " -> " << input_filename;
-    return false;
-  }
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "Created symlink " << output_filename << " -> " << input_filename;
-  }
-
-  return true;
-}
-
-// Holder class for runtime options and related objects.
-class PatchoatRuntimeOptionsHolder {
- public:
-  PatchoatRuntimeOptionsHolder(const std::string& image_location, InstructionSet isa) {
-    options_.push_back(std::make_pair("compilercallbacks", &callbacks_));
-    img_ = "-Ximage:" + image_location;
-    options_.push_back(std::make_pair(img_.c_str(), nullptr));
-    isa_name_ = GetInstructionSetString(isa);
-    options_.push_back(std::make_pair("imageinstructionset",
-                                      reinterpret_cast<const void*>(isa_name_.c_str())));
-    options_.push_back(std::make_pair("-Xno-sig-chain", nullptr));
-    // We do not want the runtime to attempt to patch the image.
-    options_.push_back(std::make_pair("-Xnorelocate", nullptr));
-    // Don't try to compile.
-    options_.push_back(std::make_pair("-Xnoimage-dex2oat", nullptr));
-    // Do not accept broken image.
-    options_.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
-  }
-
-  const RuntimeOptions& GetRuntimeOptions() {
-    return options_;
-  }
-
- private:
-  RuntimeOptions options_;
-  NoopCompilerCallbacks callbacks_;
-  std::string isa_name_;
-  std::string img_;
-};
-
-}  // namespace
-
-bool PatchOat::GeneratePatch(
-    const MemMap& original,
-    const MemMap& relocated,
-    std::vector<uint8_t>* output,
-    std::string* error_msg) {
-  // FORMAT of the patch (aka image relocation) file:
-  // * SHA-256 digest (32 bytes) of original/unrelocated file (e.g., the one from /system)
-  // * List of monotonically increasing offsets (max value defined by uint32_t) at which relocations
-  //   occur.
-  //   Each element is represented as the delta from the previous offset in the list (first element
-  //   is a delta from 0). Each delta is encoded using unsigned LEB128: little-endian
-  //   variable-length 7 bits per byte encoding, where all bytes have the highest bit (0x80) set
-  //   except for the final byte which does not have that bit set. For example, 0x3f is offset 0x3f,
-  //   whereas 0xbf 0x05 is offset (0x3f & 0x7f) | (0x5 << 7) which is 0x2bf. Most deltas end up
-  //   being encoded using just one byte, achieving a ~4x decrease in relocation file size compared
-  //   to the encoding where offsets are stored verbatim, as uint32_t.
-
-  size_t original_size = original.Size();
-  size_t relocated_size = relocated.Size();
-  if (original_size != relocated_size) {
-    *error_msg =
-        StringPrintf(
-            "Original and relocated image sizes differ: %zu vs %zu", original_size, relocated_size);
-    return false;
-  }
-  if (original_size > UINT32_MAX) {
-    *error_msg = StringPrintf("Image too large: %zu", original_size);
-    return false;
-  }
-
-  const ImageHeader& relocated_header =
-      *reinterpret_cast<const ImageHeader*>(relocated.Begin());
-  // Offsets are supposed to differ between original and relocated by this value
-  off_t expected_diff = relocated_header.GetPatchDelta();
-  if (expected_diff == 0) {
-    // Can't identify offsets which are supposed to differ due to relocation
-    *error_msg = "Relocation delta is 0";
-    return false;
-  }
-
-  const ImageHeader* image_header = reinterpret_cast<const ImageHeader*>(original.Begin());
-  if (image_header->GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
-    *error_msg = "Unexpected compressed image.";
-    return false;
-  }
-  if (image_header->IsAppImage()) {
-    *error_msg = "Unexpected app image.";
-    return false;
-  }
-  if (image_header->GetPointerSize() != PointerSize::k32 &&
-      image_header->GetPointerSize() != PointerSize::k64) {
-    *error_msg = "Unexpected pointer size.";
-    return false;
-  }
-  static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
-                "Expecting heap GC roots and references to have the same size.");
-  DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(image_header->GetPointerSize()));
-
-  const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
-                                             kPageSize);
-  const size_t end_of_bitmap = image_bitmap_offset + image_header->GetImageBitmapSection().Size();
-  const ImageSection& relocation_section = image_header->GetImageRelocationsSection();
-  MemoryRegion relocations_data(original.Begin() + end_of_bitmap, relocation_section.Size());
-  size_t image_end = image_header->GetClassTableSection().End();
-  if (!IsAligned<sizeof(GcRoot<mirror::Object>)>(image_end)) {
-    *error_msg = StringPrintf("Unaligned image end: %zu", image_end);
-    return false;
-  }
-  size_t num_indexes = image_end / sizeof(GcRoot<mirror::Object>);
-  if (relocation_section.Size() != BitsToBytesRoundUp(num_indexes)) {
-    *error_msg = StringPrintf("Unexpected size of relocation section: %zu expected: %zu",
-                              static_cast<size_t>(relocation_section.Size()),
-                              BitsToBytesRoundUp(num_indexes));
-    return false;
-  }
-  BitMemoryRegion relocation_bitmap(relocations_data, /* bit_offset */ 0u, num_indexes);
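-  // Sizing example (illustrative, not from the original sources): a 64 MiB image with 4-byte
-  // GC roots has 16 Mi possible relocation positions, so the relocation bitmap occupies
-  // BitsToBytesRoundUp(16 Mi) = 2 MiB.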
-
-  // Output the SHA-256 digest of the original
-  output->resize(SHA256_DIGEST_LENGTH);
-  const uint8_t* original_bytes = original.Begin();
-  SHA256(original_bytes, original_size, output->data());
-
-  // Check the list of offsets at which the original and patched images differ.
-  size_t diff_offset_count = 0;
-  const uint8_t* relocated_bytes = relocated.Begin();
-  for (size_t index = 0; index != num_indexes; ++index) {
-    size_t offset = index * sizeof(GcRoot<mirror::Object>);
-    uint32_t original_value = *reinterpret_cast<const uint32_t*>(original_bytes + offset);
-    uint32_t relocated_value = *reinterpret_cast<const uint32_t*>(relocated_bytes + offset);
-    off_t diff = relocated_value - original_value;
-    if (diff == 0) {
-      CHECK(!relocation_bitmap.LoadBit(index));
-      continue;
-    } else if (diff != expected_diff) {
-      *error_msg =
-          StringPrintf(
-              "Unexpected diff at offset %zu. Expected: %jd, but was: %jd",
-              offset,
-              (intmax_t) expected_diff,
-              (intmax_t) diff);
-      return false;
-    }
-    CHECK(relocation_bitmap.LoadBit(index));
-    diff_offset_count++;
-  }
-  size_t tail_bytes = original_size - image_end;
-  CHECK_EQ(memcmp(original_bytes + image_end, relocated_bytes + image_end, tail_bytes), 0);
-
-  if (diff_offset_count == 0) {
-    *error_msg = "Original and patched images are identical";
-    return false;
-  }
-
-  return true;
-}
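-
-// Minimal sketch (not part of the original patchoat sources) of how the delta list described in
-// the format comment above could be appended to |output| as unsigned LEB128; note that the
-// function as written emits only the SHA-256 digest. AppendRelocationDeltas is a hypothetical
-// helper; ART's own encoder lives in base/leb128.h.
-static void AppendRelocationDeltas(const std::vector<uint32_t>& offsets,
-                                   std::vector<uint8_t>* output) {
-  uint32_t previous = 0u;
-  for (uint32_t offset : offsets) {
-    uint32_t delta = offset - previous;  // Offsets are monotonically increasing.
-    previous = offset;
-    do {
-      uint8_t byte = delta & 0x7fu;
-      delta >>= 7;
-      // All bytes except the last have the high bit (0x80) set.
-      output->push_back(static_cast<uint8_t>((delta != 0u) ? (byte | 0x80u) : byte));
-    } while (delta != 0u);
-  }
-}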
-
-static bool WriteRelFile(
-    const MemMap& original,
-    const MemMap& relocated,
-    const std::string& rel_filename,
-    std::string* error_msg) {
-  std::vector<uint8_t> output;
-  if (!PatchOat::GeneratePatch(original, relocated, &output, error_msg)) {
-    return false;
-  }
-
-  std::unique_ptr<File> rel_file(OS::CreateEmptyFileWriteOnly(rel_filename.c_str()));
-  if (rel_file.get() == nullptr) {
-    *error_msg = StringPrintf("Failed to create/open output file %s", rel_filename.c_str());
-    return false;
-  }
-  if (!rel_file->WriteFully(output.data(), output.size())) {
-    *error_msg = StringPrintf("Failed to write to %s", rel_filename.c_str());
-    return false;
-  }
-  if (rel_file->FlushCloseOrErase() != 0) {
-    *error_msg = StringPrintf("Failed to flush and close %s", rel_filename.c_str());
-    return false;
-  }
-
-  return true;
-}
-
-static bool CheckImageIdenticalToOriginalExceptForRelocation(
-    const std::string& relocated_filename,
-    const std::string& original_filename,
-    std::string* error_msg) {
-  *error_msg = "";
-  std::string rel_filename = original_filename + ".rel";
-  std::unique_ptr<File> rel_file(OS::OpenFileForReading(rel_filename.c_str()));
-  if (rel_file.get() == nullptr) {
-    *error_msg = StringPrintf("Failed to open image relocation file %s", rel_filename.c_str());
-    return false;
-  }
-  int64_t rel_size = rel_file->GetLength();
-  if (rel_size < 0) {
-    *error_msg = StringPrintf("Error while getting size of image relocation file %s",
-                              rel_filename.c_str());
-    return false;
-  }
-  if (rel_size != SHA256_DIGEST_LENGTH) {
-    *error_msg = StringPrintf("Unexpected size of image relocation file %s: %" PRId64
-                                  ", expected %zu",
-                              rel_filename.c_str(),
-                              rel_size,
-                              static_cast<size_t>(SHA256_DIGEST_LENGTH));
-    return false;
-  }
-  std::unique_ptr<uint8_t[]> rel(new uint8_t[rel_size]);
-  if (!rel_file->ReadFully(rel.get(), rel_size)) {
-    *error_msg = StringPrintf("Failed to read image relocation file %s", rel_filename.c_str());
-    return false;
-  }
-
-  std::unique_ptr<File> image_file(OS::OpenFileForReading(relocated_filename.c_str()));
-  if (image_file.get() == nullptr) {
-    *error_msg = StringPrintf("Unable to open relocated image file %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-
-  int64_t image_size = image_file->GetLength();
-  if (image_size < 0) {
-    *error_msg = StringPrintf("Error while getting size of relocated image file %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-  if (static_cast<uint64_t>(image_size) < sizeof(ImageHeader)) {
-    *error_msg =
-        StringPrintf(
-            "Relocated image file %s too small: %" PRId64,
-                relocated_filename.c_str(), image_size);
-    return false;
-  }
-  if (image_size > std::numeric_limits<uint32_t>::max()) {
-    *error_msg =
-        StringPrintf(
-            "Relocated image file %s too large: %" PRId64, relocated_filename.c_str(), image_size);
-    return false;
-  }
-
-  std::unique_ptr<uint8_t[]> image(new uint8_t[image_size]);
-  if (!image_file->ReadFully(image.get(), image_size)) {
-    *error_msg = StringPrintf("Failed to read relocated image file %s", relocated_filename.c_str());
-    return false;
-  }
-
-  const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get());
-  if (image_header.GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
-    *error_msg = StringPrintf("Unsupported compressed image file %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-  size_t image_end = image_header.GetClassTableSection().End();
-  if (image_end > static_cast<uint64_t>(image_size) || !IsAligned<4u>(image_end)) {
-    *error_msg = StringPrintf("Heap size too big or unaligned in image file %s: %zu",
-                              relocated_filename.c_str(),
-                              image_end);
-    return false;
-  }
-  size_t number_of_relocation_locations = image_end / 4u;
-  const ImageSection& relocation_section = image_header.GetImageRelocationsSection();
-  if (relocation_section.Size() != BitsToBytesRoundUp(number_of_relocation_locations)) {
-    *error_msg = StringPrintf("Unexpected size of relocation section in image file %s: %zu"
-                                  " expected: %zu",
-                              relocated_filename.c_str(),
-                              static_cast<size_t>(relocation_section.Size()),
-                              BitsToBytesRoundUp(number_of_relocation_locations));
-    return false;
-  }
-  if (relocation_section.End() != image_size) {
-    *error_msg = StringPrintf("Relocation section does not end at file end in image file %s: %zu"
-                                  " expected: %" PRId64,
-                              relocated_filename.c_str(),
-                              static_cast<size_t>(relocation_section.End()),
-                              image_size);
-    return false;
-  }
-
-  off_t expected_diff = image_header.GetPatchDelta();
-  if (expected_diff == 0) {
-    *error_msg = StringPrintf("Unsupported patch delta of zero in %s",
-                              relocated_filename.c_str());
-    return false;
-  }
-
-  // Relocated image is expected to differ from the original due to relocation.
-  // Unrelocate the image in memory to compensate.
-  MemoryRegion relocations(image.get() + relocation_section.Offset(), relocation_section.Size());
-  BitMemoryRegion relocation_bitmask(relocations,
-                                     /* bit_offset */ 0u,
-                                     number_of_relocation_locations);
-  for (size_t index = 0; index != number_of_relocation_locations; ++index) {
-    if (relocation_bitmask.LoadBit(index)) {
-      uint32_t* image_value = reinterpret_cast<uint32_t*>(image.get() + index * 4u);
-      *image_value -= expected_diff;
-    }
-  }
-
-  // Image in memory is now supposed to be identical to the original.  We
-  // confirm this by comparing the digest of the in-memory image to the expected
-  // digest from relocation file.
-  uint8_t image_digest[SHA256_DIGEST_LENGTH];
-  SHA256(image.get(), image_size, image_digest);
-  if (memcmp(image_digest, rel.get(), SHA256_DIGEST_LENGTH) != 0) {
-    *error_msg =
-        StringPrintf(
-            "Relocated image %s does not match the original %s after unrelocation",
-            relocated_filename.c_str(),
-            original_filename.c_str());
-    return false;
-  }
-
-  // Relocated image is identical to the original, once relocations are taken into account
-  return true;
-}
-
-static bool VerifySymlink(const std::string& intended_target, const std::string& link_name) {
-  std::string actual_target;
-  if (!android::base::Readlink(link_name, &actual_target)) {
-    PLOG(ERROR) << "Readlink on " << link_name << " failed.";
-    return false;
-  }
-  return actual_target == intended_target;
-}
-
-static bool VerifyVdexAndOatSymlinks(const std::string& input_image_filename,
-                                     const std::string& output_image_filename) {
-  return VerifySymlink(ImageHeader::GetVdexLocationFromImageLocation(input_image_filename),
-                       ImageHeader::GetVdexLocationFromImageLocation(output_image_filename))
-      && VerifySymlink(ImageHeader::GetOatLocationFromImageLocation(input_image_filename),
-                       ImageHeader::GetOatLocationFromImageLocation(output_image_filename));
-}
-
-bool PatchOat::CreateVdexAndOatSymlinks(const std::string& input_image_filename,
-                                        const std::string& output_image_filename) {
-  std::string input_vdex_filename =
-      ImageHeader::GetVdexLocationFromImageLocation(input_image_filename);
-  std::string input_oat_filename =
-      ImageHeader::GetOatLocationFromImageLocation(input_image_filename);
-
-  std::unique_ptr<File> input_oat_file(OS::OpenFileForReading(input_oat_filename.c_str()));
-  if (input_oat_file.get() == nullptr) {
-    LOG(ERROR) << "Unable to open input oat file at " << input_oat_filename;
-    return false;
-  }
-  std::string error_msg;
-  std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat_file.get(),
-                                             PROT_READ | PROT_WRITE,
-                                             MAP_PRIVATE,
-                                             &error_msg));
-  if (elf == nullptr) {
-    LOG(ERROR) << "Unable to open oat file " << input_oat_filename << " : " << error_msg;
-    return false;
-  }
-
-  const OatHeader* oat_header = GetOatHeader(elf.get());
-  if (oat_header == nullptr) {
-    LOG(ERROR) << "Failed to find oat header in oat file " << input_oat_filename;
-    return false;
-  }
-
-  if (!oat_header->IsValid()) {
-    LOG(ERROR) << "Elf file " << input_oat_filename << " has an invalid oat header";
-    return false;
-  }
-
-  std::string output_vdex_filename =
-      ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
-  std::string output_oat_filename =
-      ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
-
-  return SymlinkFile(input_oat_filename, output_oat_filename) &&
-         SymlinkFile(input_vdex_filename, output_vdex_filename);
-}
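-
-// Layout example (illustrative): for an input image /system/framework/arm64/boot.art and an
-// output image /data/dalvik-cache/arm64/system@framework@boot.art, the symlinks created above
-// would be
-//   /data/dalvik-cache/arm64/system@framework@boot.oat  -> /system/framework/arm64/boot.oat
-//   /data/dalvik-cache/arm64/system@framework@boot.vdex -> /system/framework/arm64/boot.vdex
-// since GetOatLocationFromImageLocation/GetVdexLocationFromImageLocation only swap the extension.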
-
-bool PatchOat::Patch(const std::string& image_location,
-                     off_t delta,
-                     const std::string& output_image_directory,
-                     const std::string& output_image_relocation_directory,
-                     InstructionSet isa,
-                     TimingLogger* timings) {
-  bool output_image = !output_image_directory.empty();
-  bool output_image_relocation = !output_image_relocation_directory.empty();
-  if ((!output_image) && (!output_image_relocation)) {
-    // Nothing to do
-    return true;
-  }
-  if ((output_image_relocation) && (delta == 0)) {
-    LOG(ERROR) << "Cannot output image relocation information when requested relocation delta is 0";
-    return false;
-  }
-
-  CHECK(Runtime::Current() == nullptr);
-  CHECK(!image_location.empty()) << "image file must have a filename.";
-
-  TimingLogger::ScopedTiming t("Runtime Setup", timings);
-
-  CHECK_NE(isa, InstructionSet::kNone);
-
-  // Set up the runtime
-  PatchoatRuntimeOptionsHolder options_holder(image_location, isa);
-  if (!Runtime::Create(options_holder.GetRuntimeOptions(), false)) {
-    LOG(ERROR) << "Unable to initialize runtime";
-    return false;
-  }
-  std::unique_ptr<Runtime> runtime(Runtime::Current());
-
-  // Runtime::Create acquired the mutator_lock_ that is normally given away when we call
-  // Runtime::Start. Give it away now and then switch to a more manageable ScopedObjectAccess.
-  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-  ScopedObjectAccess soa(Thread::Current());
-
-  std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-  std::map<gc::space::ImageSpace*, MemMap> space_to_memmap_map;
-
-  for (size_t i = 0; i < spaces.size(); ++i) {
-    t.NewTiming("Image Patching setup");
-    gc::space::ImageSpace* space = spaces[i];
-    std::string input_image_filename = space->GetImageFilename();
-    std::unique_ptr<File> input_image(OS::OpenFileForReading(input_image_filename.c_str()));
-    if (input_image.get() == nullptr) {
-      LOG(ERROR) << "Unable to open input image file at " << input_image_filename;
-      return false;
-    }
-
-    int64_t image_len = input_image->GetLength();
-    if (image_len < 0) {
-      LOG(ERROR) << "Error while getting image length";
-      return false;
-    }
-    ImageHeader image_header;
-    if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
-                                                  sizeof(image_header), 0)) {
-      LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
-      return false;
-    }
-
-    /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
-    // Nothing special to do right now since the image always needs to get patched.
-    // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
-
-    // Create the map where we will write the image patches to.
-    std::string error_msg;
-    MemMap image = MemMap::MapFile(image_len,
-                                   PROT_READ | PROT_WRITE,
-                                   MAP_PRIVATE,
-                                   input_image->Fd(),
-                                   0,
-                                   /*low_4gb*/false,
-                                   input_image->GetPath().c_str(),
-                                   &error_msg);
-    if (!image.IsValid()) {
-      LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
-      return false;
-    }
-
-
-    space_to_memmap_map.emplace(space, std::move(image));
-    PatchOat p = PatchOat(isa,
-                          &space_to_memmap_map[space],
-                          space->GetLiveBitmap(),
-                          space->GetMemMap(),
-                          delta,
-                          &space_to_memmap_map,
-                          timings);
-
-    t.NewTiming("Patching image");
-    if (!p.PatchImage(i == 0)) {
-      LOG(ERROR) << "Failed to patch image file " << input_image_filename;
-      return false;
-    }
-
-    // Write the patched image spaces.
-    if (output_image) {
-      std::string output_image_filename;
-      if (!GetDalvikCacheFilename(space->GetImageLocation().c_str(),
-                                  output_image_directory.c_str(),
-                                  &output_image_filename,
-                                  &error_msg)) {
-        LOG(ERROR) << "Failed to find relocated image file name: " << error_msg;
-        return false;
-      }
-
-      if (!CreateVdexAndOatSymlinks(input_image_filename, output_image_filename))
-        return false;
-
-      t.NewTiming("Writing image");
-      std::unique_ptr<File> output_image_file(CreateOrOpen(output_image_filename.c_str()));
-      if (output_image_file.get() == nullptr) {
-        LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
-        return false;
-      }
-
-      bool success = p.WriteImage(output_image_file.get());
-      success = FinishFile(output_image_file.get(), success);
-      if (!success) {
-        return false;
-      }
-    }
-
-    if (output_image_relocation) {
-      t.NewTiming("Writing image relocation");
-      std::string original_image_filename(space->GetImageLocation() + ".rel");
-      std::string image_relocation_filename =
-          output_image_relocation_directory
-              + (android::base::StartsWith(original_image_filename, "/") ? "" : "/")
-              + original_image_filename.substr(original_image_filename.find_last_of('/'));
-      int64_t input_image_size = input_image->GetLength();
-      if (input_image_size < 0) {
-        LOG(ERROR) << "Error while getting input image size";
-        return false;
-      }
-      MemMap original = MemMap::MapFile(input_image_size,
-                                        PROT_READ,
-                                        MAP_PRIVATE,
-                                        input_image->Fd(),
-                                        0,
-                                        /*low_4gb*/false,
-                                        input_image->GetPath().c_str(),
-                                        &error_msg);
-      if (!original.IsValid()) {
-        LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
-        return false;
-      }
-
-      const MemMap* relocated = p.image_;
-
-      if (!WriteRelFile(original, *relocated, image_relocation_filename, &error_msg)) {
-        LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
-            << ": " << error_msg;
-        return false;
-      }
-    }
-  }
-
-  if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
-    // We want to just exit on non-debug builds without bringing the runtime down
-    // in an orderly fashion, so intentionally leak the runtime by releasing the unique_ptr.
-    runtime.release();
-  }
-
-  return true;
-}
-
-bool PatchOat::Verify(const std::string& image_location,
-                      const std::string& output_image_directory,
-                      InstructionSet isa,
-                      TimingLogger* timings) {
-  if (image_location.empty()) {
-    LOG(ERROR) << "Original image file not provided";
-    return false;
-  }
-  if (output_image_directory.empty()) {
-    LOG(ERROR) << "Relocated image directory not provided";
-    return false;
-  }
-
-  TimingLogger::ScopedTiming t("Runtime Setup", timings);
-
-  CHECK_NE(isa, InstructionSet::kNone);
-
-  // Set up the runtime
-  PatchoatRuntimeOptionsHolder options_holder(image_location, isa);
-  if (!Runtime::Create(options_holder.GetRuntimeOptions(), false)) {
-    LOG(ERROR) << "Unable to initialize runtime";
-    return false;
-  }
-  std::unique_ptr<Runtime> runtime(Runtime::Current());
-
-  // Runtime::Create acquired the mutator_lock_ that is normally given away when we call
-  // Runtime::Start. Give it away now and then switch to a more manageable ScopedObjectAccess.
-  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-  ScopedObjectAccess soa(Thread::Current());
-
-  t.NewTiming("Image Verification setup");
-  std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-
-  // TODO: Check that no other .rel files exist in the original dir
-
-  bool success = true;
-  std::string image_location_dir = android::base::Dirname(image_location);
-  for (size_t i = 0; i < spaces.size(); ++i) {
-    gc::space::ImageSpace* space = spaces[i];
-
-    std::string relocated_image_filename;
-    std::string error_msg;
-    if (!GetDalvikCacheFilename(space->GetImageLocation().c_str(),
-            output_image_directory.c_str(), &relocated_image_filename, &error_msg)) {
-      LOG(ERROR) << "Failed to find relocated image file name: " << error_msg;
-      success = false;
-      break;
-    }
-    // location:     /system/framework/boot.art
-    // isa:          arm64
-    // basename:     boot.art
-    // original:     /system/framework/arm64/boot.art
-    // relocation:   /system/framework/arm64/boot.art.rel
-    std::string original_image_filename =
-        GetSystemImageFilename(space->GetImageLocation().c_str(), isa);
-
-    if (!CheckImageIdenticalToOriginalExceptForRelocation(
-            relocated_image_filename, original_image_filename, &error_msg)) {
-      LOG(ERROR) << error_msg;
-      success = false;
-      break;
-    }
-
-    if (!VerifyVdexAndOatSymlinks(original_image_filename, relocated_image_filename)) {
-      LOG(ERROR) << "Verification of vdex and oat symlinks for "
-                 << space->GetImageLocation() << " failed.";
-      success = false;
-      break;
-    }
-  }
-
-  if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
-    // We want to just exit on non-debug builds without bringing the runtime down
-    // in an orderly fashion, so intentionally leak the runtime by releasing the unique_ptr.
-    runtime.release();
-  }
-
-  return success;
-}
-
-bool PatchOat::WriteImage(File* out) {
-  CHECK(out != nullptr);
-  TimingLogger::ScopedTiming t("Writing image File", timings_);
-  std::string error_msg;
-
-  // No error checking here, this is best effort. The locking may or may not
-  // succeed and we don't really care either way.
-  ScopedFlock img_flock = LockedFile::DupOf(out->Fd(), out->GetPath(),
-                                            true /* read_only_mode */, &error_msg);
-
-  CHECK(image_ != nullptr);
-  size_t expect = image_->Size();
-  if (out->WriteFully(reinterpret_cast<char*>(image_->Begin()), expect) &&
-      out->SetLength(expect) == 0) {
-    return true;
-  } else {
-    LOG(ERROR) << "Writing to image file " << out->GetPath() << " failed.";
-    return false;
-  }
-}
-
-bool PatchOat::IsImagePic(const ImageHeader& image_header, const std::string& image_path) {
-  if (!image_header.CompilePic()) {
-    if (kIsDebugBuild) {
-      LOG(INFO) << "image at location " << image_path << " was *not* compiled pic";
-    }
-    return false;
-  }
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "image at location " << image_path << " was compiled PIC";
-  }
-
-  return true;
-}
-
-class PatchOat::PatchOatArtFieldVisitor : public ArtFieldVisitor {
- public:
-  explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
-  void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
-    dest->SetDeclaringClass(
-        patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Ptr()));
-  }
-
- private:
-  PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchArtFields(const ImageHeader* image_header) {
-  PatchOatArtFieldVisitor visitor(this);
-  image_header->VisitPackedArtFields(&visitor, heap_->Begin());
-}
-
-class PatchOat::PatchOatArtMethodVisitor : public ArtMethodVisitor {
- public:
-  explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
-  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
-    patch_oat_->FixupMethod(method, dest);
-  }
-
- private:
-  PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  PatchOatArtMethodVisitor visitor(this);
-  image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
-}
-
-void PatchOat::PatchImTables(const ImageHeader* image_header) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  // We can safely walk target image since the conflict tables are independent.
-  image_header->VisitPackedImTables(
-      [this](ArtMethod* method) {
-        return RelocatedAddressOfPointer(method);
-      },
-      image_->Begin(),
-      pointer_size);
-}
-
-void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  // We can safely walk target image since the conflict tables are independent.
-  image_header->VisitPackedImtConflictTables(
-      [this](ArtMethod* method) {
-        return RelocatedAddressOfPointer(method);
-      },
-      image_->Begin(),
-      pointer_size);
-}
-
-class PatchOat::FixupRootVisitor : public RootVisitor {
- public:
-  explicit FixupRootVisitor(const PatchOat* patch_oat) : patch_oat_(patch_oat) {
-  }
-
-  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
-    }
-  }
-
-  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
-    }
-  }
-
- private:
-  const PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchInternedStrings(const ImageHeader* image_header) {
-  const auto& section = image_header->GetInternedStringsSection();
-  if (section.Size() == 0) {
-    return;
-  }
-  InternTable temp_table;
-  // Note that we require that ReadFromMemory does not make an internal copy of the elements.
-  // This also relies on visit roots not doing any verification which could fail after we update
-  // the roots to be the image addresses.
-  temp_table.AddTableFromMemory(image_->Begin() + section.Offset());
-  FixupRootVisitor visitor(this);
-  temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
-}
-
-void PatchOat::PatchClassTable(const ImageHeader* image_header) {
-  const auto& section = image_header->GetClassTableSection();
-  if (section.Size() == 0) {
-    return;
-  }
-  // Note that we require that ReadFromMemory does not make an internal copy of the elements.
-  // This also relies on visit roots not doing any verification which could fail after we update
-  // the roots to be the image addresses.
-  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  ClassTable temp_table;
-  temp_table.ReadFromMemory(image_->Begin() + section.Offset());
-  FixupRootVisitor visitor(this);
-  temp_table.VisitRoots(UnbufferedRootVisitor(&visitor, RootInfo(kRootUnknown)));
-}
-
-
-class PatchOat::RelocatedPointerVisitor {
- public:
-  explicit RelocatedPointerVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
-  template <typename T>
-  T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
-    return patch_oat_->RelocatedAddressOfPointer(ptr);
-  }
-
- private:
-  PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
-  auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
-      img_roots->Get(ImageHeader::kDexCaches));
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  for (size_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
-    auto* orig_dex_cache = dex_caches->GetWithoutChecks(i);
-    auto* copy_dex_cache = RelocatedCopyOf(orig_dex_cache);
-    // Though the DexCache array fields are usually treated as native pointers, we set the full
-    // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
-    // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
-    //     static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset))).
-    mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings();
-    mirror::StringDexCacheType* relocated_strings = RelocatedAddressOfPointer(orig_strings);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::StringsOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_strings)));
-    if (orig_strings != nullptr) {
-      orig_dex_cache->FixupStrings(RelocatedCopyOf(orig_strings), RelocatedPointerVisitor(this));
-    }
-    mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
-    mirror::TypeDexCacheType* relocated_types = RelocatedAddressOfPointer(orig_types);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedTypesOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_types)));
-    if (orig_types != nullptr) {
-      orig_dex_cache->FixupResolvedTypes(RelocatedCopyOf(orig_types),
-                                         RelocatedPointerVisitor(this));
-    }
-    mirror::MethodDexCacheType* orig_methods = orig_dex_cache->GetResolvedMethods();
-    mirror::MethodDexCacheType* relocated_methods = RelocatedAddressOfPointer(orig_methods);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedMethodsOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_methods)));
-    if (orig_methods != nullptr) {
-      mirror::MethodDexCacheType* copy_methods = RelocatedCopyOf(orig_methods);
-      for (size_t j = 0, num = orig_dex_cache->NumResolvedMethods(); j != num; ++j) {
-        mirror::MethodDexCachePair orig =
-            mirror::DexCache::GetNativePairPtrSize(orig_methods, j, pointer_size);
-        mirror::MethodDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
-        mirror::DexCache::SetNativePairPtrSize(copy_methods, j, copy, pointer_size);
-      }
-    }
-    mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
-    mirror::FieldDexCacheType* relocated_fields = RelocatedAddressOfPointer(orig_fields);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedFieldsOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_fields)));
-    if (orig_fields != nullptr) {
-      mirror::FieldDexCacheType* copy_fields = RelocatedCopyOf(orig_fields);
-      for (size_t j = 0, num = orig_dex_cache->NumResolvedFields(); j != num; ++j) {
-        mirror::FieldDexCachePair orig =
-            mirror::DexCache::GetNativePairPtrSize(orig_fields, j, pointer_size);
-        mirror::FieldDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
-        mirror::DexCache::SetNativePairPtrSize(copy_fields, j, copy, pointer_size);
-      }
-    }
-    mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
-    mirror::MethodTypeDexCacheType* relocated_method_types =
-        RelocatedAddressOfPointer(orig_method_types);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedMethodTypesOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_method_types)));
-    if (orig_method_types != nullptr) {
-      orig_dex_cache->FixupResolvedMethodTypes(RelocatedCopyOf(orig_method_types),
-                                               RelocatedPointerVisitor(this));
-    }
-
-    GcRoot<mirror::CallSite>* orig_call_sites = orig_dex_cache->GetResolvedCallSites();
-    GcRoot<mirror::CallSite>* relocated_call_sites = RelocatedAddressOfPointer(orig_call_sites);
-    copy_dex_cache->SetField64<false>(
-        mirror::DexCache::ResolvedCallSitesOffset(),
-        static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_call_sites)));
-    if (orig_call_sites != nullptr) {
-      orig_dex_cache->FixupResolvedCallSites(RelocatedCopyOf(orig_call_sites),
-                                             RelocatedPointerVisitor(this));
-    }
-  }
-}
-
-bool PatchOat::PatchImage(bool primary_image) {
-  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
-  CHECK_GT(image_->Size(), sizeof(ImageHeader));
-  // These are the roots from the original file.
-  mirror::ObjectArray<mirror::Object>* img_roots = image_header->GetImageRoots().Ptr();
-  image_header->RelocateImage(delta_);
-
-  PatchArtFields(image_header);
-  PatchArtMethods(image_header);
-  PatchImTables(image_header);
-  PatchImtConflictTables(image_header);
-  PatchInternedStrings(image_header);
-  PatchClassTable(image_header);
-  // Patch the dex cache native arrays (strings, types, methods, fields, method types, call sites).
-  PatchDexFileArrays(img_roots);
-
-  if (primary_image) {
-    VisitObject(img_roots);
-  }
-
-  if (!image_header->IsValid()) {
-    LOG(ERROR) << "relocation renders image header invalid";
-    return false;
-  }
-
-  {
-    TimingLogger::ScopedTiming t("Walk Bitmap", timings_);
-    // Walk the bitmap.
-    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-      VisitObject(obj);
-    };
-    bitmap_->Walk(visitor);
-  }
-  return true;
-}
-
-
-void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Object> obj,
-                                         MemberOffset off,
-                                         bool is_static_unused ATTRIBUTE_UNUSED) const {
-  mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
-  mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
-  copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
-}
-
-void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Class> cls ATTRIBUTE_UNUSED,
-                                         ObjPtr<mirror::Reference> ref) const {
-  MemberOffset off = mirror::Reference::ReferentOffset();
-  mirror::Object* referent = ref->GetReferent();
-  DCHECK(referent == nullptr ||
-         Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(referent)) << referent;
-  mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
-  copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
-}
-
-// Called by PatchImage.
-void PatchOat::VisitObject(mirror::Object* object) {
-  mirror::Object* copy = RelocatedCopyOf(object);
-  CHECK(copy != nullptr);
-  if (kUseBakerReadBarrier) {
-    object->AssertReadBarrierState();
-  }
-  PatchOat::PatchVisitor visitor(this, copy);
-  object->VisitReferences<kVerifyNone>(visitor, visitor);
-  if (object->IsClass<kVerifyNone>()) {
-    const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-    mirror::Class* klass = object->AsClass();
-    mirror::Class* copy_klass = down_cast<mirror::Class*>(copy);
-    RelocatedPointerVisitor native_visitor(this);
-    klass->FixupNativePointers(copy_klass, pointer_size, native_visitor);
-    auto* vtable = klass->GetVTable();
-    if (vtable != nullptr) {
-      vtable->Fixup(RelocatedCopyOfFollowImages(vtable), pointer_size, native_visitor);
-    }
-    mirror::IfTable* iftable = klass->GetIfTable();
-    for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
-      if (iftable->GetMethodArrayCount(i) > 0) {
-        auto* method_array = iftable->GetMethodArray(i);
-        CHECK(method_array != nullptr);
-        method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
-                            pointer_size,
-                            native_visitor);
-      }
-    }
-  } else if (object->GetClass() == GetClassRoot<mirror::Method>() ||
-             object->GetClass() == GetClassRoot<mirror::Constructor>()) {
-    // Need to go update the ArtMethod.
-    auto* dest = down_cast<mirror::Executable*>(copy);
-    auto* src = down_cast<mirror::Executable*>(object);
-    dest->SetArtMethod(RelocatedAddressOfPointer(src->GetArtMethod()));
-  }
-}
-
-void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) {
-  const PointerSize pointer_size = InstructionSetPointerSize(isa_);
-  copy->CopyFrom(object, pointer_size);
-  // Just update the entry points if it looks like we should.
-  // TODO: sanity check all the pointers' values
-  copy->SetDeclaringClass(RelocatedAddressOfPointer(object->GetDeclaringClass().Ptr()));
-  copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer(
-      object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size);
-  // No special handling for IMT conflict table since all pointers are moved by the same offset.
-  copy->SetDataPtrSize(RelocatedAddressOfPointer(
-      object->GetDataPtrSize(pointer_size)), pointer_size);
-}
-
-static int orig_argc;
-static char** orig_argv;
-
-static std::string CommandLine() {
-  std::vector<std::string> command;
-  for (int i = 0; i < orig_argc; ++i) {
-    command.push_back(orig_argv[i]);
-  }
-  return android::base::Join(command, ' ');
-}
-
-static void UsageErrorV(const char* fmt, va_list ap) {
-  std::string error;
-  android::base::StringAppendV(&error, fmt, ap);
-  LOG(ERROR) << error;
-}
-
-static void UsageError(const char* fmt, ...) {
-  va_list ap;
-  va_start(ap, fmt);
-  UsageErrorV(fmt, ap);
-  va_end(ap);
-}
-
-NO_RETURN static void Usage(const char *fmt, ...) {
-  va_list ap;
-  va_start(ap, fmt);
-  UsageErrorV(fmt, ap);
-  va_end(ap);
-
-  UsageError("Command: %s", CommandLine().c_str());
-  UsageError("Usage: patchoat [options]...");
-  UsageError("");
-  UsageError("  --instruction-set=<isa>: Specifies the instruction set the patched code is");
-  UsageError("      compiled for (required).");
-  UsageError("");
-  UsageError("  --input-image-location=<file.art>: Specifies the 'location' of the image file to");
-  UsageError("      be patched.");
-  UsageError("");
-  UsageError("  --output-image-directory=<dir>: Specifies the directory to write the patched");
-  UsageError("      image file(s) to.");
-  UsageError("");
-  UsageError("  --output-image-relocation-directory=<dir>: Specifies the directory to write");
-  UsageError("      the image relocation information to.");
-  UsageError("");
-  UsageError("  --base-offset-delta=<delta>: Specifies the amount to change the old base-offset by.");
-  UsageError("      This value may be negative.");
-  UsageError("");
-  UsageError("  --verify: Verify an existing patched file instead of creating one.");
-  UsageError("");
-  UsageError("  --dump-timings: dump out patch timing information");
-  UsageError("");
-  UsageError("  --no-dump-timings: do not dump out patch timing information");
-  UsageError("");
-
-  exit(EXIT_FAILURE);
-}
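-
-// Illustrative invocation (paths and delta are made up; the delta must be page-aligned):
-//   patchoat --instruction-set=arm64 \
-//            --input-image-location=/system/framework/boot.art \
-//            --output-image-directory=/data/dalvik-cache/arm64 \
-//            --output-image-relocation-directory=/data/dalvik-cache/arm64 \
-//            --base-offset-delta=0x10000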
-
-static int patchoat_patch_image(TimingLogger& timings,
-                                InstructionSet isa,
-                                const std::string& input_image_location,
-                                const std::string& output_image_directory,
-                                const std::string& output_image_relocation_directory,
-                                off_t base_delta,
-                                bool base_delta_set,
-                                bool debug) {
-  CHECK(!input_image_location.empty());
-  if ((output_image_directory.empty()) && (output_image_relocation_directory.empty())) {
-    Usage("Image patching requires --output-image-directory or --output-image-relocation-directory");
-  }
-
-  if (!base_delta_set) {
-    Usage("Must supply a desired new offset or delta.");
-  }
-
-  if (!IsAligned<kPageSize>(base_delta)) {
-    Usage("Base offset/delta must be aligned to a pagesize (0x%08x) boundary.", kPageSize);
-  }
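-  // For example, with a 4 KiB page size a delta of 0x10000 (16 pages) is accepted,
-  // while 0x10800 is rejected as unaligned.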
-
-  if (debug) {
-    LOG(INFO) << "moving offset by " << base_delta
-        << " (0x" << std::hex << base_delta << ") bytes or "
-        << std::dec << (base_delta/kPageSize) << " pages.";
-  }
-
-  TimingLogger::ScopedTiming pt("patch image and oat", &timings);
-
-  bool ret =
-      PatchOat::Patch(
-          input_image_location,
-          base_delta,
-          output_image_directory,
-          output_image_relocation_directory,
-          isa,
-          &timings);
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "Exiting with return ... " << ret;
-  }
-  return ret ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-static int patchoat_verify_image(TimingLogger& timings,
-                                 InstructionSet isa,
-                                 const std::string& input_image_location,
-                                 const std::string& output_image_directory) {
-  CHECK(!input_image_location.empty());
-  TimingLogger::ScopedTiming pt("verify image and oat", &timings);
-
-  bool ret =
-      PatchOat::Verify(
-          input_image_location,
-          output_image_directory,
-          isa,
-          &timings);
-
-  if (kIsDebugBuild) {
-    LOG(INFO) << "Exiting with return ... " << ret;
-  }
-  return ret ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-static int patchoat(int argc, char **argv) {
-  Locks::Init();
-  InitLogging(argv, Runtime::Abort);
-  MemMap::Init();
-  const bool debug = kIsDebugBuild;
-  orig_argc = argc;
-  orig_argv = argv;
-  TimingLogger timings("patcher", false, false);
-
-  // Skip over the command name.
-  argv++;
-  argc--;
-
-  if (argc == 0) {
-    Usage("No arguments specified");
-  }
-
-  timings.StartTiming("Patchoat");
-
-  // cmd line args
-  bool isa_set = false;
-  InstructionSet isa = InstructionSet::kNone;
-  std::string input_image_location;
-  std::string output_image_directory;
-  std::string output_image_relocation_directory;
-  off_t base_delta = 0;
-  bool base_delta_set = false;
-  bool dump_timings = kIsDebugBuild;
-  bool verify = false;
-
-  for (int i = 0; i < argc; ++i) {
-    const StringPiece option(argv[i]);
-    const bool log_options = false;
-    if (log_options) {
-      LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
-    }
-    if (option.starts_with("--instruction-set=")) {
-      isa_set = true;
-      const char* isa_str = option.substr(strlen("--instruction-set=")).data();
-      isa = GetInstructionSetFromString(isa_str);
-      if (isa == InstructionSet::kNone) {
-        Usage("Unknown or invalid instruction set %s", isa_str);
-      }
-    } else if (option.starts_with("--input-image-location=")) {
-      input_image_location = option.substr(strlen("--input-image-location=")).data();
-    } else if (option.starts_with("--output-image-directory=")) {
-      output_image_directory = option.substr(strlen("--output-image-directory=")).data();
-    } else if (option.starts_with("--output-image-relocation-directory=")) {
-      output_image_relocation_directory =
-          option.substr(strlen("--output-image-relocation-directory=")).data();
-    } else if (option.starts_with("--base-offset-delta=")) {
-      const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
-      base_delta_set = true;
-      if (!android::base::ParseInt(base_delta_str, &base_delta)) {
-        Usage("Failed to parse --base-offset-delta argument '%s' as an off_t", base_delta_str);
-      }
-    } else if (option == "--dump-timings") {
-      dump_timings = true;
-    } else if (option == "--no-dump-timings") {
-      dump_timings = false;
-    } else if (option == "--verify") {
-      verify = true;
-    } else {
-      Usage("Unknown argument %s", option.data());
-    }
-  }
-
-  // The instruction set is mandatory. This simplifies things...
-  if (!isa_set) {
-    Usage("Instruction set must be set.");
-  }
-
-  int ret;
-  if (verify) {
-    ret = patchoat_verify_image(timings,
-                                isa,
-                                input_image_location,
-                                output_image_directory);
-  } else {
-    ret = patchoat_patch_image(timings,
-                               isa,
-                               input_image_location,
-                               output_image_directory,
-                               output_image_relocation_directory,
-                               base_delta,
-                               base_delta_set,
-                               debug);
-  }
-
-  timings.EndTiming();
-  if (dump_timings) {
-    LOG(INFO) << Dumpable<TimingLogger>(timings);
-  }
-
-  return ret;
-}
-
-}  // namespace art
-
-int main(int argc, char **argv) {
-  return art::patchoat(argc, argv);
-}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
deleted file mode 100644
index 237ef50..0000000
--- a/patchoat/patchoat.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_PATCHOAT_PATCHOAT_H_
-#define ART_PATCHOAT_PATCHOAT_H_
-
-#include "arch/instruction_set.h"
-#include "base/enums.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "base/os.h"
-#include "elf_file.h"
-#include "elf_utils.h"
-#include "gc/accounting/space_bitmap.h"
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "runtime.h"
-
-namespace art {
-
-class ArtMethod;
-class ImageHeader;
-class OatHeader;
-
-namespace mirror {
-class Object;
-class PointerArray;
-class Reference;
-class Class;
-}  // namespace mirror
-
-class PatchOat {
- public:
-  // Relocates the provided image by the specified offset. If output_image_directory is non-empty,
-  // outputs the relocated image into that directory. If output_image_relocation_directory is
-  // non-empty, outputs image relocation files (see GeneratePatch) into that directory.
-  static bool Patch(const std::string& image_location,
-                    off_t delta,
-                    const std::string& output_image_directory,
-                    const std::string& output_image_relocation_directory,
-                    InstructionSet isa,
-                    TimingLogger* timings);
-  static bool Verify(const std::string& image_location,
-                     const std::string& output_image_directory,
-                     InstructionSet isa,
-                     TimingLogger* timings);
-
-  // Generates a patch which can be used to efficiently relocate the original file or to check that
-  // a relocated file matches the original. The patch is generated from the difference of the
-  // |original| and the already |relocated| image, and written to |output| in the form of unsigned
-  // LEB128 for each relocation position.
-  static bool GeneratePatch(const MemMap& original,
-                            const MemMap& relocated,
-                            std::vector<uint8_t>* output,
-                            std::string* error_msg);
-
-  ~PatchOat() {}
-  PatchOat(PatchOat&&) = default;
-
- private:
-  // All pointers are only borrowed.
-  PatchOat(InstructionSet isa, MemMap* image,
-           gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
-           std::map<gc::space::ImageSpace*, MemMap>* map, TimingLogger* timings)
-      : image_(image), bitmap_(bitmap), heap_(heap),
-        delta_(delta), isa_(isa), space_map_(map), timings_(timings) {}
-
-  // Was the .art image at image_path made with --compile-pic ?
-  static bool IsImagePic(const ImageHeader& image_header, const std::string& image_path);
-
-  static bool CreateVdexAndOatSymlinks(const std::string& input_image_filename,
-                                       const std::string& output_image_filename);
-
-
-  void VisitObject(mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void FixupMethod(ArtMethod* object, ArtMethod* copy)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  bool PatchImage(bool primary_image) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchArtFields(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchArtMethods(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchImTables(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchImtConflictTables(const ImageHeader* image_header)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchInternedStrings(const ImageHeader* image_header)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchClassTable(const ImageHeader* image_header)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  bool WriteImage(File* out);
-
-  template <typename T>
-  T* RelocatedCopyOf(T* obj) const {
-    if (obj == nullptr) {
-      return nullptr;
-    }
-    DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
-    DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
-    uintptr_t heap_off =
-        reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
-    DCHECK_LT(heap_off, image_->Size());
-    return reinterpret_cast<T*>(image_->Begin() + heap_off);
-  }
-
-  template <typename T>
-  T* RelocatedCopyOfFollowImages(T* obj) const {
-    if (obj == nullptr) {
-      return nullptr;
-    }
-    // Find ImageSpace this belongs to.
-    auto image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-    for (gc::space::ImageSpace* image_space : image_spaces) {
-      if (image_space->Contains(obj)) {
-        uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) -
-                             reinterpret_cast<uintptr_t>(image_space->GetMemMap()->Begin());
-        return reinterpret_cast<T*>(space_map_->find(image_space)->second.Begin() + heap_off);
-      }
-    }
-    LOG(FATAL) << "Did not find object in boot image space " << obj;
-    UNREACHABLE();
-  }
-
-  template <typename T>
-  T* RelocatedAddressOfPointer(T* obj) const {
-    if (obj == nullptr) {
-      return obj;
-    }
-    auto ret = reinterpret_cast<uintptr_t>(obj) + delta_;
-    // Trim off the high bits in case of a negative relocation with 64-bit patchoat.
-    if (Is32BitISA()) {
-      ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret));
-    }
-    return reinterpret_cast<T*>(ret);
-  }
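-  // Example (illustrative): with delta_ == 0x1000, a pointer at 0x70000000 relocates to
-  // 0x70001000; with a negative delta on a 32-bit ISA the result is truncated back to 32 bits.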
-
-  bool Is32BitISA() const {
-    return InstructionSetPointerSize(isa_) == PointerSize::k32;
-  }
-
-  // Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
-  // change the heap.
-  class PatchVisitor {
-   public:
-    PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
-    ~PatchVisitor() {}
-    void operator() (ObjPtr<mirror::Object> obj, MemberOffset off, bool b) const
-        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-    // For reference classes.
-    void operator() (ObjPtr<mirror::Class> cls, ObjPtr<mirror::Reference>  ref) const
-        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-    // TODO: Consider using these for updating native class roots?
-    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-        const {}
-    void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
-
-   private:
-    PatchOat* const patcher_;
-    mirror::Object* const copy_;
-  };
-
-  // A mmap of the image we are patching. This is modified.
-  const MemMap* const image_;
-  // The bitmap over the image within the heap we are patching. This is not modified.
-  gc::accounting::ContinuousSpaceBitmap* const bitmap_;
-  // The heap we are patching. This is not modified.
-  const MemMap* const heap_;
-  // The amount we are changing the offset by.
-  const off_t delta_;
-  // Active instruction set, used to know the entrypoint size.
-  const InstructionSet isa_;
-
-  const std::map<gc::space::ImageSpace*, MemMap>* space_map_;
-
-  TimingLogger* timings_;
-
-  class FixupRootVisitor;
-  class RelocatedPointerVisitor;
-  class PatchOatArtFieldVisitor;
-  class PatchOatArtMethodVisitor;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
-};
-
-}  // namespace art
-#endif  // ART_PATCHOAT_PATCHOAT_H_
diff --git a/patchoat/patchoat_test.cc b/patchoat/patchoat_test.cc
deleted file mode 100644
index 79ae987..0000000
--- a/patchoat/patchoat_test.cc
+++ /dev/null
@@ -1,617 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <openssl/sha.h>
-#include <dirent.h>
-#include <sys/types.h>
-
-#include <string>
-#include <vector>
-
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "base/hex_dump.h"
-#include "base/leb128.h"
-#include "dexopt_test.h"
-#include "runtime.h"
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-using android::base::StringPrintf;
-
-class PatchoatTest : public DexoptTest {
- public:
-  static bool ListDirFilesEndingWith(
-      const std::string& dir,
-      const std::string& suffix,
-      std::vector<std::string>* filenames,
-      std::string* error_msg) {
-    DIR* d = opendir(dir.c_str());
-    if (d == nullptr) {
-      *error_msg = "Failed to open directory";
-      return false;
-    }
-    dirent* e;
-    struct stat s;
-    size_t suffix_len = suffix.size();
-    while ((e = readdir(d)) != nullptr) {
-      if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
-        continue;
-      }
-      size_t name_len = strlen(e->d_name);
-      if ((name_len < suffix_len) || (strcmp(&e->d_name[name_len - suffix_len], suffix.c_str()))) {
-        continue;
-      }
-      std::string basename(e->d_name);
-      std::string filename = dir + "/" + basename;
-      int stat_result = lstat(filename.c_str(), &s);
-      if (stat_result != 0) {
-        *error_msg =
-            StringPrintf("Failed to stat %s: stat returned %d", filename.c_str(), stat_result);
-        return false;
-      }
-      if (S_ISDIR(s.st_mode)) {
-        continue;
-      }
-      filenames->push_back(basename);
-    }
-    closedir(d);
-    return true;
-  }
-
-  static void AddRuntimeArg(std::vector<std::string>& args, const std::string& arg) {
-    args.push_back("--runtime-arg");
-    args.push_back(arg);
-  }
-
-  bool CompileBootImage(const std::vector<std::string>& extra_args,
-                        const std::string& image_file_name_prefix,
-                        uint32_t base_addr,
-                        std::string* error_msg) {
-    Runtime* const runtime = Runtime::Current();
-    std::vector<std::string> argv;
-    argv.push_back(runtime->GetCompilerExecutable());
-    AddRuntimeArg(argv, "-Xms64m");
-    AddRuntimeArg(argv, "-Xmx64m");
-    std::vector<std::string> dex_files = GetLibCoreDexFileNames();
-    for (const std::string& dex_file : dex_files) {
-      argv.push_back("--dex-file=" + dex_file);
-      argv.push_back("--dex-location=" + dex_file);
-    }
-    if (runtime->IsJavaDebuggable()) {
-      argv.push_back("--debuggable");
-    }
-    runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
-    AddRuntimeArg(argv, "-Xverify:softfail");
-
-    if (!kIsTargetBuild) {
-      argv.push_back("--host");
-    }
-
-    argv.push_back("--image=" + image_file_name_prefix + ".art");
-    argv.push_back("--oat-file=" + image_file_name_prefix + ".oat");
-    argv.push_back("--oat-location=" + image_file_name_prefix + ".oat");
-    argv.push_back(StringPrintf("--base=0x%" PRIx32, base_addr));
-    argv.push_back("--compile-pic");
-    argv.push_back("--multi-image");
-    argv.push_back("--no-generate-debug-info");
-
-    std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-    argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
-    // We must set --android-root.
-    const char* android_root = getenv("ANDROID_ROOT");
-    CHECK(android_root != nullptr);
-    argv.push_back("--android-root=" + std::string(android_root));
-    argv.insert(argv.end(), extra_args.begin(), extra_args.end());
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  static std::vector<std::string> BasePatchoatCommand(const std::string& input_image_location,
-                                                      off_t base_offset_delta) {
-    Runtime* const runtime = Runtime::Current();
-    std::vector<std::string> argv;
-    argv.push_back(runtime->GetPatchoatExecutable());
-    argv.push_back("--input-image-location=" + input_image_location);
-    argv.push_back(StringPrintf("--base-offset-delta=0x%jx", (intmax_t) base_offset_delta));
-    argv.push_back(StringPrintf("--instruction-set=%s", GetInstructionSetString(kRuntimeISA)));
-
-    return argv;
-  }
-
-  bool RelocateBootImage(const std::string& input_image_location,
-                         const std::string& output_image_directory,
-                         off_t base_offset_delta,
-                         std::string* error_msg) {
-    std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
-    argv.push_back("--output-image-directory=" + output_image_directory);
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  bool VerifyBootImage(const std::string& input_image_location,
-                       const std::string& output_image_directory,
-                       off_t base_offset_delta,
-                       std::string* error_msg) {
-    std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
-    argv.push_back("--output-image-directory=" + output_image_directory);
-    argv.push_back("--verify");
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  bool GenerateBootImageRelFile(const std::string& input_image_location,
-                                const std::string& output_rel_directory,
-                                off_t base_offset_delta,
-                                std::string* error_msg) {
-    std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
-    argv.push_back("--output-image-relocation-directory=" + output_rel_directory);
-
-    return RunDex2OatOrPatchoat(argv, error_msg);
-  }
-
-  bool RunDex2OatOrPatchoat(const std::vector<std::string>& args, std::string* error_msg) {
-    int link[2];
-
-    if (pipe(link) == -1) {
-      return false;
-    }
-
-    pid_t pid = fork();
-    if (pid == -1) {
-      return false;
-    }
-
-    if (pid == 0) {
-      // We need dex2oat to actually log things.
-      setenv("ANDROID_LOG_TAGS", "*:e", 1);
-      dup2(link[1], STDERR_FILENO);
-      close(link[0]);
-      close(link[1]);
-      std::vector<const char*> c_args;
-      for (const std::string& str : args) {
-        c_args.push_back(str.c_str());
-      }
-      c_args.push_back(nullptr);
-      execv(c_args[0], const_cast<char* const*>(c_args.data()));
-      exit(1);
-      UNREACHABLE();
-    } else {
-      close(link[1]);
-      char buffer[128];
-      memset(buffer, 0, 128);
-      ssize_t bytes_read = 0;
-
-      while (TEMP_FAILURE_RETRY(bytes_read = read(link[0], buffer, 128)) > 0) {
-        *error_msg += std::string(buffer, bytes_read);
-      }
-      close(link[0]);
-      int status = -1;
-      if (waitpid(pid, &status, 0) != -1) {
-        return (status == 0);
-      }
-      return false;
-    }
-  }
-
-  bool CompileBootImageToDir(
-      const std::string& output_dir,
-      const std::vector<std::string>& dex2oat_extra_args,
-      uint32_t base_addr,
-      std::string* error_msg) {
-    return CompileBootImage(dex2oat_extra_args, output_dir + "/boot", base_addr, error_msg);
-  }
-
-  bool CopyImageChecksumAndSetPatchDelta(
-      const std::string& src_image_filename,
-      const std::string& dest_image_filename,
-      off_t dest_patch_delta,
-      std::string* error_msg) {
-    std::unique_ptr<File> src_file(OS::OpenFileForReading(src_image_filename.c_str()));
-    if (src_file.get() == nullptr) {
-      *error_msg = StringPrintf("Failed to open source image file %s", src_image_filename.c_str());
-      return false;
-    }
-    ImageHeader src_header;
-    if (!src_file->ReadFully(&src_header, sizeof(src_header))) {
-      *error_msg = StringPrintf("Failed to read source image file %s", src_image_filename.c_str());
-      return false;
-    }
-
-    std::unique_ptr<File> dest_file(OS::OpenFileReadWrite(dest_image_filename.c_str()));
-    if (dest_file.get() == nullptr) {
-      *error_msg =
-          StringPrintf("Failed to open destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-    ImageHeader dest_header;
-    if (!dest_file->ReadFully(&dest_header, sizeof(dest_header))) {
-      *error_msg =
-          StringPrintf("Failed to read destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-    dest_header.SetOatChecksum(src_header.GetOatChecksum());
-    dest_header.SetPatchDelta(dest_patch_delta);
-    if (!dest_file->ResetOffset()) {
-      *error_msg =
-          StringPrintf(
-              "Failed to seek to start of destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-    if (!dest_file->WriteFully(&dest_header, sizeof(dest_header))) {
-      *error_msg =
-          StringPrintf("Failed to write to destination image file %s", dest_image_filename.c_str());
-      dest_file->Erase();
-      return false;
-    }
-    if (dest_file->FlushCloseOrErase() != 0) {
-      *error_msg =
-          StringPrintf(
-              "Failed to flush/close destination image file %s", dest_image_filename.c_str());
-      return false;
-    }
-
-    return true;
-  }
-
-  bool ReadFully(
-      const std::string& filename, std::vector<uint8_t>* contents, std::string* error_msg) {
-    std::unique_ptr<File> file(OS::OpenFileForReading(filename.c_str()));
-    if (file.get() == nullptr) {
-      *error_msg = "Failed to open";
-      return false;
-    }
-    int64_t size = file->GetLength();
-    if (size < 0) {
-      *error_msg = "Failed to get size";
-      return false;
-    }
-    contents->resize(size);
-    if (!file->ReadFully(&(*contents)[0], size)) {
-      *error_msg = "Failed to read";
-      contents->clear();
-      return false;
-    }
-    return true;
-  }
-
-  bool BinaryDiff(
-      const std::string& filename1, const std::string& filename2, std::string* error_msg) {
-    std::string read_error_msg;
-    std::vector<uint8_t> image1;
-    if (!ReadFully(filename1, &image1, &read_error_msg)) {
-      *error_msg = StringPrintf("Failed to read %s: %s", filename1.c_str(), read_error_msg.c_str());
-      return true;
-    }
-    std::vector<uint8_t> image2;
-    if (!ReadFully(filename2, &image2, &read_error_msg)) {
-      *error_msg = StringPrintf("Failed to read %s: %s", filename2.c_str(), read_error_msg.c_str());
-      return true;
-    }
-    if (image1.size() != image2.size()) {
-      *error_msg =
-          StringPrintf(
-              "%s and %s are of different size: %zu vs %zu",
-              filename1.c_str(),
-              filename2.c_str(),
-              image1.size(),
-              image2.size());
-      return true;
-    }
-    size_t size = image1.size();
-    for (size_t i = 0; i < size; i++) {
-      if (image1[i] != image2[i]) {
-        *error_msg =
-            StringPrintf("%s and %s differ at offset %zu", filename1.c_str(), filename2.c_str(), i);
-        size_t hexdump_size = std::min<size_t>(16u, size - i);
-        HexDump dump1(&image1[i], hexdump_size, /* show_actual_addresses */ false, /* prefix */ "");
-        HexDump dump2(&image2[i], hexdump_size, /* show_actual_addresses */ false, /* prefix */ "");
-        std::ostringstream oss;
-        oss << "\n" << dump1 << "\n" << dump2;
-        *error_msg += oss.str();
-        return true;
-      }
-    }
-
-    return false;
-  }
-};
-
-TEST_F(PatchoatTest, PatchoatRelocationSameAsDex2oatRelocation) {
-#if defined(ART_USE_READ_BARRIER)
-  // This test checks that relocating a boot image using patchoat produces the same result as
-  // producing the boot image for that relocated base address using dex2oat. To be precise, these
-  // two files will have two small differences: the OAT checksum and base address. However, this
-  // test takes this into account.
-
-  // Compile boot image into a random directory using dex2oat
-  ScratchFile dex2oat_orig_scratch;
-  dex2oat_orig_scratch.Unlink();
-  std::string dex2oat_orig_dir = dex2oat_orig_scratch.GetFilename();
-  ASSERT_EQ(0, mkdir(dex2oat_orig_dir.c_str(), 0700));
-  const uint32_t orig_base_addr = 0x60000000;
-  // Force deterministic output. We want the boot images created by this dex2oat run and the run
-  // below to differ only in their base address.
-  std::vector<std::string> dex2oat_extra_args;
-  dex2oat_extra_args.push_back("--force-determinism");
-  dex2oat_extra_args.push_back("-j1");  // Might not be needed. Causes a 3-5x slowdown.
-  std::string error_msg;
-  if (!CompileBootImageToDir(dex2oat_orig_dir, dex2oat_extra_args, orig_base_addr, &error_msg)) {
-    FAIL() << "CompileBootImage1 failed: " << error_msg;
-  }
-
-  // Compile a "relocated" boot image into a random directory using dex2oat. This image is relocated
-  // in the sense that it uses a different base address.
-  ScratchFile dex2oat_reloc_scratch;
-  dex2oat_reloc_scratch.Unlink();
-  std::string dex2oat_reloc_dir = dex2oat_reloc_scratch.GetFilename();
-  ASSERT_EQ(0, mkdir(dex2oat_reloc_dir.c_str(), 0700));
-  const uint32_t reloc_base_addr = 0x70000000;
-  if (!CompileBootImageToDir(dex2oat_reloc_dir, dex2oat_extra_args, reloc_base_addr, &error_msg)) {
-    FAIL() << "CompileBootImage2 failed: " << error_msg;
-  }
-  const off_t base_addr_delta = reloc_base_addr - orig_base_addr;
-
-  // Relocate the original boot image using patchoat. The image is relocated by the same amount
-  // as the second/relocated image produced by dex2oat.
-  ScratchFile patchoat_scratch;
-  patchoat_scratch.Unlink();
-  std::string patchoat_dir = patchoat_scratch.GetFilename();
-  ASSERT_EQ(0, mkdir(patchoat_dir.c_str(), 0700));
-  std::string dex2oat_orig_with_arch_dir =
-      dex2oat_orig_dir + "/" + GetInstructionSetString(kRuntimeISA);
-  // The arch-including symlink is needed by patchoat
-  ASSERT_EQ(0, symlink(dex2oat_orig_dir.c_str(), dex2oat_orig_with_arch_dir.c_str()));
-  if (!RelocateBootImage(
-      dex2oat_orig_dir + "/boot.art",
-      patchoat_dir,
-      base_addr_delta,
-      &error_msg)) {
-    FAIL() << "RelocateBootImage failed: " << error_msg;
-  }
-
-  // Assert that patchoat created the same set of .art files as dex2oat
-  std::vector<std::string> dex2oat_image_basenames;
-  std::vector<std::string> patchoat_image_basenames;
-  if (!ListDirFilesEndingWith(dex2oat_reloc_dir, ".art", &dex2oat_image_basenames, &error_msg)) {
-    FAIL() << "Failed to list *.art files in " << dex2oat_reloc_dir << ": " << error_msg;
-  }
-  if (!ListDirFilesEndingWith(patchoat_dir, ".art", &patchoat_image_basenames, &error_msg)) {
-    FAIL() << "Failed to list *.art files in " << patchoat_dir << ": " << error_msg;
-  }
-  std::sort(dex2oat_image_basenames.begin(), dex2oat_image_basenames.end());
-  std::sort(patchoat_image_basenames.begin(), patchoat_image_basenames.end());
-  // .art file names output by patchoat look like tmp@art-data-<random>-<random>@boot*.art. To
-  // compare these with .art file names output by dex2oat we retain only the part of the file name
-  // after the last @.
-  std::vector<std::string> patchoat_image_shortened_basenames(patchoat_image_basenames.size());
-  for (size_t i = 0; i < patchoat_image_basenames.size(); i++) {
-    patchoat_image_shortened_basenames[i] =
-        patchoat_image_basenames[i].substr(patchoat_image_basenames[i].find_last_of('@') + 1);
-  }
-  ASSERT_EQ(dex2oat_image_basenames, patchoat_image_shortened_basenames);
-
-  // Patch up the dex2oat-relocated image files so that it looks as though they were relocated by
-  // patchoat. patchoat preserves the OAT checksum header field and sets patch delta header field.
-  for (const std::string& image_basename : dex2oat_image_basenames) {
-    if (!CopyImageChecksumAndSetPatchDelta(
-        dex2oat_orig_dir + "/" + image_basename,
-        dex2oat_reloc_dir + "/" + image_basename,
-        base_addr_delta,
-        &error_msg)) {
-      FAIL() << "Unable to patch up " << image_basename << ": " << error_msg;
-    }
-  }
-
-  // Assert that the patchoat-relocated images are identical to the dex2oat-relocated images
-  for (size_t i = 0; i < dex2oat_image_basenames.size(); i++) {
-    const std::string& dex2oat_image_basename = dex2oat_image_basenames[i];
-    const std::string& dex2oat_image_filename = dex2oat_reloc_dir + "/" + dex2oat_image_basename;
-    const std::string& patchoat_image_filename = patchoat_dir + "/" + patchoat_image_basenames[i];
-    if (BinaryDiff(dex2oat_image_filename, patchoat_image_filename, &error_msg)) {
-      FAIL() << "patchoat- and dex2oat-relocated variants of " << dex2oat_image_basename
-          << " differ: " << error_msg;
-    }
-  }
-
-  ClearDirectory(dex2oat_orig_dir.c_str(), /*recursive*/ true);
-  ClearDirectory(dex2oat_reloc_dir.c_str(), /*recursive*/ true);
-  ClearDirectory(patchoat_dir.c_str(), /*recursive*/ true);
-  rmdir(dex2oat_orig_dir.c_str());
-  rmdir(dex2oat_reloc_dir.c_str());
-  rmdir(patchoat_dir.c_str());
-#else
-  LOG(INFO) << "Skipping PatchoatRelocationSameAsDex2oatRelocation";
-  // Force-print to std::cout so it's also outside the logcat.
-  std::cout << "Skipping PatchoatRelocationSameAsDex2oatRelocation" << std::endl;
-#endif
-}
-
-// These tests check that a boot image relocated using patchoat can be unrelocated
-// using the .rel file created by patchoat.
-//
-// The tests don't work when heap poisoning is enabled because some of the
-// references are negated. b/72117833 is tracking the effort to have patchoat
-// and its tests support heap poisoning.
-class PatchoatVerificationTest : public PatchoatTest {
- protected:
-  void CreateRelocatedBootImage() {
-    // Compile boot image into a random directory using dex2oat
-    ScratchFile dex2oat_orig_scratch;
-    dex2oat_orig_scratch.Unlink();
-    dex2oat_orig_dir_ = dex2oat_orig_scratch.GetFilename();
-    ASSERT_EQ(0, mkdir(dex2oat_orig_dir_.c_str(), 0700));
-    const uint32_t orig_base_addr = 0x60000000;
-    std::vector<std::string> dex2oat_extra_args;
-    std::string error_msg;
-    if (!CompileBootImageToDir(dex2oat_orig_dir_, dex2oat_extra_args, orig_base_addr, &error_msg)) {
-      FAIL() << "CompileBootImage1 failed: " << error_msg;
-    }
-
-    // Generate image relocation file for the original boot image
-    std::string dex2oat_orig_with_arch_dir =
-        dex2oat_orig_dir_ + "/" + GetInstructionSetString(kRuntimeISA);
-    // The arch-including symlink is needed by patchoat
-    ASSERT_EQ(0, symlink(dex2oat_orig_dir_.c_str(), dex2oat_orig_with_arch_dir.c_str()));
-    base_addr_delta_ = 0x100000;
-    if (!GenerateBootImageRelFile(
-        dex2oat_orig_dir_ + "/boot.art",
-        dex2oat_orig_dir_,
-        base_addr_delta_,
-        &error_msg)) {
-      FAIL() << "RelocateBootImage failed: " << error_msg;
-    }
-
-    // Relocate the original boot image using patchoat
-    ScratchFile relocated_scratch;
-    relocated_scratch.Unlink();
-    relocated_dir_ = relocated_scratch.GetFilename();
-    ASSERT_EQ(0, mkdir(relocated_dir_.c_str(), 0700));
-    // Use a different relocation delta from the one used when generating .rel files above. This is
-    // to make sure .rel files are not specific to a particular relocation delta.
-    base_addr_delta_ -= 0x10000;
-    if (!RelocateBootImage(
-        dex2oat_orig_dir_ + "/boot.art",
-        relocated_dir_,
-        base_addr_delta_,
-        &error_msg)) {
-      FAIL() << "RelocateBootImage failed: " << error_msg;
-    }
-
-    // Assert that patchoat created the same set of .art and .art.rel files
-    std::vector<std::string> rel_basenames;
-    std::vector<std::string> relocated_image_basenames;
-    if (!ListDirFilesEndingWith(dex2oat_orig_dir_, ".rel", &rel_basenames, &error_msg)) {
-      FAIL() << "Failed to list *.art.rel files in " << dex2oat_orig_dir_ << ": " << error_msg;
-    }
-    if (!ListDirFilesEndingWith(relocated_dir_, ".art", &relocated_image_basenames, &error_msg)) {
-      FAIL() << "Failed to list *.art files in " << relocated_dir_ << ": " << error_msg;
-    }
-    std::sort(rel_basenames.begin(), rel_basenames.end());
-    std::sort(relocated_image_basenames.begin(), relocated_image_basenames.end());
-
-    // .art and .art.rel file names output by patchoat look like
-    // tmp@art-data-<random>-<random>@boot*.art, encoding the name of the directory in their name.
-    // To compare these with each other, we retain only the part of the file name after the last @,
-    // and we also drop the extension.
-    std::vector<std::string> rel_shortened_basenames(rel_basenames.size());
-    std::vector<std::string> relocated_image_shortened_basenames(relocated_image_basenames.size());
-    for (size_t i = 0; i < rel_basenames.size(); i++) {
-      rel_shortened_basenames[i] = rel_basenames[i].substr(rel_basenames[i].find_last_of('@') + 1);
-      rel_shortened_basenames[i] =
-          rel_shortened_basenames[i].substr(0, rel_shortened_basenames[i].find('.'));
-    }
-    for (size_t i = 0; i < relocated_image_basenames.size(); i++) {
-      relocated_image_shortened_basenames[i] =
-          relocated_image_basenames[i].substr(relocated_image_basenames[i].find_last_of('@') + 1);
-      relocated_image_shortened_basenames[i] =
-          relocated_image_shortened_basenames[i].substr(
-              0, relocated_image_shortened_basenames[i].find('.'));
-    }
-    ASSERT_EQ(rel_shortened_basenames, relocated_image_shortened_basenames);
-  }
-
-  void TearDown() override {
-    if (!dex2oat_orig_dir_.empty()) {
-      ClearDirectory(dex2oat_orig_dir_.c_str(), /*recursive*/ true);
-      rmdir(dex2oat_orig_dir_.c_str());
-    }
-    if (!relocated_dir_.empty()) {
-      ClearDirectory(relocated_dir_.c_str(), /*recursive*/ true);
-      rmdir(relocated_dir_.c_str());
-    }
-    PatchoatTest::TearDown();
-  }
-
-  std::string dex2oat_orig_dir_;
-  std::string relocated_dir_;
-  off_t base_addr_delta_;
-};
-
-// Assert that verification works with the .rel files.
-TEST_F(PatchoatVerificationTest, Successful) {
-  TEST_DISABLED_FOR_HEAP_POISONING();
-  CreateRelocatedBootImage();
-
-  std::string error_msg;
-  if (!VerifyBootImage(
-      dex2oat_orig_dir_ + "/boot.art",
-      relocated_dir_,
-      base_addr_delta_,
-      &error_msg)) {
-    FAIL() << "VerifyBootImage failed: " << error_msg;
-  }
-}
-
-// Corrupt the image file and check that the verification fails gracefully.
-TEST_F(PatchoatVerificationTest, CorruptedImage) {
-  TEST_DISABLED_FOR_HEAP_POISONING();
-  CreateRelocatedBootImage();
-
-  std::string error_msg;
-  std::string relocated_image_filename;
-  if (!GetDalvikCacheFilename((dex2oat_orig_dir_ + "/boot.art").c_str(),
-                               relocated_dir_.c_str(),
-                               &relocated_image_filename,
-                               &error_msg)) {
-    FAIL() << "Failed to find relocated image file name: " << error_msg;
-  }
-  ASSERT_EQ(truncate(relocated_image_filename.c_str(), sizeof(ImageHeader)), 0)
-    << relocated_image_filename;
-
-  if (VerifyBootImage(
-      dex2oat_orig_dir_ + "/boot.art",
-      relocated_dir_,
-      base_addr_delta_,
-      &error_msg)) {
-    FAIL() << "VerifyBootImage should have failed since the image was intentionally corrupted";
-  }
-}
-
-// Corrupt the relocation file and check that the verification fails gracefully.
-TEST_F(PatchoatVerificationTest, CorruptedRelFile) {
-  TEST_DISABLED_FOR_HEAP_POISONING();
-  CreateRelocatedBootImage();
-
-  std::string error_msg;
-  std::string art_filename = dex2oat_orig_dir_ + "/boot.art";
-  std::string rel_filename = dex2oat_orig_dir_ + "/boot.art.rel";
-  std::unique_ptr<File> art_file(OS::OpenFileForReading(art_filename.c_str()));
-  std::unique_ptr<File> rel_file(OS::OpenFileReadWrite(rel_filename.c_str()));
-  rel_file->ClearContent();
-  uint8_t buffer[64] = {};
-  ASSERT_TRUE(rel_file->WriteFully(&buffer, SHA256_DIGEST_LENGTH));
-  // Encode single relocation which is just past the end of the image file.
-  size_t leb_size = EncodeUnsignedLeb128(buffer, art_file->GetLength()) - buffer;
-  ASSERT_TRUE(rel_file->WriteFully(&buffer, leb_size));
-  ASSERT_EQ(rel_file->FlushClose(), 0);
-  ASSERT_EQ(art_file->Close(), 0);
-
-  if (VerifyBootImage(
-      dex2oat_orig_dir_ + "/boot.art",
-      relocated_dir_,
-      base_addr_delta_,
-      &error_msg)) {
-    FAIL() << "VerifyBootImage should have failed since the rel file was intentionally corrupted";
-  }
-}
-
-}  // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index cecd3c2..2b5bf48 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -35,6 +35,7 @@
 #include "base/logging.h"  // For InitLogging.
 #include "base/mem_map.h"
 #include "base/scoped_flock.h"
+#include "base/stl_util.h"
 #include "base/stringpiece.h"
 #include "base/time_utils.h"
 #include "base/unix_file/fd_file.h"
@@ -500,7 +501,7 @@
       LOG(ERROR) << "Cannot load profile info from filename=" << filename << " fd=" << fd;
       return -1;
     }
-    *dump += banner + "\n" + info->DumpInfo(dex_files) + "\n";
+    *dump += banner + "\n" + info->DumpInfo(MakeNonOwningPointerVector(*dex_files)) + "\n";
     return 0;
   }
 
@@ -513,10 +514,23 @@
     static const char* kEmptyString = "";
     static const char* kOrdinaryProfile = "=== profile ===";
     static const char* kReferenceProfile = "=== reference profile ===";
+    static const char* kDexFiles = "=== Dex files  ===";
 
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     OpenApkFilesFromLocations(&dex_files);
+
     std::string dump;
+
+    // Dump the dex files and their corresponding checksums.
+    dump += kDexFiles;
+    dump += "\n";
+    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+      std::ostringstream oss;
+      oss << dex_file->GetLocation()
+          << " [checksum=" << std::hex << dex_file->GetLocationChecksum() << "]\n";
+      dump += oss.str();
+    }
+
     // Dump individual profile files.
     if (!profile_files_fd_.empty()) {
       for (int profile_file_fd : profile_files_fd_) {
@@ -530,12 +544,10 @@
         }
       }
     }
-    if (!profile_files_.empty()) {
-      for (const std::string& profile_file : profile_files_) {
-        int ret = DumpOneProfile(kOrdinaryProfile, profile_file, kInvalidFd, &dex_files, &dump);
-        if (ret != 0) {
-          return ret;
-        }
+    for (const std::string& profile_file : profile_files_) {
+      int ret = DumpOneProfile(kOrdinaryProfile, profile_file, kInvalidFd, &dex_files, &dump);
+      if (ret != 0) {
+        return ret;
       }
     }
     // Dump reference profile file.
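
The DumpInfo call above now takes a vector of raw pointers built from the owned dex files. A minimal sketch of a helper in the spirit of MakeNonOwningPointerVector (standard C++ only; the name with the Sketch suffix is an assumption, not the base/stl_util.h implementation):

#include <memory>
#include <vector>

// Minimal sketch: build a non-owning view over a vector of unique_ptr elements. The raw
// pointers stay valid only while the owning vector is alive.
template <typename T>
std::vector<T*> MakeNonOwningPointerVectorSketch(const std::vector<std::unique_ptr<T>>& source) {
  std::vector<T*> result;
  result.reserve(source.size());
  for (const std::unique_ptr<T>& element : source) {
    result.push_back(element.get());
  }
  return result;
}
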
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 570b0e6..18ddcc0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -103,7 +103,7 @@
   bool CASDeclaringClass(ObjPtr<mirror::Class> expected_class, ObjPtr<mirror::Class> desired_class)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset DeclaringClassOffset() {
+  static constexpr MemberOffset DeclaringClassOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
   }
 
@@ -118,7 +118,7 @@
     access_flags_.store(new_access_flags, std::memory_order_relaxed);
   }
 
-  static MemberOffset AccessFlagsOffset() {
+  static constexpr MemberOffset AccessFlagsOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, access_flags_));
   }
 
@@ -351,11 +351,11 @@
     method_index_ = new_method_index;
   }
 
-  static MemberOffset DexMethodIndexOffset() {
+  static constexpr MemberOffset DexMethodIndexOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, dex_method_index_));
   }
 
-  static MemberOffset MethodIndexOffset() {
+  static constexpr MemberOffset MethodIndexOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
   }
 
@@ -431,16 +431,16 @@
 
   void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset DataOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
     return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
         PtrSizedFields, data_) / sizeof(void*) * static_cast<size_t>(pointer_size));
   }
 
-  static MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
     return DataOffset(pointer_size);
   }
 
-  static MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
     return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
         PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
             * static_cast<size_t>(pointer_size));
@@ -652,7 +652,7 @@
     return hotness_count_;
   }
 
-  static MemberOffset HotnessCountOffset() {
+  static constexpr MemberOffset HotnessCountOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
   }
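
Making these MemberOffset accessors constexpr lets the compiler consume the offsets in constant expressions, such as static_assert checks or generated assembler constants. A minimal sketch of the idea using plain offsetof and a hypothetical standard-layout struct (FakeMethod and the asserted value are assumptions, not the real ArtMethod layout):

#include <cstddef>
#include <cstdint>

struct FakeMethod {                 // hypothetical stand-in, not the real ArtMethod layout
  uint32_t declaring_class_;
  uint32_t access_flags_;
  uint32_t dex_method_index_;
};

static constexpr size_t AccessFlagsOffsetSketch() {
  return offsetof(FakeMethod, access_flags_);   // usable in constant expressions
}

// Because the accessor is constexpr, its value can feed static_assert checks instead of
// being computed at run time.
static_assert(AccessFlagsOffsetSketch() == 4u, "layout assumption of this sketch only");
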
 
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 00c9360..5247a0e 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -27,6 +27,9 @@
 #define ADD_TEST_EQ(x, y)
 #endif
 
+// Rounds the value n up to the nearest multiple of sz. sz must be a power of two.
+#define ALIGN_UP(n, sz) (((n) + (sz - 1)) & ~((sz) - 1))
+
 #if defined(__LP64__)
 #define POINTER_SIZE_SHIFT 3
 #define POINTER_SIZE art::PointerSize::k64
@@ -96,8 +99,10 @@
 #define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 17 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
             art::Thread::ThreadLocalAllocStackEndOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::interpreter_cache_.
-#define THREAD_INTERPRETER_CACHE_OFFSET (144 + 312 * __SIZEOF_POINTER__)
+// Offset of field Thread::interpreter_cache_. This is aligned on a 16 byte boundary so we need to
+// round up depending on the size of tlsPtr_.
+#define THREAD_INTERPRETER_CACHE_OFFSET \
+  (ALIGN_UP((THREAD_CARD_TABLE_OFFSET + 301 * __SIZEOF_POINTER__), 16))
 ADD_TEST_EQ(THREAD_INTERPRETER_CACHE_OFFSET,
             art::Thread::InterpreterCacheOffset<POINTER_SIZE>().Int32Value())
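
ALIGN_UP above is the usual power-of-two round-up trick: add (sz - 1), then clear the low bits. A standalone restatement of the arithmetic (plain C++; AlignUpSketch is a hypothetical name):

#include <cstdint>

// sz must be a power of two: adding (sz - 1) and masking off the low bits rounds n up.
constexpr uintptr_t AlignUpSketch(uintptr_t n, uintptr_t sz) {
  return (n + (sz - 1)) & ~(sz - 1);
}

static_assert(AlignUpSketch(2452u, 16u) == 2464u, "rounds up to the next 16-byte boundary");
static_assert(AlignUpSketch(2464u, 16u) == 2464u, "already aligned values are unchanged");
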
 
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d95f71a..c18f46b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2501,7 +2501,7 @@
       // the Java-side could still succeed for racy programs if another thread is actively
       // modifying the class loader's path list.
 
-      if (!self->CanCallIntoJava()) {
+      if (self->IsRuntimeThread()) {
         // Oops, we can't call into java so we can't run actual class-loader code.
         // This is true for e.g. for the compiler (jit or aot).
         ObjPtr<mirror::Throwable> pre_allocated =
@@ -2634,6 +2634,17 @@
     }
   }
 
+  // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+  // code to be executed. We put it up here so we can avoid all the allocations associated with
+  // creating the class. This can happen with, e.g., JIT threads.
+  if (!self->CanLoadClasses()) {
+    // Make sure we don't try to load anything, potentially causing an infinite loop.
+    ObjPtr<mirror::Throwable> pre_allocated =
+        Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(pre_allocated);
+    return nullptr;
+  }
+
   if (klass == nullptr) {
     // Allocate a class with the status of not ready.
     // Interface object should get the right size here. Regular class will
@@ -3622,6 +3633,18 @@
   // Identify the underlying component type
   CHECK_EQ('[', descriptor[0]);
   StackHandleScope<2> hs(self);
+
+  // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+  // code to be executed. We put it up here so we can avoid all the allocations associated with
+  // creating the class. This can happen with, e.g., JIT threads.
+  if (!self->CanLoadClasses()) {
+    // Make sure we don't try to load anything, potentially causing an infinite loop.
+    ObjPtr<mirror::Throwable> pre_allocated =
+        Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(pre_allocated);
+    return nullptr;
+  }
+
   MutableHandle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1,
                                                                      class_loader)));
   if (component_type == nullptr) {
@@ -3809,6 +3832,7 @@
 ObjPtr<mirror::Class> ClassLinker::InsertClass(const char* descriptor,
                                                ObjPtr<mirror::Class> klass,
                                                size_t hash) {
+  DCHECK(Thread::Current()->CanLoadClasses());
   if (VLOG_IS_ON(class_linker)) {
     ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
     std::string source;
@@ -4333,6 +4357,18 @@
                                                     jobjectArray methods,
                                                     jobjectArray throws) {
   Thread* self = soa.Self();
+
+  // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+  // code to be executed. We put it up here so we can avoid all the allocations associated with
+  // creating the class. This can happen with, e.g., JIT threads.
+  if (!self->CanLoadClasses()) {
+    // Make sure we don't try to load anything, potentially causing an infinite loop.
+    ObjPtr<mirror::Throwable> pre_allocated =
+        Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(pre_allocated);
+    return nullptr;
+  }
+
   StackHandleScope<10> hs(self);
   MutableHandle<mirror::Class> temp_klass(hs.NewHandle(
       AllocClass(self, GetClassRoot<mirror::Class>(this), sizeof(mirror::Class))));
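
The three guards added to class_linker.cc share one pattern: consult a per-thread capability before allocating anything, and report a pre-allocated error instead of recursing into class loading. An illustrative sketch of that pattern with simplified stand-ins (FakeThread, FakeClass, and the error string are assumptions, not ART types):

#include <string>

struct FakeThread {                        // hypothetical stand-in for art::Thread
  bool can_load_classes = true;
  std::string pending_exception;           // simplified: the runtime stores a Throwable
};

struct FakeClass {};

// Bail out before doing any work that could run ClassLoad/ClassPrepare callbacks.
FakeClass* DefineClassSketch(FakeThread* self) {
  if (!self->can_load_classes) {
    // A runtime-internal thread (for example a JIT thread) must not execute user-supplied
    // callbacks, so fail fast with a pre-allocated error and no allocation.
    self->pending_exception = "java.lang.NoClassDefFoundError";
    return nullptr;
  }
  // ... normal class creation would continue here ...
  static FakeClass klass;
  return &klass;
}
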
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 657a78b..7199d5e 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -771,13 +771,19 @@
 
   // Avoid running Java code for exception initialization.
   // TODO: Checks to make this a bit less brittle.
+  //
+  // Note: this lambda ensures that the destruction of the ScopedLocalRefs will run in the extended
+  //       stack, which is important for modes with larger stack sizes (e.g., ASAN). Using a lambda
+  //       instead of a block simplifies the control flow.
+  auto create_and_throw = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Allocate an uninitialized object.
+    ScopedLocalRef<jobject> exc(env,
+                                env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
+    if (exc == nullptr) {
+      LOG(WARNING) << "Could not allocate StackOverflowError object.";
+      return;
+    }
 
-  std::string error_msg;
-
-  // Allocate an uninitialized object.
-  ScopedLocalRef<jobject> exc(env,
-                              env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
-  if (exc.get() != nullptr) {
     // "Initialize".
     // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object.
     // Only Throwable has "custom" fields:
@@ -793,57 +799,54 @@
     // detailMessage.
     // TODO: Use String::FromModifiedUTF...?
     ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
-    if (s.get() != nullptr) {
-      env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
-
-      // cause.
-      env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
-
-      // suppressedExceptions.
-      ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
-          WellKnownClasses::java_util_Collections,
-          WellKnownClasses::java_util_Collections_EMPTY_LIST));
-      CHECK(emptylist.get() != nullptr);
-      env->SetObjectField(exc.get(),
-                          WellKnownClasses::java_lang_Throwable_suppressedExceptions,
-                          emptylist.get());
-
-      // stackState is set as result of fillInStackTrace. fillInStackTrace calls
-      // nativeFillInStackTrace.
-      ScopedLocalRef<jobject> stack_state_val(env, nullptr);
-      {
-        ScopedObjectAccessUnchecked soa(env);
-        stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
-      }
-      if (stack_state_val.get() != nullptr) {
-        env->SetObjectField(exc.get(),
-                            WellKnownClasses::java_lang_Throwable_stackState,
-                            stack_state_val.get());
-
-        // stackTrace.
-        ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
-            WellKnownClasses::libcore_util_EmptyArray,
-            WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
-        env->SetObjectField(exc.get(),
-                            WellKnownClasses::java_lang_Throwable_stackTrace,
-                            stack_trace_elem.get());
-      } else {
-        error_msg = "Could not create stack trace.";
-      }
-      // Throw the exception.
-      self->SetException(self->DecodeJObject(exc.get())->AsThrowable());
-    } else {
-      // Could not allocate a string object.
-      error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
+    if (s == nullptr) {
+      LOG(WARNING) << "Could not throw new StackOverflowError because JNI NewStringUTF failed.";
+      return;
     }
-  } else {
-    error_msg = "Could not allocate StackOverflowError object.";
-  }
 
-  if (!error_msg.empty()) {
-    LOG(WARNING) << error_msg;
-    CHECK(self->IsExceptionPending());
-  }
+    env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
+
+    // cause.
+    env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
+
+    // suppressedExceptions.
+    ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
+        WellKnownClasses::java_util_Collections,
+        WellKnownClasses::java_util_Collections_EMPTY_LIST));
+    CHECK(emptylist != nullptr);
+    env->SetObjectField(exc.get(),
+                        WellKnownClasses::java_lang_Throwable_suppressedExceptions,
+                        emptylist.get());
+
+    // stackState is set as result of fillInStackTrace. fillInStackTrace calls
+    // nativeFillInStackTrace.
+    ScopedLocalRef<jobject> stack_state_val(env, nullptr);
+    {
+      ScopedObjectAccessUnchecked soa(env);  // TODO: Is this necessary?
+      stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
+    }
+    if (stack_state_val != nullptr) {
+      env->SetObjectField(exc.get(),
+                          WellKnownClasses::java_lang_Throwable_stackState,
+                          stack_state_val.get());
+
+      // stackTrace.
+      ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
+          WellKnownClasses::libcore_util_EmptyArray,
+          WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
+      env->SetObjectField(exc.get(),
+                          WellKnownClasses::java_lang_Throwable_stackTrace,
+                          stack_trace_elem.get());
+    } else {
+      LOG(WARNING) << "Could not create stack trace.";
+      // Note: we'll create an exception without stack state, which is valid.
+    }
+
+    // Throw the exception.
+    self->SetException(self->DecodeJObject(exc.get())->AsThrowable());
+  };
+  create_and_throw();
+  CHECK(self->IsExceptionPending());
 
   bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
   self->ResetDefaultStackEnd();  // Return to default stack size.
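
The lambda note above describes a general C++ idiom: an immediately-invoked lambda scopes RAII objects so their destructors run before the code that follows, and early returns replace nested if/else chains. A generic sketch (hypothetical names, not ART code):

#include <cstdio>

struct ScopedLocalRefSketch {              // hypothetical RAII stand-in
  ~ScopedLocalRefSketch() { std::puts("local ref released inside the lambda scope"); }
};

void ThrowSketch(bool allocation_failed) {
  auto create_and_throw = [&]() {
    ScopedLocalRefSketch exc;              // destroyed when the lambda returns
    if (allocation_failed) {
      return;                              // early return instead of nested if/else
    }
    // ... build and set the exception here ...
  };
  create_and_throw();
  // All scoped objects are gone by this point, so post-conditions can be checked here.
  std::puts("checked after the lambda");
}
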
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b108920..b679cbe 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -256,17 +256,6 @@
                << " " << dex_pc << ", " << dex_pc_offset;
   }
 
-  // We only care about invokes in the Jit.
-  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
-                                Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                ArtMethod* method,
-                                uint32_t dex_pc,
-                                ArtMethod* target ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
-               << " " << dex_pc;
-  }
-
   // TODO Might be worth it to post ExceptionCatch event.
   void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
                         Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 556ff69..462620f 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -39,8 +39,6 @@
 
 void DexoptTest::PreRuntimeCreate() {
   std::string error_msg;
-  ASSERT_TRUE(PreRelocateImage(GetImageLocation(), &error_msg)) << error_msg;
-  ASSERT_TRUE(PreRelocateImage(GetImageLocation2(), &error_msg)) << error_msg;
   UnreserveImageSpace();
 }
 
@@ -182,34 +180,6 @@
   GenerateOatForTest(dex_location, filter, /* with_alternate_image */ false);
 }
 
-bool DexoptTest::PreRelocateImage(const std::string& image_location, std::string* error_msg) {
-  std::string dalvik_cache;
-  bool have_android_data;
-  bool dalvik_cache_exists;
-  bool is_global_cache;
-  GetDalvikCache(GetInstructionSetString(kRuntimeISA),
-                 /* create_if_absent */ true,
-                 &dalvik_cache,
-                 &have_android_data,
-                 &dalvik_cache_exists,
-                 &is_global_cache);
-  if (!dalvik_cache_exists) {
-    *error_msg = "Failed to create dalvik cache";
-    return false;
-  }
-
-  std::string patchoat = GetAndroidRoot();
-  patchoat += kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat";
-
-  std::vector<std::string> argv;
-  argv.push_back(patchoat);
-  argv.push_back("--input-image-location=" + image_location);
-  argv.push_back("--output-image-directory=" + dalvik_cache);
-  argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
-  argv.push_back("--base-offset-delta=0x00008000");
-  return Exec(argv, error_msg);
-}
-
 void DexoptTest::ReserveImageSpace() {
   MemMap::Init();
 
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 067b67a..efbdcba 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -62,11 +62,6 @@
   static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
 
  private:
-  // Pre-Relocate the image to a known non-zero offset so we don't have to
-  // deal with the runtime randomly relocating the image by 0 and messing up
-  // the expected results of the tests.
-  bool PreRelocateImage(const std::string& image_location, std::string* error_msg);
-
   // Reserve memory around where the image will be loaded so other memory
   // won't conflict when it comes time to load the image.
   // This can be called with an already loaded image to reserve the space
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 1045d2a..4c52ed3 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -420,28 +420,17 @@
 #undef EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL
 #undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
 
+// Follow virtual/interface indirections if applicable.
+// Will throw a null-pointer exception if the object is null.
 template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
-                                     ObjPtr<mirror::Object>* this_object,
-                                     ArtMethod* referrer,
-                                     Thread* self) {
+ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
+                                          ArtMethod* resolved_method,
+                                          ObjPtr<mirror::Object>* this_object,
+                                          ArtMethod* referrer,
+                                          Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-  constexpr ClassLinker::ResolveMode resolve_mode =
-      access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
-                   : ClassLinker::ResolveMode::kNoChecks;
-  ArtMethod* resolved_method;
-  if (type == kStatic) {
-    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
-  } else {
-    StackHandleScope<1> hs(self);
-    HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
-    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
-  }
-  if (UNLIKELY(resolved_method == nullptr)) {
-    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
-    return nullptr;  // Failure.
-  }
-  // Next, null pointer check.
+  // Null pointer check.
   if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
     if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
                  resolved_method->IsConstructor())) {
@@ -570,6 +559,31 @@
   }
 }
 
+template<InvokeType type, bool access_check>
+inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
+                                     ObjPtr<mirror::Object>* this_object,
+                                     ArtMethod* referrer,
+                                     Thread* self) {
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  constexpr ClassLinker::ResolveMode resolve_mode =
+      access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+                   : ClassLinker::ResolveMode::kNoChecks;
+  ArtMethod* resolved_method;
+  if (type == kStatic) {
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+  } else {
+    StackHandleScope<1> hs(self);
+    HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+  }
+  if (UNLIKELY(resolved_method == nullptr)) {
+    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
+    return nullptr;  // Failure.
+  }
+  return FindMethodToCall<type, access_check>(
+      method_idx, resolved_method, this_object, referrer, self);
+}
+
 // Explicit template declarations of FindMethodFromCode for all invoke types.
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)                 \
   template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE                       \
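
The refactoring splits method lookup into two steps: resolve the method index, then apply the receiver check and virtual/interface indirection. A simplified sketch of that split (all names are assumptions, not the entrypoint API):

#include <cstdint>

enum class InvokeKindSketch { kStatic, kVirtual };

struct MethodSketch { bool is_virtual = false; };

// Step 1: resolve the method index (a trivial stand-in for ClassLinker resolution).
MethodSketch* ResolveSketch(uint32_t method_idx) {
  static MethodSketch method;
  return method_idx != 0u ? &method : nullptr;   // index 0 stands in for a resolution failure
}

// Step 2: apply the receiver null check and virtual/interface indirection.
MethodSketch* FindMethodToCallSketch(MethodSketch* resolved,
                                     const void* receiver,
                                     InvokeKindSketch kind) {
  if (kind != InvokeKindSketch::kStatic && receiver == nullptr) {
    return nullptr;                              // caller throws a null-pointer exception
  }
  // Virtual/interface dispatch would replace `resolved` with the receiver's override here.
  return resolved;
}

// The original entry point becomes a thin wrapper: resolve, then delegate.
MethodSketch* FindMethodFromCodeSketch(uint32_t method_idx,
                                       const void* receiver,
                                       InvokeKindSketch kind) {
  MethodSketch* resolved = ResolveSketch(method_idx);
  if (resolved == nullptr) {
    return nullptr;                              // resolution failed, exception already pending
  }
  return FindMethodToCallSketch(resolved, receiver, kind);
}
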
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index cb85804..50c65ea 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -140,8 +140,11 @@
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_mark_stack, async_exception, sizeof(void*));
-    EXPECT_OFFSET_DIFF(Thread, tlsPtr_.async_exception, Thread, wait_mutex_, sizeof(void*),
-                       thread_tlsptr_end);
+    // The first field after tlsPtr_ is forced to 16-byte alignment, so there may be padding.
+    auto offset_tlsptr_end = OFFSETOF_MEMBER(Thread, tlsPtr_) +
+        sizeof(decltype(reinterpret_cast<Thread*>(16)->tlsPtr_));
+    CHECKED(offset_tlsptr_end - OFFSETOF_MEMBER(Thread, tlsPtr_.async_exception) == sizeof(void*),
+            "async_exception last field");
   }
 
   void CheckJniEntryPoints() {
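
The updated test computes the end of tlsPtr_ as its offset plus its size and checks that the last field ends exactly there. The same property on a small standalone struct (TlsSketch and its fields are assumptions):

#include <cstddef>

struct TlsSketch {                         // hypothetical stand-in for Thread::tlsPtr_
  void* flip_function;
  void* method_verifier;
  void* async_exception;                   // intended to stay the last field
};

// "Size of the struct minus offset of the last field" equals the size of that field,
// which is the property the updated test checks for tlsPtr_.async_exception.
static_assert(sizeof(TlsSketch) - offsetof(TlsSketch, async_exception) == sizeof(void*),
              "async_exception is the last field of this sketch");
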
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0562167..bf26aea 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -830,16 +830,16 @@
            size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
   ~RosAlloc();
 
-  static size_t RunFreeListOffset() {
+  static constexpr size_t RunFreeListOffset() {
     return OFFSETOF_MEMBER(Run, free_list_);
   }
-  static size_t RunFreeListHeadOffset() {
+  static constexpr size_t RunFreeListHeadOffset() {
     return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
   }
-  static size_t RunFreeListSizeOffset() {
+  static constexpr size_t RunFreeListSizeOffset() {
     return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
   }
-  static size_t RunSlotNextOffset() {
+  static constexpr size_t RunSlotNextOffset() {
     return OFFSETOF_MEMBER(Slot, next_);
   }
 
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 8af5d55..e48365b 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -389,7 +389,7 @@
                                                   /*inout*/MemMap* oat_reservation,
                                                   /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    TimingLogger logger(__PRETTY_FUNCTION__, true, VLOG_IS_ON(image));
+    TimingLogger logger(__PRETTY_FUNCTION__, /* precise= */ true, VLOG_IS_ON(image));
     std::unique_ptr<ImageSpace> space = Init(image_filename,
                                              image_location,
                                              validate_oat_file,
@@ -1323,7 +1323,7 @@
                       /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
                       /*out*/MemMap* extra_reservation,
                       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
-    TimingLogger logger(__PRETTY_FUNCTION__, true, VLOG_IS_ON(image));
+    TimingLogger logger(__PRETTY_FUNCTION__, /* precise= */ true, VLOG_IS_ON(image));
     std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
     std::vector<std::string> locations;
     if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
@@ -1393,7 +1393,7 @@
       /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
       /*out*/MemMap* extra_reservation,
       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
-    TimingLogger logger(__PRETTY_FUNCTION__, true, VLOG_IS_ON(image));
+    TimingLogger logger(__PRETTY_FUNCTION__, /* precise= */ true, VLOG_IS_ON(image));
     DCHECK(DalvikCacheExists());
     std::vector<std::string> locations;
     if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
@@ -1597,11 +1597,25 @@
         : diff_(diff) {}
 
     void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+      // A mirror::Class object consists of
+      //  - instance fields inherited from j.l.Object,
+      //  - instance fields inherited from j.l.Class,
+      //  - embedded tables (vtable, interface method table),
+      //  - static fields of the class itself.
+      // The reference fields are at the start of each field section (this is how the
+      // ClassLinker orders fields; except when that would create a gap between superclass
+      // fields and the first reference of the subclass due to alignment, it can be filled
+      // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
+
+      DCHECK_ALIGNED(klass, kObjectAlignment);
+      static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
       // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
       // This should be the only reference field in j.l.Object and we assert that below.
       PatchReferenceField</* kMayBeNull */ false>(klass, mirror::Object::ClassOffset());
       // Then patch the reference instance fields described by j.l.Class.class.
-      // Use the sizeof(Object) to determine where these reference fields start.
+      // Use the sizeof(Object) to determine where these reference fields start;
+      // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
+      // after patching but the j.l.Class may not have been patched yet.
       mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
       size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
       DCHECK_NE(num_reference_instance_fields, 0u);
@@ -1609,8 +1623,10 @@
       MemberOffset instance_field_offset(sizeof(mirror::Object));
       for (size_t i = 0; i != num_reference_instance_fields; ++i) {
         PatchReferenceField(klass, instance_field_offset);
-        instance_field_offset = MemberOffset(
-            instance_field_offset.Uint32Value() + sizeof(mirror::HeapReference<mirror::Object>));
+        static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                      "Heap reference sizes equality check.");
+        instance_field_offset =
+            MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
       }
       // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
       // we can get a reference to j.l.Object.class and assert that it has only one
@@ -1626,8 +1642,10 @@
             klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
         for (size_t i = 0; i != num_reference_static_fields; ++i) {
           PatchReferenceField(klass, static_field_offset);
-          static_field_offset = MemberOffset(
-              static_field_offset.Uint32Value() + sizeof(mirror::HeapReference<mirror::Object>));
+          static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                        "Heap reference sizes equality check.");
+          static_field_offset =
+              MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
         }
       }
       // Then patch native pointers.
@@ -1774,7 +1792,7 @@
     PatchObjectVisitor<kPointerSize> patch_object_visitor(diff);
 
     mirror::Class* dcheck_class_class = nullptr;  // Used only for a DCHECK().
-    for (size_t s = 0, size = spaces.size(); s != size; ++s) {
+    for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
       const ImageSpace* space = spaces[s].get();
 
       // First patch the image header. The `diff` is OK for patching 32-bit fields but
@@ -1876,7 +1894,7 @@
       constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
     }
 
-    for (size_t s = 0, size = spaces.size(); s != size; ++s) {
+    for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
       const ImageSpace* space = spaces[s].get();
       const ImageHeader& image_header = space->GetImageHeader();
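
The patch visitor walks reference fields at a fixed stride of kHeapReferenceSize, starting right after the Object header. A standalone sketch of that walk (ObjectHeaderSketch, kHeapReferenceSizeSketch, and the returned offsets are assumptions, not the ART layout):

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kHeapReferenceSizeSketch = sizeof(uint32_t);   // compressed 32-bit references

struct ObjectHeaderSketch {
  uint32_t klass_;            // the single reference field of the object header in this sketch
  uint32_t monitor_;
};

// Collect the offsets of `count` reference fields laid out contiguously after the header,
// advancing by the fixed reference size; a patch visitor would rewrite each of these slots.
std::vector<size_t> CollectReferenceOffsetsSketch(size_t count) {
  std::vector<size_t> offsets;
  size_t offset = sizeof(ObjectHeaderSketch);   // reference fields start right after the header
  for (size_t i = 0u; i != count; ++i) {
    offsets.push_back(offset);
    offset += kHeapReferenceSizeSketch;
  }
  return offsets;
}
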
 
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 608b48e..171af6f 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -31,7 +31,8 @@
 static constexpr uint kEvacuateLivePercentThreshold = 75U;
 
 // Whether we protect the unused and cleared regions.
-static constexpr bool kProtectClearedRegions = true;
+// Only protect for target builds to prevent flaky test failures (b/63131961).
+static constexpr bool kProtectClearedRegions = kIsTargetBuild;
 
 // Whether we poison memory areas occupied by dead objects in unevacuated regions.
 static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index 9fde669..2082064 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -51,9 +51,9 @@
 
 inline void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
   const ImageSection& fields = GetFieldsSection();
-  for (size_t pos = 0; pos < fields.Size(); ) {
+  for (size_t pos = 0u; pos < fields.Size(); ) {
     auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
-    for (size_t i = 0; i < array->size(); ++i) {
+    for (size_t i = 0u; i < array->size(); ++i) {
       visitor->Visit(&array->At(i, sizeof(ArtField)));
     }
     pos += array->ComputeSize(array->size());
@@ -66,15 +66,15 @@
   const size_t method_alignment = ArtMethod::Alignment(pointer_size);
   const size_t method_size = ArtMethod::Size(pointer_size);
   const ImageSection& methods = GetMethodsSection();
-  for (size_t pos = 0; pos < methods.Size(); ) {
+  for (size_t pos = 0u; pos < methods.Size(); ) {
     auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
-    for (size_t i = 0; i < array->size(); ++i) {
+    for (size_t i = 0u; i < array->size(); ++i) {
       visitor->Visit(&array->At(i, method_size, method_alignment));
     }
     pos += array->ComputeSize(array->size(), method_size, method_alignment);
   }
   const ImageSection& runtime_methods = GetRuntimeMethodsSection();
-  for (size_t pos = 0; pos < runtime_methods.Size(); ) {
+  for (size_t pos = 0u; pos < runtime_methods.Size(); ) {
     auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
     visitor->Visit(method);
     pos += method_size;
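
The visitors above walk sections that pack several length-prefixed arrays back to back: each chunk starts with its element count, the inner loop visits the elements, and the cursor advances by the chunk's computed size. A self-contained sketch of the same traversal over a simplified layout (hypothetical PackedChunk type, not the runtime's LengthPrefixedArray):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Simplified length-prefixed array: a 32-bit count immediately followed by the elements.
    struct PackedChunk {
      uint32_t count;

      const int32_t* elements() const {
        return reinterpret_cast<const int32_t*>(
            reinterpret_cast<const uint8_t*>(this) + sizeof(uint32_t));
      }
      size_t ComputeSize() const { return sizeof(uint32_t) + count * sizeof(int32_t); }
    };

    // Visit every element of every chunk packed back to back in a section.
    void VisitSection(const uint8_t* base, size_t section_size) {
      for (size_t pos = 0u; pos < section_size; ) {
        const PackedChunk* chunk = reinterpret_cast<const PackedChunk*>(base + pos);
        for (uint32_t i = 0u; i < chunk->count; ++i) {
          std::printf("element %u = %d\n", static_cast<unsigned>(i), chunk->elements()[i]);
        }
        pos += chunk->ComputeSize();  // advance the cursor past this chunk, as the visitors above do
      }
    }
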
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 03fd964..4937132 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -160,7 +160,6 @@
       have_exception_thrown_listeners_(false),
       have_watched_frame_pop_listeners_(false),
       have_branch_listeners_(false),
-      have_invoke_virtual_or_interface_listeners_(false),
       have_exception_handled_listeners_(false),
       deoptimized_methods_lock_("deoptimized methods lock", kGenericBottomLock),
       deoptimization_enabled_(false),
@@ -562,11 +561,6 @@
                            branch_listeners_,
                            listener,
                            &have_branch_listeners_);
-  PotentiallyAddListenerTo(kInvokeVirtualOrInterface,
-                           events,
-                           invoke_virtual_or_interface_listeners_,
-                           listener,
-                           &have_invoke_virtual_or_interface_listeners_);
   PotentiallyAddListenerTo(kDexPcMoved,
                            events,
                            dex_pc_listeners_,
@@ -649,11 +643,6 @@
                                 branch_listeners_,
                                 listener,
                                 &have_branch_listeners_);
-  PotentiallyRemoveListenerFrom(kInvokeVirtualOrInterface,
-                                events,
-                                invoke_virtual_or_interface_listeners_,
-                                listener,
-                                &have_invoke_virtual_or_interface_listeners_);
   PotentiallyRemoveListenerFrom(kDexPcMoved,
                                 events,
                                 dex_pc_listeners_,
@@ -1213,21 +1202,6 @@
   }
 }
 
-void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread,
-                                                   ObjPtr<mirror::Object> this_object,
-                                                   ArtMethod* caller,
-                                                   uint32_t dex_pc,
-                                                   ArtMethod* callee) const {
-  Thread* self = Thread::Current();
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Object> thiz(hs.NewHandle(this_object));
-  for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) {
-    if (listener != nullptr) {
-      listener->InvokeVirtualOrInterface(thread, thiz, caller, dex_pc, callee);
-    }
-  }
-}
-
 void Instrumentation::WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const {
   for (InstrumentationListener* listener : watched_frame_pop_listeners_) {
     if (listener != nullptr) {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e5d8800..b3fae25 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -143,14 +143,6 @@
                       int32_t dex_pc_offset)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 
-  // Call-back for when we get an invokevirtual or an invokeinterface.
-  virtual void InvokeVirtualOrInterface(Thread* thread,
-                                        Handle<mirror::Object> this_object,
-                                        ArtMethod* caller,
-                                        uint32_t dex_pc,
-                                        ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-
   // Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack by
   // either return or exceptions. Normally instrumentation listeners should ensure that there are
   // shadow-frames by deoptimizing stacks.
@@ -193,7 +185,6 @@
     kFieldWritten = 0x20,
     kExceptionThrown = 0x40,
     kBranch = 0x80,
-    kInvokeVirtualOrInterface = 0x100,
     kWatchedFramePop = 0x200,
     kExceptionHandled = 0x400,
   };
@@ -377,10 +368,6 @@
     return have_branch_listeners_;
   }
 
-  bool HasInvokeVirtualOrInterfaceListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return have_invoke_virtual_or_interface_listeners_;
-  }
-
   bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return have_watched_frame_pop_listeners_;
   }
@@ -393,8 +380,8 @@
     return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
         have_field_read_listeners_ || have_field_write_listeners_ ||
         have_exception_thrown_listeners_ || have_method_unwind_listeners_ ||
-        have_branch_listeners_ || have_invoke_virtual_or_interface_listeners_ ||
-        have_watched_frame_pop_listeners_ || have_exception_handled_listeners_;
+        have_branch_listeners_ || have_watched_frame_pop_listeners_ ||
+        have_exception_handled_listeners_;
   }
 
   // Any instrumentation *other* than what is needed for Jit profiling active?
@@ -470,17 +457,6 @@
     }
   }
 
-  void InvokeVirtualOrInterface(Thread* thread,
-                                mirror::Object* this_object,
-                                ArtMethod* caller,
-                                uint32_t dex_pc,
-                                ArtMethod* callee) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (UNLIKELY(HasInvokeVirtualOrInterfaceListeners())) {
-      InvokeVirtualOrInterfaceImpl(thread, this_object, caller, dex_pc, callee);
-    }
-  }
-
   // Inform listeners that a watched frame (needs_notify_pop_ set) has been popped off the stack.
   void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -598,12 +574,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void InvokeVirtualOrInterfaceImpl(Thread* thread,
-                                    ObjPtr<mirror::Object> this_object,
-                                    ArtMethod* caller,
-                                    uint32_t dex_pc,
-                                    ArtMethod* callee) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
   void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FieldReadEventImpl(Thread* thread,
@@ -683,9 +653,6 @@
   // Do we have any branch listeners? Short-cut to avoid taking the instrumentation_lock_.
   bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
 
-  // Do we have any invoke listeners? Short-cut to avoid taking the instrumentation_lock_.
-  bool have_invoke_virtual_or_interface_listeners_ GUARDED_BY(Locks::mutator_lock_);
-
   // Do we have any exception handled listeners? Short-cut to avoid taking the
   // instrumentation_lock_.
   bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);
@@ -709,8 +676,6 @@
   std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
-  std::list<InstrumentationListener*> invoke_virtual_or_interface_listeners_
-      GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
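
The events removed and kept above are single-bit flags, so a listener subscribes to a set of them with a bitwise OR; dispatch first tests a cached have_*_listeners_ boolean and only then walks the (possibly nullptr-containing) listener list. A reduced sketch of that registration/dispatch pattern with hypothetical names:

    #include <cstdint>
    #include <list>

    enum InstrumentationEvent : uint32_t {  // single-bit flags, combined with '|'
      kMethodEntered   = 0x1,
      kBranch          = 0x80,
      kWatchedFramePop = 0x200,
    };

    struct Listener {
      virtual ~Listener() {}
      virtual void Branch(uint32_t dex_pc, int32_t offset) = 0;
    };

    class EventRegistry {
     public:
      void AddListener(Listener* listener, uint32_t events) {
        if ((events & kBranch) != 0u) {
          branch_listeners_.push_back(listener);
          have_branch_listeners_ = true;  // cheap flag checked on the fast path
        }
      }

      void FireBranch(uint32_t dex_pc, int32_t offset) const {
        if (!have_branch_listeners_) {  // avoid walking the list when nobody cares
          return;
        }
        for (Listener* listener : branch_listeners_) {
          if (listener != nullptr) {
            listener->Branch(dex_pc, offset);
          }
        }
      }

     private:
      std::list<Listener*> branch_listeners_;
      bool have_branch_listeners_ = false;
    };
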
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 9146245..31cfeb6 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -50,7 +50,6 @@
       received_exception_thrown_event(false),
       received_exception_handled_event(false),
       received_branch_event(false),
-      received_invoke_virtual_or_interface_event(false),
       received_watched_frame_pop(false) {}
 
   virtual ~TestInstrumentationListener() {}
@@ -146,15 +145,6 @@
     received_branch_event = true;
   }
 
-  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
-                                Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                ArtMethod* caller ATTRIBUTE_UNUSED,
-                                uint32_t dex_pc ATTRIBUTE_UNUSED,
-                                ArtMethod* callee ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    received_invoke_virtual_or_interface_event = true;
-  }
-
   void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_watched_frame_pop  = true;
@@ -172,7 +162,6 @@
     received_exception_thrown_event = false;
     received_exception_handled_event = false;
     received_branch_event = false;
-    received_invoke_virtual_or_interface_event = false;
     received_watched_frame_pop = false;
   }
 
@@ -187,7 +176,6 @@
   bool received_exception_thrown_event;
   bool received_exception_handled_event;
   bool received_branch_event;
-  bool received_invoke_virtual_or_interface_event;
   bool received_watched_frame_pop;
 
  private:
@@ -382,8 +370,6 @@
         return instr->HasExceptionHandledListeners();
       case instrumentation::Instrumentation::kBranch:
         return instr->HasBranchListeners();
-      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
-        return instr->HasInvokeVirtualOrInterfaceListeners();
       case instrumentation::Instrumentation::kWatchedFramePop:
         return instr->HasWatchedFramePopListeners();
       default:
@@ -434,9 +420,6 @@
       case instrumentation::Instrumentation::kBranch:
         instr->Branch(self, method, dex_pc, -1);
         break;
-      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
-        instr->InvokeVirtualOrInterface(self, obj, method, dex_pc, method);
-        break;
       case instrumentation::Instrumentation::kWatchedFramePop:
         instr->WatchedFramePopped(self, frame);
         break;
@@ -477,8 +460,6 @@
         return listener.received_exception_handled_event;
       case instrumentation::Instrumentation::kBranch:
         return listener.received_branch_event;
-      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
-        return listener.received_invoke_virtual_or_interface_event;
       case instrumentation::Instrumentation::kWatchedFramePop:
         return listener.received_watched_frame_pop;
       default:
@@ -636,10 +617,6 @@
   TestEvent(instrumentation::Instrumentation::kBranch);
 }
 
-TEST_F(InstrumentationTest, InvokeVirtualOrInterfaceEvent) {
-  TestEvent(instrumentation::Instrumentation::kInvokeVirtualOrInterface);
-}
-
 TEST_F(InstrumentationTest, DeoptimizeDirectMethod) {
   ScopedObjectAccess soa(Thread::Current());
   jobject class_loader = LoadDex("Instrumentation");
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a607b48..1e4239e 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -136,11 +136,34 @@
   }
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+  ArtMethod* sf_method = shadow_frame.GetMethod();
+
+  // Try to find the method in small thread-local cache first.
+  InterpreterCache* tls_cache = self->GetInterpreterCache();
+  size_t tls_value;
+  ArtMethod* resolved_method;
+  if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+    resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
+  } else {
+    ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+    constexpr ClassLinker::ResolveMode resolve_mode =
+        do_access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+                        : ClassLinker::ResolveMode::kNoChecks;
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, sf_method, type);
+    if (UNLIKELY(resolved_method == nullptr)) {
+      CHECK(self->IsExceptionPending());
+      result->SetJ(0);
+      return false;
+    }
+    tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
+  }
+
+  // Null pointer check and virtual method resolution.
   ObjPtr<mirror::Object> receiver =
       (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
-  ArtMethod* sf_method = shadow_frame.GetMethod();
-  ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
-      method_idx, &receiver, sf_method, self);
+  ArtMethod* const called_method = FindMethodToCall<type, do_access_check>(
+      method_idx, resolved_method, &receiver, sf_method, self);
+
   // The shadow frame should already be pushed, so we don't need to update it.
   if (UNLIKELY(called_method == nullptr)) {
     CHECK(self->IsExceptionPending());
@@ -165,15 +188,6 @@
           return !self->IsExceptionPending();
         }
       }
-    } else {
-      // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
-      if (type == kVirtual || type == kInterface) {
-        instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-        if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
-          instrumentation->InvokeVirtualOrInterface(
-              self, receiver.Ptr(), sf_method, shadow_frame.GetDexPC(), called_method);
-        }
-      }
     }
     return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
                                              result);
@@ -277,12 +291,6 @@
           receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
       jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
     }
-    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-    // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
-    if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
-      instrumentation->InvokeVirtualOrInterface(
-          self, receiver.Ptr(), shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
-    }
     // No need to check since we've been quickened.
     return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
   }
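
The new interpreter fast path above first consults a small thread-local InterpreterCache keyed by the dex instruction pointer and only falls back to ClassLinker::ResolveMethod on a miss, storing the result for next time. A minimal direct-mapped cache in the same spirit (hypothetical layout and size, not the runtime's InterpreterCache):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Tiny direct-mapped, per-thread cache: the key is the pointer to the dex
    // instruction, the value an opaque word (e.g. a resolved ArtMethod*).
    class TinyInterpreterCache {
     public:
      bool Get(const void* key, size_t* value) const {
        const Entry& entry = entries_[IndexOf(key)];
        if (entry.key == key) {
          *value = entry.value;
          return true;
        }
        return false;
      }

      void Set(const void* key, size_t value) {
        entries_[IndexOf(key)] = Entry{key, value};  // evicts whatever hashed to this slot
      }

     private:
      struct Entry {
        const void* key = nullptr;
        size_t value = 0u;
      };

      static constexpr size_t kSize = 256;  // power of two, so indexing is a mask

      static size_t IndexOf(const void* key) {
        // Shift away low bits that rarely differ between nearby instructions, then mask.
        return (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
      }

      std::array<Entry, kSize> entries_{};
    };

Because the cache is per-thread, Get() and Set() need no synchronization, which is what makes the hit path cheap enough for the interpreter loop.
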
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 0e4cf27..91371d1 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -279,47 +279,47 @@
     return lock_count_data_;
   }
 
-  static size_t LockCountDataOffset() {
+  static constexpr size_t LockCountDataOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
   }
 
-  static size_t LinkOffset() {
+  static constexpr size_t LinkOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, link_);
   }
 
-  static size_t MethodOffset() {
+  static constexpr size_t MethodOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, method_);
   }
 
-  static size_t DexPCOffset() {
+  static constexpr size_t DexPCOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
   }
 
-  static size_t NumberOfVRegsOffset() {
+  static constexpr size_t NumberOfVRegsOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
   }
 
-  static size_t VRegsOffset() {
+  static constexpr size_t VRegsOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, vregs_);
   }
 
-  static size_t ResultRegisterOffset() {
+  static constexpr size_t ResultRegisterOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, result_register_);
   }
 
-  static size_t DexPCPtrOffset() {
+  static constexpr size_t DexPCPtrOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
   }
 
-  static size_t DexInstructionsOffset() {
+  static constexpr size_t DexInstructionsOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, dex_instructions_);
   }
 
-  static size_t CachedHotnessCountdownOffset() {
+  static constexpr size_t CachedHotnessCountdownOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
   }
 
-  static size_t HotnessCountdownOffset() {
+  static constexpr size_t HotnessCountdownOffset() {
     return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
   }
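
Turning these accessors into constexpr functions lets the offsets be consumed as compile-time constants, for example in static_asserts or by code generators, rather than being computed at run time; OFFSETOF_MEMBER is an offsetof-style macro, so the whole expression folds away. A small stand-alone illustration of the pattern (generic Frame type, not ShadowFrame):

    #include <cstddef>
    #include <cstdint>

    // A stand-in for a runtime object whose field offsets are consumed by generated code.
    struct Frame {
      Frame* link;
      uint32_t dex_pc;
      uint32_t number_of_vregs;

      // constexpr accessor: the offset is a compile-time constant, so it can feed
      // static_asserts and code generators instead of being computed at run time.
      static constexpr size_t DexPCOffset() { return offsetof(Frame, dex_pc); }
    };

    // Costs nothing at run time and fails the build if the layout drifts.
    // On common ABIs dex_pc immediately follows the pointer-sized link field.
    static_assert(Frame::DexPCOffset() == sizeof(Frame*), "unexpected Frame layout");
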
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 33d228f..63cb6a4 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,12 +18,15 @@
 
 #include <sstream>
 
+#include "android-base/unique_fd.h"
+
 #include "arch/context.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "base/histogram-inl.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/membarrier.h"
+#include "base/memfd.h"
 #include "base/mem_map.h"
 #include "base/quasi_atomic.h"
 #include "base/stl_util.h"
@@ -52,16 +55,32 @@
 #include "thread-current-inl.h"
 #include "thread_list.h"
 
+using android::base::unique_fd;
+
 namespace art {
 namespace jit {
 
-static constexpr int kProtCode = PROT_READ | PROT_EXEC;
-static constexpr int kProtData = PROT_READ | PROT_WRITE;
-static constexpr int kProtProfile = PROT_READ;
-
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
 
+static constexpr int kProtR = PROT_READ;
+static constexpr int kProtRW = PROT_READ | PROT_WRITE;
+static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtRX = PROT_READ | PROT_EXEC;
+
+namespace {
+
+// Translate an address belonging to one memory map into an address in a second. This is useful
+// when there are two virtual memory ranges for the same physical memory range.
+template <typename T>
+T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
+  CHECK(src.HasAddress(src_ptr));
+  uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
+  return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
+}
+
+}  // namespace
+
 class JitCodeCache::JniStubKey {
  public:
   explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -190,17 +209,41 @@
 
   // Register for membarrier expedited sync core if JIT will be generating code.
   if (!used_only_for_profile_data) {
-    art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore);
+    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
+      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
+      // flushed and it's used when adding code to the JIT. The memory used by the new code may
+      // have just been released and, in theory, the old code could still be in a pipeline.
+      VLOG(jit) << "Kernel does not support membarrier sync-core";
+    }
   }
 
-  // Decide how we should map the code and data sections.
-  // If we use the code cache just for profiling we do not need to map the code section as
-  // executable.
-  // NOTE 1: this is yet another workaround to bypass strict SElinux policies in order to be able
-  //         to profile system server.
-  // NOTE 2: We could just not create the code section at all but we will need to
-  //         special case too many cases.
-  int memmap_flags_prot_code = used_only_for_profile_data ? kProtProfile : kProtCode;
+  // File descriptor enabling dual-view mapping of code section.
+  unique_fd mem_fd;
+
+  // Bionic supports memfd_create, but the call may fail on older kernels.
+  mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
+  if (mem_fd.get() < 0) {
+    VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
+              << strerror(errno);
+  }
+
+  if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
+    std::ostringstream oss;
+    oss << "Failed to initialize memory file: " << strerror(errno);
+    *error_msg = oss.str();
+    return nullptr;
+  }
+
+  // Data cache will be half of the initial allocation.
+  // Code cache will be the other half of the initial allocation.
+  // TODO: Make this variable?
+
+  // Align both capacities to page size, as that's the unit mspaces use.
+  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+  const size_t data_capacity = max_capacity / 2;
+  const size_t exec_capacity = used_only_for_profile_data ? 0 : max_capacity - data_capacity;
+  DCHECK_LE(data_capacity + exec_capacity, max_capacity);
 
   std::string error_str;
   // Map name specific for android_os_Debug.cpp accounting.
@@ -208,71 +251,147 @@
   // We could do PC-relative addressing to avoid this problem, but that
   // would require reserving code and data area before submitting, which
   // means more windows for the code memory to be RWX.
-  MemMap data_map = MemMap::MapAnonymous(
-      "data-code-cache",
-      /* addr */ nullptr,
-      max_capacity,
-      kProtData,
-      /* low_4gb */ true,
-      /* reuse */ false,
-      /* reservation */ nullptr,
-      &error_str);
-  if (!data_map.IsValid()) {
+  int base_flags;
+  MemMap data_pages;
+  if (mem_fd.get() >= 0) {
+    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and a non-writable view of the JIT code pages. We use the memory file descriptor to
+    // enable dual mapping - we'll create a second mapping using the descriptor below. The
+    // mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+
+    //       | non exec code |\
+    //       +---------------+ \
+    //       :               :\ \
+    //       +---------------+.\.+---------------+
+    //       |  exec code    |  \|     code      |
+    //       +---------------+...+---------------+
+    //       |      data     |   |     data      |
+    //       +---------------+...+---------------+
+    //
+    // In this configuration code updates are written to the non-executable view of the code
+    // cache, and the executable view of the code cache has fixed RX memory protections.
+    //
+    // This memory needs to be mapped shared as the code portions will have two mappings.
+    base_flags = MAP_SHARED;
+    data_pages = MemMap::MapFile(
+        data_capacity + exec_capacity,
+        kProtRW,
+        base_flags,
+        mem_fd,
+        /* start */ 0,
+        /* low_4gb */ true,
+        "data-code-cache",
+        &error_str);
+  } else {
+    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and JIT code pages. The mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+...+---------------+
+    //       |  exec code    |   |     code      |
+    //       +---------------+...+---------------+
+    //       |      data     |   |     data      |
+    //       +---------------+...+---------------+
+    //
+    // In this configuration code updates are written to the executable view of the code cache,
+    // and the executable view of the code cache transitions RX to RWX for the update and then
+    // back to RX after the update.
+    base_flags = MAP_PRIVATE | MAP_ANON;
+    data_pages = MemMap::MapAnonymous(
+        "data-code-cache",
+        /* addr */ nullptr,
+        data_capacity + exec_capacity,
+        kProtRW,
+        /* low_4gb */ true,
+        /* reuse */ false,
+        /* reservation */ nullptr,
+        &error_str);
+  }
+
+  if (!data_pages.IsValid()) {
     std::ostringstream oss;
     oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
     *error_msg = oss.str();
     return nullptr;
   }
 
-  // Align both capacities to page size, as that's the unit mspaces use.
-  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
-  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+  MemMap exec_pages;
+  MemMap non_exec_pages;
+  if (exec_capacity > 0) {
+    uint8_t* const divider = data_pages.Begin() + data_capacity;
+    // Set initial permission for executable view to catch any SELinux permission problems early
+    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
+    // executable as there is no code in the cache yet.
+    exec_pages = data_pages.RemapAtEnd(divider,
+                                       "jit-code-cache",
+                                       kProtRX,
+                                       base_flags | MAP_FIXED,
+                                       mem_fd.get(),
+                                       (mem_fd.get() >= 0) ? data_capacity : 0,
+                                       &error_str);
+    if (!exec_pages.IsValid()) {
+      std::ostringstream oss;
+      oss << "Failed to create read execute code cache: " << error_str << " size=" << max_capacity;
+      *error_msg = oss.str();
+      return nullptr;
+    }
 
-  // Data cache is 1 / 2 of the map.
-  // TODO: Make this variable?
-  size_t data_size = max_capacity / 2;
-  size_t code_size = max_capacity - data_size;
-  DCHECK_EQ(code_size + data_size, max_capacity);
-  uint8_t* divider = data_map.Begin() + data_size;
-
-  MemMap code_map = data_map.RemapAtEnd(
-      divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str);
-  if (!code_map.IsValid()) {
-    std::ostringstream oss;
-    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
-    *error_msg = oss.str();
-    return nullptr;
+    if (mem_fd.get() >= 0) {
+      // For dual view, create the secondary view of code memory used for updating code. This view
+      // is never executable.
+      non_exec_pages = MemMap::MapFile(exec_capacity,
+                                       kProtR,
+                                       base_flags,
+                                       mem_fd,
+                                       /* start */ data_capacity,
+                                       /* low_4GB */ false,
+                                       "jit-code-cache-rw",
+                                       &error_str);
+      if (!non_exec_pages.IsValid()) {
+        // Log and continue as single view JIT.
+        VLOG(jit) << "Failed to map non-executable view of JIT code cache";
+      }
+    }
+  } else {
+    // Profiling only. No memory for code required.
+    DCHECK(used_only_for_profile_data);
   }
-  DCHECK_EQ(code_map.Begin(), divider);
-  data_size = initial_capacity / 2;
-  code_size = initial_capacity - data_size;
-  DCHECK_EQ(code_size + data_size, initial_capacity);
+
+  const size_t initial_data_capacity = initial_capacity / 2;
+  const size_t initial_exec_capacity =
+      (exec_capacity == 0) ? 0 : (initial_capacity - initial_data_capacity);
+
   return new JitCodeCache(
-      std::move(code_map),
-      std::move(data_map),
-      code_size,
-      data_size,
+      std::move(data_pages),
+      std::move(exec_pages),
+      std::move(non_exec_pages),
+      initial_data_capacity,
+      initial_exec_capacity,
       max_capacity,
-      garbage_collect_code,
-      memmap_flags_prot_code);
+      garbage_collect_code);
 }
 
-JitCodeCache::JitCodeCache(MemMap&& code_map,
-                           MemMap&& data_map,
-                           size_t initial_code_capacity,
+JitCodeCache::JitCodeCache(MemMap&& data_pages,
+                           MemMap&& exec_pages,
+                           MemMap&& non_exec_pages,
                            size_t initial_data_capacity,
+                           size_t initial_exec_capacity,
                            size_t max_capacity,
-                           bool garbage_collect_code,
-                           int memmap_flags_prot_code)
+                           bool garbage_collect_code)
     : lock_("Jit code cache", kJitCodeCacheLock),
       lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
-      code_map_(std::move(code_map)),
-      data_map_(std::move(data_map)),
+      data_pages_(std::move(data_pages)),
+      exec_pages_(std::move(exec_pages)),
+      non_exec_pages_(std::move(non_exec_pages)),
       max_capacity_(max_capacity),
-      current_capacity_(initial_code_capacity + initial_data_capacity),
-      code_end_(initial_code_capacity),
+      current_capacity_(initial_exec_capacity + initial_data_capacity),
       data_end_(initial_data_capacity),
+      exec_end_(initial_exec_capacity),
       last_collection_increased_code_cache_(false),
       garbage_collect_code_(garbage_collect_code),
       used_memory_for_data_(0),
@@ -284,40 +403,46 @@
       histogram_code_memory_use_("Memory used for compiled code", 16),
       histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
       is_weak_access_enabled_(true),
-      inline_cache_cond_("Jit inline cache condition variable", lock_),
-      memmap_flags_prot_code_(memmap_flags_prot_code) {
+      inline_cache_cond_("Jit inline cache condition variable", lock_) {
 
-  DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
-  code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
-  data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
+  DCHECK_GE(max_capacity, initial_exec_capacity + initial_data_capacity);
 
-  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
-    PLOG(FATAL) << "create_mspace_with_base failed";
+  // Initialize the data heap
+  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
+  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
+
+  // Initialize the code heap
+  MemMap* code_heap = nullptr;
+  if (non_exec_pages_.IsValid()) {
+    code_heap = &non_exec_pages_;
+  } else if (exec_pages_.IsValid()) {
+    code_heap = &exec_pages_;
   }
-
-  SetFootprintLimit(current_capacity_);
-
-  CheckedCall(mprotect,
-              "mprotect jit code cache",
-              code_map_.Begin(),
-              code_map_.Size(),
-              memmap_flags_prot_code_);
-  CheckedCall(mprotect,
-              "mprotect jit data cache",
-              data_map_.Begin(),
-              data_map_.Size(),
-              kProtData);
+  if (code_heap != nullptr) {
+    // Make all pages reserved for the code heap writable. The mspace allocator, which manages the
+    // heap, will take and initialize pages in create_mspace_with_base().
+    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
+    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
+    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
+    SetFootprintLimit(current_capacity_);
+    // Protect pages containing heap metadata. Updates to the code heap toggle write permission
+    // only for the duration of the update; write access is not required at any other time.
+    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
+  } else {
+    exec_mspace_ = nullptr;
+    SetFootprintLimit(current_capacity_);
+  }
 
   VLOG(jit) << "Created jit code cache: initial data size="
             << PrettySize(initial_data_capacity)
             << ", initial code size="
-            << PrettySize(initial_code_capacity);
+            << PrettySize(initial_exec_capacity);
 }
 
 JitCodeCache::~JitCodeCache() {}
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return code_map_.Begin() <= ptr && ptr < code_map_.End();
+  return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -385,22 +510,20 @@
       : ScopedTrace("ScopedCodeCacheWrite"),
         code_cache_(code_cache) {
     ScopedTrace trace("mprotect all");
-    CheckedCall(
-        mprotect,
-        "make code writable",
-        code_cache_->code_map_.Begin(),
-        code_cache_->code_map_.Size(),
-        code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
+    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
+      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
   }
 
   ~ScopedCodeCacheWrite() {
     ScopedTrace trace("mprotect code");
-    CheckedCall(
-        mprotect,
-        "make code protected",
-        code_cache_->code_map_.Begin(),
-        code_cache_->code_map_.Size(),
-        code_cache_->memmap_flags_prot_code_);
+    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
+      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
   }
 
  private:
@@ -602,7 +725,13 @@
   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
     FreeData(GetRootTable(code_ptr));
   }  // else this is a JNI stub without any data.
-  FreeCode(reinterpret_cast<uint8_t*>(allocation));
+
+  uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
+  if (HasDualCodeMapping()) {
+    code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
+  }
+
+  FreeCode(code_allocation);
 }
 
 void JitCodeCache::FreeAllMethodHeaders(
@@ -753,6 +882,16 @@
   }
 }
 
+const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
+  if (HasDualCodeMapping()) {
+    return &non_exec_pages_;
+  } else if (HasCodeMapping()) {
+    return &exec_pages_;
+  } else {
+    return nullptr;
+  }
+}
+
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
@@ -773,38 +912,73 @@
     DCheckRootsAreValid(roots);
   }
 
-  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
-  // Ensure the header ends up at expected instruction alignment.
-  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
-  size_t total_size = header_size + code_size;
-
   OatQuickMethodHeader* method_header = nullptr;
   uint8_t* code_ptr = nullptr;
-  uint8_t* memory = nullptr;
+
   MutexLock mu(self, lock_);
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
   // finish.
   WaitForPotentialCollectionToCompleteRunnable(self);
   {
     ScopedCodeCacheWrite scc(this);
-    memory = AllocateCode(total_size);
-    if (memory == nullptr) {
+
+    size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+    // Ensure the header ends up at expected instruction alignment.
+    size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+    size_t total_size = header_size + code_size;
+
+    // AllocateCode allocates memory in the non-executable region for the method header and the
+    // code. The header size may include alignment padding.
+    uint8_t* nox_memory = AllocateCode(total_size);
+    if (nox_memory == nullptr) {
       return nullptr;
     }
-    code_ptr = memory + header_size;
 
+    // code_ptr points to non-executable code.
+    code_ptr = nox_memory + header_size;
     std::copy(code, code + code_size, code_ptr);
     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+
+    // From here code_ptr points to executable code.
+    if (HasDualCodeMapping()) {
+      code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
+    }
+
     new (method_header) OatQuickMethodHeader(
         (stack_map != nullptr) ? code_ptr - stack_map : 0u,
         code_size);
-    // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
-    // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
-    // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
-    // 6P) stop being supported or their kernels are fixed.
+
+    DCHECK(!Runtime::Current()->IsAotCompiler());
+    if (has_should_deoptimize_flag) {
+      method_header->SetHasShouldDeoptimizeFlag();
+    }
+
+    // Update method_header pointer to executable code region.
+    if (HasDualCodeMapping()) {
+      method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
+    }
+
+    // Both instruction and data caches need flushing to the point of unification where both share
+    // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
+    // newly added code are written out to the point of unification. Flushing the instruction
+    // cache ensures the newly written code will be fetched from the point of unification before
+    // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
+    // prevent stale code from residing in the instruction cache.
+    //
+    // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
+    // may trigger a segfault if a page fault occurs when requesting a cache maintenance
+    // operation. This is a kernel bug that we need to work around until affected devices
+    // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
     //
     // For reference, this behavior is caused by this commit:
     // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+    //
+    if (HasDualCodeMapping()) {
+      // Flush the data cache lines associated with the non-executable copy of the code just added.
+      FlushDataCache(nox_memory, nox_memory + total_size);
+    }
+    // FlushInstructionCache() flushes both data and instruction cache lines. The cacheline range
+    // flushed is for the executable mapping of the code just added.
     FlushInstructionCache(code_ptr, code_ptr + code_size);
 
     // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
@@ -813,16 +987,14 @@
     // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
     // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
     // hardware support that broadcasts TLB invalidations and so their kernels have no software
-    // based TLB shootdown.
+    // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
+    // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
+    // platforms lacking the appropriate support.
     art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);
 
-    DCHECK(!Runtime::Current()->IsAotCompiler());
-    if (has_should_deoptimize_flag) {
-      method_header->SetHasShouldDeoptimizeFlag();
-    }
-
     number_of_compilations_++;
   }
+
   // We need to update the entry point in the runnable state for the instrumentation.
   {
     // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
@@ -1167,9 +1339,9 @@
   DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
   DCHECK_EQ(per_space_footprint * 2, new_footprint);
   mspace_set_footprint_limit(data_mspace_, per_space_footprint);
-  {
+  if (HasCodeMapping()) {
     ScopedCodeCacheWrite scc(this);
-    mspace_set_footprint_limit(code_mspace_, per_space_footprint);
+    mspace_set_footprint_limit(exec_mspace_, per_space_footprint);
   }
 }
 
@@ -1244,8 +1416,8 @@
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(code_map_.Begin()),
-          reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
+          reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1614,15 +1786,17 @@
 // NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
 // is already held.
 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
-  if (code_mspace_ == mspace) {
-    size_t result = code_end_;
-    code_end_ += increment;
-    return reinterpret_cast<void*>(result + code_map_.Begin());
+  if (mspace == exec_mspace_) {
+    DCHECK(exec_mspace_ != nullptr);
+    const MemMap* const code_pages = GetUpdatableCodeMapping();
+    void* result = code_pages->Begin() + exec_end_;
+    exec_end_ += increment;
+    return result;
   } else {
     DCHECK_EQ(data_mspace_, mspace);
-    size_t result = data_end_;
+    void* result = data_pages_.Begin() + data_end_;
     data_end_ += increment;
-    return reinterpret_cast<void*>(result + data_map_.Begin());
+    return result;
   }
 }
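
MoreCore above is the hook the mspace (dlmalloc) allocator calls when it needs more memory: because the whole mapping is reserved up front, the cache simply hands back the next increment bytes by bumping data_end_ or exec_end_ within the corresponding pages. A reduced sketch of that bump scheme over a pre-reserved range (hypothetical class; no footprint-limit, shrinking, or GC handling):

    #include <cstddef>
    #include <cstdint>

    // Hands out memory from an already-mapped range by bumping a cursor; intended as
    // the "more core" hook of an allocator that grows within a fixed reservation.
    class BumpRegion {
     public:
      BumpRegion(uint8_t* begin, size_t capacity) : begin_(begin), capacity_(capacity) {}

      // Returns the start of the newly granted range, or nullptr when the reservation
      // is exhausted. `increment` is what the allocator asks for; shrinking (negative
      // increments) is not supported in this sketch.
      void* MoreCore(intptr_t increment) {
        if (increment < 0 || end_ + static_cast<size_t>(increment) > capacity_) {
          return nullptr;
        }
        void* result = begin_ + end_;  // old end is the start of the new range
        end_ += static_cast<size_t>(increment);
        return result;
      }

     private:
      uint8_t* const begin_;
      const size_t capacity_;
      size_t end_ = 0u;  // analogous to data_end_ / exec_end_
    };
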
 
@@ -1849,7 +2023,7 @@
 uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   uint8_t* result = reinterpret_cast<uint8_t*>(
-      mspace_memalign(code_mspace_, alignment, code_size));
+      mspace_memalign(exec_mspace_, alignment, code_size));
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
   // Ensure the header ends up at expected instruction alignment.
   DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
@@ -1859,7 +2033,7 @@
 
 void JitCodeCache::FreeCode(uint8_t* code) {
   used_memory_for_code_ -= mspace_usable_size(code);
-  mspace_free(code_mspace_, code);
+  mspace_free(exec_mspace_, code);
 }
 
 uint8_t* JitCodeCache::AllocateData(size_t data_size) {
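
The bulk of the jit_code_cache.cc change above implements the dual-view scheme: the code region is backed by a memfd and mapped twice, once read-write for writing compiled code and once read-execute for running it, with TranslateAddress() converting pointers between the two views. A stripped-down, Linux-only sketch of that mechanism (hypothetical helpers; error handling, SELinux considerations, and cache maintenance are omitted):

    #include <cstddef>
    #include <cstdint>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Use the raw syscall in case the libc lacks a memfd_create wrapper. On kernels
    // without memfd_create this returns -1 and a real cache falls back to single view.
    static int MemfdCreateOrFail(const char* name) {
      return static_cast<int>(syscall(SYS_memfd_create, name, 0u));
    }

    struct DualView {
      uint8_t* writable = nullptr;    // RW view: compiled code is written here
      uint8_t* executable = nullptr;  // RX view: code is executed from here
      size_t size = 0u;
    };

    bool CreateDualView(size_t size, DualView* out) {
      int fd = MemfdCreateOrFail("jit-sketch");
      if (fd < 0) {
        return false;
      }
      if (ftruncate(fd, static_cast<off_t>(size)) != 0) {
        close(fd);
        return false;
      }
      void* rw = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      void* rx = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
      close(fd);  // the mappings keep the shared memory alive
      if (rw == MAP_FAILED || rx == MAP_FAILED) {
        return false;
      }
      out->writable = static_cast<uint8_t*>(rw);
      out->executable = static_cast<uint8_t*>(rx);
      out->size = size;
      return true;
    }

    // Same role as TranslateAddress(): identical offset, different view.
    inline uint8_t* ToExecutable(const DualView& views, uint8_t* writable_ptr) {
      return views.executable + (writable_ptr - views.writable);
    }
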
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index e2aa01c..76ad8db 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -223,7 +223,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return mspace == code_mspace_ || mspace == data_mspace_;
+    return mspace == data_mspace_ || mspace == exec_mspace_;
   }
 
   void* MoreCore(const void* mspace, intptr_t increment);
@@ -279,13 +279,13 @@
 
  private:
   // Take ownership of maps.
-  JitCodeCache(MemMap&& code_map,
-               MemMap&& data_map,
-               size_t initial_code_capacity,
+  JitCodeCache(MemMap&& data_pages,
+               MemMap&& exec_pages,
+               MemMap&& non_exec_pages,
                size_t initial_data_capacity,
+               size_t initial_exec_capacity,
                size_t max_capacity,
-               bool garbage_collect_code,
-               int memmap_flags_prot_code);
+               bool garbage_collect_code);
 
   // Internal version of 'CommitCode' that will not retry if the
   // allocation fails. Return null if the allocation fails.
@@ -381,6 +381,16 @@
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
   void FreeData(uint8_t* data) REQUIRES(lock_);
 
+  bool HasDualCodeMapping() const {
+    return non_exec_pages_.IsValid();
+  }
+
+  bool HasCodeMapping() const {
+    return exec_pages_.IsValid();
+  }
+
+  const MemMap* GetUpdatableCodeMapping() const;
+
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
       REQUIRES(!lock_)
@@ -395,14 +405,17 @@
   ConditionVariable lock_cond_ GUARDED_BY(lock_);
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(lock_);
-  // Mem map which holds code.
-  MemMap code_map_;
   // Mem map which holds data (stack maps and profiling info).
-  MemMap data_map_;
-  // The opaque mspace for allocating code.
-  void* code_mspace_ GUARDED_BY(lock_);
+  MemMap data_pages_;
+  // Mem map which holds code and has executable permission.
+  MemMap exec_pages_;
+  // Mem map which holds code with non-executable permission. Only valid for dual-view JIT, where
+  // it is the non-executable view of the code used to write updates.
+  MemMap non_exec_pages_;
   // The opaque mspace for allocating data.
   void* data_mspace_ GUARDED_BY(lock_);
+  // The opaque mspace for allocating code.
+  void* exec_mspace_ GUARDED_BY(lock_);
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
   // Holds compiled code associated with the shorty for a JNI stub.
@@ -420,12 +433,12 @@
   // The current capacity in bytes of the code cache.
   size_t current_capacity_ GUARDED_BY(lock_);
 
-  // The current footprint in bytes of the code portion of the code cache.
-  size_t code_end_ GUARDED_BY(lock_);
-
   // The current footprint in bytes of the data portion of the code cache.
   size_t data_end_ GUARDED_BY(lock_);
 
+  // The current footprint in bytes of the code portion of the code cache.
+  size_t exec_end_ GUARDED_BY(lock_);
+
   // Whether the last collection round increased the code cache.
   bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
 
@@ -464,9 +477,6 @@
   // Condition to wait on for accessing inline caches.
   ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
 
-  // Mapping flags for the code section.
-  const int memmap_flags_prot_code_;
-
   friend class art::JitJniStubTestHelper;
   friend class ScopedCodeCacheWrite;
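
ScopedCodeCacheWrite, declared as a friend here and reworked in jit_code_cache.cc above, is an RAII guard: its constructor makes the updatable mapping writable (RW for the dual view, RWX for the single view) and its destructor drops write access again (R or RX). A generic sketch of that protection toggle (hypothetical class; mprotect errors ignored for brevity):

    #include <cstddef>
    #include <sys/mman.h>

    // Makes a code region writable for the lifetime of the guard and restores the
    // resting protection when the scope ends. For a dual view the pair would be
    // (PROT_READ | PROT_WRITE, PROT_READ); for a single view it would be
    // (PROT_READ | PROT_WRITE | PROT_EXEC, PROT_READ | PROT_EXEC).
    class ScopedWritableCode {
     public:
      ScopedWritableCode(void* begin, size_t size, int writable_prot, int resting_prot)
          : begin_(begin), size_(size), resting_prot_(resting_prot) {
        mprotect(begin_, size_, writable_prot);
      }

      ~ScopedWritableCode() {
        mprotect(begin_, size_, resting_prot_);
      }

      ScopedWritableCode(const ScopedWritableCode&) = delete;
      ScopedWritableCode& operator=(const ScopedWritableCode&) = delete;

     private:
      void* const begin_;
      const size_t size_;
      const int resting_prot_;
    };
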
 
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index c5e8830..6f61f5e 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -181,7 +181,7 @@
     }
   }
 
-  VarArgs(VarArgs&& other) {
+  VarArgs(VarArgs&& other) noexcept {
     m_ = other.m_;
     cnt_ = other.cnt_;
     type_ = other.type_;
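
Marking move constructors noexcept, as done here and in reference_table.cc below, is not just style: standard containers use std::move_if_noexcept during reallocation, so an unannotated move constructor makes them fall back to copying in order to preserve the strong exception guarantee. A short illustration:

    #include <cstdio>
    #include <vector>

    struct Tracked {
      Tracked() = default;
      Tracked(const Tracked&) { std::puts("copy"); }
      // Declared noexcept: std::vector will move, not copy, when it regrows.
      Tracked(Tracked&&) noexcept { std::puts("move"); }
    };

    int main() {
      std::vector<Tracked> v;
      v.reserve(1);
      v.emplace_back();   // fits in the reserved slot, prints nothing
      v.emplace_back();   // reallocates: the existing element is moved, printing "move"
      return 0;
    }

Removing the noexcept makes the same program print "copy" instead, which is exactly what the added annotations avoid.
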
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index d1c230f..6a0f075 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -95,7 +95,7 @@
     tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
   }
 
-  static size_t TaggedTopQuickFrameOffset() {
+  static constexpr size_t TaggedTopQuickFrameOffset() {
     return OFFSETOF_MEMBER(ManagedStack, tagged_top_quick_frame_);
   }
 
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index d489f14..9660bf0 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,7 +36,8 @@
   }
 
  private:
-  uint8_t flag_;
+  // This field is only accessed indirectly, via the FlagOffset() method.
+  uint8_t flag_ ATTRIBUTE_UNUSED;
   // Padding required for correct alignment of subclasses like Executable, Field, etc.
   uint8_t padding_[1] ATTRIBUTE_UNUSED;
 
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 2e39530..704fb11 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -50,14 +50,6 @@
   return header_size + data_size;
 }
 
-inline MemberOffset Array::DataOffset(size_t component_size) {
-  DCHECK(IsPowerOfTwo(component_size)) << component_size;
-  size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
-  DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
-      << "Array data offset isn't aligned with component size";
-  return MemberOffset(data_offset);
-}
-
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Array::CheckIsValidIndex(int32_t index) {
   if (UNLIKELY(static_cast<uint32_t>(index) >=
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index a31a9144..7edc851 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_ARRAY_H_
 #define ART_RUNTIME_MIRROR_ARRAY_H_
 
+#include "base/bit_utils.h"
 #include "base/enums.h"
 #include "gc/allocator_type.h"
 #include "obj_ptr.h"
@@ -66,11 +67,17 @@
     SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length);
   }
 
-  static MemberOffset LengthOffset() {
+  static constexpr MemberOffset LengthOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Array, length_);
   }
 
-  static MemberOffset DataOffset(size_t component_size);
+  static constexpr MemberOffset DataOffset(size_t component_size) {
+    DCHECK(IsPowerOfTwo(component_size)) << component_size;
+    size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
+    DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
+        << "Array data offset isn't aligned with component size";
+    return MemberOffset(data_offset);
+  }
 
   void* GetRawData(size_t component_size, int32_t index)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -102,9 +109,11 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The number of array elements.
-  int32_t length_;
+  // This field is only accessed indirectly, via the LengthOffset() method.
+  int32_t length_ ATTRIBUTE_UNUSED;
   // Marker for the data (used by generated code)
-  uint32_t first_element_[0];
+  // This field is only accessed indirectly, via the DataOffset() method.
+  uint32_t first_element_[0] ATTRIBUTE_UNUSED;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
 };
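
DataOffset() above rounds the offset of first_element_ up to the component size so that every element is naturally aligned; element i then lives at the data offset plus i times the component size. A tiny worked example of that arithmetic (generic layout, not mirror::Array's real header):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t RoundUpTo(size_t value, size_t alignment) {  // alignment must be a power of two
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // Header of a hypothetical array object: a length word, then the elements.
    constexpr size_t kHeaderEnd = sizeof(int32_t);  // offset just past the length field

    constexpr size_t DataOffset(size_t component_size) {
      return RoundUpTo(kHeaderEnd, component_size);
    }

    int main() {
      static_assert(DataOffset(sizeof(int64_t)) == 8u, "8-byte elements start on an 8-byte boundary");
      // Address of element i within the object: data offset + i * component size.
      assert(DataOffset(sizeof(int16_t)) + 3 * sizeof(int16_t) == 10u);
      return 0;
    }
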
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index f640d3b..eddc84b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -89,7 +89,7 @@
   static void SetStatus(Handle<Class> h_this, ClassStatus new_status, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  static MemberOffset StatusOffset() {
+  static constexpr MemberOffset StatusOffset() {
     return MemberOffset(OFFSET_OF_OBJECT_MEMBER(Class, status_));
   }
 
@@ -173,7 +173,7 @@
     return GetField32<kVerifyFlags>(AccessFlagsOffset());
   }
 
-  static MemberOffset AccessFlagsOffset() {
+  static constexpr MemberOffset AccessFlagsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
   }
 
@@ -352,7 +352,7 @@
     return (access_flags & kAccClassIsProxy) != 0;
   }
 
-  static MemberOffset PrimitiveTypeOffset() {
+  static constexpr MemberOffset PrimitiveTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_);
   }
 
@@ -440,7 +440,7 @@
 
   bool IsThrowableClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset ComponentTypeOffset() {
+  static constexpr MemberOffset ComponentTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
   }
 
@@ -549,10 +549,10 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   uint32_t GetObjectSize() REQUIRES_SHARED(Locks::mutator_lock_);
-  static MemberOffset ObjectSizeOffset() {
+  static constexpr MemberOffset ObjectSizeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
   }
-  static MemberOffset ObjectSizeAllocFastPathOffset() {
+  static constexpr MemberOffset ObjectSizeAllocFastPathOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
   }
 
@@ -636,7 +636,7 @@
     return GetSuperClass() != nullptr;
   }
 
-  static MemberOffset SuperClassOffset() {
+  static constexpr MemberOffset SuperClassOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
   }
 
@@ -646,11 +646,11 @@
 
   void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset DexCacheOffset() {
+  static constexpr MemberOffset DexCacheOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
   }
 
-  static MemberOffset IfTableOffset() {
+  static constexpr MemberOffset IfTableOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, iftable_));
   }
 
@@ -675,7 +675,7 @@
   ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset MethodsOffset() {
+  static constexpr MemberOffset MethodsOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
   }
 
@@ -784,15 +784,15 @@
 
   void SetVTable(ObjPtr<PointerArray> new_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static MemberOffset VTableOffset() {
+  static constexpr MemberOffset VTableOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
   }
 
-  static MemberOffset EmbeddedVTableLengthOffset() {
+  static constexpr MemberOffset EmbeddedVTableLengthOffset() {
     return MemberOffset(sizeof(Class));
   }
 
-  static MemberOffset ImtPtrOffset(PointerSize pointer_size) {
+  static constexpr MemberOffset ImtPtrOffset(PointerSize pointer_size) {
     return MemberOffset(
         RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t),
                 static_cast<size_t>(pointer_size)));
@@ -1427,6 +1427,7 @@
 
   // Tid used to check for recursive <clinit> invocation.
   pid_t clinit_thread_id_;
+  static_assert(sizeof(pid_t) == sizeof(int32_t), "java.lang.Class.clinitThreadId size check");
 
   // ClassDef index in dex file, -1 if no class definition such as an array.
   // TODO: really 16bits
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 22ccd20..da1cd3f 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -226,51 +226,51 @@
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
   }
 
-  static MemberOffset StringsOffset() {
+  static constexpr MemberOffset StringsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
   }
 
-  static MemberOffset ResolvedTypesOffset() {
+  static constexpr MemberOffset ResolvedTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
   }
 
-  static MemberOffset ResolvedFieldsOffset() {
+  static constexpr MemberOffset ResolvedFieldsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
   }
 
-  static MemberOffset ResolvedMethodsOffset() {
+  static constexpr MemberOffset ResolvedMethodsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
   }
 
-  static MemberOffset ResolvedMethodTypesOffset() {
+  static constexpr MemberOffset ResolvedMethodTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
   }
 
-  static MemberOffset ResolvedCallSitesOffset() {
+  static constexpr MemberOffset ResolvedCallSitesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
   }
 
-  static MemberOffset NumStringsOffset() {
+  static constexpr MemberOffset NumStringsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
   }
 
-  static MemberOffset NumResolvedTypesOffset() {
+  static constexpr MemberOffset NumResolvedTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
   }
 
-  static MemberOffset NumResolvedFieldsOffset() {
+  static constexpr MemberOffset NumResolvedFieldsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
   }
 
-  static MemberOffset NumResolvedMethodsOffset() {
+  static constexpr MemberOffset NumResolvedMethodsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
   }
 
-  static MemberOffset NumResolvedMethodTypesOffset() {
+  static constexpr MemberOffset NumResolvedMethodTypesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
   }
 
-  static MemberOffset NumResolvedCallSitesOffset() {
+  static constexpr MemberOffset NumResolvedCallSitesOffset() {
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
   }
 
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 8fe9923..11e8cca 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -86,7 +86,7 @@
     return sizeof(Object);
   }
 
-  static MemberOffset ClassOffset() {
+  static constexpr MemberOffset ClassOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
   }
 
@@ -138,7 +138,7 @@
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
-  static MemberOffset MonitorOffset() {
+  static constexpr MemberOffset MonitorOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
   }
 
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index d08717c..b32db08 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -50,11 +50,11 @@
     return sizeof(String);
   }
 
-  static MemberOffset CountOffset() {
+  static constexpr MemberOffset CountOffset() {
     return OFFSET_OF_OBJECT_MEMBER(String, count_);
   }
 
-  static MemberOffset ValueOffset() {
+  static constexpr MemberOffset ValueOffset() {
     return OFFSET_OF_OBJECT_MEMBER(String, value_);
   }
 
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 4df9b27..372b821 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -27,14 +27,14 @@
 // Allow the meaning of offsets to be strongly typed.
 class Offset {
  public:
-  explicit Offset(size_t val) : val_(val) {}
-  int32_t Int32Value() const {
+  constexpr explicit Offset(size_t val) : val_(val) {}
+  constexpr int32_t Int32Value() const {
     return static_cast<int32_t>(val_);
   }
-  uint32_t Uint32Value() const {
+  constexpr uint32_t Uint32Value() const {
     return static_cast<uint32_t>(val_);
   }
-  size_t SizeValue() const {
+  constexpr size_t SizeValue() const {
     return val_;
   }
 
@@ -46,7 +46,7 @@
 // Offsets relative to the current frame.
 class FrameOffset : public Offset {
  public:
-  explicit FrameOffset(size_t val) : Offset(val) {}
+  constexpr explicit FrameOffset(size_t val) : Offset(val) {}
   bool operator>(FrameOffset other) const { return val_ > other.val_; }
   bool operator<(FrameOffset other) const { return val_ < other.val_; }
 };
@@ -55,7 +55,7 @@
 template<PointerSize pointer_size>
 class ThreadOffset : public Offset {
  public:
-  explicit ThreadOffset(size_t val) : Offset(val) {}
+  constexpr explicit ThreadOffset(size_t val) : Offset(val) {}
 };
 
 using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
@@ -64,7 +64,7 @@
 // Offsets relative to an object.
 class MemberOffset : public Offset {
  public:
-  explicit MemberOffset(size_t val) : Offset(val) {}
+  constexpr explicit MemberOffset(size_t val) : Offset(val) {}
 };
 
 }  // namespace art
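Note on the offsets.h hunk above: making the Offset constructors and accessors constexpr is what lets the various *Offset() getters elsewhere in this change fold to compile-time constants. A minimal standalone sketch of the effect, assuming a hypothetical Example struct and kMonitorOffset constant (illustration only, not part of this patch):

    #include <cstddef>
    #include <cstdint>

    // Stand-ins mirroring the constexpr pattern from this hunk.
    class Offset {
     public:
      constexpr explicit Offset(size_t val) : val_(val) {}
      constexpr uint32_t Uint32Value() const { return static_cast<uint32_t>(val_); }
     protected:
      size_t val_;
    };

    class MemberOffset : public Offset {
     public:
      constexpr explicit MemberOffset(size_t val) : Offset(val) {}
    };

    struct Example {
      uint32_t klass;    // analogous to an object header field
      uint32_t monitor;  // analogous to the field that follows it
    };

    // With constexpr construction the offset is usable in constant expressions,
    // for example in static_asserts or as an immediate in generated code.
    static constexpr MemberOffset kMonitorOffset(offsetof(Example, monitor));
    static_assert(kMonitorOffset.Uint32Value() == 4, "monitor follows klass");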
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4d16eb5..2e495cc 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -751,7 +751,7 @@
   UsageMessage(stream, "  -Xcompiler:filename\n");
   UsageMessage(stream, "  -Xcompiler-option dex2oat-option\n");
   UsageMessage(stream, "  -Ximage-compiler-option dex2oat-option\n");
-  UsageMessage(stream, "  -Xpatchoat:filename\n");
+  UsageMessage(stream, "  -Xpatchoat:filename (obsolete, ignored)\n");
   UsageMessage(stream, "  -Xusejit:booleanvalue\n");
   UsageMessage(stream, "  -Xjitinitialsize:N\n");
   UsageMessage(stream, "  -Xjitmaxsize:N\n");
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index d62cbdb..45f5633 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -277,7 +277,7 @@
     size_t identical;
 
     SummaryElement() : equiv(0), identical(0) {}
-    SummaryElement(SummaryElement&& ref) {
+    SummaryElement(SummaryElement&& ref) noexcept {
       root = ref.root;
       equiv = ref.equiv;
       identical = ref.identical;
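The noexcept annotations added in this patch (here and on the Agent move operations below) matter for standard containers: std::vector only relocates elements by move during reallocation when the element's move constructor is noexcept, and otherwise falls back to copying where possible to keep the strong exception guarantee. A small self-contained illustration using a hypothetical Widget type rather than the classes touched here:

    #include <type_traits>
    #include <vector>

    // Hypothetical type following the same noexcept-move pattern as this patch.
    struct Widget {
      Widget() = default;
      Widget(const Widget&) = default;
      Widget& operator=(const Widget&) = default;
      Widget(Widget&&) noexcept = default;             // noexcept move constructor
      Widget& operator=(Widget&&) noexcept = default;  // noexcept move assignment
      void* handle = nullptr;
    };

    static_assert(std::is_nothrow_move_constructible<Widget>::value,
                  "std::vector<Widget> can relocate elements by move instead of copy");

    int main() {
      std::vector<Widget> widgets(4);
      widgets.emplace_back();  // any reallocation here moves the existing elements
      return 0;
    }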
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a51d457..a48f1fe 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -695,15 +695,6 @@
   return env->NewGlobalRef(system_class_loader.get());
 }
 
-std::string Runtime::GetPatchoatExecutable() const {
-  if (!patchoat_executable_.empty()) {
-    return patchoat_executable_;
-  }
-  std::string patchoat_executable(GetAndroidRoot());
-  patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
-  return patchoat_executable;
-}
-
 std::string Runtime::GetCompilerExecutable() const {
   if (!compiler_executable_.empty()) {
     return compiler_executable_;
@@ -1190,7 +1181,6 @@
   properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
 
   compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
-  patchoat_executable_ = runtime_options.ReleaseOrDefault(Opt::PatchOat);
   must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
   is_zygote_ = runtime_options.Exists(Opt::Zygote);
   is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
@@ -1458,7 +1448,7 @@
   CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
   CHECK(self != nullptr);
 
-  self->SetCanCallIntoJava(!IsAotCompiler());
+  self->SetIsRuntimeThread(IsAotCompiler());
 
   // Set us to runnable so tools using a runtime can allocate and GC by default
   self->TransitionFromSuspendedToRunnable();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f0bf754..478ff50 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -164,7 +164,6 @@
   }
 
   std::string GetCompilerExecutable() const;
-  std::string GetPatchoatExecutable() const;
 
   const std::vector<std::string>& GetCompilerOptions() const {
     return compiler_options_;
@@ -400,7 +399,7 @@
   QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
+  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
     return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
   }
 
@@ -845,7 +844,6 @@
   bool image_dex2oat_enabled_;
 
   std::string compiler_executable_;
-  std::string patchoat_executable_;
   std::vector<std::string> compiler_options_;
   std::vector<std::string> image_compiler_options_;
   std::string image_location_;
@@ -963,7 +961,7 @@
   bool implicit_suspend_checks_;    // Thread suspension checks are implicit.
 
   // Whether or not the sig chain (and implicitly the fault handler) should be
-  // disabled. Tools like dex2oat or patchoat don't need them. This enables
+  // disabled. Tools like dex2oat don't need them. This enables
   // building a statically link version of dex2oat.
   bool no_sig_chain_;
 
diff --git a/runtime/suspend_reason.h b/runtime/suspend_reason.h
index 4e75a4f..289a1a4 100644
--- a/runtime/suspend_reason.h
+++ b/runtime/suspend_reason.h
@@ -22,8 +22,6 @@
 namespace art {
 
 // The various reasons that we might be suspending a thread.
-// TODO Once kForDebugger is removed by removing the old debugger we should make the kForUserCode
-//      just a basic count for bookkeeping instead of linking it as directly with internal suspends.
 enum class SuspendReason {
   // Suspending for internal reasons (e.g. GC, stack trace, etc.).
   // TODO Split this into more descriptive sections.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b6f0965..4a3d8cb 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1236,34 +1236,6 @@
   LOG(FATAL) << ss.str();
 }
 
-void Thread::SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code) {
-  CHECK_EQ(this, Thread::Current()) << "This function may only be called on the current thread. "
-                                    << *Thread::Current() << " tried to modify the suspendability "
-                                    << "of " << *this;
-  // NB This checks the new value! This ensures that we can only set can_be_suspended_by_user_code
-  // to false if !CanCallIntoJava().
-  DCHECK(!CanCallIntoJava() || can_be_suspended_by_user_code)
-      << "Threads able to call into java may not be marked as unsuspendable!";
-  if (can_be_suspended_by_user_code == CanBeSuspendedByUserCode()) {
-    // Don't need to do anything if nothing is changing.
-    return;
-  }
-  art::MutexLock mu(this, *Locks::user_code_suspension_lock_);
-  art::MutexLock thread_list_mu(this, *Locks::thread_suspend_count_lock_);
-
-  // We want to add the user-code suspend count if we are newly allowing user-code suspends and
-  // remove them if we are disabling them.
-  int adj = can_be_suspended_by_user_code ? GetUserCodeSuspendCount() : -GetUserCodeSuspendCount();
-  // Adjust the global suspend count appropriately. Use kInternal to not change the ForUserCode
-  // count.
-  if (adj != 0) {
-    bool suspend = ModifySuspendCountInternal(this, adj, nullptr, SuspendReason::kInternal);
-    CHECK(suspend) << this << " was unable to modify it's own suspend count!";
-  }
-  // Mark thread as accepting user-code suspensions.
-  can_be_suspended_by_user_code_ = can_be_suspended_by_user_code;
-}
-
 bool Thread::ModifySuspendCountInternal(Thread* self,
                                         int delta,
                                         AtomicInteger* suspend_barrier,
@@ -1285,17 +1257,6 @@
       LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
       return false;
     }
-    DCHECK(this == self || this->IsSuspended())
-        << "Only self kForUserCode suspension on an unsuspended thread is allowed: " << this;
-    if (UNLIKELY(!CanBeSuspendedByUserCode())) {
-      VLOG(threads) << this << " is being requested to suspend for user code but that is disabled "
-                    << "the thread will not actually go to sleep.";
-      // Having the user_code_suspend_count still be around is useful but we don't need to actually
-      // do anything since we aren't going to 'really' suspend. Just adjust the
-      // user_code_suspend_count and return.
-      tls32_.user_code_suspend_count += delta;
-      return true;
-    }
   }
   if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
     UnsafeLogFatalForSuspendCount(self, this);
@@ -2156,8 +2117,7 @@
 Thread::Thread(bool daemon)
     : tls32_(daemon),
       wait_monitor_(nullptr),
-      can_call_into_java_(true),
-      can_be_suspended_by_user_code_(true) {
+      is_runtime_thread_(false) {
   wait_mutex_ = new Mutex("a thread wait mutex");
   wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
   tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
@@ -2181,6 +2141,10 @@
   tls32_.is_transitioning_to_runnable = false;
 }
 
+bool Thread::CanLoadClasses() const {
+  return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable();
+}
+
 bool Thread::IsStillStarting() const {
   // You might think you can check whether the state is kStarting, but for much of thread startup,
   // the thread is in kNative; it might also be in kVmWait.
diff --git a/runtime/thread.h b/runtime/thread.h
index 3c85b80..3d13774 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -651,28 +651,28 @@
   //
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThinLockIdOffset() {
+  static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> InterruptedOffset() {
+  static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> IsGcMarkingOffset() {
+  static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
@@ -687,21 +687,12 @@
 
  private:
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
+  static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
-    size_t scale;
-    size_t shrink;
-    if (pointer_size == kRuntimePointerSize) {
-      scale = 1;
-      shrink = 1;
-    } else if (pointer_size > kRuntimePointerSize) {
-      scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
-      shrink = 1;
-    } else {
-      DCHECK_GT(kRuntimePointerSize, pointer_size);
-      scale = 1;
-      shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
-    }
+    size_t scale = (pointer_size > kRuntimePointerSize) ?
+      static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
+    size_t shrink = (kRuntimePointerSize > pointer_size) ?
+      static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
   }
 
@@ -741,82 +732,82 @@
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> SelfOffset() {
+  static constexpr ThreadOffset<pointer_size> SelfOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
+  static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
+  static constexpr ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
+  static constexpr ThreadOffset<pointer_size> MterpAltIBaseOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ExceptionOffset() {
+  static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> PeerOffset() {
+  static constexpr ThreadOffset<pointer_size> PeerOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
   }
 
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> CardTableOffset() {
+  static constexpr ThreadOffset<pointer_size> CardTableOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_pos));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_end));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_objects));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> RosAllocRunsOffset() {
+  static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 rosalloc_runs));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_alloc_stack_top));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
+  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 thread_local_alloc_stack_end));
   }
@@ -859,19 +850,19 @@
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> StackEndOffset() {
+  static constexpr ThreadOffset<pointer_size> StackEndOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> JniEnvOffset() {
+  static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
+  static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
         ManagedStack::TaggedTopQuickFrameOffset());
@@ -893,7 +884,7 @@
   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
+  static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
         ManagedStack::TopShadowFrameOffset());
@@ -922,7 +913,7 @@
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
+  static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                 top_handle_scope));
   }
@@ -990,26 +981,18 @@
     --tls32_.disable_thread_flip_count;
   }
 
-  // Returns true if the thread is subject to user_code_suspensions.
-  bool CanBeSuspendedByUserCode() const {
-    return can_be_suspended_by_user_code_;
-  }
-
-  // Sets CanBeSuspenededByUserCode and adjusts the suspend-count as needed. This may only be called
-  // when running on the current thread. It is **absolutely required** that this be called only on
-  // the Thread::Current() thread.
-  void SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code)
-      REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::user_code_suspension_lock_);
-
   // Returns true if the thread is allowed to call into java.
-  bool CanCallIntoJava() const {
-    return can_call_into_java_;
+  bool IsRuntimeThread() const {
+    return is_runtime_thread_;
   }
 
-  void SetCanCallIntoJava(bool can_call_into_java) {
-    can_call_into_java_ = can_call_into_java;
+  void SetIsRuntimeThread(bool is_runtime_thread) {
+    is_runtime_thread_ = is_runtime_thread;
   }
 
+  // Returns true if the thread is allowed to load java classes.
+  bool CanLoadClasses() const;
+
   // Activates single step control for debugging. The thread takes the
   // ownership of the given SingleStepControl*. It is deleted by a call
   // to DeactivateSingleStepControl or upon thread destruction.
@@ -1315,11 +1298,11 @@
   static void ClearAllInterpreterCaches();
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> InterpreterCacheOffset() {
+  static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
   }
 
-  static int InterpreterCacheSizeLog2() {
+  static constexpr int InterpreterCacheSizeLog2() {
     return WhichPowerOf2(InterpreterCache::kSize);
   }
 
@@ -1587,9 +1570,8 @@
     // critical section enter.
     uint32_t disable_thread_flip_count;
 
-    // If CanBeSuspendedByUserCode, how much of 'suspend_count_' is by request of user code, used to
-    // distinguish threads suspended by the runtime from those suspended by user code. Otherwise
-    // this is just a count of how many user-code suspends have been attempted (but were ignored).
+    // How much of 'suspend_count_' is by request of user code, used to distinguish threads
+    // suspended by the runtime from those suspended by user code.
     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
     // told that AssertHeld should be good enough.
     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
@@ -1783,6 +1765,14 @@
     mirror::Throwable* async_exception;
   } tlsPtr_;
 
+  // Small thread-local cache to be used from the interpreter.
+  // It is keyed by dex instruction pointer.
+  // The value is opcode-depended (e.g. field offset).
+  InterpreterCache interpreter_cache_;
+
+  // All fields below this line should not be accessed by native code. This means these fields can
+  // be modified, rearranged, added or removed without having to modify asm_support.h
+
   // Guards the 'wait_monitor_' members.
   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
@@ -1804,18 +1794,8 @@
   // compiled code or entrypoints.
   SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);
 
-  // True if the thread is allowed to call back into java (for e.g. during class resolution).
-  // By default this is true.
-  bool can_call_into_java_;
-
-  // True if the thread is subject to user-code suspension. By default this is true. This can only
-  // be false for threads where '!can_call_into_java_'.
-  bool can_be_suspended_by_user_code_;
-
-  // Small thread-local cache to be used from the interpreter.
-  // It is keyed by dex instruction pointer.
-  // The value is opcode-depended (e.g. field offset).
-  InterpreterCache interpreter_cache_;
+  // True if the thread is some form of runtime thread (e.g. GC or JIT).
+  bool is_runtime_thread_;
 
   friend class Dbg;  // For SetStateUnsafe.
   friend class gc::collector::SemiSpace;  // For getting stack traces.
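On the ThreadOffsetFromTlsPtr change above: the pair of ternaries computes the same scale/shrink factors as the old three-way branch, just in a form that fits a constexpr function. A quick standalone equivalence check, using plain size_t values in place of PointerSize (hypothetical helper, not the real one):

    #include <cstddef>

    // Mirrors the scaling arithmetic: offsets recorded for the runtime's pointer size
    // are scaled up for a wider target pointer size and scaled down for a narrower one.
    constexpr size_t ScaledTlsOffset(size_t base, size_t tls_ptr_offset,
                                     size_t pointer_size, size_t runtime_pointer_size) {
      size_t scale = (pointer_size > runtime_pointer_size)
                         ? pointer_size / runtime_pointer_size : 1;
      size_t shrink = (runtime_pointer_size > pointer_size)
                          ? runtime_pointer_size / pointer_size : 1;
      return base + ((tls_ptr_offset * scale) / shrink);
    }

    static_assert(ScaledTlsOffset(128, 24, /*pointer_size=*/8, /*runtime=*/8) == 152, "same size");
    static_assert(ScaledTlsOffset(128, 24, /*pointer_size=*/8, /*runtime=*/4) == 176, "scaled up");
    static_assert(ScaledTlsOffset(128, 24, /*pointer_size=*/4, /*runtime=*/8) == 140, "scaled down");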
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index cddc275..ec40716 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -902,8 +902,6 @@
                                         bool request_suspension,
                                         SuspendReason reason,
                                         bool* timed_out) {
-  CHECK_NE(reason, SuspendReason::kForUserCode) << "Cannot suspend for user-code by peer. Must be "
-                                                << "done directly on the thread.";
   const uint64_t start_time = NanoTime();
   useconds_t sleep_us = kThreadSuspendInitialSleepUs;
   *timed_out = false;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 28fc59c..f1c808b 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -102,15 +102,10 @@
                                      nullptr,
                                      worker->thread_pool_->create_peers_));
   worker->thread_ = Thread::Current();
-  // Thread pool workers cannot call into java.
-  worker->thread_->SetCanCallIntoJava(false);
-  // Thread pool workers should not be getting paused by user-code.
-  worker->thread_->SetCanBeSuspendedByUserCode(false);
+  // Mark thread pool workers as runtime-threads.
+  worker->thread_->SetIsRuntimeThread(true);
   // Do work until its time to shut down.
   worker->Run();
-  // Thread pool worker is finished. We want to allow suspension during shutdown.
-  worker->thread_->SetCanBeSuspendedByUserCode(true);
-  // Thread shuts down.
   runtime->DetachCurrentThread();
   return nullptr;
 }
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index fc51567..97c39bb 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -176,7 +176,7 @@
   }
 }
 
-Agent::Agent(Agent&& other)
+Agent::Agent(Agent&& other) noexcept
     : dlopen_handle_(nullptr),
       onload_(nullptr),
       onattach_(nullptr),
@@ -184,7 +184,7 @@
   *this = std::move(other);
 }
 
-Agent& Agent::operator=(Agent&& other) {
+Agent& Agent::operator=(Agent&& other) noexcept {
   if (this != &other) {
     if (dlopen_handle_ != nullptr) {
       Unload();
diff --git a/runtime/ti/agent.h b/runtime/ti/agent.h
index 24a6f1c..faf76a1 100644
--- a/runtime/ti/agent.h
+++ b/runtime/ti/agent.h
@@ -105,8 +105,8 @@
   // TODO We need to acquire some locks probably.
   void Unload();
 
-  Agent(Agent&& other);
-  Agent& operator=(Agent&& other);
+  Agent(Agent&& other) noexcept;
+  Agent& operator=(Agent&& other) noexcept;
 
   ~Agent();
 
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 0e8d318..4ee983d 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -888,15 +888,6 @@
   LOG(ERROR) << "Unexpected branch event in tracing" << ArtMethod::PrettyMethod(method);
 }
 
-void Trace::InvokeVirtualOrInterface(Thread*,
-                                     Handle<mirror::Object>,
-                                     ArtMethod* method,
-                                     uint32_t dex_pc,
-                                     ArtMethod*) {
-  LOG(ERROR) << "Unexpected invoke event in tracing" << ArtMethod::PrettyMethod(method)
-             << " " << dex_pc;
-}
-
 void Trace::WatchedFramePop(Thread* self ATTRIBUTE_UNUSED,
                             const ShadowFrame& frame ATTRIBUTE_UNUSED) {
   LOG(ERROR) << "Unexpected WatchedFramePop event in tracing";
diff --git a/runtime/trace.h b/runtime/trace.h
index 5d96493..926a34f 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -224,12 +224,6 @@
               uint32_t dex_pc,
               int32_t dex_pc_offset)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
-  void InvokeVirtualOrInterface(Thread* thread,
-                                Handle<mirror::Object> this_object,
-                                ArtMethod* caller,
-                                uint32_t dex_pc,
-                                ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
       REQUIRES_SHARED(Locks::mutator_lock_) override;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
deleted file mode 100644
index 7a24e31..0000000
--- a/test/117-nopatchoat/expected.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-JNI_OnLoad called
-Has oat is true, has executable oat is expected.
-This is a function call
diff --git a/test/117-nopatchoat/info.txt b/test/117-nopatchoat/info.txt
deleted file mode 100644
index aa9f57c..0000000
--- a/test/117-nopatchoat/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Test that disables patchoat'ing the application.
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
deleted file mode 100644
index c673dd7..0000000
--- a/test/117-nopatchoat/nopatchoat.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "class_linker.h"
-#include "dex/dex_file-inl.h"
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "mirror/class-inl.h"
-#include "oat_file.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-
-namespace art {
-
-class NoPatchoatTest {
- public:
-  static const OatDexFile* getOatDexFile(jclass cls) {
-    ScopedObjectAccess soa(Thread::Current());
-    ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
-    const DexFile& dex_file = klass->GetDexFile();
-    return dex_file.GetOatDexFile();
-  }
-
-  static bool isRelocationDeltaZero() {
-    std::vector<gc::space::ImageSpace*> spaces =
-        Runtime::Current()->GetHeap()->GetBootImageSpaces();
-    return !spaces.empty() && spaces[0]->GetImageHeader().GetPatchDelta() == 0;
-  }
-
-  static bool hasExecutableOat(jclass cls) {
-    const OatDexFile* oat_dex_file = getOatDexFile(cls);
-
-    return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
-  }
-};
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isRelocationDeltaZero(JNIEnv*, jclass) {
-  return NoPatchoatTest::isRelocationDeltaZero();
-}
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasExecutableOat(JNIEnv*, jclass cls) {
-  return NoPatchoatTest::hasExecutableOat(cls);
-}
-
-}  // namespace art
diff --git a/test/117-nopatchoat/run b/test/117-nopatchoat/run
deleted file mode 100755
index 4c33f7a..0000000
--- a/test/117-nopatchoat/run
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ensure flags includes prebuild and relocate. It doesn't make sense unless we
-# have a oat file we want to relocate.
-flags="$@"
-
-# This test is supposed to test with oat files. Make sure that the no-prebuild flag isn't set,
-# or complain.
-# Note: prebuild is the default.
-if [[ "${flags}" == *--no-prebuild* ]] ; then
-  echo "Test 117-nopatchoat is not intended to run in no-prebuild mode."
-  exit 1
-fi
-
-# This test is supposed to test relocation. Make sure that the no-relocate flag isn't set,
-# or complain.
-# Note: relocate is the default.
-if [[ "${flags}" == *--no-relocate* ]] ; then
-  echo "Test 117-nopatchoat is not intended to run in no-relocate mode."
-  exit 1
-fi
-
-${RUN} ${flags}
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
deleted file mode 100644
index dfb98b0..0000000
--- a/test/117-nopatchoat/src/Main.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
-  public static void main(String[] args) {
-    System.loadLibrary(args[0]);
-
-    boolean executable_correct = hasExecutableOat();
-
-    System.out.println(
-        "Has oat is " + hasOatFile() + ", has executable oat is " + (
-        executable_correct ? "expected" : "not expected") + ".");
-
-    System.out.println(functionCall());
-  }
-
-  public static String functionCall() {
-    String arr[] = {"This", "is", "a", "function", "call"};
-    String ret = "";
-    for (int i = 0; i < arr.length; i++) {
-      ret = ret + arr[i] + " ";
-    }
-    return ret.substring(0, ret.length() - 1);
-  }
-
-  private native static boolean hasOatFile();
-
-  private native static boolean hasExecutableOat();
-
-  private native static boolean isRelocationDeltaZero();
-}
diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java
index 97f0973..cc8a4c4 100644
--- a/test/1935-get-set-current-frame-jit/src/Main.java
+++ b/test/1935-get-set-current-frame-jit/src/Main.java
@@ -58,7 +58,8 @@
     }
     public void run() {
       int TARGET = 42;
-      if (hasJit() && expectOsr && !Main.isInterpreted()) {
+      boolean normalJit = hasJit() && getJitThreshold() != 0;  // Excluding JIT-at-first-use.
+      if (normalJit && expectOsr && !Main.isInterpreted()) {
           System.out.println("Unexpectedly in jit code prior to restarting the JIT!");
       }
       startJit();
@@ -72,10 +73,10 @@
       do {
         // Don't actually do anything here.
         inBusyLoop = true;
-      } while (hasJit() && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
+      } while (normalJit && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
       // We shouldn't be doing OSR since we are using JVMTI and the set prevents OSR.
       // Set local will also push us to interpreter but the get local may remain in compiled code.
-      if (hasJit()) {
+      if (normalJit) {
         boolean inOsr = Main.isInOsrCode("run");
         if (expectOsr && !inOsr) {
           throw new Error(
@@ -184,4 +185,5 @@
   public static native boolean stopJit();
   public static native boolean startJit();
   public static native boolean hasJit();
+  public static native int getJitThreshold();
 }
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 0fe39ee..4721eca 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -307,7 +307,6 @@
 .end method
 
 ## CHECK-START: java.lang.String TestCase.loopAndStringInitAndPhi(byte[], boolean) register (after)
-## CHECK:                        NewInstance
 ## CHECK-NOT:                    NewInstance
 ## CHECK-DAG:   <<Invoke1:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
 ## CHECK-DAG:   <<Invoke2:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
@@ -337,3 +336,140 @@
    return-object v0
 
 .end method
+
+.method public static loopAndTwoStringInitAndPhi([BZZ)Ljava/lang/String;
+   .registers 6
+
+   new-instance v0, Ljava/lang/String;
+   new-instance v2, Ljava/lang/String;
+
+   if-nez p2, :allocate_other
+
+   # Loop
+   :loop_header
+   if-eqz p1, :loop_exit
+   goto :loop_header
+
+   :loop_exit
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+
+   :allocate_other
+
+   # Loop
+   :loop_header2
+   if-eqz p1, :loop_exit2
+   goto :loop_header2
+
+   :loop_exit2
+   const-string v1, "UTF8"
+   invoke-direct {v2, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   move-object v0, v2
+
+   :exit
+   return-object v0
+
+.end method
+
+# Regression test for a new string flowing into a catch phi.
+.method public static stringAndCatch([BZ)Ljava/lang/Object;
+   .registers 4
+
+   const v0, 0x0
+
+   :try_start_a
+   new-instance v0, Ljava/lang/String;
+
+   # Loop
+   :loop_header
+   if-eqz p1, :loop_exit
+   goto :loop_header
+
+   :loop_exit
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+   :try_end_a
+   .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+   :catch_a
+   # Initially, we create a catch phi with the potentially uninitialized string, which used to
+   # trip the compiler. However, using that catch phi is an error caught by the verifier, so
+   # having the phi is benign.
+   const v0, 0x0
+
+   :exit
+   return-object v0
+
+.end method
+
+# Same test as above, but with a catch phi being used by the string constructor.
+.method public static stringAndCatch2([BZ)Ljava/lang/Object;
+   .registers 4
+
+   const v0, 0x0
+   new-instance v0, Ljava/lang/String;
+
+   :try_start_a
+   const-string v1, "UTF8"
+   :try_end_a
+   .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+   :catch_a
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   return-object v0
+
+.end method
+
+# Same test as above, but with a catch phi being used by the string constructor and
+# a null test.
+.method public static stringAndCatch3([BZ)Ljava/lang/Object;
+   .registers 4
+
+   const v0, 0x0
+   new-instance v0, Ljava/lang/String;
+
+   :try_start_a
+   const-string v1, "UTF8"
+   :try_end_a
+   .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+   :catch_a
+   if-eqz v0, :unexpected
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+   :unexpected
+   const-string v0, "UTF8"
+   :exit
+   return-object v0
+
+.end method
+
+# Regression test that tripped the compiler.
+.method public static stringAndPhi([BZ)Ljava/lang/Object;
+   .registers 4
+
+   new-instance v0, Ljava/lang/String;
+   const-string v1, "UTF8"
+
+   :loop_header
+   if-nez p1, :unused
+   if-eqz p1, :invoke
+   goto :loop_header
+
+   :invoke
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+
+   :unused
+   const-string v0, "UTF8"
+   if-nez p1, :exit
+   goto :unused
+
+   :exit
+   return-object v0
+
+.end method
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index df9e9dc..77a108f 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -133,6 +133,29 @@
       result = (String) m.invoke(null, new Object[] { testData, false });
       assertEqual(testString, result);
     }
+    {
+      Method m =
+          c.getMethod("loopAndTwoStringInitAndPhi", byte[].class, boolean.class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false, false });
+      assertEqual(testString, result);
+      result = (String) m.invoke(null, new Object[] { testData, false, true });
+      assertEqual(testString, result);
+    }
+    {
+      Method m = c.getMethod("stringAndCatch", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
+    {
+      Method m = c.getMethod("stringAndCatch2", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
+    {
+      Method m = c.getMethod("stringAndCatch3", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
   }
 
   public static boolean doThrow = false;
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 7c1507f..17ccd9a 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -41,11 +41,12 @@
       header = OatQuickMethodHeader::FromEntryPoint(pc);
       break;
     } else {
+      ScopedThreadSuspension sts(soa.Self(), kSuspended);
       // Sleep to yield to the compiler thread.
       usleep(1000);
-      // Will either ensure it's compiled or do the compilation itself.
-      jit->CompileMethod(method, soa.Self(), /* osr */ false);
     }
+    // Will either ensure it's compiled or do the compilation itself.
+    jit->CompileMethod(method, soa.Self(), /* osr */ false);
   }
 
   CodeInfo info(header);
diff --git a/test/684-select-condition/expected.txt b/test/684-select-condition/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/684-select-condition/expected.txt
diff --git a/test/684-select-condition/info.txt b/test/684-select-condition/info.txt
new file mode 100644
index 0000000..f9d4acd
--- /dev/null
+++ b/test/684-select-condition/info.txt
@@ -0,0 +1 @@
+Regression test for a bug in ARM's code generator for HSelect.
diff --git a/test/684-select-condition/src/Main.java b/test/684-select-condition/src/Main.java
new file mode 100644
index 0000000..196ff1a
--- /dev/null
+++ b/test/684-select-condition/src/Main.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void main(String args[]) {
+    doFloatingPointTest("1", "1.0");
+    doFloatingPointTest("4", "2.0");
+    checkValue(String.valueOf(doIntegerTest1(4)), "0");
+    checkValue(String.valueOf(doIntegerTest2(4)), "4");
+
+    // Another variant of the floating point test, but less brittle.
+    staticField = 1;
+    checkValue(String.valueOf($noinline$test()), "1.0");
+    staticField = 4;
+    checkValue(String.valueOf($noinline$test()), "2.0");
+  }
+
+  // This code is a reduced version of the original reproducer. The arm
+  // code generator used to generate wrong code for it. Note that this
+  // test is very brittle and a simple change in it could cause the compiler
+  // to not trip.
+  public static void doFloatingPointTest(String s, String expected) {
+    float a = (float)Integer.valueOf(s);
+    a = a < 2.0f ? a : 2.0f;
+    checkValue("" + a, expected);
+  }
+
+  // The compiler used to trip on the two following methods. The test there
+  // is very brittle and requires not running constant folding after
+  // load/store elimination.
+  public static int doIntegerTest1(int param) {
+    Main main = new Main();
+    main.field = 0;
+    return (main.field == 0) ? 0 : param;
+  }
+
+  public static int doIntegerTest2(int param) {
+    Main main = new Main();
+    main.field = 0;
+    return (main.field != 0) ? 0 : param;
+  }
+
+  public static void checkValue(String actual, String expected) {
+    if (!expected.equals(actual)) {
+      throw new Error("Expected " + expected + ", got " + actual);
+    }
+  }
+
+  static void $noinline$nothing() {}
+  static int $noinline$getField() { return staticField; }
+
+  static float $noinline$test() {
+    // The 2.0f shall be materialized for GreaterThanOrEqual at the beginning of the method;
+    // since the following call clobbers caller-saves, it is allocated to s16.
+    // r0(field) = InvokeStaticOrDirect[]
+    int one = $noinline$getField();
+    // s0(a_1) = TypeConversion[r0(one)]
+    float a = (float)one;
+    // s16(a_2) = Select[s0(a_1), C(2.0f), GreaterThanOrEqual[s0(a_1), s16(2.0f)]]
+    a = a < 2.0f ? a : 2.0f;
+    // The following call is added to clobber caller-saves, forcing the output of the Select
+    // to be allocated to s16.
+    $noinline$nothing();
+    return a;
+  }
+
+  int field;
+  static int staticField;
+}
diff --git a/test/Android.bp b/test/Android.bp
index 4b61463..8f23058 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -454,7 +454,6 @@
         "004-UnsafeTest/unsafe_test.cc",
         "044-proxy/native_proxy.cc",
         "051-thread/thread_test.cc",
-        "117-nopatchoat/nopatchoat.cc",
         "1337-gc-coverage/gc_coverage.cc",
         "136-daemon-jni-shutdown/daemon_jni_shutdown.cc",
         "137-cfi/cfi.cc",
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index c9b789e..4967834 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -313,4 +313,9 @@
   }
 }
 
+extern "C" JNIEXPORT jint JNICALL Java_Main_getJitThreshold(JNIEnv*, jclass) {
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  return (jit != nullptr) ? jit->HotMethodThreshold() : 0;
+}
+
 }  // namespace art
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index d5db76a..e9ed051 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -40,7 +40,6 @@
 TEST_DIRECTORY="nativetest"
 MAIN=""
 OPTIMIZE="y"
-PATCHOAT=""
 PREBUILD="y"
 QUIET="n"
 RELOCATE="n"
@@ -59,7 +58,6 @@
 VERIFY="y" # y=yes,n=no,s=softfail
 ZYGOTE=""
 DEX_VERIFY=""
-USE_PATCHOAT="y"
 INSTRUCTION_SET_FEATURES=""
 ARGS=""
 EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS.
@@ -166,10 +164,6 @@
         shift
         BOOT_IMAGE="$1"
         shift
-    elif [ "x$1" = "x--no-patchoat" ]; then
-        PATCHOAT="-Xpatchoat:${FALSE_BIN}"
-        USE_PATCHOAT="n"
-        shift
     elif [ "x$1" = "x--relocate" ]; then
         RELOCATE="y"
         shift
@@ -774,7 +768,6 @@
                   $FLAGS \
                   $DEX_VERIFY \
                   -XXlib:$LIB \
-                  $PATCHOAT \
                   $DEX2OAT \
                   $DALVIKVM_ISA_FEATURES_ARGS \
                   $ZYGOTE \
@@ -803,15 +796,11 @@
 fi
 RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}detect_leaks=0"
 
-# For running, we must turn off logging when dex2oat or patchoat are missing. Otherwise we use
+# For running, we must turn off logging when dex2oat is missing. Otherwise we use
 # the same defaults as for prebuilt: everything when --dev, otherwise errors and above only.
 if [ "$EXTERNAL_LOG_TAGS" = "n" ]; then
   if [ "$DEV_MODE" = "y" ]; then
       export ANDROID_LOG_TAGS='*:d'
-  elif [ "$USE_PATCHOAT" = "n" ]; then
-      # All tests would log the error of failing dex2oat/patchoat. Be silent here and only
-      # log fatal events.
-      export ANDROID_LOG_TAGS='*:s'
   elif [ "$HAVE_IMAGE" = "n" ]; then
       # All tests would log the error of missing image. Be silent here and only log fatal
       # events.
diff --git a/test/knownfailures.json b/test/knownfailures.json
index fc4b25f..6eb4707 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -70,22 +70,20 @@
                         "doesn't (and isn't meant to) work with --prebuild."]
     },
     {
-        "tests": ["117-nopatchoat",
-                  "147-stripped-dex-fallback",
+        "tests": ["147-stripped-dex-fallback",
                   "608-checker-unresolved-lse"],
         "variant": "no-prebuild"
     },
     {
-        "tests": ["117-nopatchoat",
-                  "118-noimage-dex2oat"],
+        "tests": ["118-noimage-dex2oat"],
         "variant": "no-relocate",
-        "description": ["117-nopatchoat is not broken per-se it just doesn't",
-                        "work (and isn't meant to) without --prebuild",
+        "description": ["118-noimage-dex2oat is not broken per se, it just ",
+                        "doesn't work (and isn't meant to) without --prebuild ",
                         "--relocate"]
     },
     {
         "tests" : "629-vdex-speed",
-        "variant": "interp-ac | interpreter | jit | relocate-npatchoat",
+        "variant": "interp-ac | interpreter | jit",
         "description": "629 requires compilation."
     },
     {
@@ -172,19 +170,18 @@
     },
     {
         "tests": "147-stripped-dex-fallback",
-        "variant": "no-image | relocate-npatchoat",
+        "variant": "no-image",
         "description": ["147-stripped-dex-fallback is disabled because it",
                         "requires --prebuild."]
     },
     {
         "tests": ["116-nodex2oat",
-                  "117-nopatchoat",
                   "118-noimage-dex2oat",
                   "137-cfi",
                   "138-duplicate-classes-check2"],
-        "variant": "no-image | relocate-npatchoat",
+        "variant": "no-image",
         "description": ["All these tests check that we have sane behavior if we",
-                        "don't have a patchoat or dex2oat. Therefore we",
+                        "don't have a dex2oat. Therefore we",
                         "shouldn't run them in situations where we actually",
                         "don't have these since they explicitly test for them.",
                         "These all also assume we have an image."]
@@ -339,7 +336,6 @@
     {
         "tests": ["018-stack-overflow",
                   "116-nodex2oat",
-                  "117-nopatchoat",
                   "118-noimage-dex2oat",
                   "126-miranda-multidex",
                   "137-cfi"],
@@ -767,7 +763,6 @@
           "111-unresolvable-exception",
           "115-native-bridge",
           "116-nodex2oat",
-          "117-nopatchoat",
           "118-noimage-dex2oat",
           "127-checker-secondarydex",
           "129-ThreadGetId",
@@ -1045,7 +1040,7 @@
     },
     {
         "tests": "677-fsi",
-        "variant": "no-image | no-prebuild | relocate-npatchoat | jvm",
+        "variant": "no-image | no-prebuild | jvm",
         "description": ["Test requires a successful dex2oat invocation"]
     },
     {
diff --git a/test/run-test b/test/run-test
index ef17302..6a36055 100755
--- a/test/run-test
+++ b/test/run-test
@@ -76,9 +76,16 @@
     export ANDROID_BUILD_TOP=$oldwd
 fi
 
+# OUT_DIR defaults to out, and may be relative to $ANDROID_BUILD_TOP.
+# Convert it to an absolute path, since we cd into the tmp_dir to run the tests.
+export OUT_DIR=${OUT_DIR:-out}
+if [[ "$OUT_DIR" != /* ]]; then
+    export OUT_DIR=$ANDROID_BUILD_TOP/$OUT_DIR
+fi
+
 # ANDROID_HOST_OUT is not set in a build environment.
 if [ -z "$ANDROID_HOST_OUT" ]; then
-    export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/linux-x86
+    export ANDROID_HOST_OUT=${OUT_DIR}/host/linux-x86
 fi
 
 # Allow changing DESUGAR script to something else, or to disable it with DESUGAR=false.
@@ -148,7 +155,6 @@
 strace="false"
 always_clean="no"
 never_clean="no"
-have_patchoat="yes"
 have_image="yes"
 multi_image_suffix=""
 android_root="/system"
@@ -194,9 +200,6 @@
         lib="libdvm.so"
         runtime="dalvik"
         shift
-    elif [ "x$1" = "x--no-patchoat" ]; then
-        have_patchoat="no"
-        shift
     elif [ "x$1" = "x--no-image" ]; then
         have_image="no"
         shift
@@ -572,10 +575,6 @@
     fi
 fi
 
-if [ "$have_patchoat" = "no" ]; then
-  run_args="${run_args} --no-patchoat"
-fi
-
 if [ ! "$runtime" = "jvm" ]; then
   run_args="${run_args} --lib $lib"
 fi
@@ -631,11 +630,6 @@
     usage="yes"
 fi
 
-if [ "$bisection_search" = "yes" -a "$have_patchoat" = "no" ]; then
-    err_echo "--bisection-search and --no-patchoat are mutually exclusive"
-    usage="yes"
-fi
-
 # TODO: Chroot-based bisection search is not supported yet (see below); implement it.
 if [ "$bisection_search" = "yes" -a -n "$chroot" ]; then
   err_echo "--chroot with --bisection-search is unsupported"
@@ -704,7 +698,6 @@
              "If used, then the"
         echo "                          other runtime options are ignored."
         echo "    --no-dex2oat          Run as though dex2oat was failing."
-        echo "    --no-patchoat         Run as though patchoat was failing."
         echo "    --prebuild            Run dex2oat on the files before starting test. (default)"
         echo "    --no-prebuild         Do not run dex2oat on the files before starting"
         echo "                          the test."
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 10c8619..2fa7d2a 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -117,6 +117,7 @@
 gdb = False
 gdb_arg = ''
 runtime_option = ''
+with_agent = []
 run_test_option = []
 stop_testrunner = False
 dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
@@ -147,7 +148,7 @@
   VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
   VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
   VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
-  VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
+  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
   VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
   VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
   VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
@@ -333,6 +334,9 @@
   if runtime_option:
     for opt in runtime_option:
       options_all += ' --runtime-option ' + opt
+  if with_agent:
+    for opt in with_agent:
+      options_all += ' --with-agent ' + opt
 
   if dex2oat_jobs != -1:
     options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)
@@ -436,8 +440,6 @@
         options_test += ' --relocate'
       elif relocate == 'no-relocate':
         options_test += ' --no-relocate'
-      elif relocate == 'relocate-npatchoat':
-        options_test += ' --relocate --no-patchoat'
 
       if trace == 'trace':
         options_test += ' --trace'
@@ -911,6 +913,7 @@
   global timeout
   global dex2oat_jobs
   global run_all_configs
+  global with_agent
 
   parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
   parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
@@ -943,6 +946,8 @@
                             This should be enclosed in single-quotes to allow for spaces. The option
                             will be split using shlex.split() prior to invoking run-test.
                             Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\"""")
+  global_group.add_argument('--with-agent', action='append', dest='with_agent',
+                            help="""Pass an agent to be attached to the runtime""")
   global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                             help="""Pass an option to the runtime. Runtime options
                             starting with a '-' must be separated by a '=', for
@@ -991,6 +996,7 @@
     if options['gdb_arg']:
       gdb_arg = options['gdb_arg']
   runtime_option = options['runtime_option'];
+  with_agent = options['with_agent'];
   run_test_option = sum(map(shlex.split, options['run_test_option']), [])
 
   timeout = options['timeout']
diff --git a/tools/jit-load/Android.bp b/tools/jit-load/Android.bp
new file mode 100644
index 0000000..a57a408
--- /dev/null
+++ b/tools/jit-load/Android.bp
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+    name: "jitload-defaults",
+    host_supported: true,
+    srcs: [
+        "jitload.cc",
+    ],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it must be
+    // the same ISA as the runtime it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+    ],
+    target: {
+        android: {
+        },
+        host: {
+        },
+    },
+    header_libs: [
+        "libopenjdkjvmti_headers",
+    ],
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+    symlink_preferred_arch: true,
+}
+
+art_cc_library {
+    name: "libjitload",
+    defaults: ["jitload-defaults"],
+    shared_libs: [
+        "libart",
+        "libdexfile",
+        "libprofile",
+        "libartbase",
+    ],
+}
+
+art_cc_library {
+    name: "libjitloadd",
+    defaults: [
+        "art_debug_defaults",
+        "jitload-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libdexfiled",
+        "libprofiled",
+        "libartbased",
+    ],
+}
+
+//art_cc_test {
+//    name: "art_titrace_tests",
+//    defaults: [
+//        "art_gtest_defaults",
+//    ],
+//    srcs: ["jitload_test.cc"],
+//}
diff --git a/tools/jit-load/README.md b/tools/jit-load/README.md
new file mode 100644
index 0000000..8aa4513
--- /dev/null
+++ b/tools/jit-load/README.md
@@ -0,0 +1,35 @@
+# jitload
+
+Jitload is an ART-specific agent that counts the number of classes loaded on
+the JIT thread, or verifies that none were.
+
+# Usage
+### Build
+>    `make libjitload`  # or 'make libjitloadd' with debugging checks enabled
+
+The libraries will be built for 32-bit, 64-bit, host and target. The examples below assume you want to use the 64-bit version.
+### Command Line
+
+>    `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so -agentpath:$ANDROID_HOST_OUT/lib64/libjitload.so -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the jitload agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti and agent.
+* Pass the '=fatal' option to the agent to cause it to abort if any classes are
+  loaded on a jit thread. Otherwise a warning will be printed.
+
+>    `art -d -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmtid.so -agentpath:$ANDROID_HOST_OUT/lib64/libjitloadd.so=fatal -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* To use this agent with run-test or testrunner.py, pass the --with-agent argument, as in the examples below.
+
+>    `./test/run-test --host --with-agent libjitloadd.so=fatal 001-HelloWorld`
+
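+With testrunner.py, an invocation would look roughly like the following sketch
+(the `--host` flag and the test name are illustrative; the new `--with-agent`
+option is assumed to be forwarded to run-test):
+
+>    `./test/testrunner/testrunner.py --host -t 001-HelloWorld --with-agent libjitloadd.so=fatal`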
+
+### Printing the Results
+All statistics gathered during the run are printed automatically when the
+program exits normally. Android applications, however, are always killed, so
+the results need to be dumped manually.
+
+>    `kill -SIGQUIT $(pidof com.example.android.displayingbitmaps)`
+
+This will initiate a dump of the counts (to logcat).
+
diff --git a/tools/jit-load/jitload.cc b/tools/jit-load/jitload.cc
new file mode 100644
index 0000000..d67eef0
--- /dev/null
+++ b/tools/jit-load/jitload.cc
@@ -0,0 +1,144 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+#include <jni.h>
+#include <jvmti.h>
+
+#include "base/runtime_debug.h"
+#include "jit/jit.h"
+#include "runtime-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace jitload {
+
+// Special env version that allows JVMTI-like access on userdebug builds.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+#define CHECK_CALL_SUCCESS(c) \
+  do { \
+    auto vc = (c); \
+    CHECK(vc == JNI_OK || vc == JVMTI_ERROR_NONE) << "call " << #c  << " did not succeed\n"; \
+  } while (false)
+
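+// Returns a local reference to the peer of the single JIT worker thread, or
+// nullptr if the JIT (or its thread pool) has not been created yet.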
+static jthread GetJitThread() {
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  auto* jit = art::Runtime::Current()->GetJit();
+  if (jit == nullptr) {
+    return nullptr;
+  }
+  auto* thread_pool = jit->GetThreadPool();
+  if (thread_pool == nullptr) {
+    return nullptr;
+  }
+  // Currently we only have a single jit thread so we only look at that one.
+  return soa.AddLocalReference<jthread>(
+          thread_pool->GetWorkers()[0]->GetThread()->GetPeerFromOtherThread());
+}
+
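+// Enables CLASS_PREPARE events for the JIT thread only (if one exists), so
+// ClassPrepareJit fires solely for classes prepared on that thread. Called for
+// VM_INIT and directly from Agent_OnAttach.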
+JNICALL void VmInitCb(jvmtiEnv* jvmti,
+                      JNIEnv* env ATTRIBUTE_UNUSED,
+                      jthread curthread ATTRIBUTE_UNUSED) {
+  jthread jit_thread = GetJitThread();
+  if (jit_thread != nullptr) {
+    CHECK_EQ(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_PREPARE, jit_thread),
+             JVMTI_ERROR_NONE);
+  }
+}
+
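+// Per-environment state: whether a class load on the jit thread should abort
+// the runtime, and how many such loads have been observed.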
+struct AgentOptions {
+  bool fatal;
+  uint64_t cnt;
+};
+
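+// Logs the running count. Invoked for DATA_DUMP_REQUEST (e.g. SIGQUIT) and
+// again from VMDeathCb when the runtime shuts down.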
+JNICALL static void DataDumpRequestCb(jvmtiEnv* jvmti) {
+  AgentOptions* ops;
+  CHECK_CALL_SUCCESS(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&ops)));
+  LOG(WARNING) << "Jit thread has loaded " << ops->cnt << " classes";
+}
+
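+// CLASS_PREPARE callback. Because notifications were enabled only for the jit
+// thread (see VmInitCb), any invocation means a class was loaded there: log it
+// (or abort, if '=fatal' was passed) and bump the counter.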
+JNICALL void ClassPrepareJit(jvmtiEnv* jvmti,
+                             JNIEnv* jni_env ATTRIBUTE_UNUSED,
+                             jthread thr ATTRIBUTE_UNUSED,
+                             jclass klass) {
+  AgentOptions* ops;
+  CHECK_CALL_SUCCESS(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&ops)));
+  char* klass_name;
+  CHECK_CALL_SUCCESS(jvmti->GetClassSignature(klass, &klass_name, nullptr));
+  (ops->fatal ? LOG_STREAM(FATAL)
+              : LOG_STREAM(WARNING)) << "Loaded " << klass_name << " on jit thread!";
+  ops->cnt++;
+  CHECK_CALL_SUCCESS(jvmti->Deallocate(reinterpret_cast<unsigned char*>(klass_name)));
+}
+
+JNICALL void VMDeathCb(jvmtiEnv* jvmti, JNIEnv* env ATTRIBUTE_UNUSED) {
+  DataDumpRequestCb(jvmti);
+}
+
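+// Creates the jvmtiEnv (falling back to ART's extended kArtTiVersion when full
+// JVMTI is unavailable), allocates the AgentOptions kept in environment-local
+// storage, registers the callbacks and enables VM_INIT and DATA_DUMP_REQUEST.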
+static jvmtiEnv* SetupJvmti(JavaVM* vm, const char* options) {
+  android::base::InitLogging(/* argv */nullptr);
+
+  jvmtiEnv* jvmti = nullptr;
+  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_0) != JNI_OK &&
+      vm->GetEnv(reinterpret_cast<void**>(&jvmti), kArtTiVersion) != JNI_OK) {
+    LOG(FATAL) << "Unable to setup JVMTI environment!";
+  }
+  jvmtiEventCallbacks cb {
+        .VMInit = VmInitCb,
+        .ClassPrepare = ClassPrepareJit,
+        .DataDumpRequest = DataDumpRequestCb,
+        .VMDeath = VMDeathCb,
+  };
+  AgentOptions* ops;
+  CHECK_CALL_SUCCESS(
+      jvmti->Allocate(sizeof(AgentOptions), reinterpret_cast<unsigned char**>(&ops)));
+  ops->fatal = (strcmp(options, "fatal") == 0);
+  ops->cnt = 0;
+  CHECK_CALL_SUCCESS(jvmti->SetEnvironmentLocalStorage(ops));
+  CHECK_CALL_SUCCESS(jvmti->SetEventCallbacks(&cb, sizeof(cb)));
+  CHECK_CALL_SUCCESS(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_INIT, nullptr));
+  CHECK_CALL_SUCCESS(
+      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_DATA_DUMP_REQUEST, nullptr));
+  return jvmti;
+}
+
+// Early attachment (e.g. 'java -agent[lib|path]:filename.so').
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* /* reserved */) {
+  SetupJvmti(vm, options);
+  return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm, char* options, void* /* reserved */) {
+  jvmtiEnv* jvmti = SetupJvmti(vm, options);
+
+  JNIEnv* jni = nullptr;
+  jthread thr = nullptr;
+  CHECK_CALL_SUCCESS(vm->GetEnv(reinterpret_cast<void**>(&jni), JNI_VERSION_1_6));
+  CHECK_CALL_SUCCESS(jvmti->GetCurrentThread(&thr));
+
+  // Final setup is done in the VmInitCb.
+  VmInitCb(jvmti, jni, thr);
+
+  jni->DeleteLocalRef(thr);
+  return JNI_OK;
+}
+
+#undef CHECK_CALL_SUCCESS
+
+}  // namespace jitload
+
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 23533af..b8ad955 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -45,7 +45,8 @@
     "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringAbbrev",
     "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringCTS",
     "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringFrench",
-    "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringGerman"
+    "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringGerman",
+    "org.apache.harmony.tests.java.lang.ProcessManagerTest#testSleep"
   ]
 }
 ]
diff --git a/tools/titrace/titrace.cc b/tools/titrace/titrace.cc
index 981ad56..ca568d7 100644
--- a/tools/titrace/titrace.cc
+++ b/tools/titrace/titrace.cc
@@ -54,7 +54,7 @@
   }
 
   TiMemory(const TiMemory& other) = delete;
-  TiMemory(TiMemory&& other) {
+  TiMemory(TiMemory&& other) noexcept {
     env_ = other.env_;
     mem_ = other.mem_;
     size_ = other.size_;
@@ -66,7 +66,7 @@
     }
   }
 
-  TiMemory& operator=(TiMemory&& other) {
+  TiMemory& operator=(TiMemory&& other) noexcept {
     if (mem_ != other.mem_) {
       TiMemory::~TiMemory();
     }
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index 4ea5b2d..98f7ea5 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -111,13 +111,6 @@
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
-  void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
-                                art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
-                                art::ArtMethod* caller ATTRIBUTE_UNUSED,
-                                uint32_t dex_pc ATTRIBUTE_UNUSED,
-                                art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
-
   void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
                        const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index 96d4a09..92ace03 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -14,6 +14,7 @@
 
 cc_binary {
     name: "veridex",
+    defaults: ["art_defaults"],
     host_supported: true,
     srcs: [
         "flow_analysis.cc",
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index cb45c58..a8c53b3 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -232,7 +232,7 @@
     counts[ref_string]++;
   }
 
-  for (const std::pair<const std::string, const size_t>& pair : counts) {
+  for (const std::pair<const std::string, size_t>& pair : counts) {
     os << kPrefix << pair.first;
     if (pair.second > 1) {
        os << " (" << pair.second << " occurrences)";
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index 08ac6d7..9e02cbf 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -85,7 +85,7 @@
 void PreciseHiddenApiFinder::Dump(std::ostream& os, HiddenApiStats* stats) {
   static const char* kPrefix = "       ";
   std::map<std::string, std::vector<MethodReference>> named_uses;
-  for (auto it : concrete_uses_) {
+  for (auto& it : concrete_uses_) {
     MethodReference ref = it.first;
     for (const ReflectAccessInfo& info : it.second) {
       std::string cls(info.cls.ToString());
@@ -98,7 +98,7 @@
     }
   }
 
-  for (auto it : named_uses) {
+  for (auto& it : named_uses) {
     ++stats->reflection_count;
     const std::string& full_name = it.first;
     HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h
index 31ddbf4..e0d8261 100644
--- a/tools/veridex/veridex.h
+++ b/tools/veridex/veridex.h
@@ -44,7 +44,6 @@
  */
 class VeriClass {
  public:
-  VeriClass(const VeriClass& other) = default;
   VeriClass() = default;
   VeriClass(Primitive::Type k, uint8_t dims, const DexFile::ClassDef* cl)
       : kind_(k), dimensions_(dims), class_def_(cl) {}