Merge changes I8e26cf11,I9edbe1d5,Ia7129eca

* changes:
  Start netd before running tests on Buildbot devices.
  Ignore failures on O devices in JDWP tests.
  Ignore failures on O devices in some network-related libcore tests.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b342abe..b483e5f 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -300,11 +300,13 @@
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
   oatdumpd-host \
-  oatdumpds-host
+  oatdumpds-host \
+  dexdump2-host
 ART_GTEST_oatdump_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  oatdumpd-target
+  oatdumpd-target \
+  dexdump2-target
 ART_GTEST_oatdump_image_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS)
 ART_GTEST_oatdump_image_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS)
 ART_GTEST_oatdump_app_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS) \
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 4a6c914..be26e67 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -210,6 +210,12 @@
   static bool IsTypeConversionImplicit(Type input_type, Type result_type);
   static bool IsTypeConversionImplicit(int64_t value, Type result_type);
 
+  static bool IsZeroExtension(Type input_type, Type result_type) {
+    return IsIntOrLongType(result_type) &&
+        IsUnsignedType(input_type) &&
+        Size(result_type) > Size(input_type);
+  }
+
   static const char* PrettyDescriptor(Type type);
 
  private:
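
For illustration, a minimal Java-level sketch of the kind of value the new IsZeroExtension() predicate is meant to catch, mirroring the "abs(b & 0xff) for byte b" example quoted in the instruction simplifier change below (class and method names here are illustrative only):

import static java.lang.Math.abs;

class ZeroExtensionExample {
  // The mask zero-extends the byte into [0, 255], so the input to the later Abs
  // is an unsigned (zero-extended) value narrower than the int result, which is
  // exactly the case IsZeroExtension() reports as true.
  static int maskedAbs(byte b) {
    int masked = b & 0xff;  // always non-negative
    return abs(masked);     // redundant; see VisitAbs() below
  }
}
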
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 676fe6b..d3cf956 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -67,7 +67,6 @@
   bool TryCombineVecMultiplyAccumulate(HVecMul* mul);
 
   void VisitShift(HBinaryOperation* shift);
-
   void VisitEqual(HEqual* equal) OVERRIDE;
   void VisitNotEqual(HNotEqual* equal) OVERRIDE;
   void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE;
@@ -78,6 +77,7 @@
   void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
   void VisitArrayLength(HArrayLength* instruction) OVERRIDE;
   void VisitCheckCast(HCheckCast* instruction) OVERRIDE;
+  void VisitAbs(HAbs* instruction) OVERRIDE;
   void VisitAdd(HAdd* instruction) OVERRIDE;
   void VisitAnd(HAnd* instruction) OVERRIDE;
   void VisitCondition(HCondition* instruction) OVERRIDE;
@@ -903,6 +903,30 @@
           to_type == DataType::Type::kInt64);
 }
 
+// Returns an acceptable substitution for "a" on the select
+// construct "a <cmp> b ? c : .."  during MIN/MAX recognition.
+static HInstruction* AllowInMinMax(IfCondition cmp,
+                                   HInstruction* a,
+                                   HInstruction* b,
+                                   HInstruction* c) {
+  int64_t value = 0;
+  if (IsInt64AndGet(b, /*out*/ &value) &&
+      (((cmp == kCondLT || cmp == kCondLE) && c->IsMax()) ||
+       ((cmp == kCondGT || cmp == kCondGE) && c->IsMin()))) {
+    HConstant* other = c->AsBinaryOperation()->GetConstantRight();
+    if (other != nullptr && a == c->AsBinaryOperation()->GetLeastConstantLeft()) {
+      int64_t other_value = Int64FromConstant(other);
+      bool is_max = (cmp == kCondLT || cmp == kCondLE);
+      // Allow the max for a <  100 ? max(a, -100) : ..
+      //    or the min for a > -100 ? min(a,  100) : ..
+      if (is_max ? (value >= other_value) : (value <= other_value)) {
+        return c;
+      }
+    }
+  }
+  return nullptr;
+}
+
 void InstructionSimplifierVisitor::VisitSelect(HSelect* select) {
   HInstruction* replace_with = nullptr;
   HInstruction* condition = select->GetCondition();
@@ -946,9 +970,17 @@
     DataType::Type t_type = true_value->GetType();
     DataType::Type f_type = false_value->GetType();
     // Here we have a <cmp> b ? true_value : false_value.
-    // Test if both values are compatible integral types (resulting
-    // MIN/MAX/ABS type will be int or long, like the condition).
+    // Test if both values are compatible integral types (resulting MIN/MAX/ABS
+    // type will be int or long, like the condition). Replacements are general,
+    // but assume conditions prefer constants on the right.
     if (DataType::IsIntegralType(t_type) && DataType::Kind(t_type) == DataType::Kind(f_type)) {
+      // Allow a <  100 ? max(a, -100) : ..
+      //    or a > -100 ? min(a,  100) : ..
+      // to use min/max instead of a to detect nested min/max expressions.
+      HInstruction* new_a = AllowInMinMax(cmp, a, b, true_value);
+      if (new_a != nullptr) {
+        a = new_a;
+      }
       // Try to replace typical integral MIN/MAX/ABS constructs.
       if ((cmp == kCondLT || cmp == kCondLE || cmp == kCondGT || cmp == kCondGE) &&
           ((a == true_value && b == false_value) ||
@@ -957,19 +989,16 @@
         //    or a > b ? a : b (MAX) or a > b ? b : a (MIN).
         bool is_min = (cmp == kCondLT || cmp == kCondLE) == (a == true_value);
         replace_with = NewIntegralMinMax(GetGraph()->GetAllocator(), a, b, select, is_min);
-      } else if (true_value->IsNeg()) {
-        HInstruction* negated = true_value->InputAt(0);
-        if ((cmp == kCondLT || cmp == kCondLE) &&
-            (a == negated && a == false_value && IsInt64Value(b, 0))) {
-          // Found a < 0 ? -a : a which can be replaced by ABS(a).
-          replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), false_value, select);
-        }
-      } else if (false_value->IsNeg()) {
-        HInstruction* negated = false_value->InputAt(0);
-        if ((cmp == kCondGT || cmp == kCondGE) &&
-            (a == true_value && a == negated && IsInt64Value(b, 0))) {
-          // Found a > 0 ? a : -a which can be replaced by ABS(a).
-          replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
+      } else if (((cmp == kCondLT || cmp == kCondLE) && true_value->IsNeg()) ||
+                 ((cmp == kCondGT || cmp == kCondGE) && false_value->IsNeg())) {
+        bool negLeft = (cmp == kCondLT || cmp == kCondLE);
+        HInstruction* the_negated = negLeft ? true_value->InputAt(0) : false_value->InputAt(0);
+        HInstruction* not_negated = negLeft ? false_value : true_value;
+        if (a == the_negated && a == not_negated && IsInt64Value(b, 0)) {
+          // Found a < 0 ? -a :  a
+          //    or a > 0 ?  a : -a
+          // which can be replaced by ABS(a).
+          replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), a, select);
         }
       } else if (true_value->IsSub() && false_value->IsSub()) {
         HInstruction* true_sub1 = true_value->InputAt(0);
@@ -981,8 +1010,8 @@
              ((cmp == kCondLT || cmp == kCondLE) &&
               (a == true_sub2 && b == true_sub1 && a == false_sub1 && b == false_sub2))) &&
             AreLowerPrecisionArgs(t_type, a, b)) {
-          // Found a > b ? a - b  : b - a   or
-          //       a < b ? b - a  : a - b
+          // Found a > b ? a - b  : b - a
+          //    or a < b ? b - a  : a - b
           // which can be replaced by ABS(a - b) for lower precision operands a, b.
           replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
         }
@@ -1241,6 +1270,17 @@
   }
 }
 
+void InstructionSimplifierVisitor::VisitAbs(HAbs* instruction) {
+  HInstruction* input = instruction->GetInput();
+  if (DataType::IsZeroExtension(input->GetType(), instruction->GetResultType())) {
+    // Zero extension from narrow to wide can never set the sign bit in the wider
+    // operand, making the subsequent Abs redundant (e.g., abs(b & 0xff) for byte b).
+    instruction->ReplaceWith(input);
+    instruction->GetBlock()->RemoveInstruction(instruction);
+    RecordSimplification();
+  }
+}
+
 void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) {
   HConstant* input_cst = instruction->GetConstantRight();
   HInstruction* input_other = instruction->GetLeastConstantLeft();
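
As a rough Java-level sketch of the select shapes the updated simplifier rewrites (per the comments above; the names are illustrative, and whether a given source shape reaches the simplifier in exactly this form depends on earlier passes):

class SelectPatterns {
  // With the AllowInMinMax substitution, a < 100 ? Math.max(a, -100) : 100
  // can be recognized as the nested form min(max(a, -100), 100), i.e. a clamp.
  static int clamp(int a) {
    return a < 100 ? Math.max(a, -100) : 100;
  }

  // a < 0 ? -a : a (and the symmetric a > 0 ? a : -a) becomes ABS(a).
  static int absViaSelect(int a) {
    return a < 0 ? -a : a;
  }

  // For lower-precision operands, a > b ? a - b : b - a becomes ABS(a - b).
  static int absOfDifference(short a, short b) {
    return a > b ? a - b : b - a;
  }
}
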
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 81c0b50..c3d643a 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2875,6 +2875,14 @@
   __ Bind(&done);
 }
 
+void IntrinsicLocationsBuilderARM64::VisitReachabilityFence(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetInAt(0, Location::Any());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+
 UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
 
 UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
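
The new ReachabilityFence intrinsic (here and in the other back ends below) compiles Reference.reachabilityFence() down to nothing while keeping its argument live. A hedged sketch of the Java contract being intrinsified; the Handle class and its field are made up for illustration:

import java.lang.ref.Reference;

class ReachabilityFenceExample {
  // Hypothetical wrapper whose finalizer invalidates a raw value.
  static class Handle {
    long rawAddress = 42L;
    @Override protected void finalize() { rawAddress = 0L; }
  }

  static long readRaw(Handle h) {
    long addr = h.rawAddress;  // after this, 'h' itself is no longer used
    try {
      return addr;
    } finally {
      // Keeps 'h' reachable so its finalizer cannot run before 'addr' is consumed.
      Reference.reachabilityFence(h);
    }
  }
}
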
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index e61a0b0..29aecbc 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3028,6 +3028,14 @@
   }
 }
 
+void IntrinsicLocationsBuilderARMVIXL::VisitReachabilityFence(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetInAt(0, Location::Any());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble)   // Could be done by changing rounding mode, maybe?
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong)     // High register pressure.
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index bc1292b..ae248a3 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2693,6 +2693,14 @@
   __ Bind(&done);
 }
 
+void IntrinsicLocationsBuilderMIPS::VisitReachabilityFence(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetInAt(0, Location::Any());
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+
 // Unimplemented intrinsics.
 
 UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index f429afd..9a9ae71 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2354,6 +2354,14 @@
   __ Bind(&done);
 }
 
+void IntrinsicLocationsBuilderMIPS64::VisitReachabilityFence(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetInAt(0, Location::Any());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+
 UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
 
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index c4f322b..f84a33b 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2928,6 +2928,13 @@
   __ Bind(&done);
 }
 
+void IntrinsicLocationsBuilderX86::VisitReachabilityFence(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetInAt(0, Location::Any());
+}
+
+void IntrinsicCodeGeneratorX86::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
 
 UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
 UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 437bc3d..7627dc9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2737,6 +2737,14 @@
   __ Bind(&done);
 }
 
+void IntrinsicLocationsBuilderX86_64::VisitReachabilityFence(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetInAt(0, Location::Any());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+
 UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index e0a9cfb..9a26f2f 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -125,11 +125,6 @@
   }
 
   void Log() const {
-    if (!kIsDebugBuild && !VLOG_IS_ON(compiler)) {
-      // Log only in debug builds or if the compiler is verbose.
-      return;
-    }
-
     uint32_t compiled_intrinsics = GetStat(MethodCompilationStat::kCompiledIntrinsic);
     uint32_t compiled_native_stubs = GetStat(MethodCompilationStat::kCompiledNativeStub);
     uint32_t bytecode_attempts =
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 66e5142..3f52bdd 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -43,12 +43,16 @@
   for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
     HInstruction* instruction = it.Current();
     if (instruction->IsControlFlow()) {
-      if (num_instructions > kMaxInstructionsInBranch) {
-        return false;
-      }
       return instruction->IsGoto() || instruction->IsReturn();
     } else if (instruction->CanBeMoved() && !instruction->HasSideEffects()) {
-      num_instructions++;
+      if (instruction->IsSelect() &&
+          instruction->AsSelect()->GetCondition()->GetBlock() == block) {
+        // Count one HCondition and HSelect in the same block as a single instruction.
+        // This enables finding nested selects.
+        continue;
+      } else if (++num_instructions > kMaxInstructionsInBranch) {
+        return false;  // bail as soon as we exceed the number of allowed instructions
+      }
     } else {
       return false;
     }
@@ -97,6 +101,7 @@
     HBasicBlock* true_block = if_instruction->IfTrueSuccessor();
     HBasicBlock* false_block = if_instruction->IfFalseSuccessor();
     DCHECK_NE(true_block, false_block);
+
     if (!IsSimpleBlock(true_block) ||
         !IsSimpleBlock(false_block) ||
         !BlocksMergeTogether(true_block, false_block)) {
@@ -107,10 +112,10 @@
     // If the branches are not empty, move instructions in front of the If.
     // TODO(dbrazdil): This puts an instruction between If and its condition.
     //                 Implement moving of conditions to first users if possible.
-    if (!true_block->IsSingleGoto() && !true_block->IsSingleReturn()) {
+    while (!true_block->IsSingleGoto() && !true_block->IsSingleReturn()) {
       true_block->GetFirstInstruction()->MoveBefore(if_instruction);
     }
-    if (!false_block->IsSingleGoto() && !false_block->IsSingleReturn()) {
+    while (!false_block->IsSingleGoto() && !false_block->IsSingleReturn()) {
       false_block->GetFirstInstruction()->MoveBefore(if_instruction);
     }
     DCHECK(true_block->IsSingleGoto() || true_block->IsSingleReturn());
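
A small Java-level sketch of the nested-conditional shape the relaxed instruction counting is aimed at (illustrative only; whether a given conditional actually becomes nested HSelects depends on the rest of the pipeline):

class NestedSelectExample {
  // The inner conditional becomes an HSelect plus its HCondition in one block;
  // counting that pair as a single instruction lets select generation also fire
  // for the outer branch, producing nested selects the simplifier can then fold.
  static int pick(boolean outer, boolean inner, int a, int b, int c) {
    return outer ? (inner ? a : b) : c;
  }
}
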
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 6950b93..e2c53bb 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -404,6 +404,7 @@
   UsageError("      Example: --very-large-app-threshold=100000000");
   UsageError("");
   UsageError("  --app-image-fd=<file-descriptor>: specify output file descriptor for app image.");
+  UsageError("      The image is non-empty only if a profile is passed in.");
   UsageError("      Example: --app-image-fd=10");
   UsageError("");
   UsageError("  --app-image-file=<file-name>: specify a file name for app image.");
@@ -1479,9 +1480,15 @@
   }
 
   void LoadClassProfileDescriptors() {
-    if (profile_compilation_info_ != nullptr && IsImage()) {
-      Runtime* runtime = Runtime::Current();
-      CHECK(runtime != nullptr);
+    if (!IsImage()) {
+      return;
+    }
+    // If we don't have a profile, treat it as an empty set of classes. b/77340429
+    if (image_classes_ == nullptr) {
+      // May be non-null if --image-classes was passed in; in that case do not clear the list.
+      image_classes_.reset(new std::unordered_set<std::string>());
+    }
+    if (profile_compilation_info_ != nullptr) {
       // Filter out class path classes since we don't want to include these in the image.
       image_classes_.reset(
           new std::unordered_set<std::string>(
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 0cd39ac..c890f8b 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -2093,4 +2093,36 @@
   ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
 }
 
+TEST_F(Dex2oatTest, AppImageNoProfile) {
+  ScratchFile app_image_file;
+  const std::string out_dir = GetScratchDir();
+  const std::string odex_location = out_dir + "/base.odex";
+  GenerateOdexForTest(GetTestDexFileName("ManyMethods"),
+                      odex_location,
+                      CompilerFilter::Filter::kSpeedProfile,
+                      { "--app-image-fd=" + std::to_string(app_image_file.GetFd()) },
+                      true,  // expect_success
+                      false,  // use_fd
+                      [](const OatFile&) {});
+  // Open our generated oat file.
+  std::string error_msg;
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+                                                   odex_location.c_str(),
+                                                   nullptr,
+                                                   nullptr,
+                                                   false,
+                                                   /*low_4gb*/false,
+                                                   odex_location.c_str(),
+                                                   &error_msg));
+  ASSERT_TRUE(odex_file != nullptr);
+  ImageHeader header = {};
+  ASSERT_TRUE(app_image_file.GetFile()->PreadFully(
+      reinterpret_cast<void*>(&header),
+      sizeof(header),
+      /*offset*/ 0u)) << app_image_file.GetFile()->GetLength();
+  EXPECT_GT(header.GetImageSection(ImageHeader::kSectionObjects).Size(), 0u);
+  EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtMethods).Size(), 0u);
+  EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtFields).Size(), 0u);
+}
+
 }  // namespace art
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index 17da789..c7be4e0 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -76,7 +76,7 @@
 
 // These globals are safe since they don't have any non-trivial destructors.
 Atomic<size_t> g_bytes_used[kAllocatorTagCount];
-volatile size_t g_max_bytes_used[kAllocatorTagCount];
+Atomic<size_t> g_max_bytes_used[kAllocatorTagCount];
 Atomic<uint64_t> g_total_bytes_used[kAllocatorTagCount];
 
 void Dump(std::ostream& os) {
@@ -84,7 +84,7 @@
     os << "Dumping native memory usage\n";
     for (size_t i = 0; i < kAllocatorTagCount; ++i) {
       uint64_t bytes_used = g_bytes_used[i].load(std::memory_order_relaxed);
-      uint64_t max_bytes_used = g_max_bytes_used[i];
+      uint64_t max_bytes_used = g_max_bytes_used[i].load(std::memory_order_relaxed);
       uint64_t total_bytes_used = g_total_bytes_used[i].load(std::memory_order_relaxed);
       if (total_bytes_used != 0) {
         os << static_cast<AllocatorTag>(i) << " active=" << bytes_used << " max="
diff --git a/libartbase/base/allocator.h b/libartbase/base/allocator.h
index 7ddbacf..662f78e 100644
--- a/libartbase/base/allocator.h
+++ b/libartbase/base/allocator.h
@@ -71,12 +71,14 @@
 
 namespace TrackedAllocators {
 
+// We use memory_order_relaxed updates of the following counters. Values are treated as approximate
+// wherever concurrent updates are possible.
 // Running count of number of bytes used for this kind of allocation. Increased by allocations,
 // decreased by deallocations.
 extern Atomic<size_t> g_bytes_used[kAllocatorTagCount];
 
 // Largest value of bytes used seen.
-extern volatile size_t g_max_bytes_used[kAllocatorTagCount];
+extern Atomic<size_t> g_max_bytes_used[kAllocatorTagCount];
 
 // Total number of bytes allocated of this kind.
 extern Atomic<uint64_t> g_total_bytes_used[kAllocatorTagCount];
@@ -84,15 +86,17 @@
 void Dump(std::ostream& os);
 
 inline void RegisterAllocation(AllocatorTag tag, size_t bytes) {
-  g_total_bytes_used[tag].fetch_add(bytes, std::memory_order_seq_cst);
-  size_t new_bytes = g_bytes_used[tag].fetch_add(bytes, std::memory_order_seq_cst) + bytes;
-  if (g_max_bytes_used[tag] < new_bytes) {
-    g_max_bytes_used[tag] = new_bytes;
+  g_total_bytes_used[tag].fetch_add(bytes, std::memory_order_relaxed);
+  size_t new_bytes = g_bytes_used[tag].fetch_add(bytes, std::memory_order_relaxed) + bytes;
+  size_t max_bytes = g_max_bytes_used[tag].load(std::memory_order_relaxed);
+  while (max_bytes < new_bytes
+    && !g_max_bytes_used[tag].compare_exchange_weak(max_bytes /* updated */, new_bytes,
+                                                    std::memory_order_relaxed)) {
   }
 }
 
 inline void RegisterFree(AllocatorTag tag, size_t bytes) {
-  g_bytes_used[tag].fetch_sub(bytes, std::memory_order_seq_cst);
+  g_bytes_used[tag].fetch_sub(bytes, std::memory_order_relaxed);
 }
 
 }  // namespace TrackedAllocators
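
The maximum is now maintained with a relaxed compare-and-swap retry loop instead of an unsynchronized read-modify-write. Not ART code, but the same running-maximum pattern can be sketched in Java with java.util.concurrent.atomic (names here are illustrative):

import java.util.concurrent.atomic.AtomicLong;

class TrackedMax {
  private final AtomicLong maxBytesUsed = new AtomicLong();

  // accumulateAndGet retries a CAS internally until the stored value is the
  // maximum of the old value and newBytes, like the compare_exchange_weak loop.
  void registerAllocation(long newBytes) {
    maxBytesUsed.accumulateAndGet(newBytes, Math::max);
  }
}
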
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 433ed9a..3bff123 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1180,6 +1180,17 @@
       }
     }
 
+    // Update header for shared section.
+    uint32_t shared_section_offset = 0u;
+    uint32_t shared_section_size = 0u;
+    if (dex_file->IsCompactDexFile()) {
+      CompactDexFile::Header* const header =
+          reinterpret_cast<CompactDexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()));
+      shared_section_offset = header->data_off_;
+      shared_section_size = header->data_size_;
+      // The shared section will be serialized right after the dex file.
+      header->data_off_ = header->file_size_;
+    }
     // Verify output directory exists
     if (!OS::DirectoryExists(options_.export_dex_location_)) {
       // TODO: Extend OS::DirectoryExists if symlink support is required
@@ -1226,16 +1237,22 @@
       return false;
     }
 
-    bool success = false;
-      success = file->WriteFully(dex_file->Begin(), fsize);
-    // }
-
+    bool success = file->WriteFully(dex_file->Begin(), fsize);
     if (!success) {
       os << "Failed to write dex file";
       file->Erase();
       return false;
     }
 
+    if (shared_section_size != 0) {
+      success = file->WriteFully(dex_file->Begin() + shared_section_offset, shared_section_size);
+      if (!success) {
+        os << "Failed to write shared data section";
+        file->Erase();
+        return false;
+      }
+    }
+
     if (file->FlushCloseOrErase() != 0) {
       os << "Flush and close failed";
       return false;
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 0034469..18cb2fd 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -75,6 +75,11 @@
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly, &error_msg))
       << error_msg;
+  const std::string dex_location = tmp_dir_ + "/core-oj-hostdex.jar_export.dex";
+  const std::string dexdump2 = GetExecutableFilePath("dexdump2",
+                                                     /*is_debug*/false,
+                                                     /*is_static*/false);
+  ASSERT_TRUE(ForkAndExecAndWait({dexdump2, "-d", dex_location}, &error_msg)) << error_msg;
 }
 TEST_F(OatDumpTest, TestExportDexStatic) {
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index fac0bb2..b85730d 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -70,20 +70,24 @@
     kStatic,   // oatdump(d)s, dex2oat(d)s
   };
 
-  // Returns path to the oatdump/dex2oat binary.
-  std::string GetExecutableFilePath(Flavor flavor, const char* name) {
+  // Returns path to the oatdump/dex2oat/dexdump binary.
+  std::string GetExecutableFilePath(const char* name, bool is_debug, bool is_static) {
     std::string root = GetTestAndroidRoot();
     root += "/bin/";
     root += name;
-    if (kIsDebugBuild) {
+    if (is_debug) {
       root += "d";
     }
-    if (flavor == kStatic) {
+    if (is_static) {
       root += "s";
     }
     return root;
   }
 
+  std::string GetExecutableFilePath(Flavor flavor, const char* name) {
+    return GetExecutableFilePath(name, kIsDebugBuild, flavor == kStatic);
+  }
+
   enum Mode {
     kModeOat,
     kModeOatWithBootImage,
@@ -127,17 +131,7 @@
     };
     exec_argv.insert(exec_argv.end(), args.begin(), args.end());
 
-    pid_t pid;
-    int pipe_fd;
-    bool result = ForkAndExec(exec_argv, &pid, &pipe_fd, error_msg);
-    if (result) {
-      close(pipe_fd);
-      int status = 0;
-      if (waitpid(pid, &status, 0) != -1) {
-        result = (status == 0);
-      }
-    }
-    return result;
+    return ForkAndExecAndWait(exec_argv, error_msg);
   }
 
   // Run the test with custom arguments.
@@ -300,6 +294,21 @@
     }
   }
 
+  bool ForkAndExecAndWait(const std::vector<std::string>& exec_argv,
+                          /*out*/ std::string* error_msg) {
+    pid_t pid;
+    int pipe_fd;
+    bool result = ForkAndExec(exec_argv, &pid, &pipe_fd, error_msg);
+    if (result) {
+      close(pipe_fd);
+      int status = 0;
+      if (waitpid(pid, &status, 0) != -1) {
+        result = (status == 0);
+      }
+    }
+    return result;
+  }
+
   std::string tmp_dir_;
 
  private:
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index 6d84ffa..a6f1207 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -53,20 +53,29 @@
 namespace openjdkjvmti {
 
 // TODO We should make this much more selective in the future so we only return true when we
-// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
-// we can just assume that we care we are loaded at all.
-//
-// Even if we don't keep track of this at the method level we might want to keep track of it at the
-// level of enabled capabilities.
-bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(
-    art::ArtMethod* method ATTRIBUTE_UNUSED) {
-  return true;
+// actually care about the method at this time (i.e. active frames had locals changed). For now we
+// just assume that if anything has changed any frame's locals we care about all methods. If
+// nothing has, we only care about methods with active breakpoints on them. In the future we should
+// probably rewrite all of this to instead track this at the ShadowFrame or thread granularity.
+bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(art::ArtMethod* method) {
+  // In non-java-debuggable runtimes we need to assume that any method might not be debuggable and
+  // therefore potentially being inspected (due to inlines). If we are debuggable we rely heavily
+  // on inlining not being done, since we don't keep track of which methods get inlined where, and
+  // simply check whether the method has breakpoints.
+  return !art::Runtime::Current()->IsJavaDebuggable() ||
+      manager_->HaveLocalsChanged() ||
+      manager_->MethodHasBreakpoints(method);
 }
 
 bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
   return !manager_->MethodHasBreakpoints(method);
 }
 
+bool JvmtiMethodInspectionCallback::MethodNeedsDebugVersion(
+    art::ArtMethod* method ATTRIBUTE_UNUSED) {
+  return true;
+}
+
 DeoptManager::DeoptManager()
   : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock",
                                 static_cast<art::LockLevel>(
@@ -75,7 +84,10 @@
     performing_deoptimization_(false),
     global_deopt_count_(0),
     deopter_count_(0),
-    inspection_callback_(this) { }
+    breakpoint_status_lock_("JVMTI_BreakpointStatusLock",
+                            static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1)),
+    inspection_callback_(this),
+    set_local_variable_called_(false) { }
 
 void DeoptManager::Setup() {
   art::ScopedThreadStateChange stsc(art::Thread::Current(),
@@ -121,14 +133,11 @@
 }
 
 bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
-  art::MutexLock lk(art::Thread::Current(), deoptimization_status_lock_);
+  art::MutexLock lk(art::Thread::Current(), breakpoint_status_lock_);
   return MethodHasBreakpointsLocked(method);
 }
 
 bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
-  if (deopter_count_ == 0) {
-    return false;
-  }
   auto elem = breakpoint_status_.find(method);
   return elem != breakpoint_status_.end() && elem->second != 0;
 }
@@ -158,18 +167,23 @@
 
   art::ScopedThreadSuspension sts(self, art::kSuspended);
   deoptimization_status_lock_.ExclusiveLock(self);
+  {
+    breakpoint_status_lock_.ExclusiveLock(self);
 
-  DCHECK_GT(deopter_count_, 0u) << "unexpected deotpimization request";
+    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
 
-  if (MethodHasBreakpointsLocked(method)) {
-    // Don't need to do anything extra.
-    breakpoint_status_[method]++;
-    // Another thread might be deoptimizing the very method we just added new breakpoints for. Wait
-    // for any deopts to finish before moving on.
-    WaitForDeoptimizationToFinish(self);
-    return;
+    if (MethodHasBreakpointsLocked(method)) {
+      // Don't need to do anything extra.
+      breakpoint_status_[method]++;
+      // Another thread might be deoptimizing the very method we just added new breakpoints for.
+      // Wait for any deopts to finish before moving on.
+      breakpoint_status_lock_.ExclusiveUnlock(self);
+      WaitForDeoptimizationToFinish(self);
+      return;
+    }
+    breakpoint_status_[method] = 1;
+    breakpoint_status_lock_.ExclusiveUnlock(self);
   }
-  breakpoint_status_[method] = 1;
   auto instrumentation = art::Runtime::Current()->GetInstrumentation();
   if (instrumentation->IsForcedInterpretOnly()) {
     // We are already interpreting everything so no need to do anything.
@@ -196,17 +210,22 @@
   // need but since that is very heavy we will instead just use a condition variable to make sure we
   // don't race with ourselves.
   deoptimization_status_lock_.ExclusiveLock(self);
+  bool is_last_breakpoint;
+  {
+    art::MutexLock mu(self, breakpoint_status_lock_);
 
-  DCHECK_GT(deopter_count_, 0u) << "unexpected deotpimization request";
-  DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
-                                             << "breakpoints present!";
+    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+    DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
+                                              << "breakpoints present!";
+    breakpoint_status_[method] -= 1;
+    is_last_breakpoint = (breakpoint_status_[method] == 0);
+  }
   auto instrumentation = art::Runtime::Current()->GetInstrumentation();
-  breakpoint_status_[method] -= 1;
   if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) {
     // We don't need to do anything since we are interpreting everything anyway.
     deoptimization_status_lock_.ExclusiveUnlock(self);
     return;
-  } else if (breakpoint_status_[method] == 0) {
+  } else if (is_last_breakpoint) {
     if (UNLIKELY(is_default)) {
       RemoveDeoptimizeAllMethodsLocked(self);
     } else {
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index a495b68..6e991de 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -32,6 +32,7 @@
 #ifndef ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
 #define ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
 
+#include <atomic>
 #include <unordered_map>
 
 #include "jni.h"
@@ -62,6 +63,9 @@
   bool IsMethodSafeToJit(art::ArtMethod* method)
       OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
 
+  bool MethodNeedsDebugVersion(art::ArtMethod* method)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
  private:
   DeoptManager* manager_;
 };
@@ -107,9 +111,17 @@
 
   static DeoptManager* Get();
 
+  bool HaveLocalsChanged() const {
+    return set_local_variable_called_.load();
+  }
+
+  void SetLocalsUpdated() {
+    set_local_variable_called_.store(true);
+  }
+
  private:
   bool MethodHasBreakpointsLocked(art::ArtMethod* method)
-      REQUIRES(deoptimization_status_lock_);
+      REQUIRES(breakpoint_status_lock_);
 
   // Wait until nothing is currently in the middle of deoptimizing/undeoptimizing something. This is
   // needed to ensure that everything is synchronized since threads need to drop the
@@ -156,13 +168,20 @@
   // Number of users of deoptimization there currently are.
   uint32_t deopter_count_ GUARDED_BY(deoptimization_status_lock_);
 
+  // A mutex that just protects the breakpoint-status map. This mutex should always be at the
+  // bottom of the lock hierarchy. Nothing more should be locked if we hold this.
+  art::Mutex breakpoint_status_lock_ ACQUIRED_BEFORE(art::Locks::abort_lock_);
   // A map from methods to the number of breakpoints in them from all envs.
   std::unordered_map<art::ArtMethod*, uint32_t> breakpoint_status_
-      GUARDED_BY(deoptimization_status_lock_);
+      GUARDED_BY(breakpoint_status_lock_);
 
   // The MethodInspectionCallback we use to tell the runtime if we care about particular methods.
   JvmtiMethodInspectionCallback inspection_callback_;
 
+  // Set to true if anything calls SetLocalVariables on any thread since we need to be careful about
+  // OSR after this.
+  std::atomic<bool> set_local_variable_called_;
+
   // Helper for setting up/tearing-down for deoptimization.
   friend class ScopedDeoptimizationContext;
 };
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index bf2e6cd..b83310d 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -915,6 +915,9 @@
   if (depth < 0) {
     return ERR(ILLEGAL_ARGUMENT);
   }
+  // Make sure that we know not to do any OSR anymore.
+  // TODO We should really keep track of this at the Frame granularity.
+  DeoptManager::Get()->SetLocalsUpdated();
   art::Thread* self = art::Thread::Current();
   // Suspend JIT since it can get confused if we deoptimize methods getting jitted.
   art::jit::ScopedJitSuspend suspend_jit;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 4a944964..28659cb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -364,6 +364,11 @@
   return !Dbg::MethodHasAnyBreakpoints(m);
 }
 
+bool DebuggerActiveMethodInspectionCallback::MethodNeedsDebugVersion(
+    ArtMethod* m ATTRIBUTE_UNUSED) {
+  return Dbg::IsDebuggerActive();
+}
+
 void InternalDebuggerControlCallback::StartDebugger() {
   // Release the mutator lock.
   ScopedThreadStateChange stsc(art::Thread::Current(), kNative);
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 7401813..e1de991 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -56,6 +56,7 @@
 struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
   bool IsMethodBeingInspected(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsMethodSafeToJit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool MethodNeedsDebugVersion(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 struct DebuggerDdmCallback : public DdmCallback {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 3015b10..671079b 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -37,7 +37,9 @@
 // Static fault manager object accessed by signal handler.
 FaultManager fault_manager;
 
-extern "C" __attribute__((visibility("default"))) void art_sigsegv_fault() {
+// This needs to be NO_INLINE since some debuggers do not read the inline-info needed to set a
+// breakpoint here if this function gets inlined.
+extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigsegv_fault() {
   // Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
   VLOG(signals)<< "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
 }
diff --git a/runtime/image.cc b/runtime/image.cc
index f147078..316f7a5 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '8', '\0' };  // R^3 Bitstring type check.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '9', '\0' };  // ReachabilityFence.
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 84a148f..d7f33d5 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -139,10 +139,13 @@
 
 bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  return Runtime::Current()->IsJavaDebuggable() &&
+  art::Runtime* runtime = Runtime::Current();
+  // If any callback says we need the debug version, or the runtime is java-debuggable, we will
+  // need the debug version of the method.
+  return (runtime->GetRuntimeCallbacks()->MethodNeedsDebugVersion(method) ||
+          runtime->IsJavaDebuggable()) &&
          !method->IsNative() &&
-         !method->IsProxyMethod() &&
-         Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method);
+         !method->IsProxyMethod();
 }
 
 void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 022b139..69dae31 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -399,6 +399,16 @@
 VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetPlain)
 VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetRelease)
 
+static ALWAYS_INLINE bool MterpReachabilityFence(ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
+                                                 const Instruction* inst ATTRIBUTE_UNUSED,
+                                                 uint16_t inst_data ATTRIBUTE_UNUSED,
+                                                 JValue* result_register ATTRIBUTE_UNUSED)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Do nothing; its only purpose is to keep the argument reference live
+  // at preceding suspend points. That's automatic in the interpreter.
+  return true;
+}
+
 // Macro to help keep track of what's left to implement.
 #define UNIMPLEMENTED_CASE(name)    \
     case Intrinsics::k##name:       \
@@ -499,6 +509,7 @@
     UNIMPLEMENTED_CASE(MemoryPokeIntNative /* (JI)V */)
     UNIMPLEMENTED_CASE(MemoryPokeLongNative /* (JJ)V */)
     UNIMPLEMENTED_CASE(MemoryPokeShortNative /* (JS)V */)
+    INTRINSIC_CASE(ReachabilityFence /* (Ljava/lang/Object;)V */)
     INTRINSIC_CASE(StringCharAt)
     INTRINSIC_CASE(StringCompareTo)
     INTRINSIC_CASE(StringEquals)
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index da08793..2f91f5d 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -218,6 +218,7 @@
   V(VarHandleReleaseFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "releaseFence", "()V") \
   V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
   V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
+  V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
   SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
 
 #endif  // ART_RUNTIME_INTRINSICS_LIST_H_
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 470287b..5518eb2 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -161,6 +161,10 @@
       .Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
           .WithValues({true, false})
           .IntoKey(M::EnableHSpaceCompactForOOM)
+      .Define("-XX:DumpNativeStackOnSigQuit:_")
+          .WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
+          .IntoKey(M::DumpNativeStackOnSigQuit)
       .Define("-XX:MadviseRandomAccess:_")
           .WithType<bool>()
           .WithValueMap({{"false", false}, {"true", true}})
@@ -731,6 +735,7 @@
   UsageMessage(stream, "  -XX:BackgroundGC=none\n");
   UsageMessage(stream, "  -XX:LargeObjectSpace={disabled,map,freelist}\n");
   UsageMessage(stream, "  -XX:LargeObjectThreshold=N\n");
+  UsageMessage(stream, "  -XX:DumpNativeStackOnSigQuit=booleanvalue\n");
   UsageMessage(stream, "  -XX:MadviseRandomAccess:booleanvalue\n");
   UsageMessage(stream, "  -XX:SlowDebug={false,true}\n");
   UsageMessage(stream, "  -Xmethod-trace\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 00ccc19..b80ce7d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -272,6 +272,7 @@
       pending_hidden_api_warning_(false),
       dedupe_hidden_api_warnings_(true),
       always_set_hidden_api_warning_flag_(false),
+      dump_native_stack_on_sig_quit_(true),
       pruned_dalvik_cache_(false),
       // Initially assume we perceive jank in case the process state is never updated.
       process_state_(kProcessStateJankPerceptible),
@@ -1153,6 +1154,7 @@
   is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
   dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
   image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
+  dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
 
   vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
   exit_ = runtime_options.GetOrDefault(Opt::HookExit);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f5f3e31..03f17bc 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -663,6 +663,10 @@
     safe_mode_ = mode;
   }
 
+  bool GetDumpNativeStackOnSigQuit() const {
+    return dump_native_stack_on_sig_quit_;
+  }
+
   bool GetPrunedDalvikCache() const {
     return pruned_dalvik_cache_;
   }
@@ -1016,6 +1020,9 @@
   // when there is a warning. This is only used for testing.
   bool always_set_hidden_api_warning_flag_;
 
+  // Whether threads should dump their native stack on SIGQUIT.
+  bool dump_native_stack_on_sig_quit_;
+
   // Whether the dalvik cache was pruned when initializing the runtime.
   bool pruned_dalvik_cache_;
 
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index cd3c0b7..758917c 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -106,6 +106,15 @@
   return false;
 }
 
+bool RuntimeCallbacks::MethodNeedsDebugVersion(ArtMethod* m) {
+  for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
+    if (cb->MethodNeedsDebugVersion(m)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 void RuntimeCallbacks::AddThreadLifecycleCallback(ThreadLifecycleCallback* cb) {
   thread_callbacks_.push_back(cb);
 }
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index 24386ba..9f0410d 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -130,6 +130,10 @@
   // Note that '!IsMethodSafeToJit(m) implies IsMethodBeingInspected(m)'. That is that if this
   // method returns false IsMethodBeingInspected must return true.
   virtual bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+  // Returns true if the method needs a debug version (so that breakpoints or other inspection can
+  // be applied to it later), even if nothing unusual is being done with it right now.
+  virtual bool MethodNeedsDebugVersion(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
 
 class RuntimeCallbacks {
@@ -198,6 +202,11 @@
   // entrypoint should not be changed to JITed code.
   bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Returns true if some MethodInspectionCallback indicates the method needs to use a debug
+  // version. This allows later code to set breakpoints or perform other actions that could be
+  // broken by some optimizations.
+  bool MethodNeedsDebugVersion(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+
   void AddMethodInspectionCallback(MethodInspectionCallback* cb)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void RemoveMethodInspectionCallback(MethodInspectionCallback* cb)
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index 41bfb58..eae2505 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -41,6 +41,7 @@
 using android::base::StringPrintf;
 
 static constexpr bool kUseSigRTTimeout = true;
+static constexpr bool kDumpNativeStackOnTimeout = true;
 
 const char* GetSignalName(int signal_number) {
   switch (signal_number) {
@@ -370,30 +371,11 @@
 #pragma GCC diagnostic ignored "-Wframe-larger-than="
 #endif
 
-void HandleUnexpectedSignalCommon(int signal_number,
-                                  siginfo_t* info,
-                                  void* raw_context,
-                                  bool handle_timeout_signal,
-                                  bool dump_on_stderr) {
-  static bool handling_unexpected_signal = false;
-  if (handling_unexpected_signal) {
-    LogHelper::LogLineLowStack(__FILE__,
-                               __LINE__,
-                               ::android::base::FATAL_WITHOUT_ABORT,
-                               "HandleUnexpectedSignal reentered\n");
-    if (handle_timeout_signal) {
-      if (IsTimeoutSignal(signal_number)) {
-        // Ignore a recursive timeout.
-        return;
-      }
-    }
-    _exit(1);
-  }
-  handling_unexpected_signal = true;
-
-  gAborting++;  // set before taking any locks
-  MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
-
+static void HandleUnexpectedSignalCommonDump(int signal_number,
+                                             siginfo_t* info,
+                                             void* raw_context,
+                                             bool handle_timeout_signal,
+                                             bool dump_on_stderr) {
   auto logger = [&](auto& stream) {
     bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
                         signal_number == SIGFPE || signal_number == SIGSEGV);
@@ -440,7 +422,7 @@
       // Special timeout signal. Try to dump all threads.
       // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
       //       are of value here.
-      runtime->GetThreadList()->Dump(std::cerr);
+      runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
       std::cerr << std::endl;
     }
 
@@ -452,6 +434,71 @@
   }
 }
 
+void HandleUnexpectedSignalCommon(int signal_number,
+                                  siginfo_t* info,
+                                  void* raw_context,
+                                  bool handle_timeout_signal,
+                                  bool dump_on_stderr) {
+  // Local _static_ storing the currently handled signal (or -1).
+  static int handling_unexpected_signal = -1;
+
+  // Whether the dump code should be run under the unexpected-signal lock. For diagnostics we
+  // allow recursive unexpected signals in certain cases, so we must avoid self-deadlocking on it.
+  bool grab_lock = true;
+
+  if (handling_unexpected_signal != -1) {
+    LogHelper::LogLineLowStack(__FILE__,
+                               __LINE__,
+                               ::android::base::FATAL_WITHOUT_ABORT,
+                               "HandleUnexpectedSignal reentered\n");
+    // Print the signal number. Don't use any standard functions, just some arithmetic. Just best
+    // effort, with a minimal buffer.
+    if (0 < signal_number && signal_number < 100) {
+      char buf[] = { ' ',
+                     'S',
+                     static_cast<char>('0' + (signal_number / 10)),
+                     static_cast<char>('0' + (signal_number % 10)),
+                     '\n',
+                     0 };
+      LogHelper::LogLineLowStack(__FILE__,
+                                 __LINE__,
+                                 ::android::base::FATAL_WITHOUT_ABORT,
+                                 buf);
+    }
+    if (handle_timeout_signal) {
+      if (IsTimeoutSignal(signal_number)) {
+        // Ignore a recursive timeout.
+        return;
+      }
+    }
+    // If we were handling a timeout signal, try to go on. Otherwise hard-exit.
+    // This relies on the expectation that we'll only ever get one timeout signal.
+    if (!handle_timeout_signal || handling_unexpected_signal != GetTimeoutSignal()) {
+      _exit(1);
+    }
+    grab_lock = false;  // The "outer" handling instance already holds the lock.
+  }
+  handling_unexpected_signal = signal_number;
+
+  gAborting++;  // set before taking any locks
+
+  if (grab_lock) {
+    MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
+
+    HandleUnexpectedSignalCommonDump(signal_number,
+                                     info,
+                                     raw_context,
+                                     handle_timeout_signal,
+                                     dump_on_stderr);
+  } else {
+    HandleUnexpectedSignalCommonDump(signal_number,
+                                     info,
+                                     raw_context,
+                                     handle_timeout_signal,
+                                     dump_on_stderr);
+  }
+}
+
 #if defined(__APPLE__)
 #pragma GCC diagnostic pop
 #endif
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index dcb1335..4121ad6 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -70,6 +70,7 @@
 RUNTIME_OPTIONS_KEY (bool,                UseTLAB,                        (kUseTlab || kUseReadBarrier))
 RUNTIME_OPTIONS_KEY (bool,                EnableHSpaceCompactForOOM,      true)
 RUNTIME_OPTIONS_KEY (bool,                UseJitCompilation,              false)
+RUNTIME_OPTIONS_KEY (bool,                DumpNativeStackOnSigQuit,       true)
 RUNTIME_OPTIONS_KEY (bool,                MadviseRandomAccess,            false)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITCompileThreshold)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITWarmupThreshold)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 50cf9e0..d17f409 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1161,9 +1161,10 @@
      << "]";
 }
 
-void Thread::Dump(std::ostream& os, BacktraceMap* backtrace_map, bool force_dump_stack) const {
+void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
+                  bool force_dump_stack) const {
   DumpState(os);
-  DumpStack(os, backtrace_map, force_dump_stack);
+  DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
 }
 
 mirror::String* Thread::GetThreadName() const {
@@ -1967,7 +1968,10 @@
   }
 }
 
-void Thread::DumpStack(std::ostream& os, BacktraceMap* backtrace_map, bool force_dump_stack) const {
+void Thread::DumpStack(std::ostream& os,
+                       bool dump_native_stack,
+                       BacktraceMap* backtrace_map,
+                       bool force_dump_stack) const {
   // TODO: we call this code when dying but may not have suspended the thread ourself. The
   //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
   //       the race with the thread_suspend_count_lock_).
@@ -1980,7 +1984,7 @@
   }
   if (safe_to_dump || force_dump_stack) {
     // If we're currently in native code, dump that stack before dumping the managed stack.
-    if (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this)) {
+    if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
       ArtMethod* method =
           GetCurrentMethod(nullptr,
@@ -2877,6 +2881,17 @@
 
   Handle<mirror::Class> h_aste_class(hs.NewHandle<mirror::Class>(
       h_aste_array_class->GetComponentType()));
+
+  // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924.
+  class_linker->EnsureInitialized(soa.Self(),
+                                  h_aste_class,
+                                  /* can_init_fields */ true,
+                                  /* can_init_parents */ true);
+  if (soa.Self()->IsExceptionPending()) {
+    // This should not fail in a healthy runtime.
+    return nullptr;
+  }
+
   ArtField* stack_trace_element_field = h_aste_class->FindField(
       soa.Self(), h_aste_class.Get(), "stackTraceElement", "Ljava/lang/StackTraceElement;");
   DCHECK(stack_trace_element_field != nullptr);
diff --git a/runtime/thread.h b/runtime/thread.h
index af1401e..22b77ee 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -207,6 +207,7 @@
 
   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
   void Dump(std::ostream& os,
+            bool dump_native_stack = true,
             BacktraceMap* backtrace_map = nullptr,
             bool force_dump_stack = false) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
@@ -1317,6 +1318,7 @@
 
   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
   void DumpStack(std::ostream& os,
+                 bool dump_native_stack = true,
                  BacktraceMap* backtrace_map = nullptr,
                  bool force_dump_stack = false) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ee68399..44af867 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -152,8 +152,9 @@
       suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
     }
   }
-  Dump(os);
-  DumpUnattachedThreads(os, kDumpUnattachedThreadNativeStackForSigQuit);
+  bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
+  Dump(os, dump_native_stack);
+  DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
 }
 
 static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
@@ -200,10 +201,11 @@
 // A closure used by Thread::Dump.
 class DumpCheckpoint FINAL : public Closure {
  public:
-  explicit DumpCheckpoint(std::ostream* os)
+  DumpCheckpoint(std::ostream* os, bool dump_native_stack)
       : os_(os),
         barrier_(0),
-        backtrace_map_(BacktraceMap::Create(getpid())) {
+        backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
+        dump_native_stack_(dump_native_stack) {
     if (backtrace_map_ != nullptr) {
       backtrace_map_->SetSuffixesToIgnore(std::vector<std::string> { "oat", "odex" });
     }
@@ -217,7 +219,7 @@
     std::ostringstream local_os;
     {
       ScopedObjectAccess soa(self);
-      thread->Dump(local_os, backtrace_map_.get());
+      thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
     }
     {
       // Use the logging lock to ensure serialization when writing to the common ostream.
@@ -245,16 +247,18 @@
   Barrier barrier_;
   // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
   std::unique_ptr<BacktraceMap> backtrace_map_;
+  // Whether we should dump the native stack.
+  const bool dump_native_stack_;
 };
 
-void ThreadList::Dump(std::ostream& os) {
+void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
   Thread* self = Thread::Current();
   {
     MutexLock mu(self, *Locks::thread_list_lock_);
     os << "DALVIK THREADS (" << list_.size() << "):\n";
   }
   if (self != nullptr) {
-    DumpCheckpoint checkpoint(&os);
+    DumpCheckpoint checkpoint(&os, dump_native_stack);
     size_t threads_running_checkpoint;
     {
       // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
@@ -265,7 +269,7 @@
       checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
     }
   } else {
-    DumpUnattachedThreads(os, /* dump_native_stack */ true);
+    DumpUnattachedThreads(os, dump_native_stack);
   }
 }
 
@@ -487,6 +491,7 @@
               // Found a runnable thread that hasn't responded to the empty checkpoint request.
               // Assume it's stuck and safe to dump its stack.
               thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
+                           /*dump_native_stack*/ true,
                            /*backtrace_map*/ nullptr,
                            /*force_dump_stack*/ true);
             }
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 09b10d2..895c1a4 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -57,7 +57,7 @@
   void DumpForSigQuit(std::ostream& os)
       REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
   // For thread suspend timeout dumps.
-  void Dump(std::ostream& os)
+  void Dump(std::ostream& os, bool dump_native_stack = true)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
   pid_t GetLockOwner();  // For SignalCatcher.
 
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index ec4dc41..838d7f1 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -317,6 +317,7 @@
   if (quickening_info.empty()) {
     return ArrayRef<const uint8_t>();
   }
+  CHECK_LT(dex_method_idx, dex_file.NumMethodIds());
   const uint32_t quickening_offset =
       GetQuickenInfoOffsetTable(dex_file, quickening_info).GetOffset(dex_method_idx);
   if (quickening_offset == 0u) {
diff --git a/test/036-finalizer/src/Main.java b/test/036-finalizer/src/Main.java
index 51d4a81..be7ae4a 100644
--- a/test/036-finalizer/src/Main.java
+++ b/test/036-finalizer/src/Main.java
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+import java.lang.ref.Reference;
 import java.lang.ref.WeakReference;
 import java.util.ArrayList;
 import java.util.List;
@@ -80,6 +81,7 @@
         // the test fail (even when keeping the `null` assignment). b/76454261
         FinalizerTest keepLive = wimp.get();
         System.out.println("wimp: " + wimpString(wimp));
+        Reference.reachabilityFence(keepLive);
         keepLive = null;  // Clear the reference.
 
         /* this will try to collect and finalize ft */
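
The hunk above relies on the reachabilityFence keep-alive idiom; a minimal standalone sketch of that idiom (class name hypothetical, assumes java.lang.ref.Reference.reachabilityFence is available on the target API level):

    import java.lang.ref.Reference;
    import java.lang.ref.WeakReference;

    // Hypothetical illustration, not part of the patch.
    class KeepAliveSketch {
        public static void main(String[] args) {
            Object obj = new Object();
            WeakReference<Object> wimp = new WeakReference<>(obj);
            Object keepLive = wimp.get();  // re-acquire a strong reference
            obj = null;                    // now keepLive is the only strong reference
            System.gc();                   // without the fence below, keepLive may already be treated as dead here
            System.out.println("wimp cleared? " + (wimp.get() == null));
            Reference.reachabilityFence(keepLive);  // keeps the referent reachable up to this point
            keepLive = null;
        }
    }
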
diff --git a/test/072-reachability-fence/expected.txt b/test/072-reachability-fence/expected.txt
new file mode 100644
index 0000000..fdd0d7b
--- /dev/null
+++ b/test/072-reachability-fence/expected.txt
@@ -0,0 +1,5 @@
+Starting
+Reference 0 was live.
+Reference 3 was live.
+Reference 4 was live.
+Finished
diff --git a/test/072-reachability-fence/info.txt b/test/072-reachability-fence/info.txt
new file mode 100644
index 0000000..21b6d6a
--- /dev/null
+++ b/test/072-reachability-fence/info.txt
@@ -0,0 +1,4 @@
+Check that reachabilityFence() prevents garbage collection of objects only referred to by a dead
+reference.
+
+This is not very convincing, since we currently usually keep such objects around anyway.
diff --git a/test/072-reachability-fence/src/Main.java b/test/072-reachability-fence/src/Main.java
new file mode 100644
index 0000000..ac1e131
--- /dev/null
+++ b/test/072-reachability-fence/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.ref.Reference;
+import java.lang.ref.WeakReference;
+
+public class Main {
+    public static void main(String[] args) {
+        System.out.println("Starting");
+        WeakReference wrefs[] = new WeakReference[5];
+        String str0 = generateString("String", 0);
+        String str1 = generateString("String", 1);
+        String str2 = generateString("String", 2);
+        String str3 = generateString("String", 3);
+        String str4 = generateString("String", 4);
+        wrefs[0] = new WeakReference(str0);
+        wrefs[1] = new WeakReference(str1);
+        wrefs[2] = new WeakReference(str2);
+        wrefs[3] = new WeakReference(str3);
+        wrefs[4] = new WeakReference(str4);
+        // Clear a couple as a sanity check.
+        str1 = null;
+        str2 = null;
+        // str<n> dead here; in the future we will possibly reuse the registers.
+        // Give the compiler something to fill the registers with.
+        String str5 = generateString("String", 5);
+        String str6 = generateString("String", 6);
+        String str7 = generateString("String", 7);
+        String str8 = generateString("String", 8);
+        String str9 = generateString("String", 9);
+        Runtime.getRuntime().gc();
+        for (int i = 0; i < 5; ++i) {
+          if (wrefs[i].get() != null) {
+            System.out.println("Reference " + i + " was live.");
+          }
+        }
+        Reference.reachabilityFence(str0);
+        Reference.reachabilityFence(str1);
+        Reference.reachabilityFence(str2);
+        Reference.reachabilityFence(str3);
+        Reference.reachabilityFence(str4);
+        System.out.println("Finished");
+    }
+
+    private static String generateString(String base, int num) {
+        return base + num;
+    }
+}
diff --git a/test/171-init-aste/expected.txt b/test/171-init-aste/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/171-init-aste/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/171-init-aste/info.txt b/test/171-init-aste/info.txt
new file mode 100644
index 0000000..201e8ad
--- /dev/null
+++ b/test/171-init-aste/info.txt
@@ -0,0 +1 @@
+Regression test for failure to initialize dalvik.system.AnnotatedStackTraceElement.
diff --git a/test/171-init-aste/src-art/Main.java b/test/171-init-aste/src-art/Main.java
new file mode 100644
index 0000000..9d36610
--- /dev/null
+++ b/test/171-init-aste/src-art/Main.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import dalvik.system.AnnotatedStackTraceElement;
+
+public class Main {
+    public static void main(String args[]) throws Exception {
+        Class<?> vmStack = Class.forName("dalvik.system.VMStack");
+        Method getAnnotatedThreadStackTrace =
+                vmStack.getDeclaredMethod("getAnnotatedThreadStackTrace", Thread.class);
+        Object[] annotatedStackTrace =
+                (Object[]) getAnnotatedThreadStackTrace.invoke(null, Thread.currentThread());
+        AnnotatedStackTraceElement annotatedElement =
+            (AnnotatedStackTraceElement) annotatedStackTrace[0];
+        // This used to fail an assertion that the AnnotatedStackTraceElement.class
+        // is at least initializing (i.e. initializing, initialized or resolved-erroneous).
+        // Note: We cannot use reflection for this test because getDeclaredMethod() would
+        // initialize the class and hide the failure.
+        annotatedElement.getStackTraceElement();
+
+        System.out.println("passed");
+    }
+}
diff --git a/test/171-init-aste/src/Main.java b/test/171-init-aste/src/Main.java
new file mode 100644
index 0000000..4479cb4
--- /dev/null
+++ b/test/171-init-aste/src/Main.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+    // Note: This file is used for the RI which does not support
+    // dalvik.system.AnnotatedStackTraceElement (see src-art/Main.java),
+    // so that we do not need an exclusion in known failures.
+    public static void main(String args[]) throws Exception {
+        System.out.println("passed");
+    }
+}
diff --git a/test/1935-get-set-current-frame-jit/expected.txt b/test/1935-get-set-current-frame-jit/expected.txt
index cdb8f6a..a685891 100644
--- a/test/1935-get-set-current-frame-jit/expected.txt
+++ b/test/1935-get-set-current-frame-jit/expected.txt
@@ -1,7 +1,5 @@
 JNI_OnLoad called
 From GetLocalInt(), value is 42
-isInOsrCode? false
 	Value is '42'
 Setting TARGET to 1337
-isInOsrCode? false
 	Value is '1337'
diff --git a/test/1935-get-set-current-frame-jit/run b/test/1935-get-set-current-frame-jit/run
index 51875a7..e569d08 100755
--- a/test/1935-get-set-current-frame-jit/run
+++ b/test/1935-get-set-current-frame-jit/run
@@ -14,5 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Ask for stack traces to be dumped to a file rather than to stdout.
-./default-run "$@" --jvmti
+# Ensure the test is not subject to code collection
+./default-run "$@" --jvmti --runtime-option -Xjitinitialsize:32M
diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java
index 714a98a..378aaf7 100644
--- a/test/1935-get-set-current-frame-jit/src/Main.java
+++ b/test/1935-get-set-current-frame-jit/src/Main.java
@@ -21,6 +21,7 @@
 import java.lang.reflect.Executable;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
+import java.time.Instant;
 import java.util.concurrent.Semaphore;
 import java.util.Arrays;
 import java.util.Collection;
@@ -49,9 +50,11 @@
   public static class IntRunner implements Runnable {
     private volatile boolean continueBusyLoop;
     private volatile boolean inBusyLoop;
-    public IntRunner() {
+    private final boolean expectOsr;
+    public IntRunner(boolean expectOsr) {
       this.continueBusyLoop = true;
       this.inBusyLoop = false;
+      this.expectOsr = expectOsr;
     }
     public void run() {
       int TARGET = 42;
@@ -59,14 +62,23 @@
       while (continueBusyLoop) {
         inBusyLoop = true;
       }
-      int i = 0;
-      while (Main.isInterpreted() && i < 10000) {
-        Main.ensureJitCompiled(IntRunner.class, "run");
-        i++;
-      }
-      // We shouldn't be doing OSR since we are using JVMTI and the get/set prevents OSR.
+      // Wait up to 600 seconds for OSR to kick in if we expect it; if we don't expect it,
+      // give up after only 3 seconds.
+      Instant osrDeadline = Instant.now().plusSeconds(expectOsr ? 600 : 3);
+      do {
+        // Don't actually do anything here.
+        inBusyLoop = true;
+      } while (hasJit() && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
+      // We shouldn't be doing OSR since we are using JVMTI and the set prevents OSR.
       // Set local will also push us to interpreter but the get local may remain in compiled code.
-      System.out.println("isInOsrCode? " + (hasJit() && Main.isInOsrCode("run")));
+      if (hasJit()) {
+        boolean inOsr = Main.isInOsrCode("run");
+        if (expectOsr && !inOsr) {
+          throw new Error("Expected to be in OSR but was not.");
+        } else if (!expectOsr && inOsr) {
+          throw new Error("Expected not to be in OSR but was.");
+        }
+      }
       reportValue(TARGET);
     }
     public void waitForBusyLoopStart() { while (!inBusyLoop) {} }
@@ -78,7 +90,7 @@
   public static void runGet() throws Exception {
     Method target = IntRunner.class.getDeclaredMethod("run");
     // Get Int
-    IntRunner int_runner = new IntRunner();
+    IntRunner int_runner = new IntRunner(true);
     Thread target_get = new Thread(int_runner, "GetLocalInt - Target");
     target_get.start();
     int_runner.waitForBusyLoopStart();
@@ -108,7 +120,7 @@
   public static void runSet() throws Exception {
     Method target = IntRunner.class.getDeclaredMethod("run");
     // Set Int
-    IntRunner int_runner = new IntRunner();
+    IntRunner int_runner = new IntRunner(false);
     Thread target_set = new Thread(int_runner, "SetLocalInt - Target");
     target_set.start();
     int_runner.waitForBusyLoopStart();
@@ -157,7 +169,6 @@
     throw new Error("Unable to find stack frame in method " + target + " on thread " + thr);
   }
 
-  public static native void ensureJitCompiled(Class k, String f);
   public static native boolean isInterpreted();
   public static native boolean isInOsrCode(String methodName);
   public static native boolean hasJit();
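
The rewritten run() above replaces a fixed iteration count with a wall-clock deadline. A minimal sketch of that deadline-polling pattern (helper class and names are hypothetical, not from the test):

    import java.time.Instant;
    import java.util.function.BooleanSupplier;

    // Hypothetical helper, not part of the test.
    class DeadlineWait {
        // Busy-waits until condition holds or the deadline passes; returns whether it held.
        static boolean waitFor(BooleanSupplier condition, long seconds) {
            Instant deadline = Instant.now().plusSeconds(seconds);
            while (!condition.getAsBoolean()) {
                if (Instant.now().isAfter(deadline)) {
                    return false;  // deadline passed, give up
                }
            }
            return true;
        }
    }
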
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index 870a403..819304a 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -48,10 +48,7 @@
   }
 
   /// CHECK-START: void Main.doitChar(char[]) loop_optimization (before)
-  /// CHECK-DAG: Phi       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArrayGet  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: Abs       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: ArraySet  loop:<<Loop>>      outer_loop:none
+  /// CHECK-NOT: Abs
   //
   /// CHECK-START: void Main.doitChar(char[]) loop_optimization (after)
   /// CHECK-NOT: VecAbs
diff --git a/test/678-checker-simd-saturation/src/Main.java b/test/678-checker-simd-saturation/src/Main.java
index decc691..7a22ca1 100644
--- a/test/678-checker-simd-saturation/src/Main.java
+++ b/test/678-checker-simd-saturation/src/Main.java
@@ -397,7 +397,22 @@
     }
   }
 
-  // TODO: recognize the more common if-else too.
+  /// CHECK-START: void Main.satAlt2(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Clp1:i\d+>> IntConstant -32768                   loop:none
+  /// CHECK-DAG: <<Clp2:i\d+>> IntConstant  32767                   loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi:i\d+>>]     loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi>>]          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Add>>,<<Clp1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Max>>,<<Clp2>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Min>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Conv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void Main.satAlt2(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad [{{l\d+}},<<Phi:i\d+>>]      loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad [{{l\d+}},<<Phi>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:d\d+>>  VecSaturationAdd [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},<<Phi>>,<<Add>>]  loop:<<Loop>>      outer_loop:none
   public static void satAlt2(short[] a, short[] b, short[] c) {
     int n = Math.min(a.length, Math.min(b.length, c.length));
     for (int i = 0; i < n; i++) {
@@ -411,7 +426,11 @@
     }
   }
 
-  // TODO: recognize conditional too.
+  /// CHECK-START-{ARM,ARM64}: void Main.satAlt3(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad [{{l\d+}},<<Phi:i\d+>>]      loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad [{{l\d+}},<<Phi>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:d\d+>>  VecSaturationAdd [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},<<Phi>>,<<Add>>]  loop:<<Loop>>      outer_loop:none
   public static void satAlt3(short[] a, short[] b, short[] c) {
     int n = Math.min(a.length, Math.min(b.length, c.length));
     for (int i = 0; i < n; i++) {
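
The "before" expectations for satAlt2 describe a scalar clamped add (Add, Max with -32768, Min with 32767, narrowing TypeConversion, ArraySet) that loop optimization rewrites to VecSaturationAdd. A hand-written sketch of that scalar pattern (class name hypothetical, not the test's actual body):

    // Hypothetical illustration of the clamped-add pattern, not the test's actual code.
    class SaturatingAddSketch {
        static void satAdd(short[] a, short[] b, short[] c) {
            int n = Math.min(a.length, Math.min(b.length, c.length));
            for (int i = 0; i < n; i++) {
                int sum = a[i] + b[i];  // widened to int, may exceed the short range
                c[i] = (short) Math.min(Math.max(sum, Short.MIN_VALUE), Short.MAX_VALUE);
            }
        }
    }
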
diff --git a/test/679-checker-minmax/src/Main.java b/test/679-checker-minmax/src/Main.java
index 38085bb..4f0261c 100644
--- a/test/679-checker-minmax/src/Main.java
+++ b/test/679-checker-minmax/src/Main.java
@@ -19,6 +19,10 @@
  */
 public class Main {
 
+  //
+  // Different types.
+  //
+
   /// CHECK-START: int Main.min1(int, int) instruction_simplifier$after_inlining (before)
   /// CHECK-DAG: <<Cnd:z\d+>> GreaterThanOrEqual [<<Op1:i\d+>>,<<Op2:i\d+>>]
   /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op1>>,<<Op2>>,<<Cnd>>]
@@ -229,7 +233,116 @@
     return a >= b ? a : b;
   }
 
+
+  //
+  // Complications.
+  //
+
+  // TODO: coming soon, under discussion
+  public static int min0(int[] a, int[] b) {
+    // The repeated array references require common subexpression elimination
+    // before the select and min/max recognition can apply.
+    return a[0] <= b[0] ? a[0] : b[0];
+  }
+
+  // TODO: coming soon, under discussion
+  public static int max0(int[] a, int[] b) {
+    // The repeated array references require common subexpression elimination
+    // before the select and min/max recognition can apply.
+    return a[0] >= b[0] ? a[0] : b[0];
+  }
+
+  /// CHECK-START: int Main.minmax1(int) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:i\d+>>  ParameterValue
+  /// CHECK-DAG: <<P100:i\d+>> IntConstant 100
+  /// CHECK-DAG: <<M100:i\d+>> IntConstant -100
+  /// CHECK-DAG: <<Cnd1:z\d+>> LessThanOrEqual [<<Par>>,<<P100>>]
+  /// CHECK-DAG: <<Sel1:i\d+>> Select [<<P100>>,<<Par>>,<<Cnd1>>]
+  /// CHECK-DAG: <<Cnd2:z\d+>> GreaterThanOrEqual [<<Sel1>>,<<M100>>]
+  /// CHECK-DAG: <<Sel2:i\d+>> Select [<<M100>>,<<Sel1>>,<<Cnd2>>]
+  /// CHECK-DAG:               Return [<<Sel2>>]
+  //
+  /// CHECK-START: int Main.minmax1(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>>  ParameterValue
+  /// CHECK-DAG: <<P100:i\d+>> IntConstant 100
+  /// CHECK-DAG: <<M100:i\d+>> IntConstant -100
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Par>>,<<P100>>]
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Min>>,<<M100>>]
+  /// CHECK-DAG:               Return [<<Max>>]
+  //
+  /// CHECK-START: int Main.minmax1(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:               Select
+  public static int minmax1(int x) {
+    // Simple if-if gives clean select sequence.
+    if (x > 100) {
+      x = 100;
+    }
+    if (x < -100) {
+      x = -100;
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.minmax2(int) instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Par:i\d+>>  ParameterValue
+  /// CHECK-DAG: <<P100:i\d+>> IntConstant 100
+  /// CHECK-DAG: <<M100:i\d+>> IntConstant -100
+  /// CHECK-DAG: <<Cnd1:z\d+>> LessThanOrEqual [<<Par>>,<<P100>>]
+  /// CHECK-DAG: <<Cnd2:z\d+>> GreaterThanOrEqual [<<Par>>,<<M100>>]
+  /// CHECK-DAG: <<Sel1:i\d+>> Select [<<M100>>,<<Par>>,<<Cnd2>>]
+  /// CHECK-DAG: <<Sel2:i\d+>> Select [<<P100>>,<<Sel1>>,<<Cnd1>>]
+  /// CHECK-DAG:               Return [<<Sel2>>]
+  //
+  /// CHECK-START: int Main.minmax2(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>>  ParameterValue
+  /// CHECK-DAG: <<P100:i\d+>> IntConstant 100
+  /// CHECK-DAG: <<M100:i\d+>> IntConstant -100
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Par>>,<<M100>>]
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Max>>,<<P100>>]
+  /// CHECK-DAG:               Return [<<Min>>]
+  //
+  /// CHECK-START: int Main.minmax2(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:               Select
+  public static int minmax2(int x) {
+    // Simple if-else requires inspecting bounds of resulting selects.
+    if (x > 100) {
+      x = 100;
+    } else if (x < -100) {
+      x = -100;
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.minmax3(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>>  ParameterValue
+  /// CHECK-DAG: <<P100:i\d+>> IntConstant 100
+  /// CHECK-DAG: <<M100:i\d+>> IntConstant -100
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Par>>,<<M100>>]
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Max>>,<<P100>>]
+  /// CHECK-DAG:               Return [<<Min>>]
+  //
+  /// CHECK-START: int Main.minmax3(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:               Select
+  public static int minmax3(int x) {
+    return (x > 100) ? 100 : ((x < -100) ? -100 : x);
+  }
+
+  /// CHECK-START: int Main.minmax4(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:i\d+>>  ParameterValue
+  /// CHECK-DAG: <<P100:i\d+>> IntConstant 100
+  /// CHECK-DAG: <<M100:i\d+>> IntConstant -100
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Par>>,<<P100>>]
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Min>>,<<M100>>]
+  /// CHECK-DAG:               Return [<<Max>>]
+  //
+  /// CHECK-START: int Main.minmax4(int) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:               Select
+  public static int minmax4(int x) {
+    return (x < -100) ? -100 : ((x > 100) ? 100 : x);
+  }
+
   public static void main(String[] args) {
+    // Types.
     expectEquals(10, min1(10, 20));
     expectEquals(10, min2(10, 20));
     expectEquals(10, min3(10, 20));
@@ -244,6 +357,23 @@
     expectEquals(20, max5((short) 10, (short) 20));
     expectEquals(20, max6((byte) 10, (byte) 20));
     expectEquals(20L, max7(10L, 20L));
+    // Complications.
+    int[] a = { 10 };
+    int[] b = { 20 };
+    expectEquals(10, min0(a, b));
+    expectEquals(20, max0(a, b));
+    expectEquals(-100, minmax1(-200));
+    expectEquals(10, minmax1(10));
+    expectEquals(100, minmax1(200));
+    expectEquals(-100, minmax2(-200));
+    expectEquals(10, minmax2(10));
+    expectEquals(100, minmax2(200));
+    expectEquals(-100, minmax3(-200));
+    expectEquals(10, minmax3(10));
+    expectEquals(100, minmax3(200));
+    expectEquals(-100, minmax4(-200));
+    expectEquals(10, minmax4(10));
+    expectEquals(100, minmax4(200));
     System.out.println("passed");
   }
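
The clamp methods minmax1 through minmax4 above are all semantically equivalent to a single Min/Max composition; a small sketch of that equivalence (class name hypothetical, not part of the test):

    // Hypothetical illustration, not part of the test.
    class ClampSketch {
        // Same result as minmax1..minmax4: clamp x into [-100, 100].
        static int clamp(int x) {
            return Math.min(Math.max(x, -100), 100);
        }
        public static void main(String[] args) {
            System.out.println(clamp(-200));  // -100
            System.out.println(clamp(10));    // 10
            System.out.println(clamp(200));   // 100
        }
    }
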
 
diff --git a/test/681-checker-abs/src/Main.java b/test/681-checker-abs/src/Main.java
index 8064b1d..d1ba7c6 100644
--- a/test/681-checker-abs/src/Main.java
+++ b/test/681-checker-abs/src/Main.java
@@ -19,6 +19,38 @@
  */
 public class Main {
 
+  /// CHECK-START: int Main.absI(int) instruction_simplifier (before)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> InvokeStaticOrDirect [<<Par>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.absI(int) instruction_simplifier (after)
+  /// CHECK-DAG: <<Par:i\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.absI(int) instruction_simplifier (after)
+  /// CHECK-NOT:              InvokeStaticOrDirect
+  public static int absI(int a) {
+    return Math.abs(a);
+  }
+
+  /// CHECK-START: long Main.absL(long) instruction_simplifier (before)
+  /// CHECK-DAG: <<Par:j\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:j\d+>> InvokeStaticOrDirect [<<Par>>] intrinsic:MathAbsLong
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: long Main.absL(long) instruction_simplifier (after)
+  /// CHECK-DAG: <<Par:j\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:j\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: long Main.absL(long) instruction_simplifier (after)
+  /// CHECK-NOT:              InvokeStaticOrDirect
+  public static long absL(long a) {
+    return Math.abs(a);
+  }
+
   /// CHECK-START: int Main.abs1(int) instruction_simplifier$after_inlining (before)
   /// CHECK-DAG: <<Par:i\d+>> ParameterValue
   /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0
@@ -152,7 +184,74 @@
     return a >= 0 ? a : -a;
   }
 
+  //
+  // Nop zero extension.
+  //
+
+  /// CHECK-START: int Main.zabs1(byte) instruction_simplifier (before)
+  /// CHECK-DAG: <<Par:b\d+>> ParameterValue
+  /// CHECK-DAG: <<Msk:i\d+>> IntConstant 255
+  /// CHECK-DAG: <<And:i\d+>> And [<<Par>>,<<Msk>>]
+  /// CHECK-DAG: <<Abs:i\d+>> InvokeStaticOrDirect [<<And>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.zabs1(byte) instruction_simplifier (after)
+  /// CHECK-DAG: <<Par:b\d+>> ParameterValue
+  /// CHECK-DAG: <<Cnv:a\d+>> TypeConversion [<<Par>>]
+  /// CHECK-DAG:              Return [<<Cnv>>]
+  //
+  /// CHECK-START: int Main.zabs1(byte) instruction_simplifier (after)
+  /// CHECK-NOT:              InvokeStaticOrDirect
+  /// CHECK-NOT:              Abs
+  public static int zabs1(byte a) {
+    return Math.abs(a & 0xff);
+  }
+
+  /// CHECK-START: int Main.zabs2(short) instruction_simplifier (before)
+  /// CHECK-DAG: <<Par:s\d+>> ParameterValue
+  /// CHECK-DAG: <<Msk:i\d+>> IntConstant 65535
+  /// CHECK-DAG: <<And:i\d+>> And [<<Msk>>,<<Par>>]
+  /// CHECK-DAG: <<Abs:i\d+>> InvokeStaticOrDirect [<<And>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.zabs2(short) instruction_simplifier (after)
+  /// CHECK-DAG: <<Par:s\d+>> ParameterValue
+  /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Par>>]
+  /// CHECK-DAG:              Return [<<Cnv>>]
+  //
+  /// CHECK-START: int Main.zabs2(short) instruction_simplifier (after)
+  /// CHECK-NOT:              InvokeStaticOrDirect
+  /// CHECK-NOT:              Abs
+  public static int zabs2(short a) {
+    return Math.abs(a & 0xffff);
+  }
+
+  /// CHECK-START: int Main.zabs3(char) instruction_simplifier (before)
+  /// CHECK-DAG: <<Par:c\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> InvokeStaticOrDirect [<<Par>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.zabs3(char) instruction_simplifier (after)
+  /// CHECK-DAG: <<Par:c\d+>> ParameterValue
+  /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>]
+  /// CHECK-DAG:              Return [<<Abs>>]
+  //
+  /// CHECK-START: int Main.zabs3(char) instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Par:c\d+>> ParameterValue
+  /// CHECK-DAG:              Return [<<Par>>]
+  //
+  /// CHECK-START: int Main.zabs3(char) instruction_simplifier$after_inlining (after)
+  /// CHECK-NOT:              InvokeStaticOrDirect
+  /// CHECK-NOT:              Abs
+  public static int zabs3(char a) {
+    return Math.abs(a);
+  }
+
   public static void main(String[] args) {
+    expectEquals(10, absI(-10));
+    expectEquals(20, absI(20));
+    expectEquals(10L, absL(-10L));
+    expectEquals(20L, absL(20L));
     expectEquals(10, abs1(-10));
     expectEquals(20, abs1(20));
     expectEquals(10, abs2(-10));
@@ -167,6 +266,12 @@
     expectEquals(20, abs6((byte) 20));
     expectEquals(10L, abs7(-10L));
     expectEquals(20L, abs7(20L));
+    expectEquals(1, zabs1((byte) 1));
+    expectEquals(0xff, zabs1((byte) -1));
+    expectEquals(1, zabs2((short) 1));
+    expectEquals(0xffff, zabs2((short) -1));
+    expectEquals(1, zabs3((char) 1));
+    expectEquals(0xffff, zabs3((char) -1));
     System.out.println("passed");
   }
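
The zabs tests above rely on zero-extended values never being negative, so Math.abs on them is a no-op. A brief sketch of that reasoning for the byte case (class name hypothetical, not part of the test):

    // Hypothetical illustration, not part of the test.
    class ZeroExtendAbsSketch {
        static int zabsByte(byte a) {
            int zeroExtended = a & 0xff;    // always in [0, 255]
            return Math.abs(zeroExtended);  // provably equal to zeroExtended
        }
        public static void main(String[] args) {
            System.out.println(zabsByte((byte) -1));  // 255
            System.out.println(zabsByte((byte) 1));   // 1
        }
    }
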
 
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 86adb73..e9127a8 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -775,6 +775,9 @@
   TMP_DIR_OPTION="-Djava.io.tmpdir=/data/local/tmp"
 fi
 
+# We set DumpNativeStackOnSigQuit to false to avoid stressing libunwind.
+# b/27185632
+# b/24664297
 dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
                   $GDB_ARGS \
                   $FLAGS \
@@ -789,6 +792,7 @@
                   $DEBUGGER_OPTS \
                   $DALVIKVM_BOOT_OPT \
                   $TMP_DIR_OPTION \
+                  -XX:DumpNativeStackOnSigQuit:false \
                   -cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN $ARGS"
 
 # Remove whitespace.
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 734a600..0cfb661 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -174,56 +174,37 @@
 
   global _user_input_variants
   global run_all_configs
+  # These are the default variant-options we will use if nothing in the group is specified.
+  default_variants = {
+      'target': {'host', 'target'},
+      'pictest': {'npictest'},
+      'prebuild': {'prebuild'},
+      'cdex_level': {'cdex-fast'},
+      'jvmti': { 'no-jvmti'},
+      'compiler': {'optimizing',
+                   'jit',
+                   'interpreter',
+                   'interp-ac',
+                   'speed-profile'},
+      'relocate': {'no-relocate'},
+      'trace': {'ntrace'},
+      'gc': {'cms'},
+      'jni': {'checkjni'},
+      'image': {'picimage'},
+      'debuggable': {'ndebuggable'},
+      'run': {'debug'},
+      # address_sizes_target depends on the target so it is dealt with below.
+  }
+  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
+  # want to pick up if we pass --all.
+  default_variants_keys = default_variants.keys()
   if run_all_configs:
-    target_types = _user_input_variants['target']
-    _user_input_variants = VARIANT_TYPE_DICT
-    _user_input_variants['target'] = target_types
+    default_variants = VARIANT_TYPE_DICT
 
-  if not _user_input_variants['target']:
-    _user_input_variants['target'].add('host')
-    _user_input_variants['target'].add('target')
-
-  if not _user_input_variants['prebuild']: # Default
-    _user_input_variants['prebuild'].add('prebuild')
-
-  if not _user_input_variants['cdex_level']: # Default
-    _user_input_variants['cdex_level'].add('cdex-fast')
-
-  # By default only run without jvmti
-  if not _user_input_variants['jvmti']:
-    _user_input_variants['jvmti'].add('no-jvmti')
-
-  # By default we run all 'compiler' variants.
-  if not _user_input_variants['compiler'] and _user_input_variants['target'] != 'jvm':
-    _user_input_variants['compiler'].add('optimizing')
-    _user_input_variants['compiler'].add('jit')
-    _user_input_variants['compiler'].add('interpreter')
-    _user_input_variants['compiler'].add('interp-ac')
-    _user_input_variants['compiler'].add('speed-profile')
-
-  if not _user_input_variants['relocate']: # Default
-    _user_input_variants['relocate'].add('no-relocate')
-
-  if not _user_input_variants['trace']: # Default
-    _user_input_variants['trace'].add('ntrace')
-
-  if not _user_input_variants['gc']: # Default
-    _user_input_variants['gc'].add('cms')
-
-  if not _user_input_variants['jni']: # Default
-    _user_input_variants['jni'].add('checkjni')
-
-  if not _user_input_variants['image']: # Default
-    _user_input_variants['image'].add('picimage')
-
-  if not _user_input_variants['pictest']: # Default
-    _user_input_variants['pictest'].add('npictest')
-
-  if not _user_input_variants['debuggable']: # Default
-    _user_input_variants['debuggable'].add('ndebuggable')
-
-  if not _user_input_variants['run']: # Default
-    _user_input_variants['run'].add('debug')
+  for key in default_variants_keys:
+    if not _user_input_variants[key]:
+      _user_input_variants[key] = default_variants[key]
 
   _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
   if not _user_input_variants['address_sizes']: