Merge "Bounds check elimination."
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 86f445f..ae42136 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -99,7 +99,18 @@
   ifeq ($$(art_target_or_host),target)
     LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
   endif
+
+  # If multilib, need to provide stem.
   LOCAL_MULTILIB := $$(art_multilib)
+  ifeq ($$(art_multilib),both)
+    ifeq ($$(art_ndebug_or_debug),ndebug)
+      LOCAL_MODULE_STEM_32 := $$(art_executable)32
+      LOCAL_MODULE_STEM_64 := $$(art_executable)
+    else #debug
+      LOCAL_MODULE_STEM_32 := $$(art_executable)d32
+      LOCAL_MODULE_STEM_64 := $$(art_executable)d
+    endif
+  endif
 
   include external/libcxx/libcxx.mk
   ifeq ($$(art_target_or_host),target)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index bb634f7..340304a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -90,9 +90,6 @@
   runtime/base/stringprintf_test.cc \
   runtime/base/timing_logger_test.cc \
   runtime/base/unix_file/fd_file_test.cc \
-  runtime/base/unix_file/null_file_test.cc \
-  runtime/base/unix_file/random_access_file_utils_test.cc \
-  runtime/base/unix_file/string_file_test.cc \
   runtime/class_linker_test.cc \
   runtime/dex_file_test.cc \
   runtime/dex_file_verifier_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 523d143..9dbd2f4 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -34,6 +34,8 @@
 # $(1): compiler - default, optimizing or interpreter.
 # $(2): pic/no-pic
 # $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
+# $(4): wrapper, e.g., valgrind.
+# $(5): dex2oat suffix, e.g., valgrind requires 32 right now.
 # NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
 # run-test --no-image
 define create-core-oat-host-rules
@@ -44,13 +46,14 @@
   core_pic_infix :=
   core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
 
-  ifeq ($(1),default)
-    core_compile_options += --compiler-backend=Quick
+  # With the optimizing compiler, we want to rerun dex2oat whenever there is
+  # a dex2oat change to catch regressions early.
+  ifeq ($(ART_USE_OPTIMIZING_COMPILER), true)
+    core_dex2oat_dependency := $(DEX2OAT)
   endif
+
   ifeq ($(1),optimizing)
     core_compile_options += --compiler-backend=Optimizing
-    # With the optimizing compiler, we want to rerun dex2oat whenever there is
-    # a dex2oat change to catch regressions early.
     core_dex2oat_dependency := $(DEX2OAT)
     core_infix := -optimizing
   endif
@@ -78,25 +81,30 @@
     $$(error found $(2) expected pic or no-pic)
   endif
 
-  core_image_name := $($(3)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_IMG_SUFFIX)
-  core_oat_name := $($(3)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_OAT_SUFFIX)
+  core_image_name := $($(3)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_IMG_SUFFIX)
+  core_oat_name := $($(3)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_OAT_SUFFIX)
 
   # Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
   ifeq ($(3),)
-    HOST_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name)
+    $(4)HOST_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name)
   else
-    HOST_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
+    $(4)HOST_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
   endif
-  HOST_CORE_IMG_OUTS += $$(core_image_name)
-  HOST_CORE_OAT_OUTS += $$(core_oat_name)
+  $(4)HOST_CORE_IMG_OUTS += $$(core_image_name)
+  $(4)HOST_CORE_OAT_OUTS += $$(core_oat_name)
 
+  # If we have a wrapper, make the target phony.
+  ifneq ($(4),)
+.PHONY: $$(core_image_name)
+  endif
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
 $$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
 	@echo "host dex2oat: $$@ ($$?)"
 	@mkdir -p $$(dir $$@)
-	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
+	$$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
 	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
 	  $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
 	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
@@ -117,20 +125,29 @@
 endef  # create-core-oat-host-rules
 
 # $(1): compiler - default, optimizing or interpreter.
+# $(2): wrapper.
+# $(3): dex2oat suffix.
 define create-core-oat-host-rule-combination
-  $(call create-core-oat-host-rules,$(1),no-pic,)
-  $(call create-core-oat-host-rules,$(1),pic,)
+  $(call create-core-oat-host-rules,$(1),no-pic,,$(2),$(3))
+  $(call create-core-oat-host-rules,$(1),pic,,$(2),$(3))
 
   ifneq ($(HOST_PREFER_32_BIT),true)
-    $(call create-core-oat-host-rules,$(1),no-pic,2ND_)
-    $(call create-core-oat-host-rules,$(1),pic,2ND_)
+    $(call create-core-oat-host-rules,$(1),no-pic,2ND_,$(2),$(3))
+    $(call create-core-oat-host-rules,$(1),pic,2ND_,$(2),$(3))
   endif
 endef
 
-$(eval $(call create-core-oat-host-rule-combination,default))
-$(eval $(call create-core-oat-host-rule-combination,optimizing))
-$(eval $(call create-core-oat-host-rule-combination,interpreter))
+$(eval $(call create-core-oat-host-rule-combination,default,,))
+$(eval $(call create-core-oat-host-rule-combination,optimizing,,))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,,))
 
+valgrindHOST_CORE_IMG_OUTS :=
+valgrindHOST_CORE_OAT_OUTS :=
+$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32))
+$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32))
+
+valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
 
 define create-core-oat-target-rules
   core_compile_options :=
@@ -140,18 +157,22 @@
   core_pic_infix :=
   core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
 
-  ifeq ($(1),default)
-    core_compile_options += --compiler-backend=Quick
-  endif
-  ifeq ($(1),optimizing)
+  # With the optimizing compiler, we want to rerun dex2oat whenever there is
+  # a dex2oat change to catch regressions early.
+  ifeq ($(ART_USE_OPTIMIZING_COMPILER), true)
+    core_dex2oat_dependency := $(DEX2OAT)
     ifeq ($($(3)TARGET_ARCH),arm64)
       # TODO: Enable image generation on arm64 once the backend
       # is on par with other architectures.
-      core_compile_options += --compiler-backend=Quick
+      core_compile_options += --compiler-filter=interpret-only
+    endif
+  endif
+
+  ifeq ($(1),optimizing)
+    ifeq ($($(3)TARGET_ARCH),arm64)
+      core_compile_options += --compiler-filter=interpret-only
     else
       core_compile_options += --compiler-backend=Optimizing
-      # With the optimizing compiler, we want to rerun dex2oat whenever there is
-      # a dex2oat change to catch regressions early.
       core_dex2oat_dependency := $(DEX2OAT)
     endif
     core_infix := -optimizing
@@ -180,29 +201,34 @@
     $$(error found $(2) expected pic or no-pic)
   endif
 
-  core_image_name := $($(3)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_IMG_SUFFIX)
-  core_oat_name := $($(3)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_OAT_SUFFIX)
+  core_image_name := $($(3)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_IMG_SUFFIX)
+  core_oat_name := $($(3)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_OAT_SUFFIX)
 
   # Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
   ifeq ($(3),)
     ifdef TARGET_2ND_ARCH
-      TARGET_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name)
+      $(4)TARGET_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name)
     else
-      TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
+      $(4)TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
     endif
   else
-    TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
+    $(4)TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
   endif
-  TARGET_CORE_IMG_OUTS += $$(core_image_name)
-  TARGET_CORE_OAT_OUTS += $$(core_oat_name)
+  $(4)TARGET_CORE_IMG_OUTS += $$(core_image_name)
+  $(4)TARGET_CORE_OAT_OUTS += $$(core_oat_name)
 
+  # If we have a wrapper, make the target phony.
+  ifneq ($(4),)
+.PHONY: $$(core_image_name)
+  endif
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
 $$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
 	@echo "target dex2oat: $$@ ($$?)"
 	@mkdir -p $$(dir $$@)
-	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
+	$$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
 	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
 	  $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
 	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
@@ -223,16 +249,28 @@
 endef  # create-core-oat-target-rules
 
 # $(1): compiler - default, optimizing or interpreter.
+# $(2): wrapper.
+# $(3): dex2oat suffix.
 define create-core-oat-target-rule-combination
-  $(call create-core-oat-target-rules,$(1),no-pic,)
-  $(call create-core-oat-target-rules,$(1),pic,)
+  $(call create-core-oat-target-rules,$(1),no-pic,,$(2),$(3))
+  $(call create-core-oat-target-rules,$(1),pic,,$(2),$(3))
 
   ifdef TARGET_2ND_ARCH
-    $(call create-core-oat-target-rules,$(1),no-pic,2ND_)
-    $(call create-core-oat-target-rules,$(1),pic,2ND_)
+    $(call create-core-oat-target-rules,$(1),no-pic,2ND_,$(2),$(3))
+    $(call create-core-oat-target-rules,$(1),pic,2ND_,$(2),$(3))
   endif
 endef
 
-$(eval $(call create-core-oat-target-rule-combination,default))
-$(eval $(call create-core-oat-target-rule-combination,optimizing))
-$(eval $(call create-core-oat-target-rule-combination,interpreter))
+$(eval $(call create-core-oat-target-rule-combination,default,,))
+$(eval $(call create-core-oat-target-rule-combination,optimizing,,))
+$(eval $(call create-core-oat-target-rule-combination,interpreter,,))
+
+valgrindTARGET_CORE_IMG_OUTS :=
+valgrindTARGET_CORE_OAT_OUTS :=
+$(eval $(call create-core-oat-target-rule-combination,default,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
+
+valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
+
+valgrind-test-art-host-dex2oat: valgrind-test-art-host-dex2oat-host valgrind-test-art-host-dex2oat-target
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 3b3170e..a3fe8ad 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -606,7 +606,7 @@
 };
 std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
 
-// LIR fixup kinds for Arm
+// LIR fixup kinds for Arm and X86.
 enum FixupKind {
   kFixupNone,
   kFixupLabel,             // For labels we just adjust the offset.
@@ -624,6 +624,7 @@
   kFixupMovImmHST,         // kThumb2MovImm16HST.
   kFixupAlign4,            // Align to 4-byte boundary.
   kFixupA53Erratum835769,  // Cortex A53 Erratum 835769.
+  kFixupSwitchTable,       // X86_64 packed switch table.
 };
 std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 117d8f0..36d065f 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -156,6 +156,13 @@
     case Instruction::USHR_INT:
     case Instruction::USHR_INT_2ADDR:
       return true;
+    case Instruction::CONST:
+    case Instruction::CONST_4:
+    case Instruction::CONST_16:
+      if ((value >> 16) == 0) {
+        return true;  // movw, 16-bit unsigned.
+      }
+      FALLTHROUGH_INTENDED;
     case Instruction::AND_INT:
     case Instruction::AND_INT_2ADDR:
     case Instruction::AND_INT_LIT16:
@@ -899,12 +906,12 @@
  */
 LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                   OpSize size) {
-  LIR* load = NULL;
-  ArmOpcode opcode = kThumbBkpt;
+  LIR* load = nullptr;
+  ArmOpcode opcode16 = kThumbBkpt;  // 16-bit Thumb opcode.
+  ArmOpcode opcode32 = kThumbBkpt;  // 32-bit Thumb2 opcode.
   bool short_form = false;
-  bool thumb2Form = (displacement < 4092 && displacement >= 0);
   bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
-  int encoded_disp = displacement;
+  int scale = 0;  // Used for opcode16 and some indexed loads.
   bool already_generated = false;
   switch (size) {
     case kDouble:
@@ -932,57 +939,45 @@
         already_generated = true;
         break;
       }
+      DCHECK_EQ((displacement & 0x3), 0);
+      scale = 2;
       if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
           (displacement >= 0)) {
         short_form = true;
-        encoded_disp >>= 2;
-        opcode = kThumbLdrPcRel;
+        opcode16 = kThumbLdrPcRel;
       } else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
                  (displacement >= 0)) {
         short_form = true;
-        encoded_disp >>= 2;
-        opcode = kThumbLdrSpRel;
-      } else if (all_low && displacement < 128 && displacement >= 0) {
-        DCHECK_EQ((displacement & 0x3), 0);
-        short_form = true;
-        encoded_disp >>= 2;
-        opcode = kThumbLdrRRI5;
-      } else if (thumb2Form) {
-        short_form = true;
-        opcode = kThumb2LdrRRI12;
+        opcode16 = kThumbLdrSpRel;
+      } else {
+        short_form = all_low && (displacement >> (5 + scale)) == 0;
+        opcode16 = kThumbLdrRRI5;
+        opcode32 = kThumb2LdrRRI12;
       }
       break;
     case kUnsignedHalf:
-      if (all_low && displacement < 64 && displacement >= 0) {
-        DCHECK_EQ((displacement & 0x1), 0);
-        short_form = true;
-        encoded_disp >>= 1;
-        opcode = kThumbLdrhRRI5;
-      } else if (displacement < 4092 && displacement >= 0) {
-        short_form = true;
-        opcode = kThumb2LdrhRRI12;
-      }
+      DCHECK_EQ((displacement & 0x1), 0);
+      scale = 1;
+      short_form = all_low && (displacement >> (5 + scale)) == 0;
+      opcode16 = kThumbLdrhRRI5;
+      opcode32 = kThumb2LdrhRRI12;
       break;
     case kSignedHalf:
-      if (thumb2Form) {
-        short_form = true;
-        opcode = kThumb2LdrshRRI12;
-      }
+      DCHECK_EQ((displacement & 0x1), 0);
+      scale = 1;
+      DCHECK_EQ(opcode16, kThumbBkpt);  // Not available.
+      opcode32 = kThumb2LdrshRRI12;
       break;
     case kUnsignedByte:
-      if (all_low && displacement < 32 && displacement >= 0) {
-        short_form = true;
-        opcode = kThumbLdrbRRI5;
-      } else if (thumb2Form) {
-        short_form = true;
-        opcode = kThumb2LdrbRRI12;
-      }
+      DCHECK_EQ(scale, 0);  // Keep scale = 0.
+      short_form = all_low && (displacement >> (5 + scale)) == 0;
+      opcode16 = kThumbLdrbRRI5;
+      opcode32 = kThumb2LdrbRRI12;
       break;
     case kSignedByte:
-      if (thumb2Form) {
-        short_form = true;
-        opcode = kThumb2LdrsbRRI12;
-      }
+      DCHECK_EQ(scale, 0);  // Keep scale = 0.
+      DCHECK_EQ(opcode16, kThumbBkpt);  // Not available.
+      opcode32 = kThumb2LdrsbRRI12;
       break;
     default:
       LOG(FATAL) << "Bad size: " << size;
@@ -990,12 +985,33 @@
 
   if (!already_generated) {
     if (short_form) {
-      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), encoded_disp);
+      load = NewLIR3(opcode16, r_dest.GetReg(), r_base.GetReg(), displacement >> scale);
+    } else if ((displacement >> 12) == 0) {  // Thumb2 form.
+      load = NewLIR3(opcode32, r_dest.GetReg(), r_base.GetReg(), displacement);
+    } else if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) &&
+        InexpensiveConstantInt(displacement & ~0x00000fff, Instruction::ADD_INT)) {
+      // In this case, using LoadIndexed would emit 3 insns (movw+movt+ldr) but we can
+      // actually do it in two because we know that the kOpAdd is a single insn. On the
+      // other hand, we introduce an extra dependency, so this is not necessarily faster.
+      if (opcode16 != kThumbBkpt && r_dest.Low8() &&
+          InexpensiveConstantInt(displacement & ~(0x1f << scale), Instruction::ADD_INT)) {
+        // We can use the 16-bit Thumb opcode for the load.
+        OpRegRegImm(kOpAdd, r_dest, r_base, displacement & ~(0x1f << scale));
+        load = NewLIR3(opcode16, r_dest.GetReg(), r_dest.GetReg(), (displacement >> scale) & 0x1f);
+      } else {
+        DCHECK_NE(opcode32, kThumbBkpt);
+        OpRegRegImm(kOpAdd, r_dest, r_base, displacement & ~0x00000fff);
+        load = NewLIR3(opcode32, r_dest.GetReg(), r_dest.GetReg(), displacement & 0x00000fff);
+      }
     } else {
+      if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) ||
+          (scale != 0 && InexpensiveConstantInt(displacement, Instruction::CONST))) {
+        scale = 0;  // Prefer unscaled indexing if the same number of insns.
+      }
       RegStorage reg_offset = AllocTemp();
-      LoadConstant(reg_offset, encoded_disp);
+      LoadConstant(reg_offset, displacement >> scale);
       DCHECK(!r_dest.IsFloat());
-      load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
+      load = LoadBaseIndexed(r_base, reg_offset, r_dest, scale, size);
       FreeTemp(reg_offset);
     }
   }
@@ -1041,12 +1057,12 @@
 
 LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                    OpSize size) {
-  LIR* store = NULL;
-  ArmOpcode opcode = kThumbBkpt;
+  LIR* store = nullptr;
+  ArmOpcode opcode16 = kThumbBkpt;  // 16-bit Thumb opcode.
+  ArmOpcode opcode32 = kThumbBkpt;  // 32-bit Thumb2 opcode.
   bool short_form = false;
-  bool thumb2Form = (displacement < 4092 && displacement >= 0);
   bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();
-  int encoded_disp = displacement;
+  int scale = 0;  // Used for opcode16 and some indexed stores.
   bool already_generated = false;
   switch (size) {
     case kDouble:
@@ -1078,53 +1094,67 @@
         already_generated = true;
         break;
       }
+      DCHECK_EQ((displacement & 0x3), 0);
+      scale = 2;
       if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
         short_form = true;
-        encoded_disp >>= 2;
-        opcode = kThumbStrSpRel;
-      } else if (all_low && displacement < 128 && displacement >= 0) {
-        DCHECK_EQ((displacement & 0x3), 0);
-        short_form = true;
-        encoded_disp >>= 2;
-        opcode = kThumbStrRRI5;
-      } else if (thumb2Form) {
-        short_form = true;
-        opcode = kThumb2StrRRI12;
+        opcode16 = kThumbStrSpRel;
+      } else {
+        short_form = all_low && (displacement >> (5 + scale)) == 0;
+        opcode16 = kThumbStrRRI5;
+        opcode32 = kThumb2StrRRI12;
       }
       break;
     case kUnsignedHalf:
     case kSignedHalf:
-      if (all_low && displacement < 64 && displacement >= 0) {
-        DCHECK_EQ((displacement & 0x1), 0);
-        short_form = true;
-        encoded_disp >>= 1;
-        opcode = kThumbStrhRRI5;
-      } else if (thumb2Form) {
-        short_form = true;
-        opcode = kThumb2StrhRRI12;
-      }
+      DCHECK_EQ((displacement & 0x1), 0);
+      scale = 1;
+      short_form = all_low && (displacement >> (5 + scale)) == 0;
+      opcode16 = kThumbStrhRRI5;
+      opcode32 = kThumb2StrhRRI12;
       break;
     case kUnsignedByte:
     case kSignedByte:
-      if (all_low && displacement < 32 && displacement >= 0) {
-        short_form = true;
-        opcode = kThumbStrbRRI5;
-      } else if (thumb2Form) {
-        short_form = true;
-        opcode = kThumb2StrbRRI12;
-      }
+      DCHECK_EQ(scale, 0);  // Keep scale = 0.
+      short_form = all_low && (displacement >> (5 + scale)) == 0;
+      opcode16 = kThumbStrbRRI5;
+      opcode32 = kThumb2StrbRRI12;
       break;
     default:
       LOG(FATAL) << "Bad size: " << size;
   }
   if (!already_generated) {
     if (short_form) {
-      store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), encoded_disp);
-    } else {
+      store = NewLIR3(opcode16, r_src.GetReg(), r_base.GetReg(), displacement >> scale);
+    } else if ((displacement >> 12) == 0) {
+      store = NewLIR3(opcode32, r_src.GetReg(), r_base.GetReg(), displacement);
+    } else if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) &&
+        InexpensiveConstantInt(displacement & ~0x00000fff, Instruction::ADD_INT)) {
+      // In this case, using StoreIndexed would emit 3 insns (movw+movt+str) but we can
+      // actually do it in two because we know that the kOpAdd is a single insn. On the
+      // other hand, we introduce an extra dependency, so this is not necessarily faster.
       RegStorage r_scratch = AllocTemp();
-      LoadConstant(r_scratch, encoded_disp);
+      if (opcode16 != kThumbBkpt && r_src.Low8() && r_scratch.Low8() &&
+          InexpensiveConstantInt(displacement & ~(0x1f << scale), Instruction::ADD_INT)) {
+        // We can use the 16-bit Thumb opcode for the store.
+        OpRegRegImm(kOpAdd, r_scratch, r_base, displacement & ~(0x1f << scale));
+        store = NewLIR3(opcode16, r_src.GetReg(), r_scratch.GetReg(),
+                        (displacement >> scale) & 0x1f);
+      } else {
+        DCHECK_NE(opcode32, kThumbBkpt);
+        OpRegRegImm(kOpAdd, r_scratch, r_base, displacement & ~0x00000fff);
+        store = NewLIR3(opcode32, r_src.GetReg(), r_scratch.GetReg(), displacement & 0x00000fff);
+      }
+      FreeTemp(r_scratch);
+    } else {
+      if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) ||
+          (scale != 0 && InexpensiveConstantInt(displacement, Instruction::CONST))) {
+        scale = 0;  // Prefer unscaled indexing if the same number of insns.
+      }
+      RegStorage r_scratch = AllocTemp();
+      LoadConstant(r_scratch, displacement >> scale);
       DCHECK(!r_src.IsFloat());
-      store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
+      store = StoreBaseIndexed(r_base, r_scratch, r_src, scale, size);
       FreeTemp(r_scratch);
     }
   }
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 58bcee2..066041c 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -538,9 +538,12 @@
         bx_offset = tab_rec->anchor->offset + 4;
         break;
       case kX86:
-      case kX86_64:
         bx_offset = 0;
         break;
+      case kX86_64:
+        // RIP relative to switch table.
+        bx_offset = tab_rec->offset;
+        break;
       case kArm64:
       case kMips:
         bx_offset = tab_rec->anchor->offset;
@@ -775,6 +778,10 @@
         ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
     native_gc_map_builder.AddEntry(native_offset, references);
   }
+
+  // Maybe not necessary, but this could help prevent errors where we access the verified method
+  // after it has been deleted.
+  mir_graph_->GetCurrentDexCompilationUnit()->ClearVerifiedMethod();
 }
 
 /* Determine the offset of each literal field */
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 84d68d2..ad2ed01 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -553,7 +553,7 @@
 }
 
 static bool NeedsRex(int32_t raw_reg) {
-  return RegStorage::RegNum(raw_reg) > 7;
+  return raw_reg != kRIPReg && RegStorage::RegNum(raw_reg) > 7;
 }
 
 static uint8_t LowRegisterBits(int32_t raw_reg) {
@@ -689,7 +689,13 @@
           entry->opcode != kX86Lea32RM && entry->opcode != kX86Lea64RM) {
         DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), UINT64_C(0)) << entry->name;
       }
-      size += IS_SIMM8(displacement) ? 1 : 4;
+      if (raw_base == kRIPReg) {
+        DCHECK(cu_->target64) <<
+          "Attempt to use 64-bit RIP addressing with instruction " << entry->name;
+        size += 4;
+      } else {
+        size += IS_SIMM8(displacement) ? 1 : 4;
+      }
     }
   }
   size += entry->skeleton.immediate_bytes;
@@ -1022,14 +1028,24 @@
 
 void X86Mir2Lir::EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp) {
   DCHECK_LT(reg_or_opcode, 8);
-  DCHECK_LT(base, 8);
-  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
-  code_buffer_.push_back(modrm);
-  if (base == rs_rX86_SP_32.GetRegNum()) {
-    // Special SIB for SP base
-    code_buffer_.push_back(0 << 6 | rs_rX86_SP_32.GetRegNum() << 3 | rs_rX86_SP_32.GetRegNum());
+  if (base == kRIPReg) {
+    // x86_64 RIP handling: always 32 bit displacement.
+    uint8_t modrm = (0x0 << 6) | (reg_or_opcode << 3) | 0x5;
+    code_buffer_.push_back(modrm);
+    code_buffer_.push_back(disp & 0xFF);
+    code_buffer_.push_back((disp >> 8) & 0xFF);
+    code_buffer_.push_back((disp >> 16) & 0xFF);
+    code_buffer_.push_back((disp >> 24) & 0xFF);
+  } else {
+    DCHECK_LT(base, 8);
+    uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
+    code_buffer_.push_back(modrm);
+    if (base == rs_rX86_SP_32.GetRegNum()) {
+      // Special SIB for SP base
+      code_buffer_.push_back(0 << 6 | rs_rX86_SP_32.GetRegNum() << 3 | rs_rX86_SP_32.GetRegNum());
+    }
+    EmitDisp(base, disp);
   }
-  EmitDisp(base, disp);
 }
 
 void X86Mir2Lir::EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index,
@@ -1141,7 +1157,7 @@
   CheckValidByteRegister(entry, raw_reg);
   EmitPrefixAndOpcode(entry, raw_reg, NO_REG, raw_base);
   uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t low_base = LowRegisterBits(raw_base);
+  uint8_t low_base = (raw_base == kRIPReg) ? raw_base : LowRegisterBits(raw_base);
   EmitModrmDisp(low_reg, low_base, disp);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
@@ -1758,12 +1774,29 @@
             LIR *target_lir = lir->target;
             DCHECK(target_lir != NULL);
             CodeOffset target = target_lir->offset;
-            lir->operands[2] = target;
-            int newSize = GetInsnSize(lir);
-            if (newSize != lir->flags.size) {
-              lir->flags.size = newSize;
-              res = kRetryAll;
+            // Handle 64 bit RIP addressing.
+            if (lir->operands[1] == kRIPReg) {
+              // Offset is relative to next instruction.
+              lir->operands[2] = target - (lir->offset + lir->flags.size);
+            } else {
+              lir->operands[2] = target;
+              int newSize = GetInsnSize(lir);
+              if (newSize != lir->flags.size) {
+                lir->flags.size = newSize;
+                res = kRetryAll;
+              }
             }
+          } else if (lir->flags.fixup == kFixupSwitchTable) {
+            DCHECK(cu_->target64);
+            DCHECK_EQ(lir->opcode, kX86Lea64RM) << "Unknown instruction: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
+            DCHECK_EQ(lir->operands[1], static_cast<int>(kRIPReg));
+            // Grab the target offset from the saved data.
+            Mir2Lir::EmbeddedData* tab_rec =
+                reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(lir->operands[4]));
+            CodeOffset target = tab_rec->offset;
+            // Handle 64 bit RIP addressing.
+            // Offset is relative to next instruction.
+            lir->operands[2] = target - (lir->offset + lir->flags.size);
           }
           break;
       }
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index be10d93..544ac3b 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -142,25 +142,7 @@
 
   // Get the switch value
   rl_src = LoadValue(rl_src, kCoreReg);
-  // NewLIR0(kX86Bkpt);
 
-  // Materialize a pointer to the switch table
-  RegStorage start_of_method_reg;
-  if (base_of_code_ != nullptr) {
-    // We can use the saved value.
-    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
-    if (rl_method.wide) {
-      rl_method = LoadValueWide(rl_method, kCoreReg);
-    } else {
-      rl_method = LoadValue(rl_method, kCoreReg);
-    }
-    start_of_method_reg = rl_method.reg;
-    store_method_addr_used_ = true;
-  } else {
-    start_of_method_reg = AllocTempRef();
-    NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
-  }
-  DCHECK_EQ(start_of_method_reg.Is64Bit(), cu_->target64);
   int low_key = s4FromSwitchData(&table[2]);
   RegStorage keyReg;
   // Remove the bias, if necessary
@@ -170,19 +152,49 @@
     keyReg = AllocTemp();
     OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
   }
+
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size - 1);
   LIR* branch_over = OpCondBranch(kCondHi, NULL);
 
-  // Load the displacement from the switch table
-  RegStorage disp_reg = AllocTemp();
-  NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
-          2, WrapPointer(tab_rec));
-  // Add displacement to start of method
-  OpRegReg(kOpAdd, start_of_method_reg, cu_->target64 ? As64BitReg(disp_reg) : disp_reg);
+  RegStorage addr_for_jump;
+  if (cu_->target64) {
+    RegStorage table_base = AllocTempWide();
+    // Load the address of the table into table_base.
+    LIR* lea = RawLIR(current_dalvik_offset_, kX86Lea64RM, table_base.GetReg(), kRIPReg,
+                      256, 0, WrapPointer(tab_rec));
+    lea->flags.fixup = kFixupSwitchTable;
+    AppendLIR(lea);
+
+    // Load the offset from the table out of the table.
+    addr_for_jump = AllocTempWide();
+    NewLIR5(kX86MovsxdRA, addr_for_jump.GetReg(), table_base.GetReg(), keyReg.GetReg(), 2, 0);
+
+    // Add the offset from the table to the table base.
+    OpRegReg(kOpAdd, addr_for_jump, table_base);
+  } else {
+    // Materialize a pointer to the switch table.
+    RegStorage start_of_method_reg;
+    if (base_of_code_ != nullptr) {
+      // We can use the saved value.
+      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
+      rl_method = LoadValue(rl_method, kCoreReg);
+      start_of_method_reg = rl_method.reg;
+      store_method_addr_used_ = true;
+    } else {
+      start_of_method_reg = AllocTempRef();
+      NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
+    }
+    // Load the displacement from the switch table.
+    addr_for_jump = AllocTemp();
+    NewLIR5(kX86PcRelLoadRA, addr_for_jump.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
+            2, WrapPointer(tab_rec));
+    // Add displacement to start of method.
+    OpRegReg(kOpAdd, addr_for_jump, start_of_method_reg);
+  }
+
   // ..and go!
-  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
-  tab_rec->anchor = switch_branch;
+  tab_rec->anchor = NewLIR1(kX86JmpR, addr_for_jump.GetReg());
 
   /* branch_over target here */
   LIR* target = NewLIR0(kPseudoTargetLabel);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 80cdc83..85ab92b 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1289,6 +1289,18 @@
 }
 
 LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+  if (cu_->target64) {
+    // We can do this directly using RIP addressing.
+    // We don't know the proper offset for the value, so pick one that will force
+    // a 4 byte offset.  We will fix this up in the assembler later to have the
+    // right value.
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
+    LIR* res = NewLIR3(kX86Mov32RM, reg.GetReg(), kRIPReg, 256);
+    res->target = target;
+    res->flags.fixup = kFixupLoad;
+    return res;
+  }
+
   CHECK(base_of_code_ != nullptr);
 
   // Address the start of the method
@@ -1309,7 +1321,6 @@
                     0, 0, target);
   res->target = target;
   res->flags.fixup = kFixupLoad;
-  store_method_addr_used_ = true;
   return res;
 }
 
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 998aeff..ae80e9f 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -206,7 +206,7 @@
   RegStorage::InvalidReg(),  // kSelf - Thread pointer.
   RegStorage::InvalidReg(),  // kSuspend - Used to reduce suspend checks for some targets.
   RegStorage::InvalidReg(),  // kLr - no register as the return address is pushed on entry.
-  RegStorage::InvalidReg(),  // kPc - TODO: RIP based addressing.
+  RegStorage(kRIPReg),       // kPc
   rs_rX86_SP_32,             // kSp
   rs_rDI,                    // kArg0
   rs_rSI,                    // kArg1
@@ -662,6 +662,12 @@
     xp_reg_info->SetIsTemp(true);
   }
 
+  // Special Handling for x86_64 RIP addressing.
+  if (cu_->target64) {
+    RegisterInfo* info = new (arena_) RegisterInfo(RegStorage(kRIPReg), kEncodeNone);
+    reginfo_map_[kRIPReg] = info;
+  }
+
   // Alias single precision xmm to double xmms.
   // TODO: as needed, add larger vector sizes - alias all to the largest.
   for (RegisterInfo* info : reg_pool_->sp_regs_) {
@@ -1608,9 +1614,6 @@
 }
 
 void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
-  // The literal pool needs position independent logic.
-  store_method_addr_used_ = true;
-
   // To deal with correct memory ordering, reverse order of constants.
   int32_t constants[4];
   constants[3] = mir->dalvikInsn.arg[0];
@@ -1624,20 +1627,28 @@
     data_target = AddVectorLiteral(constants);
   }
 
-  // Address the start of the method.
-  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
-  if (rl_method.wide) {
-    rl_method = LoadValueWide(rl_method, kCoreReg);
-  } else {
-    rl_method = LoadValue(rl_method, kCoreReg);
-  }
-
   // Load the proper value from the literal area.
   // We don't know the proper offset for the value, so pick one that will force
-  // 4 byte offset.  We will fix this up in the assembler later to have the right
-  // value.
+  // 4 byte offset.  We will fix this up in the assembler later to have the
+  // right value.
+  LIR* load;
   ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR *load = NewLIR3(opcode, reg, rl_method.reg.GetReg(), 256 /* bogus */);
+  if (cu_->target64) {
+    load = NewLIR3(opcode, reg, kRIPReg, 256 /* bogus */);
+  } else {
+    // Address the start of the method.
+    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
+    if (rl_method.wide) {
+      rl_method = LoadValueWide(rl_method, kCoreReg);
+    } else {
+      rl_method = LoadValue(rl_method, kCoreReg);
+    }
+
+    load = NewLIR3(opcode, reg, rl_method.reg.GetReg(), 256 /* bogus */);
+
+    // The literal pool needs position independent logic.
+    store_method_addr_used_ = true;
+  }
   load->flags.fixup = kFixupLoad;
   load->target = data_target;
 }
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index ad3222c..3b58698 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -570,32 +570,36 @@
     if (is_fp) {
       DCHECK(r_dest.IsDouble());
       if (value == 0) {
-        return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
-      } else if (base_of_code_ != nullptr) {
+        return NewLIR2(kX86XorpdRR, low_reg_val, low_reg_val);
+      } else if (base_of_code_ != nullptr || cu_->target64) {
         // We will load the value from the literal area.
         LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
         if (data_target == NULL) {
           data_target = AddWideData(&literal_list_, val_lo, val_hi);
         }
 
-        // Address the start of the method
-        RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
-        if (rl_method.wide) {
-          rl_method = LoadValueWide(rl_method, kCoreReg);
-        } else {
-          rl_method = LoadValue(rl_method, kCoreReg);
-        }
-
         // Load the proper value from the literal area.
-        // We don't know the proper offset for the value, so pick one that will force
-        // 4 byte offset.  We will fix this up in the assembler later to have the right
-        // value.
+        // We don't know the proper offset for the value, so pick one that
+        // will force a 4 byte offset.  We will fix this up in the assembler
+        // later to have the right value.
         ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-        res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
-                           kDouble, kNotVolatile);
+        if (cu_->target64) {
+          res = NewLIR3(kX86MovsdRM, low_reg_val, kRIPReg, 256 /* bogus */);
+        } else {
+          // Address the start of the method.
+          RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
+          if (rl_method.wide) {
+            rl_method = LoadValueWide(rl_method, kCoreReg);
+          } else {
+            rl_method = LoadValue(rl_method, kCoreReg);
+          }
+
+          res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
+                             kDouble, kNotVolatile);
+          store_method_addr_used_ = true;
+        }
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
-        store_method_addr_used_ = true;
       } else {
         if (r_dest.IsPair()) {
           if (val_lo == 0) {
@@ -960,12 +964,14 @@
     curr_bb = iter.Next();
   }
 
-  // Did we need a pointer to the method code?
+  // Did we need a pointer to the method code?  Not in 64 bit mode.
+  base_of_code_ = nullptr;
+
+  // store_method_addr_ must be false for x86_64, since RIP addressing is used.
+  CHECK(!(cu_->target64 && store_method_addr_));
   if (store_method_addr_) {
-    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempBackend, cu_->target64 == true);
+    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempBackend, false);
     DCHECK(base_of_code_ != nullptr);
-  } else {
-    base_of_code_ = nullptr;
   }
 }
 
@@ -994,19 +1000,22 @@
       AnalyzeFPInstruction(opcode, bb, mir);
       break;
     case kMirOpConstVector:
-      store_method_addr_ = true;
+      if (!cu_->target64) {
+        store_method_addr_ = true;
+      }
       break;
     case kMirOpPackedMultiply:
     case kMirOpPackedShiftLeft:
     case kMirOpPackedSignedShiftRight:
-    case kMirOpPackedUnsignedShiftRight: {
-      // Byte emulation requires constants from the literal pool.
-      OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-      if (opsize == kSignedByte || opsize == kUnsignedByte) {
-        store_method_addr_ = true;
+    case kMirOpPackedUnsignedShiftRight:
+      if (!cu_->target64) {
+        // Byte emulation requires constants from the literal pool.
+        OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
+        if (opsize == kSignedByte || opsize == kUnsignedByte) {
+          store_method_addr_ = true;
+        }
       }
       break;
-    }
     default:
       // Ignore the rest.
       break;
@@ -1016,6 +1025,7 @@
 void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
   // Looking for
   // - Do we need a pointer to the code (used for packed switches and double lits)?
+  // 64 bit uses RIP addressing instead.
 
   switch (opcode) {
     // Instructions referencing doubles.
@@ -1038,7 +1048,9 @@
     // Packed switches and array fills need a pointer to the base of the method.
     case Instruction::FILL_ARRAY_DATA:
     case Instruction::PACKED_SWITCH:
-      store_method_addr_ = true;
+      if (!cu_->target64) {
+        store_method_addr_ = true;
+      }
       break;
     case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_STATIC_RANGE:
@@ -1115,7 +1127,8 @@
 
 void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
   UNUSED(opcode, bb);
-  // For now this is only actual for x86-32.
+
+  // 64 bit RIP addressing doesn't need store_method_addr_ set.
   if (cu_->target64) {
     return;
   }
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 76a67c4..3e0a852 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -217,6 +217,9 @@
   xr14 = RegStorage::k128BitSolo | 14,
   xr15 = RegStorage::k128BitSolo | 15,
 
+  // Special value for RIP 64 bit addressing.
+  kRIPReg = 255,
+
   // TODO: as needed, add 256, 512 and 1024-bit xmm views.
 };
 
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 4929b5b..932a532 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -84,6 +84,15 @@
   return (it != verified_methods_.end()) ? it->second : nullptr;
 }
 
+void VerificationResults::RemoveVerifiedMethod(MethodReference ref) {
+  WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
+  auto it = verified_methods_.find(ref);
+  if (it != verified_methods_.end()) {
+    delete it->second;
+    verified_methods_.erase(it);
+  }
+}
+
 void VerificationResults::AddRejectedClass(ClassReference ref) {
   {
     WriterMutexLock mu(Thread::Current(), rejected_classes_lock_);
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 0e7923f..7fc2a23 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -48,6 +48,7 @@
 
     const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
         LOCKS_EXCLUDED(verified_methods_lock_);
+    void RemoveVerifiedMethod(MethodReference ref) LOCKS_EXCLUDED(verified_methods_lock_);
 
     void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
     bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ab9f41a..e427471 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -23,6 +23,10 @@
 #include <vector>
 #include <unistd.h>
 
+#ifndef __APPLE__
+#include <malloc.h>  // For mallinfo
+#endif
+
 #include "base/stl_util.h"
 #include "base/timing_logger.h"
 #include "class_linker.h"
@@ -497,6 +501,7 @@
                                 TimingLogger* timings) {
   DCHECK(!Runtime::Current()->IsStarted());
   std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+  VLOG(compiler) << "Before precompile " << GetMemoryUsageString();
   PreCompile(class_loader, dex_files, thread_pool.get(), timings);
   Compile(class_loader, dex_files, thread_pool.get(), timings);
   if (dump_stats_) {
@@ -593,20 +598,25 @@
 void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                                 ThreadPool* thread_pool, TimingLogger* timings) {
   LoadImageClasses(timings);
+  VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString();
 
   Resolve(class_loader, dex_files, thread_pool, timings);
+  VLOG(compiler) << "Resolve: " << GetMemoryUsageString();
 
   if (!compiler_options_->IsVerificationEnabled()) {
-    LOG(INFO) << "Verify none mode specified, skipping verification.";
+    VLOG(compiler) << "Verify none mode specified, skipping verification.";
     SetVerified(class_loader, dex_files, thread_pool, timings);
     return;
   }
 
   Verify(class_loader, dex_files, thread_pool, timings);
+  VLOG(compiler) << "Verify: " << GetMemoryUsageString();
 
   InitializeClasses(class_loader, dex_files, thread_pool, timings);
+  VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString();
 
   UpdateImageClasses(timings);
+  VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString();
 }
 
 bool CompilerDriver::IsImageClass(const char* descriptor) const {
@@ -2002,6 +2012,7 @@
     CHECK(dex_file != nullptr);
     CompileDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
   }
+  VLOG(compiler) << "Compile: " << GetMemoryUsageString();
 }
 
 void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
@@ -2128,6 +2139,7 @@
                                    bool compilation_enabled) {
   CompiledMethod* compiled_method = nullptr;
   uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
+  MethodReference method_ref(&dex_file, method_idx);
 
   if ((access_flags & kAccNative) != 0) {
     // Are we interpreting only and have support for generic JNI down calls?
@@ -2141,7 +2153,6 @@
   } else if ((access_flags & kAccAbstract) != 0) {
     // Abstract methods don't have code.
   } else {
-    MethodReference method_ref(&dex_file, method_idx);
     bool compile = compilation_enabled &&
                    verification_results_->IsCandidateForCompilation(method_ref, access_flags);
     if (compile) {
@@ -2178,16 +2189,18 @@
     // When compiling with PIC, there should be zero non-relative linker patches
     CHECK(!compile_pic || non_relative_linker_patch_count == 0u);
 
-    MethodReference ref(&dex_file, method_idx);
-    DCHECK(GetCompiledMethod(ref) == nullptr) << PrettyMethod(method_idx, dex_file);
+    DCHECK(GetCompiledMethod(method_ref) == nullptr) << PrettyMethod(method_idx, dex_file);
     {
       MutexLock mu(self, compiled_methods_lock_);
-      compiled_methods_.Put(ref, compiled_method);
+      compiled_methods_.Put(method_ref, compiled_method);
       non_relative_linker_patch_count_ += non_relative_linker_patch_count;
     }
-    DCHECK(GetCompiledMethod(ref) != nullptr) << PrettyMethod(method_idx, dex_file);
+    DCHECK(GetCompiledMethod(method_ref) != nullptr) << PrettyMethod(method_idx, dex_file);
   }
 
+  // Done compiling, delete the verified method to reduce native memory usage.
+  verification_results_->RemoveVerifiedMethod(method_ref);
+
   if (self->IsExceptionPending()) {
     ScopedObjectAccess soa(self);
     LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n"
@@ -2337,4 +2350,21 @@
   }
   return !compile;
 }
+
+std::string CompilerDriver::GetMemoryUsageString() const {
+  std::ostringstream oss;
+  const ArenaPool* arena_pool = GetArenaPool();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  oss << "arena alloc=" << PrettySize(arena_pool->GetBytesAllocated());
+  oss << " java alloc=" << PrettySize(heap->GetBytesAllocated());
+#ifdef HAVE_MALLOC_H
+  struct mallinfo info = mallinfo();
+  const size_t allocated_space = static_cast<size_t>(info.uordblks);
+  const size_t free_space = static_cast<size_t>(info.fordblks);
+  oss << " native alloc=" << PrettySize(allocated_space) << " free="
+      << PrettySize(free_space);
+#endif
+  return oss.str();
+}
+
 }  // namespace art
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d837dbc..615e0d0 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -39,6 +39,7 @@
 #include "thread_pool.h"
 #include "utils/arena_allocator.h"
 #include "utils/dedupe_set.h"
+#include "dex/verified_method.h"
 
 namespace art {
 
@@ -398,6 +399,9 @@
   // Should the compiler run on this method given profile information?
   bool SkipCompilation(const std::string& method_name);
 
+  // Get memory usage during compilation.
+  std::string GetMemoryUsageString() const;
+
  private:
   // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
   // The only external contract is that unresolved method has flags 0 and resolved non-0.
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 84f5799..03ae489 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -102,6 +102,10 @@
     return verified_method_;
   }
 
+  void ClearVerifiedMethod() {
+    verified_method_ = nullptr;
+  }
+
   const std::string& GetSymbol();
 
  private:
@@ -117,7 +121,7 @@
   const uint16_t class_def_idx_;
   const uint32_t dex_method_idx_;
   const uint32_t access_flags_;
-  const VerifiedMethod* const verified_method_;
+  const VerifiedMethod* verified_method_;
 
   std::string symbol_;
 };
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 2ffbd10..5488e2f 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -46,7 +46,11 @@
     EXPECT_EQ(expected_value, ef->FindDynamicSymbolAddress(symbol_name)); \
   } while (false)
 
+#if defined(ART_USE_OPTIMIZING_COMPILER)
+TEST_F(ElfWriterTest, DISABLED_dlsym) {
+#else
 TEST_F(ElfWriterTest, dlsym) {
+#endif
   std::string elf_location;
   if (IsHost()) {
     const char* host_dir = getenv("ANDROID_HOST_OUT");
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 3b1d914..ab5c6c7 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -77,6 +77,7 @@
     Thread::Current()->TransitionFromSuspendedToRunnable();
     PruneNonImageClasses();  // Remove junk
     ComputeLazyFieldsForImageClasses();  // Add useful information
+    ProcessStrings();
     Thread::Current()->TransitionFromRunnableToSuspended(kNative);
   }
   gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -561,9 +562,9 @@
     bool is_prefix = false;
     if (it != existing_strings.end()) {
       CHECK_LE(length, it->second);
-      is_prefix = std::equal(combined_chars.begin() + it->first,
-                             combined_chars.begin() + it->first + it->second,
-                             combined_chars.begin() + new_string.first);
+      is_prefix = std::equal(combined_chars.begin() + new_string.first,
+                             combined_chars.begin() + new_string.first + new_string.second,
+                             combined_chars.begin() + it->first);
     }
     if (is_prefix) {
       // Shares a prefix, set the offset to where the new offset will be.
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index eb6181c..9561054 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -16,6 +16,7 @@
 
 #include "builder.h"
 
+#include "base/logging.h"
 #include "class_linker.h"
 #include "dex_file.h"
 #include "dex_file-inl.h"
@@ -68,6 +69,74 @@
   size_t index_;
 };
 
+class SwitchTable : public ValueObject {
+ public:
+  SwitchTable(const Instruction& instruction, uint32_t dex_pc, bool sparse)
+      : instruction_(instruction), dex_pc_(dex_pc), sparse_(sparse) {
+    int32_t table_offset = instruction.VRegB_31t();
+    const uint16_t* table = reinterpret_cast<const uint16_t*>(&instruction) + table_offset;
+    if (sparse) {
+      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
+    } else {
+      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
+    }
+    num_entries_ = table[1];
+    values_ = reinterpret_cast<const int32_t*>(&table[2]);
+  }
+
+  uint16_t GetNumEntries() const {
+    return num_entries_;
+  }
+
+  void CheckIndex(size_t index) const {
+    if (sparse_) {
+      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+      DCHECK_LT(index, 2 * static_cast<size_t>(num_entries_));
+    } else {
+      // In a packed table, we have the starting key and num_entries_ values.
+      DCHECK_LT(index, 1 + static_cast<size_t>(num_entries_));
+    }
+  }
+
+  int32_t GetEntryAt(size_t index) const {
+    CheckIndex(index);
+    return values_[index];
+  }
+
+  uint32_t GetDexPcForIndex(size_t index) const {
+    CheckIndex(index);
+    return dex_pc_ +
+        (reinterpret_cast<const int16_t*>(values_ + index) -
+         reinterpret_cast<const int16_t*>(&instruction_));
+  }
+
+  // Index of the first value in the table.
+  size_t GetFirstValueIndex() const {
+    if (sparse_) {
+      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+      return num_entries_;
+    } else {
+      // In a packed table, we have the starting key and num_entries_ values.
+      return 1;
+    }
+  }
+
+ private:
+  const Instruction& instruction_;
+  const uint32_t dex_pc_;
+
+  // Whether this is a sparse-switch table (or a packed-switch one).
+  const bool sparse_;
+
+  // This can't be const as it needs to be computed from the given instruction, and complicated
+  // expressions in the initializer list seemed very ugly.
+  uint16_t num_entries_;
+
+  const int32_t* values_;
+
+  DISALLOW_COPY_AND_ASSIGN(SwitchTable);
+};
+
 void HGraphBuilder::InitializeLocals(uint16_t count) {
   graph_->SetNumberOfVRegs(count);
   locals_.SetSize(count);
@@ -286,7 +355,6 @@
                                          size_t* number_of_dex_instructions,
                                          size_t* number_of_blocks,
                                          size_t* number_of_branches) {
-  // TODO: Support switch instructions.
   branch_targets_.SetSize(code_end - code_ptr);
 
   // Create the first block for the dex instructions, single successor of the entry block.
@@ -296,7 +364,7 @@
 
   // Iterate over all instructions and find branching instructions. Create blocks for
   // the locations these instructions branch to.
-  size_t dex_pc = 0;
+  uint32_t dex_pc = 0;
   while (code_ptr < code_end) {
     (*number_of_dex_instructions)++;
     const Instruction& instruction = *Instruction::At(code_ptr);
@@ -316,6 +384,41 @@
         branch_targets_.Put(dex_pc, block);
         (*number_of_blocks)++;
       }
+    } else if (instruction.IsSwitch()) {
+      SwitchTable table(instruction, dex_pc, instruction.Opcode() == Instruction::SPARSE_SWITCH);
+
+      uint16_t num_entries = table.GetNumEntries();
+
+      // In a packed-switch, the entry at index 0 is the starting key. In a sparse-switch, the
+      // entry at index 0 is the first key, and values are after *all* keys.
+      size_t offset = table.GetFirstValueIndex();
+
+      // Use a larger loop counter type to avoid overflow issues.
+      for (size_t i = 0; i < num_entries; ++i) {
+        // The target of the case.
+        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
+        if (FindBlockStartingAt(target) == nullptr) {
+          block = new (arena_) HBasicBlock(graph_, target);
+          branch_targets_.Put(target, block);
+          (*number_of_blocks)++;
+        }
+
+        // The next case gets its own block.
+        if (i < num_entries) {
+          block = new (arena_) HBasicBlock(graph_, target);
+          branch_targets_.Put(table.GetDexPcForIndex(i), block);
+          (*number_of_blocks)++;
+        }
+      }
+
+      // Fall-through. Add a block if there is more code afterwards.
+      dex_pc += instruction.SizeInCodeUnits();
+      code_ptr += instruction.SizeInCodeUnits();
+      if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
+        block = new (arena_) HBasicBlock(graph_, dex_pc);
+        branch_targets_.Put(dex_pc, block);
+        (*number_of_blocks)++;
+      }
     } else {
       code_ptr += instruction.SizeInCodeUnits();
       dex_pc += instruction.SizeInCodeUnits();
@@ -337,9 +440,10 @@
 
 void HGraphBuilder::Conversion_12x(const Instruction& instruction,
                                    Primitive::Type input_type,
-                                   Primitive::Type result_type) {
+                                   Primitive::Type result_type,
+                                   uint32_t dex_pc) {
   HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
-  current_block_->AddInstruction(new (arena_) HTypeConversion(result_type, first));
+  current_block_->AddInstruction(new (arena_) HTypeConversion(result_type, first, dex_pc));
   UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
 }
 
@@ -863,6 +967,85 @@
   return true;
 }
 
+bool HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc) {
+  SwitchTable table(instruction, dex_pc, false);
+
+  // Value to test against.
+  HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
+
+  uint16_t num_entries = table.GetNumEntries();
+  // There should be at least one entry here.
+  DCHECK_GT(num_entries, 0U);
+
+  // Chained cmp-and-branch, starting from starting_key.
+  int32_t starting_key = table.GetEntryAt(0);
+
+  for (size_t i = 1; i <= num_entries; i++) {
+    BuildSwitchCaseHelper(instruction, i, i == num_entries, table, value, starting_key + i - 1,
+                          table.GetEntryAt(i), dex_pc);
+  }
+  return true;
+}
+
+bool HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc) {
+  SwitchTable table(instruction, dex_pc, true);
+
+  // Value to test against.
+  HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
+
+  uint16_t num_entries = table.GetNumEntries();
+  // There should be at least one entry here.
+  DCHECK_GT(num_entries, 0U);
+
+  for (size_t i = 0; i < num_entries; i++) {
+    BuildSwitchCaseHelper(instruction, i, i == static_cast<size_t>(num_entries) - 1, table, value,
+                          table.GetEntryAt(i), table.GetEntryAt(i + num_entries), dex_pc);
+  }
+  return true;
+}
+
+void HGraphBuilder::BuildSwitchCaseHelper(const Instruction& instruction, size_t index,
+                                          bool is_last_case, const SwitchTable& table,
+                                          HInstruction* value, int32_t case_value_int,
+                                          int32_t target_offset, uint32_t dex_pc) {
+  PotentiallyAddSuspendCheck(target_offset, dex_pc);
+
+  // The current case's value.
+  HInstruction* this_case_value = GetIntConstant(case_value_int);
+
+  // Compare value and this_case_value.
+  HEqual* comparison = new (arena_) HEqual(value, this_case_value);
+  current_block_->AddInstruction(comparison);
+  HInstruction* ifinst = new (arena_) HIf(comparison);
+  current_block_->AddInstruction(ifinst);
+
+  // Case hit: use the target offset to determine where to go.
+  HBasicBlock* case_target = FindBlockStartingAt(dex_pc + target_offset);
+  DCHECK(case_target != nullptr);
+  current_block_->AddSuccessor(case_target);
+
+  // Case miss: go to the next case (or default fall-through).
+  // When there is a next case, we use the block stored with the table offset representing this
+  // case (that is where we registered them in ComputeBranchTargets).
+  // When there is no next case, we use the following instruction.
+  // TODO: Find a good way to peel the last iteration to avoid the conditional, but still have re-use.
+  if (!is_last_case) {
+    HBasicBlock* next_case_target = FindBlockStartingAt(table.GetDexPcForIndex(index));
+    DCHECK(next_case_target != nullptr);
+    current_block_->AddSuccessor(next_case_target);
+
+    // Need to manually add the block, as there is no dex-pc transition for the cases.
+    graph_->AddBlock(next_case_target);
+
+    current_block_ = next_case_target;
+  } else {
+    HBasicBlock* default_target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
+    DCHECK(default_target != nullptr);
+    current_block_->AddSuccessor(default_target);
+    current_block_ = nullptr;
+  }
+}
+
 void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc) {
   if (target_offset <= 0) {
     // Unconditionnally add a suspend check to backward branches. We can remove
@@ -1079,52 +1262,67 @@
     }
 
     case Instruction::INT_TO_LONG: {
-      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimLong);
+      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimLong, dex_pc);
       break;
     }
 
     case Instruction::INT_TO_FLOAT: {
-      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimFloat);
+      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimFloat, dex_pc);
       break;
     }
 
     case Instruction::INT_TO_DOUBLE: {
-      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimDouble);
+      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimDouble, dex_pc);
       break;
     }
 
     case Instruction::LONG_TO_INT: {
-      Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimInt);
+      Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimInt, dex_pc);
       break;
     }
 
     case Instruction::LONG_TO_FLOAT: {
-      Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimFloat);
+      Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimFloat, dex_pc);
       break;
     }
 
     case Instruction::LONG_TO_DOUBLE: {
-      Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimDouble);
+      Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimDouble, dex_pc);
       break;
     }
 
     case Instruction::FLOAT_TO_INT: {
-      Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimInt);
+      Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimInt, dex_pc);
+      break;
+    }
+
+    case Instruction::FLOAT_TO_LONG: {
+      Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimLong, dex_pc);
+      break;
+    }
+
+    case Instruction::FLOAT_TO_DOUBLE: {
+      Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimDouble, dex_pc);
+      break;
+    }
+
+    case Instruction::DOUBLE_TO_FLOAT: {
+      Conversion_12x(instruction, Primitive::kPrimDouble, Primitive::kPrimFloat, dex_pc);
       break;
     }
 
     case Instruction::INT_TO_BYTE: {
-      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte);
+      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte, dex_pc);
       break;
     }
 
     case Instruction::INT_TO_SHORT: {
-      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimShort);
+      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimShort, dex_pc);
       break;
     }
 
     case Instruction::INT_TO_CHAR: {
-      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimChar);
+      Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimChar, dex_pc);
       break;
     }
 
@@ -1760,6 +1958,20 @@
       break;
     }
 
+    case Instruction::PACKED_SWITCH: {
+      if (!BuildPackedSwitch(instruction, dex_pc)) {
+        return false;
+      }
+      break;
+    }
+
+    case Instruction::SPARSE_SWITCH: {
+      if (!BuildSparseSwitch(instruction, dex_pc)) {
+        return false;
+      }
+      break;
+    }
+
     default:
       return false;
   }
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 8519bcb..73c2f50 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -29,6 +29,7 @@
 namespace art {
 
 class Instruction;
+class SwitchTable;
 
 class HGraphBuilder : public ValueObject {
  public:
@@ -135,7 +136,8 @@
 
   void Conversion_12x(const Instruction& instruction,
                       Primitive::Type input_type,
-                      Primitive::Type result_type);
+                      Primitive::Type result_type,
+                      uint32_t dex_pc);
 
   void BuildCheckedDivRem(uint16_t out_reg,
                           uint16_t first_reg,
@@ -202,6 +204,17 @@
                       uint16_t type_index,
                       uint32_t dex_pc);
 
+  // Builds an instruction sequence for a packed switch statement.
+  bool BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc);
+
+  // Builds an instruction sequence for a sparse switch statement.
+  bool BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc);
+
+  void BuildSwitchCaseHelper(const Instruction& instruction, size_t index,
+                             bool is_last_case, const SwitchTable& table,
+                             HInstruction* value, int32_t case_value_int,
+                             int32_t target_offset, uint32_t dex_pc);
+
   ArenaAllocator* const arena_;
 
   // A list of the size of the dex code holding block information for
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e581af2..7f358ea 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -499,6 +499,21 @@
 }
 
 void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
+  if (instruction != nullptr && instruction->IsTypeConversion()) {
+    // The code generated for some type conversions may call the
+    // runtime, thus normally requiring a subsequent call to this
+    // method.  However, the method verifier does not produce PC
+    // information for Dex type conversion instructions, as it
+    // considers them as "atomic" (they cannot join a GC).
+    // Therefore we do not currently record PC information for such
+    // instructions.  As this may change later, we added this special
+    // case so that code generators may nevertheless call
+    // CodeGenerator::RecordPcInfo without triggering an error in
+    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
+    // thereafter.
+    return;
+  }
+
   // Collect PC infos for the mapping table.
   struct PcInfo pc_info;
   pc_info.dex_pc = dex_pc;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 7c8f6a2..1d42c47 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -37,6 +37,8 @@
 
 // Maximum value for a primitive integer.
 static int32_t constexpr kPrimIntMax = 0x7fffffff;
+// Maximum value for a primitive long.
+static int64_t constexpr kPrimLongMax = 0x7fffffffffffffff;
 
 class Assembler;
 class CodeGenerator;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 448a5a0..5076c85 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -44,8 +44,9 @@
 static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
 static constexpr size_t kRuntimeParameterCoreRegistersLength =
     arraysize(kRuntimeParameterCoreRegisters);
-static constexpr SRegister kRuntimeParameterFpuRegisters[] = { };
-static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;
+static constexpr SRegister kRuntimeParameterFpuRegisters[] = { S0 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+    arraysize(kRuntimeParameterFpuRegisters);
 
 class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegister> {
  public:
@@ -874,6 +875,7 @@
       || instruction->IsBoundsCheck()
       || instruction->IsNullCheck()
       || instruction->IsDivZeroCheck()
+      || instruction->GetLocations()->CanCall()
       || !IsLeafMethod());
 }
 
@@ -1359,11 +1361,18 @@
 }
 
 void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
   Primitive::Type result_type = conversion->GetResultType();
   Primitive::Type input_type = conversion->GetInputType();
   DCHECK_NE(result_type, input_type);
+
+  // Float-to-long conversions invoke the runtime.
+  LocationSummary::CallKind call_kind =
+      (input_type == Primitive::kPrimFloat && result_type == Primitive::kPrimLong)
+      ? LocationSummary::kCall
+      : LocationSummary::kNoCall;
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
   switch (result_type) {
     case Primitive::kPrimByte:
       switch (input_type) {
@@ -1434,7 +1443,15 @@
           locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
           break;
 
-        case Primitive::kPrimFloat:
+        case Primitive::kPrimFloat: {
+          // Processing a Dex `float-to-long' instruction.
+          InvokeRuntimeCallingConvention calling_convention;
+          locations->SetInAt(0, Location::FpuRegisterLocation(
+              calling_convention.GetFpuRegisterAt(0)));
+          locations->SetOut(Location::RegisterPairLocation(R0, R1));
+          break;
+        }
+
         case Primitive::kPrimDouble:
           LOG(FATAL) << "Type conversion from " << input_type << " to "
                      << result_type << " not yet implemented";
@@ -1484,8 +1501,9 @@
           break;
 
         case Primitive::kPrimDouble:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `double-to-float' instruction.
+          locations->SetInAt(0, Location::RequiresFpuRegister());
+          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
           break;
 
         default:
@@ -1515,8 +1533,9 @@
           break;
 
         case Primitive::kPrimFloat:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `float-to-double' instruction.
+          locations->SetInAt(0, Location::RequiresFpuRegister());
+          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
           break;
 
         default:
@@ -1623,6 +1642,13 @@
           break;
 
         case Primitive::kPrimFloat:
+          // Processing a Dex `float-to-long' instruction.
+          // This call does not actually record PC information.
+          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
+                                  conversion,
+                                  conversion->GetDexPc());
+          break;
+
         case Primitive::kPrimDouble:
           LOG(FATAL) << "Type conversion from " << input_type << " to "
                      << result_type << " not yet implemented";
@@ -1704,8 +1730,9 @@
         }
 
         case Primitive::kPrimDouble:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `double-to-float' instruction.
+          __ vcvtsd(out.AsFpuRegister<SRegister>(),
+                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
           break;
 
         default:
@@ -1760,8 +1787,9 @@
         }
 
         case Primitive::kPrimFloat:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `float-to-double' instruction.
+          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+                    in.AsFpuRegister<SRegister>());
           break;
 
         default:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6f83d9f..2aa121d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1326,11 +1326,18 @@
 }
 
 void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
   Primitive::Type result_type = conversion->GetResultType();
   Primitive::Type input_type = conversion->GetInputType();
   DCHECK_NE(result_type, input_type);
+
+  // Float-to-long conversions invoke the runtime.
+  LocationSummary::CallKind call_kind =
+      (input_type == Primitive::kPrimFloat && result_type == Primitive::kPrimLong)
+      ? LocationSummary::kCall
+      : LocationSummary::kNoCall;
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
   switch (result_type) {
     case Primitive::kPrimByte:
       switch (input_type) {
@@ -1401,7 +1408,15 @@
           locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
           break;
 
-        case Primitive::kPrimFloat:
+        case Primitive::kPrimFloat: {
+          // Processing a Dex `float-to-long' instruction.
+          InvokeRuntimeCallingConvention calling_convention;
+          locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+          // The runtime helper puts the result in EAX, EDX.
+          locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
+          break;
+        }
+
         case Primitive::kPrimDouble:
           LOG(FATAL) << "Type conversion from " << input_type << " to "
                      << result_type << " not yet implemented";
@@ -1449,8 +1464,9 @@
           break;
 
         case Primitive::kPrimDouble:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `double-to-float' instruction.
+          locations->SetInAt(0, Location::RequiresFpuRegister());
+          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
           break;
 
         default:
@@ -1479,8 +1495,9 @@
           break;
 
         case Primitive::kPrimFloat:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `float-to-double' instruction.
+          locations->SetInAt(0, Location::RequiresFpuRegister());
+          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
           break;
 
         default:
@@ -1615,6 +1632,12 @@
           break;
 
         case Primitive::kPrimFloat:
+          // Processing a Dex `float-to-long' instruction.
+          __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pF2l)));
+          // This call does not actually record PC information.
+          codegen_->RecordPcInfo(conversion, conversion->GetDexPc());
+          break;
+
         case Primitive::kPrimDouble:
           LOG(FATAL) << "Type conversion from " << input_type << " to "
                      << result_type << " not yet implemented";
@@ -1694,8 +1717,8 @@
         }
 
         case Primitive::kPrimDouble:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `double-to-float' instruction.
+          __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
           break;
 
         default:
@@ -1741,8 +1764,8 @@
         }
 
         case Primitive::kPrimFloat:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `float-to-double' instruction.
+          __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
           break;
 
         default:
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 47fd304..5761fb1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1394,6 +1394,12 @@
           break;
 
         case Primitive::kPrimFloat:
+          // Processing a Dex `float-to-long' instruction.
+          locations->SetInAt(0, Location::RequiresFpuRegister());
+          locations->SetOut(Location::RequiresRegister());
+          locations->AddTemp(Location::RequiresFpuRegister());
+          break;
+
         case Primitive::kPrimDouble:
           LOG(FATAL) << "Type conversion from " << input_type << " to "
                      << result_type << " not yet implemented";
@@ -1439,8 +1445,9 @@
           break;
 
         case Primitive::kPrimDouble:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `double-to-float' instruction.
+          locations->SetInAt(0, Location::RequiresFpuRegister());
+          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
           break;
 
         default:
@@ -1467,8 +1474,9 @@
           break;
 
         case Primitive::kPrimFloat:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `float-to-double' instruction.
+          locations->SetInAt(0, Location::RequiresFpuRegister());
+          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
           break;
 
         default:
@@ -1565,14 +1573,14 @@
 
           __ movl(output, Immediate(kPrimIntMax));
           // temp = int-to-float(output)
-          __ cvtsi2ss(temp, output);
+          __ cvtsi2ss(temp, output, false);
           // if input >= temp goto done
           __ comiss(input, temp);
           __ j(kAboveEqual, &done);
           // if input == NaN goto nan
           __ j(kUnordered, &nan);
           // output = float-to-int-truncate(input)
-          __ cvttss2si(output, input);
+          __ cvttss2si(output, input, false);
           __ jmp(&done);
           __ Bind(&nan);
           //  output = 0
@@ -1604,7 +1612,31 @@
           __ movsxd(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
           break;
 
-        case Primitive::kPrimFloat:
+        case Primitive::kPrimFloat: {
+          // Processing a Dex `float-to-long' instruction.
+          XmmRegister input = in.AsFpuRegister<XmmRegister>();
+          CpuRegister output = out.AsRegister<CpuRegister>();
+          XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+          Label done, nan;
+
+          __ movq(output, Immediate(kPrimLongMax));
+          // temp = int-to-float(output)
+          __ cvtsi2ss(temp, output, true);
+          // if input >= temp goto done
+          __ comiss(input, temp);
+          __ j(kAboveEqual, &done);
+          // if input == NaN goto nan
+          __ j(kUnordered, &nan);
+          // output = float-to-int-truncate(input)
+          __ cvttss2si(output, input, true);
+          __ jmp(&done);
+          __ Bind(&nan);
+          //  output = 0
+          __ xorq(output, output);
+          __ Bind(&done);
+          break;
+        }
+
         case Primitive::kPrimDouble:
           LOG(FATAL) << "Type conversion from " << input_type << " to "
                      << result_type << " not yet implemented";
@@ -1656,8 +1688,8 @@
           break;
 
         case Primitive::kPrimDouble:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `double-to-float' instruction.
+          __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
           break;
 
         default:
@@ -1682,8 +1714,8 @@
           break;
 
         case Primitive::kPrimFloat:
-          LOG(FATAL) << "Type conversion from " << input_type
-                     << " to " << result_type << " not yet implemented";
+          // Processing a Dex `float-to-double' instruction.
+          __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
           break;
 
         default:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3908a61..8a25de1 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2001,8 +2001,8 @@
 class HTypeConversion : public HExpression<1> {
  public:
   // Instantiate a type conversion of `input` to `result_type`.
-  HTypeConversion(Primitive::Type result_type, HInstruction* input)
-      : HExpression(result_type, SideEffects::None()) {
+  HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc)
+      : HExpression(result_type, SideEffects::None()), dex_pc_(dex_pc) {
     SetRawInputAt(0, input);
     DCHECK_NE(input->GetType(), result_type);
   }
@@ -2011,12 +2011,18 @@
   Primitive::Type GetInputType() const { return GetInput()->GetType(); }
   Primitive::Type GetResultType() const { return GetType(); }
 
+  // Required by the x86 and ARM code generators when producing calls
+  // to the runtime.
+  uint32_t GetDexPc() const { return dex_pc_; }
+
   bool CanBeMoved() const OVERRIDE { return true; }
   bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(TypeConversion);
 
  private:
+  const uint32_t dex_pc_;
+
   DISALLOW_COPY_AND_ASSIGN(HTypeConversion);
 };
 
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 474d8a9..3c21236 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -663,9 +663,19 @@
 
 
 void X86_64Assembler::cvttss2si(CpuRegister dst, XmmRegister src) {
+  cvttss2si(dst, src, false);
+}
+
+
+void X86_64Assembler::cvttss2si(CpuRegister dst, XmmRegister src, bool is64bit) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
-  EmitOptionalRex32(dst, src);
+  if (is64bit) {
+    // Emit a REX.W prefix if the operand size is 64 bits.
+    EmitRex64(dst, src);
+  } else {
+    EmitOptionalRex32(dst, src);
+  }
   EmitUint8(0x0F);
   EmitUint8(0x2C);
   EmitXmmRegisterOperand(dst.LowBits(), src);
@@ -1997,6 +2007,10 @@
   EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
 }
 
+void X86_64Assembler::EmitRex64(CpuRegister dst, XmmRegister src) {
+  EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
+}
+
 void X86_64Assembler::EmitRex64(CpuRegister dst, const Operand& operand) {
   uint8_t rex = 0x48 | operand.rex();  // REX.W000
   if (dst.NeedsRex()) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 6e71e4a..4c28366 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -340,6 +340,7 @@
   void cvtsd2ss(XmmRegister dst, XmmRegister src);
 
   void cvttss2si(CpuRegister dst, XmmRegister src);  // Note: this is the r32 version.
+  void cvttss2si(CpuRegister dst, XmmRegister src, bool is64bit);
   void cvttsd2si(CpuRegister dst, XmmRegister src);  // Note: this is the r32 version.
 
   void cvtdq2pd(XmmRegister dst, XmmRegister src);
@@ -688,6 +689,7 @@
   void EmitRex64(CpuRegister dst, CpuRegister src);
   void EmitRex64(CpuRegister dst, const Operand& operand);
   void EmitRex64(XmmRegister dst, CpuRegister src);
+  void EmitRex64(CpuRegister dst, XmmRegister src);
 
   // Emit a REX prefix to normalize byte registers plus necessary register bit encodings.
   void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index 4f39c42..cf30667 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -38,8 +38,8 @@
 
 # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
 ifeq ($(ART_BUILD_HOST_NDEBUG),true)
-  $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler libziparchive-host,art/compiler,host,ndebug))
+  $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler libziparchive-host,art/compiler,host,ndebug,both))
 endif
 ifeq ($(ART_BUILD_HOST_DEBUG),true)
-  $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler libziparchive-host,art/compiler,host,debug))
+  $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler libziparchive-host,art/compiler,host,debug,both))
 endif
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 25d6db1..00661f4 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -25,10 +25,6 @@
 #include <string>
 #include <vector>
 
-#ifndef __APPLE__
-#include <malloc.h>  // For mallinfo
-#endif
-
 #if defined(__linux__) && defined(__arm__)
 #include <sys/personality.h>
 #include <sys/utsname.h>
@@ -267,7 +263,7 @@
 // during development when fatal aborts lead to a cascade of failures
 // that result in a deadlock.
 class WatchDog {
-// WatchDog defines its own CHECK_PTHREAD_CALL to avoid using Log which uses locks
+// WatchDog defines its own CHECK_PTHREAD_CALL to avoid using LOG which uses locks
 #undef CHECK_PTHREAD_CALL
 #define CHECK_WATCH_DOG_PTHREAD_CALL(call, args, what) \
   do { \
@@ -330,41 +326,23 @@
             message.c_str());
   }
 
-  static void Warn(const std::string& message) {
-    Message('W', message);
-  }
-
   [[noreturn]] static void Fatal(const std::string& message) {
     Message('F', message);
     exit(1);
   }
 
   void Wait() {
-    bool warning = true;
-    CHECK_GT(kWatchDogTimeoutSeconds, kWatchDogWarningSeconds);
     // TODO: tune the multiplier for GC verification, the following is just to make the timeout
     //       large.
     int64_t multiplier = kVerifyObjectSupport > kVerifyObjectModeFast ? 100 : 1;
-    timespec warning_ts;
-    InitTimeSpec(true, CLOCK_REALTIME, multiplier * kWatchDogWarningSeconds * 1000, 0, &warning_ts);
     timespec timeout_ts;
     InitTimeSpec(true, CLOCK_REALTIME, multiplier * kWatchDogTimeoutSeconds * 1000, 0, &timeout_ts);
     const char* reason = "dex2oat watch dog thread waiting";
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_lock, (&mutex_), reason);
     while (!shutting_down_) {
-      int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &mutex_,
-                                                         warning ? &warning_ts
-                                                                 : &timeout_ts));
+      int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts));
       if (rc == ETIMEDOUT) {
-        std::string message(StringPrintf("dex2oat did not finish after %d seconds",
-                                         warning ? kWatchDogWarningSeconds
-                                                 : kWatchDogTimeoutSeconds));
-        if (warning) {
-          Warn(message.c_str());
-          warning = false;
-        } else {
-          Fatal(message.c_str());
-        }
+        Fatal(StringPrintf("dex2oat did not finish after %d seconds", kWatchDogTimeoutSeconds));
       } else if (rc != 0) {
         std::string message(StringPrintf("pthread_cond_timedwait failed: %s",
                                          strerror(errno)));
@@ -378,9 +356,6 @@
   // Debug builds are slower so they have larger timeouts.
   static const unsigned int kSlowdownFactor = kIsDebugBuild ? 5U : 1U;
 
-  static const unsigned int kWatchDogWarningSeconds = kUsePortableCompiler ?
-      kSlowdownFactor * 2 * 60 :   // 2 minutes scaled by kSlowdownFactor (portable).
-      kSlowdownFactor * 1 * 60;    // 1 minute scaled by kSlowdownFactor  (not-portable).
   static const unsigned int kWatchDogTimeoutSeconds = kUsePortableCompiler ?
       kSlowdownFactor * 30 * 60 :  // 30 minutes scaled by kSlowdownFactor (portable).
       kSlowdownFactor * 6 * 60;    // 6 minutes scaled by kSlowdownFactor  (not-portable).
@@ -709,8 +684,8 @@
       } else if (option == "--no-include-patch-information") {
         include_patch_information = false;
       } else if (option.starts_with("--verbose-methods=")) {
-        // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages conditional
-        //       on having verbost methods.
+        // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
+        //       conditional on having verbose methods.
         gLogVerbosity.compiler = false;
         Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
       } else if (option.starts_with("--dump-init-failures=")) {
@@ -1104,7 +1079,8 @@
       if (kSaveDexInput) {
         for (size_t i = 0; i < dex_files_.size(); ++i) {
           const DexFile* dex_file = dex_files_[i];
-          std::string tmp_file_name(StringPrintf("/data/local/tmp/dex2oat.%d.%zd.dex", getpid(), i));
+          std::string tmp_file_name(StringPrintf("/data/local/tmp/dex2oat.%d.%zd.dex",
+                                                 getpid(), i));
           std::unique_ptr<File> tmp_file(OS::CreateEmptyFile(tmp_file_name.c_str()));
           if (tmp_file.get() == nullptr) {
             PLOG(ERROR) << "Failed to open file " << tmp_file_name
@@ -1130,7 +1106,9 @@
      * If we're not in interpret-only or verify-none mode, go ahead and compile small applications.
      * Don't bother to check if we're doing the image.
      */
-    if (!image_ && compiler_options_->IsCompilationEnabled() && compiler_kind_ == Compiler::kQuick) {
+    if (!image_ &&
+        compiler_options_->IsCompilationEnabled() &&
+        compiler_kind_ == Compiler::kQuick) {
       size_t num_methods = 0;
       for (size_t i = 0; i != dex_files_.size(); ++i) {
         const DexFile* dex_file = dex_files_[i];
@@ -1614,20 +1592,9 @@
   }
 
   void LogCompletionTime() {
-    std::ostringstream mallinfostr;
-#ifdef HAVE_MALLOC_H
-    struct mallinfo info = mallinfo();
-    const size_t allocated_space = static_cast<size_t>(info.uordblks);
-    const size_t free_space = static_cast<size_t>(info.fordblks);
-    mallinfostr << " native alloc=" << PrettySize(allocated_space) << " free="
-        << PrettySize(free_space);
-#endif
-    const ArenaPool* arena_pool = driver_->GetArenaPool();
-    gc::Heap* heap = Runtime::Current()->GetHeap();
     LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
-              << " (threads: " << thread_count_ << ")"
-              << " arena alloc=" << PrettySize(arena_pool->GetBytesAllocated())
-              << " java alloc=" << PrettySize(heap->GetBytesAllocated()) << mallinfostr.str();
+              << " (threads: " << thread_count_ << ") "
+              << driver_->GetMemoryUsageString();
   }
 
   std::unique_ptr<CompilerOptions> compiler_options_;
@@ -1689,7 +1656,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
 };
 
-const unsigned int WatchDog::kWatchDogWarningSeconds;
 const unsigned int WatchDog::kWatchDogTimeoutSeconds;
 
 static void b13564922() {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 7dfdb75..b362b73 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -31,9 +31,7 @@
   base/stringprintf.cc \
   base/timing_logger.cc \
   base/unix_file/fd_file.cc \
-  base/unix_file/null_file.cc \
   base/unix_file/random_access_file_utils.cc \
-  base/unix_file/string_file.cc \
   check_jni.cc \
   class_linker.cc \
   common_throws.cc \
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 5a8fbb3..66ee870 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -52,7 +52,7 @@
   // Pass function is called by the last thread, the count will
   // be decremented to zero and a Broadcast will be made on the
   // condition variable, thus waking this up.
-  if (count_ != 0) {
+  while (count_ != 0) {
     condition_.Wait(self);
   }
 }
@@ -62,7 +62,18 @@
   SetCountLocked(self, count_ + delta);
   bool timed_out = false;
   if (count_ != 0) {
-    timed_out = condition_.TimedWait(self, timeout_ms, 0);
+    uint32_t timeout_ns = 0;
+    uint64_t abs_timeout = NanoTime() + MsToNs(timeout_ms);
+    for (;;) {
+      timed_out = condition_.TimedWait(self, timeout_ms, timeout_ns);
+      if (timed_out || count_ == 0) return timed_out;
+      // Compute time remaining on timeout.
+      uint64_t now = NanoTime();
+      int64_t time_left = abs_timeout - now;
+      if (time_left <= 0) return true;
+      timeout_ns = time_left % (1000*1000);
+      timeout_ms = time_left / (1000*1000);
+    }
   }
   return timed_out;
 }
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 5ca88e8..0e7f61e 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -14,6 +14,16 @@
  * limitations under the License.
  */
 
+// CAUTION: THIS IS NOT A FULLY GENERAL BARRIER API.
+
+// It may either be used as a "latch" or single-use barrier, or it may be reused under
+// very limited conditions, e.g. if only Pass(), but not Wait() is called.  Unlike a standard
+// latch API, it is possible to initialize the latch to a count of zero, repeatedly call
+// Pass() or Wait(), and only then set the count using the Increment() method.  Threads at
+// a Wait() are only awoken if the count reaches zero AFTER the decrement is applied.
+// This works because, also unlike most latch APIs, there is no way to Wait() without
+// decrementing the count, and thus nobody can spuriously wake up on the initial zero.
+
 #ifndef ART_RUNTIME_BARRIER_H_
 #define ART_RUNTIME_BARRIER_H_
 
@@ -22,20 +32,23 @@
 
 namespace art {
 
+// TODO: Maybe give this a better name.
 class Barrier {
  public:
   explicit Barrier(int count);
   virtual ~Barrier();
 
-  // Pass through the barrier, decrements the count but does not block.
+  // Pass through the barrier, decrement the count but do not block.
   void Pass(Thread* self);
 
   // Wait on the barrier, decrement the count.
   void Wait(Thread* self);
 
-  // Set the count to a new value, if the value is 0 then everyone waiting on the condition
-  // variable is resumed.
-  void Init(Thread* self, int count);
+  // The following three calls are only safe if we somehow know that no other thread both
+  // - has been woken up, and
+  // - has not left the Wait() or Increment() call.
+  // If these calls are made in that situation, the offending thread is likely to go back
+  // to sleep, resulting in a deadlock.
 
   // Increment the count by delta, wait on condition if count is non zero.
   void Increment(Thread* self, int delta) LOCKS_EXCLUDED(lock_);
@@ -44,6 +57,10 @@
   // true if time out occurred.
   bool Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
 
+  // Set the count to a new value.  This should only be used if there is no possibility that
+  // another thread is still in Wait().  See above.
+  void Init(Thread* self, int count);
+
  private:
   void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index de348dc..f68a5d4 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -27,22 +27,17 @@
 namespace art {
 class CheckWaitTask : public Task {
  public:
-  CheckWaitTask(Barrier* barrier, AtomicInteger* count1, AtomicInteger* count2,
-                   AtomicInteger* count3)
+  CheckWaitTask(Barrier* barrier, AtomicInteger* count1, AtomicInteger* count2)
       : barrier_(barrier),
         count1_(count1),
-        count2_(count2),
-        count3_(count3) {}
+        count2_(count2) {}
 
   void Run(Thread* self) {
-    LOG(INFO) << "Before barrier 1 " << *self;
+    LOG(INFO) << "Before barrier" << *self;
     ++*count1_;
     barrier_->Wait(self);
     ++*count2_;
-    LOG(INFO) << "Before barrier 2 " << *self;
-    barrier_->Wait(self);
-    ++*count3_;
-    LOG(INFO) << "After barrier 2 " << *self;
+    LOG(INFO) << "After barrier" << *self;
   }
 
   virtual void Finalize() {
@@ -53,7 +48,6 @@
   Barrier* const barrier_;
   AtomicInteger* const count1_;
   AtomicInteger* const count2_;
-  AtomicInteger* const count3_;
 };
 
 class BarrierTest : public CommonRuntimeTest {
@@ -67,31 +61,27 @@
 TEST_F(BarrierTest, CheckWait) {
   Thread* self = Thread::Current();
   ThreadPool thread_pool("Barrier test thread pool", num_threads);
-  Barrier barrier(0);
+  Barrier barrier(num_threads + 1);  // One extra Wait() in main thread.
+  Barrier timeout_barrier(0);  // Only used for sleeping on timeout.
   AtomicInteger count1(0);
   AtomicInteger count2(0);
-  AtomicInteger count3(0);
   for (int32_t i = 0; i < num_threads; ++i) {
-    thread_pool.AddTask(self, new CheckWaitTask(&barrier, &count1, &count2, &count3));
+    thread_pool.AddTask(self, new CheckWaitTask(&barrier, &count1, &count2));
   }
   thread_pool.StartWorkers(self);
-  barrier.Increment(self, num_threads);
-  // At this point each thread should have passed through the barrier. The first count should be
-  // equal to num_threads.
-  EXPECT_EQ(num_threads, count1.LoadRelaxed());
-  // Count 3 should still be zero since no thread should have gone past the second barrier.
-  EXPECT_EQ(0, count3.LoadRelaxed());
-  // Now lets tell the threads to pass again.
-  barrier.Increment(self, num_threads);
-  // Count 2 should be equal to num_threads since each thread must have passed the second barrier
-  // at this point.
-  EXPECT_EQ(num_threads, count2.LoadRelaxed());
+  while (count1.LoadRelaxed() != num_threads) {
+    timeout_barrier.Increment(self, 1, 100);  // sleep 100 msecs
+  }
+  // Count 2 should still be zero since no thread should have gone past the barrier.
+  EXPECT_EQ(0, count2.LoadRelaxed());
+  // Perform one additional Wait(), allowing pool threads to proceed.
+  barrier.Wait(self);
   // Wait for all the threads to finish.
   thread_pool.Wait(self, true, false);
-  // All three counts should be equal to num_threads now.
-  EXPECT_EQ(count1.LoadRelaxed(), count2.LoadRelaxed());
-  EXPECT_EQ(count2.LoadRelaxed(), count3.LoadRelaxed());
-  EXPECT_EQ(num_threads, count3.LoadRelaxed());
+  // Both counts should be equal to num_threads now.
+  EXPECT_EQ(count1.LoadRelaxed(), num_threads);
+  EXPECT_EQ(count2.LoadRelaxed(), num_threads);
+  timeout_barrier.Init(self, 0);  // Reset to zero for destruction.
 }
 
 class CheckPassTask : public Task {
diff --git a/runtime/base/unix_file/null_file.cc b/runtime/base/unix_file/null_file.cc
deleted file mode 100644
index 322c25a..0000000
--- a/runtime/base/unix_file/null_file.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/null_file.h"
-#include <errno.h>
-
-namespace unix_file {
-
-NullFile::NullFile() {
-}
-
-NullFile::~NullFile() {
-}
-
-int NullFile::Close() {
-  return 0;
-}
-
-int NullFile::Flush() {
-  return 0;
-}
-
-int64_t NullFile::Read(char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
-                       int64_t offset) const {
-  if (offset < 0) {
-    return -EINVAL;
-  }
-  return 0;
-}
-
-int NullFile::SetLength(int64_t new_length) {
-  if (new_length < 0) {
-    return -EINVAL;
-  }
-  return 0;
-}
-
-int64_t NullFile::GetLength() const {
-  return 0;
-}
-
-int64_t NullFile::Write(const char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
-                        int64_t offset) {
-  if (offset < 0) {
-    return -EINVAL;
-  }
-  return byte_count;
-}
-
-}  // namespace unix_file
diff --git a/runtime/base/unix_file/null_file.h b/runtime/base/unix_file/null_file.h
deleted file mode 100644
index 3394731..0000000
--- a/runtime/base/unix_file/null_file.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_BASE_UNIX_FILE_NULL_FILE_H_
-#define ART_RUNTIME_BASE_UNIX_FILE_NULL_FILE_H_
-
-#include "base/unix_file/random_access_file.h"
-#include "base/macros.h"
-
-namespace unix_file {
-
-// A RandomAccessFile implementation equivalent to /dev/null. Writes are
-// discarded, and there's no data to be read. Callers could use FdFile in
-// conjunction with /dev/null, but that's not portable and costs a file
-// descriptor. NullFile is "free".
-//
-// Thread safe.
-class NullFile : public RandomAccessFile {
- public:
-  NullFile();
-  virtual ~NullFile();
-
-  // RandomAccessFile API.
-  virtual int Close();
-  virtual int Flush();
-  virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
-  virtual int SetLength(int64_t new_length);
-  virtual int64_t GetLength() const;
-  virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(NullFile);
-};
-
-}  // namespace unix_file
-
-#endif  // ART_RUNTIME_BASE_UNIX_FILE_NULL_FILE_H_
diff --git a/runtime/base/unix_file/null_file_test.cc b/runtime/base/unix_file/null_file_test.cc
deleted file mode 100644
index 410fdfc..0000000
--- a/runtime/base/unix_file/null_file_test.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/null_file.h"
-
-#include <errno.h>
-
-#include "gtest/gtest.h"
-
-namespace unix_file {
-
-class NullFileTest : public testing::Test { };
-
-TEST_F(NullFileTest, Read) {
-  NullFile f;
-  char buf[256];
-  // You can't read a negative number of bytes...
-  ASSERT_EQ(-EINVAL, f.Read(buf, 0, -1));
-  // ...but everything else is fine (though you'll get no data).
-  ASSERT_EQ(0, f.Read(buf, 128, 0));
-  ASSERT_EQ(0, f.Read(buf, 128, 128));
-}
-
-TEST_F(NullFileTest, SetLength) {
-  NullFile f;
-  // You can't set a negative length...
-  ASSERT_EQ(-EINVAL, f.SetLength(-1));
-  // ...but everything else is fine.
-  ASSERT_EQ(0, f.SetLength(0));
-  ASSERT_EQ(0, f.SetLength(128));
-}
-
-TEST_F(NullFileTest, GetLength) {
-  const std::string content("hello");
-  NullFile f;
-  // The length is always 0.
-  ASSERT_EQ(0, f.GetLength());
-  ASSERT_EQ(content.size(), static_cast<uint64_t>(f.Write(content.data(), content.size(), 0)));
-  ASSERT_EQ(0, f.GetLength());
-}
-
-TEST_F(NullFileTest, Write) {
-  const std::string content("hello");
-  NullFile f;
-  // You can't write at a negative offset...
-  ASSERT_EQ(-EINVAL, f.Write(content.data(), content.size(), -128));
-  // But you can write anywhere else...
-  ASSERT_EQ(content.size(), static_cast<uint64_t>(f.Write(content.data(), content.size(), 0)));
-  ASSERT_EQ(content.size(), static_cast<uint64_t>(f.Write(content.data(), content.size(), 128)));
-  // ...though the file will remain empty.
-  ASSERT_EQ(0, f.GetLength());
-}
-
-}  // namespace unix_file
diff --git a/runtime/base/unix_file/random_access_file_utils_test.cc b/runtime/base/unix_file/random_access_file_utils_test.cc
deleted file mode 100644
index 9457d22..0000000
--- a/runtime/base/unix_file/random_access_file_utils_test.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/string_file.h"
-#include "gtest/gtest.h"
-
-namespace unix_file {
-
-class RandomAccessFileUtilsTest : public testing::Test { };
-
-TEST_F(RandomAccessFileUtilsTest, CopyFile) {
-  StringFile src;
-  StringFile dst;
-
-  const std::string content("hello");
-  src.Assign(content);
-  ASSERT_EQ(src.ToStringPiece(), content);
-  ASSERT_EQ(dst.ToStringPiece(), "");
-
-  ASSERT_TRUE(CopyFile(src, &dst));
-  ASSERT_EQ(src.ToStringPiece(), dst.ToStringPiece());
-}
-
-TEST_F(RandomAccessFileUtilsTest, BadSrc) {
-  FdFile src(-1, false);
-  StringFile dst;
-  ASSERT_FALSE(CopyFile(src, &dst));
-}
-
-TEST_F(RandomAccessFileUtilsTest, BadDst) {
-  StringFile src;
-  FdFile dst(-1, false);
-
-  // We need some source content to trigger a write.
-  // Copying an empty file is a no-op.
-  src.Assign("hello");
-
-  ASSERT_FALSE(CopyFile(src, &dst));
-}
-
-}  // namespace unix_file
diff --git a/runtime/base/unix_file/string_file.cc b/runtime/base/unix_file/string_file.cc
deleted file mode 100644
index ff0d0fa..0000000
--- a/runtime/base/unix_file/string_file.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/string_file.h"
-#include <errno.h>
-#include <algorithm>
-#include "base/logging.h"
-
-namespace unix_file {
-
-StringFile::StringFile() {
-}
-
-StringFile::~StringFile() {
-}
-
-int StringFile::Close() {
-  return 0;
-}
-
-int StringFile::Flush() {
-  return 0;
-}
-
-int64_t StringFile::Read(char *buf, int64_t byte_count, int64_t offset) const {
-  CHECK(buf);
-  CHECK_GE(byte_count, 0);
-
-  if (offset < 0) {
-    return -EINVAL;
-  }
-
-  const int64_t available_bytes = std::min(byte_count, GetLength() - offset);
-  if (available_bytes < 0) {
-    return 0;  // Not an error, but nothing for us to do, either.
-  }
-  memcpy(buf, data_.data() + offset, available_bytes);
-  return available_bytes;
-}
-
-int StringFile::SetLength(int64_t new_length) {
-  if (new_length < 0) {
-    return -EINVAL;
-  }
-  data_.resize(new_length);
-  return 0;
-}
-
-int64_t StringFile::GetLength() const {
-  return data_.size();
-}
-
-int64_t StringFile::Write(const char *buf, int64_t byte_count, int64_t offset) {
-  CHECK(buf);
-  CHECK_GE(byte_count, 0);
-
-  if (offset < 0) {
-    return -EINVAL;
-  }
-
-  if (byte_count == 0) {
-    return 0;
-  }
-
-  // FUSE seems happy to allow writes past the end. (I'd guess it doesn't
-  // synthesize a write of zero bytes so that we're free to implement sparse
-  // files.) GNU as(1) seems to require such writes. Those files are small.
-  const int64_t bytes_past_end = offset - GetLength();
-  if (bytes_past_end > 0) {
-    data_.append(bytes_past_end, '\0');
-  }
-
-  data_.replace(offset, byte_count, buf, byte_count);
-  return byte_count;
-}
-
-void StringFile::Assign(const art::StringPiece &new_data) {
-  data_.assign(new_data.data(), new_data.size());
-}
-
-const art::StringPiece StringFile::ToStringPiece() const {
-  return data_;
-}
-
-}  // namespace unix_file
diff --git a/runtime/base/unix_file/string_file.h b/runtime/base/unix_file/string_file.h
deleted file mode 100644
index 26904f8..0000000
--- a/runtime/base/unix_file/string_file.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_BASE_UNIX_FILE_STRING_FILE_H_
-#define ART_RUNTIME_BASE_UNIX_FILE_STRING_FILE_H_
-
-#include <stdint.h>
-
-#include <string>
-
-#include "base/macros.h"
-#include "base/stringpiece.h"
-#include "base/unix_file/random_access_file.h"
-
-namespace unix_file {
-
-// A RandomAccessFile implementation backed by a std::string. (That is, all data is
-// kept in memory.)
-//
-// Not thread safe.
-class StringFile : public RandomAccessFile {
- public:
-  StringFile();
-  virtual ~StringFile();
-
-  // RandomAccessFile API.
-  virtual int Close();
-  virtual int Flush();
-  virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
-  virtual int SetLength(int64_t new_length);
-  virtual int64_t GetLength() const;
-  virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
-
-  // Bonus API.
-  void Assign(const art::StringPiece& new_data);
-  const art::StringPiece ToStringPiece() const;
-
- private:
-  std::string data_;
-
-  DISALLOW_COPY_AND_ASSIGN(StringFile);
-};
-
-}  // namespace unix_file
-
-#endif  // ART_RUNTIME_BASE_UNIX_FILE_STRING_FILE_H_
diff --git a/runtime/base/unix_file/string_file_test.cc b/runtime/base/unix_file/string_file_test.cc
deleted file mode 100644
index 8821461..0000000
--- a/runtime/base/unix_file/string_file_test.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/string_file.h"
-#include "base/unix_file/random_access_file_test.h"
-#include "gtest/gtest.h"
-
-namespace unix_file {
-
-class StringFileTest : public RandomAccessFileTest {
- protected:
-  virtual RandomAccessFile* MakeTestFile() {
-    return new StringFile;
-  }
-};
-
-TEST_F(StringFileTest, Read) {
-  TestRead();
-}
-
-TEST_F(StringFileTest, SetLength) {
-  TestSetLength();
-}
-
-TEST_F(StringFileTest, Write) {
-  TestWrite();
-}
-
-}  // namespace unix_file
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6aab632..ee13e03 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -5963,4 +5963,34 @@
   return ComputeModifiedUtf8Hash(descriptor);
 }
 
+bool ClassLinker::MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m) {
+  // Non-image methods don't use direct code pointer.
+  if (!m->GetDeclaringClass()->IsBootStrapClassLoaded()) {
+    return false;
+  }
+  if (m->IsPrivate()) {
+    // The method can only be called inside its own oat file. Therefore it won't be called using
+    // its direct code if the oat file has been compiled in PIC mode.
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    const DexFile& dex_file = m->GetDeclaringClass()->GetDexFile();
+    const OatFile::OatDexFile* oat_dex_file = class_linker->FindOpenedOatDexFileForDexFile(dex_file);
+    if (oat_dex_file == nullptr) {
+      // No oat file: the method has not been compiled.
+      return false;
+    }
+    const OatFile* oat_file = oat_dex_file->GetOatFile();
+    return oat_file != nullptr && !oat_file->IsPic();
+  } else {
+    // The method can be called outside its own oat file. Therefore it won't be called using its
+    // direct code pointer only if all loaded oat files have been compiled in PIC mode.
+    ReaderMutexLock mu(Thread::Current(), dex_lock_);
+    for (const OatFile* oat_file : oat_files_) {
+      if (!oat_file->IsPic()) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
+
 }  // namespace art
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index b78d0b5..55332f8 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -471,6 +471,10 @@
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Returns true if the method can be called with its direct code pointer, false otherwise.
+  bool MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   const OatFile::OatMethod FindOatMethodFor(mirror::ArtMethod* method, bool* found)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ac078aa..4f09460 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1137,4 +1137,24 @@
   CheckPreverified(statics.Get(), true);
 }
 
+TEST_F(ClassLinkerTest, IsBootStrapClassLoaded) {
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<3> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader(
+      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Statics"))));
+
+  // java.lang.Object is a bootstrap class.
+  Handle<mirror::Class> jlo_class(
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+  ASSERT_TRUE(jlo_class.Get() != nullptr);
+  EXPECT_TRUE(jlo_class.Get()->IsBootStrapClassLoaded());
+
+  // Statics is not a bootstrap class.
+  Handle<mirror::Class> statics(
+      hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
+  ASSERT_TRUE(statics.Get() != nullptr);
+  EXPECT_FALSE(statics.Get()->IsBootStrapClassLoaded());
+}
+
 }  // namespace art
diff --git a/runtime/closure.h b/runtime/closure.h
deleted file mode 100644
index 9bea28f..0000000
--- a/runtime/closure.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_CLOSURE_H_
-#define ART_RUNTIME_CLOSURE_H_
-
-namespace art {
-
-class Thread;
-
-class Closure {
- public:
-  virtual ~Closure() { }
-  virtual void Run(Thread* self) = 0;
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_CLOSURE_H_
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d5cba50..a9b70cb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3264,8 +3264,16 @@
       ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
       const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
       if (is_compiled) {
-        VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
-        return DeoptimizationRequest::kSelectiveDeoptimization;
+        // If the method may be called through its direct code pointer (without loading
+        // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
+        if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
+          VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
+                     << "into image for compiled method " << PrettyMethod(m);
+          return DeoptimizationRequest::kFullDeoptimization;
+        } else {
+          VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
+          return DeoptimizationRequest::kSelectiveDeoptimization;
+        }
       } else {
         // Method is not compiled: we don't need to deoptimize.
         VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 812cfd3..bd49754 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1048,6 +1048,11 @@
     DISALLOW_COPY_AND_ASSIGN(InitializeClassVisitor);
   };
 
+  // Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
+  bool IsBootStrapClassLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetClassLoader() == nullptr;
+  }
+
  private:
   void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/strutil.h b/runtime/strutil.h
deleted file mode 100644
index c8d39e2..0000000
--- a/runtime/strutil.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_STRUTIL_H_
-#define ART_RUNTIME_STRUTIL_H_
-
-#include <string.h>
-
-namespace art {
-
-// Key comparison function for C strings.
-struct CStringLt {
-  bool operator()(const char* s1, const char* s2) const {
-    return strcmp(s1, s2) < 0;
-  }
-};
-
-// Key equality function for C strings.
-struct CStringEq {
-  bool operator()(const char* s1, const char* s2) const {
-    return strcmp(s1, s2) == 0;
-  }
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_STRUTIL_H_
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index d6330c8..8c08067 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -22,13 +22,18 @@
 
 #include "barrier.h"
 #include "base/mutex.h"
-#include "closure.h"
 #include "mem_map.h"
 
 namespace art {
 
 class ThreadPool;
 
+class Closure {
+ public:
+  virtual ~Closure() { }
+  virtual void Run(Thread* self) = 0;
+};
+
 class Task : public Closure {
  public:
   // Called when references reaches 0.
diff --git a/runtime/utils.h b/runtime/utils.h
index 668c897..f9622b7 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -430,7 +430,8 @@
 // Sleep for the given number of nanoseconds, a bad way to handle contention.
 void NanoSleep(uint64_t ns);
 
-// Initialize a timespec to either an absolute or relative time.
+// Initialize a timespec to either a relative time (ms,ns), or to the absolute
+// time corresponding to the indicated clock value plus the supplied offset.
 void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts);
 
 // Splits a string using the given separator character into a vector of
diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java
index e7dbe24..91618fc 100644
--- a/test/422-type-conversion/src/Main.java
+++ b/test/422-type-conversion/src/Main.java
@@ -62,6 +62,18 @@
     }
   }
 
+  public static void assertFloatIsNaN(float result) {
+    if (!Float.isNaN(result)) {
+      throw new Error("Expected: NaN, found: " + result);
+    }
+  }
+
+  public static void assertDoubleIsNaN(double result) {
+    if (!Double.isNaN(result)) {
+      throw new Error("Expected: NaN, found: " + result);
+    }
+  }
+
 
   public static void main(String[] args) {
     // Generate, compile and check int-to-long Dex instructions.
@@ -94,6 +106,15 @@
     // Generate, compile and check float-to-int Dex instructions.
     floatToInt();
 
+    // Generate, compile and check float-to-long Dex instructions.
+    floatToLong();
+
+    // Generate, compile and check float-to-double Dex instructions.
+    floatToDouble();
+
+    // Generate, compile and check double-to-float Dex instructions.
+    doubleToFloat();
+
     // Generate, compile and check int-to-byte Dex instructions.
     shortToByte();
     intToByte();
@@ -342,6 +363,84 @@
     assertIntEquals(-2147483648, $opt$FloatToInt(Float.NEGATIVE_INFINITY));
   }
 
+  private static void floatToLong() {
+    assertLongEquals(1L, $opt$FloatToLong(1F));
+    assertLongEquals(0L, $opt$FloatToLong(0F));
+    assertLongEquals(0L, $opt$FloatToLong(-0F));
+    assertLongEquals(-1L, $opt$FloatToLong(-1F));
+    assertLongEquals(51L, $opt$FloatToLong(51F));
+    assertLongEquals(-51L, $opt$FloatToLong(-51F));
+    assertLongEquals(0L, $opt$FloatToLong(0.5F));
+    assertLongEquals(0L, $opt$FloatToLong(0.4999999F));
+    assertLongEquals(0L, $opt$FloatToLong(-0.4999999F));
+    assertLongEquals(0L, $opt$FloatToLong(-0.5F));
+    assertLongEquals(42L, $opt$FloatToLong(42.199F));
+    assertLongEquals(-42L, $opt$FloatToLong(-42.199F));
+    assertLongEquals(2147483648L, $opt$FloatToLong(2147483647F));  // 2^31 - 1
+    assertLongEquals(-2147483648L, $opt$FloatToLong(-2147483647F));  // -(2^31 - 1)
+    assertLongEquals(-2147483648L, $opt$FloatToLong(-2147483648F));  // -(2^31)
+    assertLongEquals(2147483648L, $opt$FloatToLong(2147483648F));  // (2^31)
+    assertLongEquals(-2147483648L, $opt$FloatToLong(-2147483649F));  // -(2^31 + 1)
+    assertLongEquals(9223372036854775807L, $opt$FloatToLong(9223372036854775807F));  // 2^63 - 1
+    assertLongEquals(-9223372036854775808L, $opt$FloatToLong(-9223372036854775807F));  // -(2^63 - 1)
+    assertLongEquals(-9223372036854775808L, $opt$FloatToLong(-9223372036854775808F));  // -(2^63)
+    assertLongEquals(0L, $opt$FloatToLong(Float.NaN));
+    assertLongEquals(9223372036854775807L, $opt$FloatToLong(Float.POSITIVE_INFINITY));
+    assertLongEquals(-9223372036854775808L, $opt$FloatToLong(Float.NEGATIVE_INFINITY));
+  }
+
+  private static void floatToDouble() {
+    assertDoubleEquals(1D, $opt$FloatToDouble(1F));
+    assertDoubleEquals(0D, $opt$FloatToDouble(0F));
+    assertDoubleEquals(0D, $opt$FloatToDouble(-0F));
+    assertDoubleEquals(-1D, $opt$FloatToDouble(-1F));
+    assertDoubleEquals(51D, $opt$FloatToDouble(51F));
+    assertDoubleEquals(-51D, $opt$FloatToDouble(-51F));
+    assertDoubleEquals(0.5D, $opt$FloatToDouble(0.5F));
+    assertDoubleEquals(0.49999991059303284D, $opt$FloatToDouble(0.4999999F));
+    assertDoubleEquals(-0.49999991059303284D, $opt$FloatToDouble(-0.4999999F));
+    assertDoubleEquals(-0.5D, $opt$FloatToDouble(-0.5F));
+    assertDoubleEquals(42.19900131225586D, $opt$FloatToDouble(42.199F));
+    assertDoubleEquals(-42.19900131225586D, $opt$FloatToDouble(-42.199F));
+    assertDoubleEquals(2147483648D, $opt$FloatToDouble(2147483647F));  // 2^31 - 1
+    assertDoubleEquals(-2147483648D, $opt$FloatToDouble(-2147483647F));  // -(2^31 - 1)
+    assertDoubleEquals(-2147483648D, $opt$FloatToDouble(-2147483648F));  // -(2^31)
+    assertDoubleEquals(2147483648D, $opt$FloatToDouble(2147483648F));  // (2^31)
+    assertDoubleEquals(-2147483648D, $opt$FloatToDouble(-2147483649F));  // -(2^31 + 1)
+    assertDoubleEquals(9223372036854775807D, $opt$FloatToDouble(9223372036854775807F));  // 2^63 - 1
+    assertDoubleEquals(-9223372036854775807D, $opt$FloatToDouble(-9223372036854775807F));  // -(2^63 - 1)
+    assertDoubleEquals(-9223372036854775808D, $opt$FloatToDouble(-9223372036854775808F));  // -(2^63)
+    assertDoubleIsNaN($opt$FloatToDouble(Float.NaN));
+    assertDoubleEquals(Double.POSITIVE_INFINITY, $opt$FloatToDouble(Float.POSITIVE_INFINITY));
+    assertDoubleEquals(Double.NEGATIVE_INFINITY, $opt$FloatToDouble(Float.NEGATIVE_INFINITY));
+  }
+
+  private static void doubleToFloat() {
+    assertFloatEquals(1F, $opt$DoubleToFloat(1D));
+    assertFloatEquals(0F, $opt$DoubleToFloat(0D));
+    assertFloatEquals(0F, $opt$DoubleToFloat(-0D));
+    assertFloatEquals(-1F, $opt$DoubleToFloat(-1D));
+    assertFloatEquals(51F, $opt$DoubleToFloat(51D));
+    assertFloatEquals(-51F, $opt$DoubleToFloat(-51D));
+    assertFloatEquals(0.5F, $opt$DoubleToFloat(0.5D));
+    assertFloatEquals(0.4999999F, $opt$DoubleToFloat(0.4999999D));
+    assertFloatEquals(-0.4999999F, $opt$DoubleToFloat(-0.4999999D));
+    assertFloatEquals(-0.5F, $opt$DoubleToFloat(-0.5D));
+    assertFloatEquals(42.199F, $opt$DoubleToFloat(42.199D));
+    assertFloatEquals(-42.199F, $opt$DoubleToFloat(-42.199D));
+    assertFloatEquals(2147483648F, $opt$DoubleToFloat(2147483647D));  // 2^31 - 1
+    assertFloatEquals(-2147483648F, $opt$DoubleToFloat(-2147483647D));  // -(2^31 - 1)
+    assertFloatEquals(-2147483648F, $opt$DoubleToFloat(-2147483648D));  // -(2^31)
+    assertFloatEquals(2147483648F, $opt$DoubleToFloat(2147483648D));  // (2^31)
+    assertFloatEquals(-2147483648F, $opt$DoubleToFloat(-2147483649D));  // -(2^31 + 1)
+    assertFloatEquals(9223372036854775807F, $opt$DoubleToFloat(9223372036854775807D));  // 2^63 - 1
+    assertFloatEquals(-9223372036854775807F, $opt$DoubleToFloat(-9223372036854775807D));  // -(2^63 - 1)
+    assertFloatEquals(-9223372036854775808F, $opt$DoubleToFloat(-9223372036854775808D));  // -(2^63)
+    assertFloatIsNaN($opt$DoubleToFloat(Float.NaN));
+    assertFloatEquals(Float.POSITIVE_INFINITY, $opt$DoubleToFloat(Double.POSITIVE_INFINITY));
+    assertFloatEquals(Float.NEGATIVE_INFINITY, $opt$DoubleToFloat(Double.NEGATIVE_INFINITY));
+  }
+
   private static void shortToByte() {
     assertByteEquals((byte)1, $opt$ShortToByte((short)1));
     assertByteEquals((byte)0, $opt$ShortToByte((short)0));
@@ -470,48 +569,57 @@
 
 
   // These methods produce int-to-long Dex instructions.
-  static long $opt$ByteToLong(byte a) { return a; }
-  static long $opt$ShortToLong(short a) { return a; }
-  static long $opt$IntToLong(int a) { return a; }
-  static long $opt$CharToLong(int a) { return a; }
+  static long $opt$ByteToLong(byte a) { return (long)a; }
+  static long $opt$ShortToLong(short a) { return (long)a; }
+  static long $opt$IntToLong(int a) { return (long)a; }
+  static long $opt$CharToLong(int a) { return (long)a; }
 
   // These methods produce int-to-float Dex instructions.
-  static float $opt$ByteToFloat(byte a) { return a; }
-  static float $opt$ShortToFloat(short a) { return a; }
-  static float $opt$IntToFloat(int a) { return a; }
-  static float $opt$CharToFloat(char a) { return a; }
+  static float $opt$ByteToFloat(byte a) { return (float)a; }
+  static float $opt$ShortToFloat(short a) { return (float)a; }
+  static float $opt$IntToFloat(int a) { return (float)a; }
+  static float $opt$CharToFloat(char a) { return (float)a; }
 
   // These methods produce int-to-double Dex instructions.
-  static double $opt$ByteToDouble(byte a) { return a; }
-  static double $opt$ShortToDouble(short a) { return a; }
-  static double $opt$IntToDouble(int a) { return a; }
-  static double $opt$CharToDouble(int a) { return a; }
+  static double $opt$ByteToDouble(byte a) { return (double)a; }
+  static double $opt$ShortToDouble(short a) { return (double)a; }
+  static double $opt$IntToDouble(int a) { return (double)a; }
+  static double $opt$CharToDouble(int a) { return (double)a; }
 
   // These methods produce long-to-int Dex instructions.
-  static int $opt$LongToInt(long a){ return (int)a; }
-  static int $opt$LongLiteralToInt(){ return (int)42L; }
+  static int $opt$LongToInt(long a) { return (int)a; }
+  static int $opt$LongLiteralToInt() { return (int)42L; }
 
   // This method produces a long-to-float Dex instruction.
-  static float $opt$LongToFloat(long a){ return (float)a; }
+  static float $opt$LongToFloat(long a) { return (float)a; }
 
   // This method produces a long-to-double Dex instruction.
-  static double $opt$LongToDouble(long a){ return (double)a; }
+  static double $opt$LongToDouble(long a) { return (double)a; }
 
   // This method produces a float-to-int Dex instruction.
-  static int $opt$FloatToInt(float a){ return (int)a; }
+  static int $opt$FloatToInt(float a) { return (int)a; }
+
+  // This method produces a float-to-double Dex instruction.
+  static double $opt$FloatToDouble(float a) { return (double)a; }
+
+  // This method produces a double-to-float Dex instruction.
+  static float $opt$DoubleToFloat(double a) { return (float)a; }
+
+  // This method produces a float-to-long Dex instruction.
+  static long $opt$FloatToLong(float a) { return (long)a; }
 
   // These methods produce int-to-byte Dex instructions.
-  static byte $opt$ShortToByte(short a){ return (byte)a; }
-  static byte $opt$IntToByte(int a){ return (byte)a; }
-  static byte $opt$CharToByte(char a){ return (byte)a; }
+  static byte $opt$ShortToByte(short a) { return (byte)a; }
+  static byte $opt$IntToByte(int a) { return (byte)a; }
+  static byte $opt$CharToByte(char a) { return (byte)a; }
 
   // These methods produce int-to-short Dex instructions.
-  static short $opt$ByteToShort(byte a){ return (short)a; }
-  static short $opt$IntToShort(int a){ return (short)a; }
-  static short $opt$CharToShort(char a){ return (short)a; }
+  static short $opt$ByteToShort(byte a) { return (short)a; }
+  static short $opt$IntToShort(int a) { return (short)a; }
+  static short $opt$CharToShort(char a) { return (short)a; }
 
   // These methods produce int-to-char Dex instructions.
-  static char $opt$ByteToChar(byte a){ return (char)a; }
-  static char $opt$ShortToChar(short a){ return (char)a; }
-  static char $opt$IntToChar(int a){ return (char)a; }
+  static char $opt$ByteToChar(byte a) { return (char)a; }
+  static char $opt$ShortToChar(short a) { return (char)a; }
+  static char $opt$IntToChar(int a) { return (char)a; }
 }
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index b85685b..69ba288 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -342,6 +342,14 @@
       $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
+# If ART_USE_OPTIMIZING_COMPILER is set to true, then the default core.art has been
+# compiled with the optimizing compiler.
+ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+      default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
 TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=