Merge "Fix a double unmap issue in MemMap::UnMapAtEnd()." into dalvik-dev
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 4c658a2..655c7dd 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -17,6 +17,7 @@
 LOCAL_PATH := art
 
 TEST_COMMON_SRC_FILES := \
+	compiler/dex/arena_allocator_test.cc \
 	compiler/driver/compiler_driver_test.cc \
 	compiler/elf_writer_test.cc \
 	compiler/image_test.cc \
@@ -27,6 +28,7 @@
 	compiler/utils/arm/managed_register_arm_test.cc \
 	compiler/utils/x86/managed_register_x86_test.cc \
 	runtime/barrier_test.cc \
+	runtime/base/bit_vector_test.cc \
 	runtime/base/histogram_test.cc \
 	runtime/base/mutex_test.cc \
 	runtime/base/timing_logger_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index ea7b0b0..e8438a2 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -81,6 +81,16 @@
 include $(BUILD_PHONY_PACKAGE)
 endif
 
+ifeq ($(ART_BUILD_TARGET),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := core.art
+LOCAL_MODULE_TAGS := optional
+LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_CORE_IMG_OUT)
+include $(BUILD_PHONY_PACKAGE)
+endif
+
 ########################################################################
 # The full system boot classpath
 TARGET_BOOT_JARS := $(subst :, ,$(DEXPREOPT_BOOT_JARS))
diff --git a/compiler/dex/arena_allocator_test.cc b/compiler/dex/arena_allocator_test.cc
new file mode 100644
index 0000000..63dc615
--- /dev/null
+++ b/compiler/dex/arena_allocator_test.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arena_allocator.h"
+#include "arena_bit_vector.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(ArenaAllocator, Test) {
+  ArenaPool pool;
+  ArenaAllocator arena(&pool);
+  ArenaBitVector bv(&arena, 10, true);
+  bv.SetBit(5);
+  EXPECT_EQ(1U, bv.GetStorageSize());
+  bv.SetBit(35);
+  EXPECT_EQ(2U, bv.GetStorageSize());
+}
+
+}  // namespace art
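
For reference, the storage-size expectations in this test follow directly from the word-granular sizing the bit vector uses: storage is an array of 32-bit words sized as (bits + 31) >> 5, and an expandable vector grows to cover the highest bit set. A minimal standalone sketch of that arithmetic (WordsForBits is a hypothetical helper, not an ART API):

#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the (start_bits + 31) >> 5 sizing used by the
// bit vector: the number of 32-bit storage words needed to cover 'bits' bits.
static uint32_t WordsForBits(uint32_t bits) {
  return (bits + 31) >> 5;
}

int main() {
  assert(WordsForBits(10) == 1U);      // GetStorageSize() after constructing with 10 bits
  assert(WordsForBits(35 + 1) == 2U);  // setting bit 35 grows the vector to cover 36 bits
  return 0;
}
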
diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc
index b921f61..b567ae8 100644
--- a/compiler/dex/arena_bit_vector.cc
+++ b/compiler/dex/arena_bit_vector.cc
@@ -19,119 +19,29 @@
 
 namespace art {
 
-// TODO: profile to make sure this is still a win relative to just using shifted masks.
-static uint32_t check_masks[32] = {
-  0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
-  0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
-  0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
-  0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
-  0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
-  0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
-  0x40000000, 0x80000000 };
+class ArenaBitVectorAllocator : public Allocator {
+ public:
+  explicit ArenaBitVectorAllocator(ArenaAllocator* arena) : arena_(arena) {}
+  ~ArenaBitVectorAllocator() {}
+
+  virtual void* Alloc(size_t size) {
+    return arena_->Alloc(size, ArenaAllocator::kAllocGrowableBitMap);
+  }
+
+  virtual void Free(void*) {}  // Nop.
+
+  static void* operator new(size_t size, ArenaAllocator* arena) {
+    return arena->Alloc(sizeof(ArenaBitVectorAllocator), ArenaAllocator::kAllocGrowableBitMap);
+  }
+  static void operator delete(void* p) {}  // Nop.
+
+ private:
+  ArenaAllocator* arena_;
+  DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
+};
 
 ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits,
                                bool expandable, OatBitMapKind kind)
-  :  arena_(arena),
-     expandable_(expandable),
-     kind_(kind),
-     storage_size_((start_bits + 31) >> 5),
-     storage_(static_cast<uint32_t*>(arena_->Alloc(storage_size_ * sizeof(uint32_t),
-                                                   ArenaAllocator::kAllocGrowableBitMap))) {
-  DCHECK_EQ(sizeof(storage_[0]), 4U);    // Assuming 32-bit units.
-}
-
-/*
- * Determine whether or not the specified bit is set.
- */
-bool ArenaBitVector::IsBitSet(unsigned int num) {
-  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
-
-  unsigned int val = storage_[num >> 5] & check_masks[num & 0x1f];
-  return (val != 0);
-}
-
-// Mark all bits bit as "clear".
-void ArenaBitVector::ClearAllBits() {
-  memset(storage_, 0, storage_size_ * sizeof(uint32_t));
-}
-
-// Mark the specified bit as "set".
-/*
- * TUNING: this could have pathologically bad growth/expand behavior.  Make sure we're
- * not using it badly or change resize mechanism.
- */
-void ArenaBitVector::SetBit(unsigned int num) {
-  if (num >= storage_size_ * sizeof(uint32_t) * 8) {
-    DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
-
-    /* Round up to word boundaries for "num+1" bits */
-    unsigned int new_size = (num + 1 + 31) >> 5;
-    DCHECK_GT(new_size, storage_size_);
-    uint32_t *new_storage =
-        static_cast<uint32_t*>(arena_->Alloc(new_size * sizeof(uint32_t),
-                                             ArenaAllocator::kAllocGrowableBitMap));
-    memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
-    // Zero out the new storage words.
-    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
-    // TOTO: collect stats on space wasted because of resize.
-    storage_ = new_storage;
-    storage_size_ = new_size;
-  }
-
-  storage_[num >> 5] |= check_masks[num & 0x1f];
-}
-
-// Mark the specified bit as "unset".
-void ArenaBitVector::ClearBit(unsigned int num) {
-  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
-  storage_[num >> 5] &= ~check_masks[num & 0x1f];
-}
-
-// Intersect with another bit vector.  Sizes and expandability must be the same.
-void ArenaBitVector::Intersect(const ArenaBitVector* src) {
-  DCHECK_EQ(storage_size_, src->GetStorageSize());
-  DCHECK_EQ(expandable_, src->IsExpandable());
-  for (unsigned int idx = 0; idx < storage_size_; idx++) {
-    storage_[idx] &= src->GetRawStorageWord(idx);
-  }
-}
-
-/*
- * Union with another bit vector.  Sizes and expandability must be the same.
- */
-void ArenaBitVector::Union(const ArenaBitVector* src) {
-  DCHECK_EQ(storage_size_, src->GetStorageSize());
-  DCHECK_EQ(expandable_, src->IsExpandable());
-  for (unsigned int idx = 0; idx < storage_size_; idx++) {
-    storage_[idx] |= src->GetRawStorageWord(idx);
-  }
-}
-
-// Count the number of bits that are set.
-int ArenaBitVector::NumSetBits() {
-  unsigned int count = 0;
-
-  for (unsigned int word = 0; word < storage_size_; word++) {
-    count += __builtin_popcount(storage_[word]);
-  }
-  return count;
-}
-
-/*
- * Mark specified number of bits as "set". Cannot set all bits like ClearAll
- * since there might be unused bits - setting those to one will confuse the
- * iterator.
- */
-void ArenaBitVector::SetInitialBits(unsigned int num_bits) {
-  DCHECK_LE(((num_bits + 31) >> 5), storage_size_);
-  unsigned int idx;
-  for (idx = 0; idx < (num_bits >> 5); idx++) {
-    storage_[idx] = -1;
-  }
-  unsigned int rem_num_bits = num_bits & 0x1f;
-  if (rem_num_bits) {
-    storage_[idx] = (1 << rem_num_bits) - 1;
-  }
-}
+  :  BitVector(start_bits, expandable, new (arena) ArenaBitVectorAllocator(arena)), kind_(kind) {}
 
 }  // namespace art
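
For context on the shape of this change: the hand-rolled arena bit vector is replaced by a thin adapter that satisfies a generic Allocator interface, so the shared BitVector can draw its storage from the compiler arena while Free() remains a no-op (arena memory is reclaimed wholesale when the arena is destroyed). A self-contained sketch of that pattern, with a hypothetical bump-pointer arena standing in for ArenaAllocator:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

// Generic allocation interface, analogous to art::Allocator.
class Allocator {
 public:
  virtual ~Allocator() {}
  virtual void* Alloc(size_t size) = 0;
  virtual void Free(void* p) = 0;
};

// Hypothetical bump-pointer arena standing in for ArenaAllocator.
class BumpArena {
 public:
  void* Alloc(size_t size) {
    size = (size + 7) & ~size_t(7);  // keep 8-byte alignment
    if (used_ + size > sizeof(buffer_)) return nullptr;
    void* p = buffer_ + used_;
    used_ += size;
    return p;
  }
 private:
  alignas(8) uint8_t buffer_[4096];
  size_t used_ = 0;
};

// Adapter in the spirit of ArenaBitVectorAllocator: Alloc is routed to the
// arena, Free is a no-op because the arena reclaims everything at once.
class ArenaAdapter : public Allocator {
 public:
  explicit ArenaAdapter(BumpArena* arena) : arena_(arena) {}
  void* Alloc(size_t size) override { return arena_->Alloc(size); }
  void Free(void*) override {}  // Nop.
 private:
  BumpArena* const arena_;
};

int main() {
  BumpArena arena;
  ArenaAdapter adapter(&arena);
  // A growable bit vector would request its storage words from the allocator:
  uint32_t* words = static_cast<uint32_t*>(adapter.Alloc(2 * sizeof(uint32_t)));
  std::memset(words, 0, 2 * sizeof(uint32_t));
  words[35 >> 5] |= 1u << (35 & 0x1f);  // set bit 35
  std::cout << std::hex << words[1] << std::endl;  // prints 8
  adapter.Free(words);  // no-op; the storage lives until the arena goes away
  return 0;
}
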
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 53e6eb6..4b2193a 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -17,109 +17,28 @@
 #ifndef ART_COMPILER_DEX_ARENA_BIT_VECTOR_H_
 #define ART_COMPILER_DEX_ARENA_BIT_VECTOR_H_
 
-#include <stdint.h>
-#include <stddef.h>
-#include "compiler_enums.h"
 #include "arena_allocator.h"
+#include "base/bit_vector.h"
+#include "compiler_enums.h"
 
 namespace art {
 
 /*
- * Expanding bitmap, used for tracking resources.  Bits are numbered starting
- * from zero.  All operations on a BitVector are unsynchronized.
+ * A BitVector implementation that uses Arena allocation.
  */
-class ArenaBitVector {
+class ArenaBitVector : public BitVector {
   public:
-    class Iterator {
-      public:
-        explicit Iterator(ArenaBitVector* bit_vector)
-          : p_bits_(bit_vector),
-            bit_storage_(bit_vector->GetRawStorage()),
-            bit_index_(0),
-            bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
-
-        // Return the position of the next set bit.  -1 means end-of-element reached.
-        int32_t Next() {
-          // Did anything obviously change since we started?
-          DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
-          DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
-
-          if (UNLIKELY(bit_index_ >= bit_size_)) return -1;
-
-          uint32_t word_index = bit_index_ / 32;
-          uint32_t word = bit_storage_[word_index];
-          // Mask out any bits in the first word we've already considered.
-          word >>= bit_index_ & 0x1f;
-          if (word == 0) {
-            bit_index_ &= ~0x1f;
-            do {
-              word_index++;
-              if (UNLIKELY((word_index * 32) >= bit_size_)) {
-                bit_index_ = bit_size_;
-                return -1;
-              }
-              word = bit_storage_[word_index];
-              bit_index_ += 32;
-            } while (word == 0);
-          }
-          bit_index_ += CTZ(word) + 1;
-          return bit_index_ - 1;
-        }
-
-        static void* operator new(size_t size, ArenaAllocator* arena) {
-          return arena->Alloc(sizeof(ArenaBitVector::Iterator),
-                              ArenaAllocator::kAllocGrowableBitMap);
-        };
-        static void operator delete(void* p) {}  // Nop.
-
-      private:
-        ArenaBitVector* const p_bits_;
-        uint32_t* const bit_storage_;
-        uint32_t bit_index_;              // Current index (size in bits).
-        const uint32_t bit_size_;       // Size of vector in bits.
-    };
-
     ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
                    OatBitMapKind kind = kBitMapMisc);
     ~ArenaBitVector() {}
 
-    static void* operator new(size_t size, ArenaAllocator* arena) {
-      return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap);
-    }
-    static void operator delete(void* p) {}  // Nop.
-
-    void SetBit(uint32_t num);
-    void ClearBit(uint32_t num);
-    void MarkAllBits(bool set);
-    void DebugBitVector(char* msg, int length);
-    bool IsBitSet(uint32_t num);
-    void ClearAllBits();
-    void SetInitialBits(uint32_t num_bits);
-    void Copy(ArenaBitVector* src) {
-      memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
-    }
-    void Intersect(const ArenaBitVector* src2);
-    void Union(const ArenaBitVector* src);
-    // Are we equal to another bit vector?  Note: expandability attributes must also match.
-    bool Equal(const ArenaBitVector* src) {
-      return (storage_size_ == src->GetStorageSize()) &&
-        (expandable_ == src->IsExpandable()) &&
-        (memcmp(storage_, src->GetRawStorage(), storage_size_ * 4) == 0);
-    }
-    int32_t NumSetBits();
-
-    uint32_t GetStorageSize() const { return storage_size_; }
-    bool IsExpandable() const { return expandable_; }
-    uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
-    uint32_t* GetRawStorage() { return storage_; }
-    const uint32_t* GetRawStorage() const { return storage_; }
+    static void* operator new(size_t size, ArenaAllocator* arena) {
+      return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap);
+    }
+    static void operator delete(void* p) {}  // Nop.
 
   private:
-    ArenaAllocator* const arena_;
-    const bool expandable_;         // expand bitmap if we run out?
-    const OatBitMapKind kind_;      // for memory use tuning.
-    uint32_t   storage_size_;       // current size, in 32-bit words.
-    uint32_t*  storage_;
+    const OatBitMapKind kind_;      // for memory use tuning. TODO: currently unused.
 };
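
The class keeps its placement-style operator new so callers can continue to write new (arena) ArenaBitVector(...) and have the object itself allocated from arena memory, with operator delete reduced to a no-op. A minimal standalone illustration of that idiom (Arena and Node here are hypothetical):

#include <cstddef>

// Hypothetical fixed-size arena.
struct Arena {
  alignas(8) char buffer[256];
  size_t used = 0;
  void* Alloc(size_t size) {
    void* p = buffer + used;
    used += size;
    return p;
  }
};

// Minimal object type using the same idiom as ArenaBitVector's operator new.
struct Node {
  explicit Node(int v) : value(v) {}
  int value;
  // Class-scoped placement new: the object itself is carved out of the arena.
  static void* operator new(size_t size, Arena* arena) { return arena->Alloc(size); }
  // Matching delete is a no-op; the arena owns the memory.
  static void operator delete(void*) {}
};

int main() {
  Arena arena;
  Node* n = new (&arena) Node(42);  // lands in arena.buffer, no heap allocation
  return n->value == 42 ? 0 : 1;
}
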
 
 
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 05ca1b5..6ea21fc 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -181,6 +181,8 @@
   kOpBic,
   kOpCmn,
   kOpTst,
+  kOpRev,
+  kOpRevsh,
   kOpBkpt,
   kOpBlx,
   kOpPush,
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index bdc3154..0d7209e 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -29,6 +29,7 @@
 #include "llvm/intrinsic_helper.h"
 #include "llvm/ir_builder.h"
 #include "safe_map.h"
+#include "base/timing_logger.h"
 
 namespace art {
 
@@ -68,7 +69,14 @@
       compiler_flip_match(false),
       arena(pool),
       mir_graph(NULL),
-      cg(NULL) {}
+      cg(NULL),
+      timings("QuickCompiler", true, false) {
+      }
+
+  void StartTimingSplit(const char* label);
+  void NewTimingSplit(const char* label);
+  void EndTiming();
+
   /*
    * Fields needed/generated by common frontend and generally used throughout
    * the compiler.
@@ -109,6 +117,7 @@
 
   UniquePtr<MIRGraph> mir_graph;   // MIR container.
   UniquePtr<Backend> cg;           // Target-specific codegen.
+  base::TimingLogger timings;
 };
 
 }  // namespace art
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 2952570..2f8521f 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -24,6 +24,7 @@
 #include "runtime.h"
 #include "backend.h"
 #include "base/logging.h"
+#include "base/timing_logger.h"
 
 #if defined(ART_USE_PORTABLE_COMPILER)
 #include "dex/portable/mir_to_gbc.h"
@@ -104,8 +105,30 @@
   // (1 << kDebugVerifyBitcode) |
   // (1 << kDebugShowSummaryMemoryUsage) |
   // (1 << kDebugShowFilterStats) |
+  // (1 << kDebugTimings) |
   0;
 
+// TODO: Add a cumulative version of logging, and combine with dex2oat --dump-timing
+void CompilationUnit::StartTimingSplit(const char* label) {
+  if (enable_debug & (1 << kDebugTimings)) {
+    timings.StartSplit(label);
+  }
+}
+
+void CompilationUnit::NewTimingSplit(const char* label) {
+  if (enable_debug & (1 << kDebugTimings)) {
+    timings.NewSplit(label);
+  }
+}
+
+void CompilationUnit::EndTiming() {
+  if (enable_debug & (1 << kDebugTimings)) {
+    timings.EndSplit();
+    LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
+    LOG(INFO) << Dumpable<base::TimingLogger>(timings);
+  }
+}
+
 static CompiledMethod* CompileMethod(CompilerDriver& compiler,
                                      const CompilerBackend compiler_backend,
                                      const DexFile::CodeItem* code_item,
@@ -175,6 +198,7 @@
         (1 << kPromoteCompilerTemps));
   }
 
+  cu.StartTimingSplit("BuildMIRGraph");
   cu.mir_graph.reset(new MIRGraph(&cu, &cu.arena));
 
   /* Gathering opcode stats? */
@@ -192,22 +216,28 @@
   }
 #endif
 
+  cu.NewTimingSplit("MIROpt:CodeLayout");
+
   /* Do a code layout pass */
   cu.mir_graph->CodeLayout();
 
   /* Perform SSA transformation for the whole method */
+  cu.NewTimingSplit("MIROpt:SSATransform");
   cu.mir_graph->SSATransformation();
 
   /* Do constant propagation */
+  cu.NewTimingSplit("MIROpt:ConstantProp");
   cu.mir_graph->PropagateConstants();
 
   /* Count uses */
   cu.mir_graph->MethodUseCount();
 
   /* Perform null check elimination */
+  cu.NewTimingSplit("MIROpt:NullCheckElimination");
   cu.mir_graph->NullCheckElimination();
 
   /* Combine basic blocks where possible */
+  cu.NewTimingSplit("MIROpt:BBOpt");
   cu.mir_graph->BasicBlockCombine();
 
   /* Do some basic block optimizations */
@@ -250,6 +280,7 @@
 
   cu.cg->Materialize();
 
+  cu.NewTimingSplit("Cleanup");
   result = cu.cg->GetCompiledMethod();
 
   if (result) {
@@ -270,6 +301,7 @@
               << " " << PrettyMethod(method_idx, dex_file);
   }
 
+  cu.EndTiming();
   return result;
 }
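
The new StartTimingSplit/NewTimingSplit/EndTiming helpers gate all timing work behind the kDebugTimings flag, so a default build only pays for a bit test per call. A self-contained sketch of that gating pattern, using std::chrono in place of base::TimingLogger (SplitTimer and the flag handling here are illustrative, not the ART API):

#include <chrono>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for base::TimingLogger: records (label, duration) splits.
class SplitTimer {
 public:
  void StartSplit(const char* label) { label_ = label; start_ = Clock::now(); }
  void NewSplit(const char* label) { EndSplit(); StartSplit(label); }
  void EndSplit() {
    if (!label_.empty()) {
      splits_.push_back(std::make_pair(label_, Clock::now() - start_));
      label_.clear();
    }
  }
  void Dump() const {
    for (const auto& s : splits_) {
      std::cout << s.first << ": "
                << std::chrono::duration_cast<std::chrono::microseconds>(s.second).count()
                << "us\n";
    }
  }
 private:
  typedef std::chrono::steady_clock Clock;
  std::string label_;
  Clock::time_point start_;
  std::vector<std::pair<std::string, Clock::duration> > splits_;
};

static const unsigned kDebugTimingsBit = 1u << 0;  // stand-in for (1 << kDebugTimings)
static unsigned enable_debug = kDebugTimingsBit;   // clear it and the calls reduce to a bit test

static SplitTimer timings;

static void NewTimingSplit(const char* label) {
  if (enable_debug & kDebugTimingsBit) {  // same gating as CompilationUnit::NewTimingSplit
    timings.NewSplit(label);
  }
}

int main() {
  NewTimingSplit("BuildMIRGraph");
  NewTimingSplit("MIROpt:SSATransform");
  timings.EndSplit();
  timings.Dump();
  return 0;
}
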
 
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 6c33d10..43f6855 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -78,6 +78,7 @@
   kDebugVerifyBitcode,
   kDebugShowSummaryMemoryUsage,
   kDebugShowFilterStats,
+  kDebugTimings
 };
 
 class LLVMInfo {
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index d184673..2ff7f1c 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -296,6 +296,8 @@
   kThumbOrr,         // orr   [0100001100] rm[5..3] rd[2..0].
   kThumbPop,         // pop   [1011110] r[8..8] rl[7..0].
   kThumbPush,        // push  [1011010] r[8..8] rl[7..0].
+  kThumbRev,         // rev   [1011101000] rm[5..3] rd[2..0]
+  kThumbRevsh,       // revsh   [1011101011] rm[5..3] rd[2..0]
   kThumbRorRR,       // ror   [0100000111] rs[5..3] rd[2..0].
   kThumbSbc,         // sbc   [0100000110] rm[5..3] rd[2..0].
   kThumbStmia,       // stmia   [11000] rn[10..8] reglist [7.. 0].
@@ -399,6 +401,8 @@
   kThumb2AdcRRI8,    // adc [111100010101] rn[19..16] [0] imm3 rd[11..8] imm8.
   kThumb2SubRRI8,    // sub [111100011011] rn[19..16] [0] imm3 rd[11..8] imm8.
   kThumb2SbcRRI8,    // sbc [111100010111] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2RevRR,      // rev [111110101001] rm[19..16] [1111] rd[11..8] 1000 rm[3..0]
+  kThumb2RevshRR,    // revsh [111110101001] rm[19..16] [1111] rd[11..8] 1011 rm[3..0]
   kThumb2It,         // it [10111111] firstcond[7-4] mask[3-0].
   kThumb2Fmstat,     // fmstat [11101110111100011111101000010000].
   kThumb2Vcmpd,      // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index cc40e99..e8c188c 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -327,6 +327,16 @@
                  kFmtUnused, -1, -1,
                  IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
                  | IS_STORE, "push", "<!0R>", 2, kFixupNone),
+    ENCODING_MAP(kThumbRev,           0xba00,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE1,
+                 "rev", "!0C, !1C", 2, kFixupNone),
+    ENCODING_MAP(kThumbRevsh,         0xbac0,
+                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1,
+                 IS_BINARY_OP | REG_DEF0_USE1,
+                 "revsh", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbRorRR,        0x41c0,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
@@ -768,6 +778,16 @@
                  kFmtUnused, -1, -1,
                  IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
                  "sbcs", "!0C, !1C, #!2m", 4, kFixupNone),
+    ENCODING_MAP(kThumb2RevRR, 0xfa90f080,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE12,  // Binary, but rm is stored twice.
+                 "rev", "!0C, !1C", 4, kFixupNone),
+    ENCODING_MAP(kThumb2RevshRR, 0xfa90f0b0,
+                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+                 kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_DEF0_USE12,  // Binary, but rm is stored twice.
+                 "revsh", "!0C, !1C", 4, kFixupNone),
     ENCODING_MAP(kThumb2It,  0xbf00,
                  kFmtBitBlt, 7, 4, kFmtBitBlt, 3, 0, kFmtModImm, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | IS_IT | USES_CCODES,
@@ -1153,6 +1173,7 @@
 void ArmMir2Lir::AssembleLIR() {
   LIR* lir;
   LIR* prev_lir;
+  cu_->NewTimingSplit("Assemble");
   int assembler_retries = 0;
   CodeOffset starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
   data_offset_ = (starting_offset + 0x3) & ~0x3;
@@ -1574,6 +1595,7 @@
 
   data_offset_ = (code_buffer_.size() + 0x3) & ~0x3;
 
+  cu_->NewTimingSplit("LiteralData");
   // Install literals
   InstallLiteralPools();
 
@@ -1584,8 +1606,10 @@
   InstallFillArrayData();
 
   // Create the mapping table and native offset to reference map.
+  cu_->NewTimingSplit("PcMappingTable");
   CreateMappingTables();
 
+  cu_->NewTimingSplit("GcMap");
   CreateNativeGcMap();
 }
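
Per the ENCODING_MAP entries above, the 16-bit Thumb forms pack rd into bits 2..0 and rm into bits 5..3 on top of the base opcodes 0xba00 (rev) and 0xbac0 (revsh). A standalone check of that bit packing (EncodeThumbRegReg is a hypothetical helper mirroring the kFmtBitBlt fields):

#include <cassert>
#include <cstdint>

// Pack rd (bits 2..0) and rm (bits 5..3) onto a 16-bit Thumb opcode skeleton,
// mirroring the kFmtBitBlt 2,0 / 5,3 operand fields in the encoding map.
static uint16_t EncodeThumbRegReg(uint16_t base, int rd, int rm) {
  return static_cast<uint16_t>(base | (rd & 0x7) | ((rm & 0x7) << 3));
}

int main() {
  assert(EncodeThumbRegReg(0xba00, 1, 2) == 0xba11);  // rev   r1, r2
  assert(EncodeThumbRegReg(0xbac0, 1, 2) == 0xbad1);  // revsh r1, r2
  return 0;
}
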
 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index a2ac6ef..3ceeacf 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -314,6 +314,22 @@
     case kOpSub:
       opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
       break;
+    case kOpRev:
+      DCHECK_EQ(shift, 0);
+      if (!thumb_form) {
+        // Binary, but rm is encoded twice.
+        return NewLIR3(kThumb2RevRR, r_dest_src1, r_src2, r_src2);
+      }
+      opcode = kThumbRev;
+      break;
+    case kOpRevsh:
+      DCHECK_EQ(shift, 0);
+      if (!thumb_form) {
+        // Binary, but rm is encoded twice.
+        return NewLIR3(kThumb2RevshRR, r_dest_src1, r_src2, r_src2);
+      }
+      opcode = kThumbRevsh;
+      break;
     case kOp2Byte:
       DCHECK_EQ(shift, 0);
       return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 2ce8f58..a6653fa 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -929,6 +929,7 @@
 }
 
 void Mir2Lir::Materialize() {
+  cu_->NewTimingSplit("RegisterAllocation");
   CompilerInitializeRegAlloc();  // Needs to happen after SSA naming
 
   /* Allocate Registers using simple local allocation scheme */
@@ -940,6 +941,7 @@
        * special codegen doesn't succeed, first_lir_insn_ will
        * set to NULL;
        */
+      cu_->NewTimingSplit("SpecialMIR2LIR");
       SpecialMIR2LIR(mir_graph_->GetSpecialCase());
     }
 
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 64938f3..62feade 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -350,16 +350,13 @@
                           uintptr_t direct_code, uintptr_t direct_method,
                           InvokeType type) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-  if (cu->instruction_set != kThumb2) {
-    // Disable sharpening
-    direct_code = 0;
-    direct_method = 0;
-  }
   if (direct_code != 0 && direct_method != 0) {
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       if (direct_code != static_cast<unsigned int>(-1)) {
-        cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
+        if (cu->instruction_set != kX86) {
+          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
+        }
       } else {
         CHECK_EQ(cu->dex_file, target_method.dex_file);
         LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
@@ -405,6 +402,7 @@
           cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
         } else {
           CHECK_EQ(cu->dex_file, target_method.dex_file);
+          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
           LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                  target_method.dex_method_index, 0);
           if (data_target == NULL) {
@@ -501,10 +499,6 @@
                                  uint32_t unused, uintptr_t unused2,
                                  uintptr_t direct_method, InvokeType unused4) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-  if (cu->instruction_set != kThumb2) {
-    // Disable sharpening
-    direct_method = 0;
-  }
   ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
 
   if (direct_method != 0) {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index ea8b7a6..5f5e5e4 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -768,6 +768,7 @@
  * TODO: consolidate w/ Arm assembly mechanism.
  */
 void MipsMir2Lir::AssembleLIR() {
+  cu_->NewTimingSplit("Assemble");
   AssignOffsets();
   int assembler_retries = 0;
   /*
@@ -792,6 +793,7 @@
   }
 
   // Install literals
+  cu_->NewTimingSplit("LiteralData");
   InstallLiteralPools();
 
   // Install switch tables
@@ -801,8 +803,10 @@
   InstallFillArrayData();
 
   // Create the mapping table and native offset to reference map.
+  cu_->NewTimingSplit("PcMappingTable");
   CreateMappingTables();
 
+  cu_->NewTimingSplit("GcMap");
   CreateNativeGcMap();
 }
 
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 197e200..fa9a3ad 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -819,6 +819,8 @@
 }
 
 void Mir2Lir::MethodMIR2LIR() {
+  cu_->NewTimingSplit("MIR2LIR");
+
   // Hold the labels of each block.
   block_label_list_ =
       static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
@@ -839,7 +841,7 @@
       next_bb = iter.Next();
     } while ((next_bb != NULL) && (next_bb->block_type == kDead));
   }
-
+  cu_->NewTimingSplit("Launchpads");
   HandleSuspendLaunchPads();
 
   HandleThrowLaunchPads();
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 9167b1c..2047f30 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -246,6 +246,8 @@
   UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
 #undef UNARY_ENCODING_MAP
 
+  { kX86Bswap32R, kRegOpcode, IS_UNARY_OP | REG_DEF0_USE0, { 0, 0, 0x0F, 0xC8, 0, 0, 0, 0 }, "Bswap32R", "!0r" },
+
 #define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \
 { kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE01,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \
 { kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \
@@ -371,6 +373,8 @@
       return lir->operands[0];  // length of nop is sole operand
     case kNullary:
       return 1;  // 1 byte of opcode
+    case kRegOpcode:  // lir operands - 0: reg
+      return ComputeSize(entry, 0, 0, false) - 1;  // subtract 1 for modrm
     case kReg:  // lir operands - 0: reg
       return ComputeSize(entry, 0, 0, false);
     case kMem:  // lir operands - 0: base, 1: disp
@@ -514,6 +518,33 @@
   }
 }
 
+void X86Mir2Lir::EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    // There's no 3-byte instruction with +rd
+    DCHECK_NE(0x38, entry->skeleton.extra_opcode1);
+    DCHECK_NE(0x3A, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  DCHECK(!X86_FPREG(reg));
+  DCHECK_LT(reg, 8);
+  code_buffer_.back() += reg;
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
 void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
   if (entry->skeleton.prefix1 != 0) {
     code_buffer_.push_back(entry->skeleton.prefix1);
@@ -596,7 +627,9 @@
     reg = reg & X86_FP_REG_MASK;
   }
   if (reg >= 4) {
-    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+    DCHECK(strchr(entry->name, '8') == NULL ||
+           entry->opcode == kX86Movzx8RM || entry->opcode == kX86Movsx8RM)
+        << entry->name << " " << static_cast<int>(reg)
         << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
   DCHECK_LT(reg, 8);
@@ -1301,6 +1334,9 @@
         DCHECK_EQ(0, entry->skeleton.ax_opcode);
         DCHECK_EQ(0, entry->skeleton.immediate_bytes);
         break;
+      case kRegOpcode:  // lir operands - 0: reg
+        EmitOpRegOpcode(entry, lir->operands[0]);
+        break;
       case kReg:  // lir operands - 0: reg
         EmitOpReg(entry, lir->operands[0]);
         break;
@@ -1443,6 +1479,7 @@
  * TODO: consolidate w/ Arm assembly mechanism.
  */
 void X86Mir2Lir::AssembleLIR() {
+  cu_->NewTimingSplit("Assemble");
   AssignOffsets();
   int assembler_retries = 0;
   /*
@@ -1466,6 +1503,7 @@
     }
   }
 
+  cu_->NewTimingSplit("LiteralData");
   // Install literals
   InstallLiteralPools();
 
@@ -1476,8 +1514,10 @@
   InstallFillArrayData();
 
   // Create the mapping table and native offset to reference map.
+  cu_->NewTimingSplit("PcMappingTable");
   CreateMappingTables();
 
+  cu_->NewTimingSplit("GcMap");
   CreateNativeGcMap();
 }
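
The new kRegOpcode kind covers the short x86 form in which the register is folded into the final opcode byte (opcode + rd) rather than a ModRM byte, which is why ComputeSize subtracts one; for kX86Bswap32R this yields the two-byte sequence 0F C8+rd. A standalone sketch of that byte emission (EmitOpcodePlusReg is a hypothetical stand-in for EmitOpRegOpcode):

#include <cassert>
#include <cstdint>
#include <vector>

// Emit a 0F-prefixed opcode with the register folded into the last byte (the
// "+rd" form), as EmitOpRegOpcode does for kX86Bswap32R (0F C8+rd).
static void EmitOpcodePlusReg(std::vector<uint8_t>* code, uint8_t opcode, int reg) {
  code->push_back(0x0F);
  code->push_back(static_cast<uint8_t>(opcode + reg));  // no ModRM byte follows
}

int main() {
  std::vector<uint8_t> code;
  EmitOpcodePlusReg(&code, 0xC8, 0);  // bswap eax -> 0F C8
  EmitOpcodePlusReg(&code, 0xC8, 7);  // bswap edi -> 0F CF
  assert(code.size() == 4 && code[1] == 0xC8 && code[3] == 0xCF);
  return 0;
}
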
 
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index b1d95ff..b28d7ef 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -170,6 +170,7 @@
 
   private:
     void EmitDisp(int base, int disp);
+    void EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg);
     void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
     void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
     void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index c519bfe..6ec7ebb 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -117,6 +117,7 @@
   switch (op) {
     case kOpNeg: opcode = kX86Neg32R; break;
     case kOpNot: opcode = kX86Not32R; break;
+    case kOpRev: opcode = kX86Bswap32R; break;
     case kOpBlx: opcode = kX86CallR; break;
     default:
       LOG(FATAL) << "Bad case in OpReg " << op;
@@ -161,6 +162,13 @@
       case kOpNeg:
         OpRegCopy(r_dest_src1, r_src2);
         return OpReg(kOpNeg, r_dest_src1);
+      case kOpRev:
+        OpRegCopy(r_dest_src1, r_src2);
+        return OpReg(kOpRev, r_dest_src1);
+      case kOpRevsh:
+        OpRegCopy(r_dest_src1, r_src2);
+        OpReg(kOpRev, r_dest_src1);
+        return OpRegImm(kOpAsr, r_dest_src1, 16);
         // X86 binary opcodes
       case kOpSub: opcode = kX86Sub32RR; break;
       case kOpSbc: opcode = kX86Sbb32RR; break;
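
There is no single x86 instruction that both byte-swaps the low halfword and sign-extends it, so the kOpRevsh case above synthesizes it as a full 32-bit bswap followed by an arithmetic shift right by 16, leaving the byte-swapped low halfword sign-extended. A standalone check of that identity (both functions are illustrative reference implementations):

#include <cassert>
#include <cstdint>

// Reference semantics of revsh: byte-reverse the low 16 bits and sign-extend.
static int32_t RevshReference(int32_t x) {
  uint16_t half = static_cast<uint16_t>(x);
  uint16_t swapped = static_cast<uint16_t>((half >> 8) | (half << 8));
  return static_cast<int16_t>(swapped);  // sign-extends to 32 bits
}

// The sequence emitted above: bswap (full 32-bit byte reverse) then sar 16.
static int32_t RevshViaBswapSar(int32_t x) {
  uint32_t u = static_cast<uint32_t>(x);
  uint32_t swapped = (u >> 24) | ((u >> 8) & 0x0000ff00u) |
                     ((u << 8) & 0x00ff0000u) | (u << 24);
  return static_cast<int32_t>(swapped) >> 16;  // arithmetic shift keeps the sign
}

int main() {
  const int32_t inputs[] = {0, -1, 0x00001234, 0x00001284, static_cast<int32_t>(0xdeadbeef)};
  for (int32_t x : inputs) {
    assert(RevshReference(x) == RevshViaBswapSar(x));
  }
  return 0;
}
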
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index f1b91ca..3518131 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -313,6 +313,7 @@
   UnaryOpcode(kX86Imul, DaR, DaM, DaA),
   UnaryOpcode(kX86Divmod,  DaR, DaM, DaA),
   UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
+  kX86Bswap32R,
 #undef UnaryOpcode
 #define Binary0fOpCode(opcode) \
   opcode ## RR, opcode ## RM, opcode ## RA
@@ -381,6 +382,7 @@
   kData,                                   // Special case for raw data.
   kNop,                                    // Special case for variable length nop.
   kNullary,                                // Opcode that takes no arguments.
+  kRegOpcode,                              // Shorter form of R instruction kind (opcode+rd)
   kReg, kMem, kArray,                      // R, M and A instruction kinds.
   kMemReg, kArrayReg, kThreadReg,          // MR, AR and TR instruction kinds.
   kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index eb0d412..b6c8922 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -183,7 +183,7 @@
   ClearAllVisitedFlags();
   std::vector<std::pair<BasicBlock*, ArenaBitVector::Iterator*> > work_stack;
   bb->visited = true;
-  work_stack.push_back(std::make_pair(bb, new (arena_) ArenaBitVector::Iterator(bb->i_dominated)));
+  work_stack.push_back(std::make_pair(bb, bb->i_dominated->GetIterator()));
   while (!work_stack.empty()) {
     const std::pair<BasicBlock*, ArenaBitVector::Iterator*>& curr = work_stack.back();
     BasicBlock* curr_bb = curr.first;
@@ -196,7 +196,7 @@
       BasicBlock* new_bb = GetBasicBlock(bb_idx);
       new_bb->visited = true;
       work_stack.push_back(
-          std::make_pair(new_bb, new (arena_) ArenaBitVector::Iterator(new_bb->i_dominated)));
+          std::make_pair(new_bb, new_bb->i_dominated->GetIterator()));
     } else {
       // no successor/next
       if (curr_bb->id != NullBasicBlockId) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7c4a6ce..91b0188 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -355,7 +355,7 @@
       jni_compiler_(NULL),
       compiler_enable_auto_elf_loading_(NULL),
       compiler_get_method_code_addr_(NULL),
-      support_boot_image_fixup_(true),
+      support_boot_image_fixup_(instruction_set == kThumb2),
       dedupe_code_("dedupe code"),
       dedupe_mapping_table_("dedupe mapping table"),
       dedupe_vmap_table_("dedupe vmap table"),
@@ -1058,10 +1058,12 @@
   return false;  // Incomplete knowledge needs slow path.
 }
 
-void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
+                                                   bool no_guarantee_of_dex_cache_entry,
                                                    mirror::Class* referrer_class,
                                                    mirror::ArtMethod* method,
                                                    bool update_stats,
+                                                   MethodReference* target_method,
                                                    uintptr_t* direct_code,
                                                    uintptr_t* direct_method) {
   // For direct and static methods compute possible direct_code and direct_method values, ie
@@ -1070,46 +1072,103 @@
   // invoked, so this can be passed to the out-of-line runtime support code.
   *direct_code = 0;
   *direct_method = 0;
+  bool use_dex_cache = false;
+  bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
   if (compiler_backend_ == kPortable) {
     if (sharp_type != kStatic && sharp_type != kDirect) {
       return;
     }
+    use_dex_cache = true;
   } else {
     if (sharp_type != kStatic && sharp_type != kDirect && sharp_type != kInterface) {
       return;
     }
+    // TODO: support patching on all architectures.
+    use_dex_cache = compiling_boot && !support_boot_image_fixup_;
   }
-  bool method_code_in_boot = method->GetDeclaringClass()->GetClassLoader() == NULL;
-  if (!method_code_in_boot) {
-    return;
-  }
-  bool has_clinit_trampoline = method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
-  if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
-    // Ensure we run the clinit trampoline unless we are invoking a static method in the same class.
-    return;
-  }
-  if (update_stats) {
-    if (sharp_type != kInterface) {  // Interfaces always go via a trampoline.
-      stats_->DirectCallsToBoot(type);
-    }
-    stats_->DirectMethodsToBoot(type);
-  }
-  bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
-  if (compiling_boot) {
-    if (support_boot_image_fixup_) {
-      MethodHelper mh(method);
-      if (IsImageClass(mh.GetDeclaringClassDescriptorAsStringPiece())) {
-        // We can only branch directly to Methods that are resolved in the DexCache.
-        // Otherwise we won't invoke the resolution trampoline.
-        *direct_method = -1;
-        *direct_code = -1;
+  bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
+  if (!use_dex_cache) {
+    if (!method_code_in_boot) {
+      use_dex_cache = true;
+    } else {
+      bool has_clinit_trampoline =
+          method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
+      if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
+        // Ensure we run the clinit trampoline unless we are invoking a static method in the same
+        // class.
+        use_dex_cache = true;
       }
     }
-  } else {
-    if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace()) {
-      *direct_method = reinterpret_cast<uintptr_t>(method);
+  }
+  if (update_stats && method_code_in_boot) {
+    if (sharp_type != kInterface) {  // Interfaces always go via a trampoline until we get IMTs.
+      stats_->DirectCallsToBoot(*type);
     }
-    *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+    stats_->DirectMethodsToBoot(*type);
+  }
+  if (!use_dex_cache && compiling_boot) {
+    MethodHelper mh(method);
+    if (!IsImageClass(mh.GetDeclaringClassDescriptorAsStringPiece())) {
+      // We can only branch directly to Methods that are resolved in the DexCache.
+      // Otherwise we won't invoke the resolution trampoline.
+      use_dex_cache = true;
+    }
+  }
+  // If the method is not defined within this dex file, we need a dex cache slot within the
+  // current dex file or direct pointers.
+  bool must_use_direct_pointers = false;
+  if (target_method->dex_file == method->GetDeclaringClass()->GetDexCache()->GetDexFile()) {
+    target_method->dex_method_index = method->GetDexMethodIndex();
+  } else {
+    // TODO: support patching from one dex file to another in the boot image.
+    use_dex_cache = use_dex_cache || compiling_boot;
+    if (no_guarantee_of_dex_cache_entry) {
+      // See if the method is also declared in this dex cache.
+      uint32_t dex_method_idx = MethodHelper(method).FindDexMethodIndexInOtherDexFile(
+          *referrer_class->GetDexCache()->GetDexFile());
+      if (dex_method_idx != DexFile::kDexNoIndex) {
+        target_method->dex_method_index = dex_method_idx;
+      } else {
+        must_use_direct_pointers = true;
+      }
+    }
+  }
+  if (use_dex_cache) {
+    if (must_use_direct_pointers) {
+      // Fail. The test above showed that the only safe dispatch is via the dex cache, but direct
+      // pointers are required because the dex cache lacks an appropriate entry.
+      VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
+    } else {
+      *type = sharp_type;
+    }
+  } else {
+    if (compiling_boot) {
+      *type = sharp_type;
+      *direct_method = -1;
+      if (sharp_type != kInterface) {
+        *direct_code = -1;
+      }
+    } else {
+      bool method_in_image =
+          Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
+      if (method_in_image) {
+        CHECK_EQ(method->IsAbstract(), sharp_type == kInterface);
+        *type = sharp_type;
+        *direct_method = reinterpret_cast<uintptr_t>(method);
+        if (*type != kInterface) {
+          *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+        }
+        target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+        target_method->dex_method_index = method->GetDexMethodIndex();
+      } else if (!must_use_direct_pointers) {
+        // Set the code and rely on the dex cache for the method.
+        *type = sharp_type;
+        *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+      } else {
+        // Direct pointers were required but none were available.
+        VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
+      }
+    }
   }
 }
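
To summarize the rewritten sharpening logic at a high level: the call is routed through the dex cache whenever direct pointers cannot be guaranteed, and sharpening fails only when the dex cache route is also unusable. A simplified, illustrative restatement of the use_dex_cache decision (the struct and field names are hypothetical, and the real code also updates *type, direct_code and direct_method):

#include <iostream>

// Illustrative restatement, not a drop-in replacement, of the use_dex_cache
// decision made by GetCodeAndMethodForDirectCall above.
struct DirectCallQuery {
  bool portable_backend;                        // kPortable always dispatches via the dex cache
  bool compiling_boot;                          // one continuous space -> compiling the boot image
  bool supports_boot_image_fixup;               // currently Thumb2 only (support_boot_image_fixup_)
  bool method_code_in_boot;                     // declaring class has no class loader
  bool needs_clinit_trampoline_in_other_class;  // static call to an uninitialized, different class
  bool declaring_class_in_image;                // IsImageClass(declaring class descriptor)
  bool defined_in_other_dex_file;               // target_method comes from a different dex file
};

static bool MustUseDexCache(const DirectCallQuery& q) {
  if (q.portable_backend) return true;
  if (q.compiling_boot && !q.supports_boot_image_fixup) return true;  // no patching support yet
  if (!q.method_code_in_boot) return true;
  if (q.needs_clinit_trampoline_in_other_class) return true;
  if (q.compiling_boot && !q.declaring_class_in_image) return true;   // not resolvable in the image dex cache
  if (q.compiling_boot && q.defined_in_other_dex_file) return true;   // TODO in the patch: cross-dex patching
  return false;
}

int main() {
  DirectCallQuery q = {false, false, true, true, false, true, false};
  std::cout << (MustUseDexCache(q) ? "dex cache" : "direct pointers") << std::endl;
  return 0;
}
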
 
@@ -1126,6 +1185,9 @@
       ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method->dex_method_index,
                                                  *invoke_type);
   if (resolved_method != NULL) {
+    if (*invoke_type == kVirtual || *invoke_type == kSuper) {
+      *vtable_idx = resolved_method->GetMethodIndex();
+    }
     // Don't try to fast-path if we don't understand the caller's class or this appears to be an
     // Incompatible Class Change Error.
     mirror::Class* referrer_class =
@@ -1166,13 +1228,14 @@
           // dex cache, check that this resolved method is where we expect it.
           CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
                 resolved_method) << PrettyMethod(resolved_method);
-          if (update_stats) {
-            stats_->ResolvedMethod(*invoke_type);
-            stats_->VirtualMadeDirect(*invoke_type);
+          InvokeType orig_invoke_type = *invoke_type;
+          GetCodeAndMethodForDirectCall(invoke_type, kDirect, false, referrer_class, resolved_method,
+                                        update_stats, target_method, direct_code, direct_method);
+          if (update_stats && (*invoke_type == kDirect)) {
+            stats_->ResolvedMethod(orig_invoke_type);
+            stats_->VirtualMadeDirect(orig_invoke_type);
           }
-          GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, resolved_method,
-                                        update_stats, direct_code, direct_method);
-          *invoke_type = kDirect;
+          DCHECK_NE(*invoke_type, kSuper) << PrettyMethod(resolved_method);
           return true;
         }
         const bool enableVerifierBasedSharpening = enable_devirtualization;
@@ -1194,76 +1257,16 @@
                                                        kVirtual);
             CHECK(called_method != NULL);
             CHECK(!called_method->IsAbstract());
-            GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, called_method,
-                                          update_stats, direct_code, direct_method);
-            bool compiler_needs_dex_cache =
-                (GetCompilerBackend() == kPortable) ||
-                (GetCompilerBackend() == kQuick && instruction_set_ != kThumb2) ||
-                (*direct_code == 0) || (*direct_code == static_cast<unsigned int>(-1)) ||
-                (*direct_method == 0) || (*direct_method == static_cast<unsigned int>(-1));
-            if ((devirt_map_target->dex_file != target_method->dex_file) &&
-                compiler_needs_dex_cache) {
-              // We need to use the dex cache to find either the method or code, and the dex file
-              // containing the method isn't the one expected for the target method. Try to find
-              // the method within the expected target dex file.
-              // TODO: the -1 could be handled as direct code if the patching new the target dex
-              //       file.
-              // TODO: quick only supports direct pointers with Thumb2.
-              // TODO: the following should be factored into a common helper routine to find
-              //       one dex file's method within another.
-              const DexFile* dexfile = target_method->dex_file;
-              const DexFile* cm_dexfile =
-                  called_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
-              const DexFile::MethodId& cm_method_id =
-                  cm_dexfile->GetMethodId(called_method->GetDexMethodIndex());
-              const char* cm_descriptor = cm_dexfile->StringByTypeIdx(cm_method_id.class_idx_);
-              const DexFile::StringId* descriptor = dexfile->FindStringId(cm_descriptor);
-              if (descriptor != NULL) {
-                const DexFile::TypeId* type_id =
-                    dexfile->FindTypeId(dexfile->GetIndexForStringId(*descriptor));
-                if (type_id != NULL) {
-                  const char* cm_name = cm_dexfile->GetMethodName(cm_method_id);
-                  const DexFile::StringId* name = dexfile->FindStringId(cm_name);
-                  if (name != NULL) {
-                    uint16_t return_type_idx;
-                    std::vector<uint16_t> param_type_idxs;
-                    bool success =
-                        dexfile->CreateTypeList(cm_dexfile->GetMethodSignature(cm_method_id).ToString(),
-                                                &return_type_idx, &param_type_idxs);
-                    if (success) {
-                      const DexFile::ProtoId* sig =
-                          dexfile->FindProtoId(return_type_idx, param_type_idxs);
-                      if (sig != NULL) {
-                        const  DexFile::MethodId* method_id = dexfile->FindMethodId(*type_id,
-                                                                                    *name, *sig);
-                        if (method_id != NULL) {
-                          if (update_stats) {
-                            stats_->ResolvedMethod(*invoke_type);
-                            stats_->VirtualMadeDirect(*invoke_type);
-                            stats_->PreciseTypeDevirtualization();
-                          }
-                          target_method->dex_method_index =
-                              dexfile->GetIndexForMethodId(*method_id);
-                          *invoke_type = kDirect;
-                          return true;
-                        }
-                      }
-                    }
-                  }
-                }
-              }
-              // TODO: the stats for direct code and method are off as we failed to find the direct
-              //       method in the referring method's dex cache/file.
-            } else {
-              if (update_stats) {
-                stats_->ResolvedMethod(*invoke_type);
-                stats_->VirtualMadeDirect(*invoke_type);
-                stats_->PreciseTypeDevirtualization();
-              }
-              *target_method = *devirt_map_target;
-              *invoke_type = kDirect;
-              return true;
+            InvokeType orig_invoke_type = *invoke_type;
+            GetCodeAndMethodForDirectCall(invoke_type, kDirect, true, referrer_class, called_method,
+                                          update_stats, target_method, direct_code, direct_method);
+            if (update_stats && (*invoke_type == kDirect)) {
+              stats_->ResolvedMethod(orig_invoke_type);
+              stats_->VirtualMadeDirect(orig_invoke_type);
+              stats_->PreciseTypeDevirtualization();
             }
+            DCHECK_NE(*invoke_type, kSuper);
+            return true;
           }
         }
         if (*invoke_type == kSuper) {
@@ -1273,11 +1276,8 @@
           if (update_stats) {
             stats_->ResolvedMethod(*invoke_type);
           }
-          if (*invoke_type == kVirtual || *invoke_type == kSuper) {
-            *vtable_idx = resolved_method->GetMethodIndex();
-          }
-          GetCodeAndMethodForDirectCall(*invoke_type, *invoke_type, referrer_class, resolved_method,
-                                        update_stats, direct_code, direct_method);
+          GetCodeAndMethodForDirectCall(invoke_type, *invoke_type, false, referrer_class, resolved_method,
+                                        update_stats, target_method, direct_code, direct_method);
           return true;
         }
       }
@@ -1747,6 +1747,7 @@
   "Landroid/opengl/GLUtils;",  // Calls android.opengl.GLUtils.nativeClassInit.
   "Landroid/os/Build;",  // Calls -..-> android.os.SystemProperties.native_get.
   "Landroid/os/Build$VERSION;",  // Requires Build.
+  "Landroid/os/Bundle;",  // Calls android.os.Parcel.obtain -..> Parcel.nativeCreate.
   "Landroid/os/Debug;",  // Requires android.os.Environment.
   "Landroid/os/Environment;",  // Calls System.getenv.
   "Landroid/os/FileUtils;",  // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 7657af5..971021f 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -321,10 +321,12 @@
 
  private:
   // Compute constant code and method pointers when possible
-  void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+  void GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
+                                     bool no_guarantee_of_dex_cache_entry,
                                      mirror::Class* referrer_class,
                                      mirror::ArtMethod* method,
                                      bool update_stats,
+                                     MethodReference* target_method,
                                      uintptr_t* direct_code, uintptr_t* direct_method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 634a160..af86743 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -28,6 +28,8 @@
 
 class OatTest : public CommonTest {
  protected:
+  static const bool kCompile = false;  // DISABLED_ due to the time to compile libcore
+
   void CheckMethod(mirror::ArtMethod* method,
                    const OatFile::OatMethod& oat_method,
                    const DexFile* dex_file)
@@ -40,7 +42,7 @@
       EXPECT_TRUE(oat_method.GetCode() == NULL) << PrettyMethod(method) << " "
                                                 << oat_method.GetCode();
 #if !defined(ART_USE_PORTABLE_COMPILER)
-      EXPECT_EQ(oat_method.GetFrameSizeInBytes(), static_cast<uint32_t>(kStackAlignment));
+      EXPECT_EQ(oat_method.GetFrameSizeInBytes(), kCompile ? kStackAlignment : 0);
       EXPECT_EQ(oat_method.GetCoreSpillMask(), 0U);
       EXPECT_EQ(oat_method.GetFpSpillMask(), 0U);
 #endif
@@ -65,7 +67,6 @@
 };
 
 TEST_F(OatTest, WriteRead) {
-  const bool compile = false;  // DISABLED_ due to the time to compile libcore
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
 
   // TODO: make selectable
@@ -77,7 +78,7 @@
   InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
   compiler_driver_.reset(new CompilerDriver(compiler_backend, insn_set, false, NULL, 2, true));
   jobject class_loader = NULL;
-  if (compile) {
+  if (kCompile) {
     base::TimingLogger timings("OatTest::WriteRead", false, false);
     compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
   }
@@ -96,7 +97,7 @@
                                             tmp.GetFile());
   ASSERT_TRUE(success);
 
-  if (compile) {  // OatWriter strips the code, regenerate to compare
+  if (kCompile) {  // OatWriter strips the code, regenerate to compare
     base::TimingLogger timings("CommonTest::WriteRead", false, false);
     compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
   }
@@ -120,16 +121,18 @@
   for (size_t i = 0; i < dex_file->NumClassDefs(); i++) {
     const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
     const byte* class_data = dex_file->GetClassData(class_def);
-    size_t num_virtual_methods =0;
+    size_t num_virtual_methods = 0;
     if (class_data != NULL) {
       ClassDataItemIterator it(*dex_file, class_data);
       num_virtual_methods = it.NumVirtualMethods();
     }
     const char* descriptor = dex_file->GetClassDescriptor(class_def);
+    mirror::Class* klass = class_linker->FindClass(descriptor, NULL);
 
     UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file->GetOatClass(i));
-
-    mirror::Class* klass = class_linker->FindClass(descriptor, NULL);
+    CHECK_EQ(mirror::Class::Status::kStatusNotReady, oat_class->GetStatus()) << descriptor;
+    CHECK_EQ(kCompile ? OatClassType::kOatClassAllCompiled : OatClassType::kOatClassNoneCompiled,
+             oat_class->GetType()) << descriptor;
 
     size_t method_index = 0;
     for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index f23b72b..f681d7d 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -18,6 +18,7 @@
 
 #include <zlib.h>
 
+#include "base/bit_vector.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
@@ -70,7 +71,9 @@
     size_oat_dex_file_location_checksum_(0),
     size_oat_dex_file_offset_(0),
     size_oat_dex_file_methods_offsets_(0),
+    size_oat_class_type_(0),
     size_oat_class_status_(0),
+    size_oat_class_method_bitmaps_(0),
     size_oat_class_method_offsets_(0) {
   size_t offset = InitOatHeader();
   offset = InitOatDexFiles(offset);
@@ -142,12 +145,48 @@
       oat_dex_files_[i]->methods_offsets_[class_def_index] = offset;
       const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
       const byte* class_data = dex_file->GetClassData(class_def);
-      uint32_t num_methods = 0;
+      uint32_t num_non_null_compiled_methods = 0;
+      UniquePtr<std::vector<CompiledMethod*> > compiled_methods(new std::vector<CompiledMethod*>());
       if (class_data != NULL) {  // ie not an empty class, such as a marker interface
         ClassDataItemIterator it(*dex_file, class_data);
         size_t num_direct_methods = it.NumDirectMethods();
         size_t num_virtual_methods = it.NumVirtualMethods();
-        num_methods = num_direct_methods + num_virtual_methods;
+        size_t num_methods = num_direct_methods + num_virtual_methods;
+
+        // Fill in the compiled_methods_ array for methods that have a
+        // CompiledMethod. We track the number of non-null entries in
+        // num_non_null_compiled_methods since we only want to allocate
+        // OatMethodOffsets for the compiled methods.
+        compiled_methods->reserve(num_methods);
+        while (it.HasNextStaticField()) {
+          it.Next();
+        }
+        while (it.HasNextInstanceField()) {
+          it.Next();
+        }
+        size_t class_def_method_index = 0;
+        while (it.HasNextDirectMethod()) {
+          uint32_t method_idx = it.GetMemberIndex();
+          CompiledMethod* compiled_method =
+              compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
+          compiled_methods->push_back(compiled_method);
+          if (compiled_method != NULL) {
+            num_non_null_compiled_methods++;
+          }
+          class_def_method_index++;
+          it.Next();
+        }
+        while (it.HasNextVirtualMethod()) {
+          uint32_t method_idx = it.GetMemberIndex();
+          CompiledMethod* compiled_method =
+              compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
+          compiled_methods->push_back(compiled_method);
+          if (compiled_method != NULL) {
+            num_non_null_compiled_methods++;
+          }
+          class_def_method_index++;
+          it.Next();
+        }
       }
 
       ClassReference class_ref(dex_file, class_def_index);
@@ -161,7 +200,8 @@
         status = mirror::Class::kStatusNotReady;
       }
 
-      OatClass* oat_class = new OatClass(offset, status, num_methods);
+      OatClass* oat_class = new OatClass(offset, compiled_methods.release(),
+                                         num_non_null_compiled_methods, status);
       oat_classes_.push_back(oat_class);
       offset += oat_class->SizeOf();
     }
@@ -212,20 +252,20 @@
   for (size_t i = 0; i != dex_files_->size(); ++i) {
     const DexFile* dex_file = (*dex_files_)[i];
     CHECK(dex_file != NULL);
-    offset = InitOatCodeDexFile(offset, oat_class_index, *dex_file);
+    offset = InitOatCodeDexFile(offset, &oat_class_index, *dex_file);
   }
   return offset;
 }
 
 size_t OatWriter::InitOatCodeDexFile(size_t offset,
-                                     size_t& oat_class_index,
+                                     size_t* oat_class_index,
                                      const DexFile& dex_file) {
   for (size_t class_def_index = 0;
        class_def_index < dex_file.NumClassDefs();
-       class_def_index++, oat_class_index++) {
+       class_def_index++, (*oat_class_index)++) {
     const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-    offset = InitOatCodeClassDef(offset, oat_class_index, class_def_index, dex_file, class_def);
-    oat_classes_[oat_class_index]->UpdateChecksum(*oat_header_);
+    offset = InitOatCodeClassDef(offset, *oat_class_index, class_def_index, dex_file, class_def);
+    oat_classes_[*oat_class_index]->UpdateChecksum(*oat_header_);
   }
   return offset;
 }
@@ -240,7 +280,7 @@
     return offset;
   }
   ClassDataItemIterator it(dex_file, class_data);
-  CHECK_EQ(oat_classes_[oat_class_index]->method_offsets_.size(),
+  CHECK_LE(oat_classes_[oat_class_index]->method_offsets_.size(),
            it.NumDirectMethods() + it.NumVirtualMethods());
   // Skip fields
   while (it.HasNextStaticField()) {
@@ -251,32 +291,35 @@
   }
   // Process methods
   size_t class_def_method_index = 0;
+  size_t method_offsets_index = 0;
   while (it.HasNextDirectMethod()) {
     bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
     offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
-                               is_native, it.GetMethodInvokeType(class_def), it.GetMemberIndex(),
-                               &dex_file);
+                               &method_offsets_index, is_native,
+                               it.GetMethodInvokeType(class_def), it.GetMemberIndex(), dex_file);
     class_def_method_index++;
     it.Next();
   }
   while (it.HasNextVirtualMethod()) {
     bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
     offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
-                               is_native, it.GetMethodInvokeType(class_def), it.GetMemberIndex(),
-                               &dex_file);
+                               &method_offsets_index, is_native,
+                               it.GetMethodInvokeType(class_def), it.GetMemberIndex(), dex_file);
     class_def_method_index++;
     it.Next();
   }
   DCHECK(!it.HasNext());
+  CHECK_LE(method_offsets_index, class_def_method_index);
   return offset;
 }
 
 size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
                                     size_t __attribute__((unused)) class_def_index,
                                     size_t class_def_method_index,
+                                    size_t* method_offsets_index,
                                     bool __attribute__((unused)) is_native,
                                     InvokeType invoke_type,
-                                    uint32_t method_idx, const DexFile* dex_file) {
+                                    uint32_t method_idx, const DexFile& dex_file) {
   // derived from CompiledMethod if available
   uint32_t code_offset = 0;
   uint32_t frame_size_in_bytes = kStackAlignment;
@@ -292,8 +335,7 @@
       oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
 #endif
 
-  CompiledMethod* compiled_method =
-      compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
+  CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
   if (compiled_method != NULL) {
 #if defined(ART_USE_PORTABLE_COMPILER)
     compiled_method->AddOatdataOffsetToCompliledCodeOffset(
@@ -358,7 +400,7 @@
 
 #if !defined(NDEBUG)
     // We expect GC maps except when the class hasn't been verified or the method is native
-    ClassReference class_ref(dex_file, class_def_index);
+    ClassReference class_ref(&dex_file, class_def_index);
     CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
     mirror::Class::Status status;
     if (compiled_class != NULL) {
@@ -371,7 +413,7 @@
     CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
         << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
         << (status < mirror::Class::kStatusVerified) << " " << status << " "
-        << PrettyMethod(method_idx, *dex_file);
+        << PrettyMethod(method_idx, dex_file);
 #endif
 
     // Deduplicate GC maps
@@ -384,24 +426,26 @@
       offset += gc_map_size;
       oat_header_->UpdateChecksum(&gc_map[0], gc_map_size);
     }
+
+    oat_class->method_offsets_[*method_offsets_index] =
+        OatMethodOffsets(code_offset,
+                         frame_size_in_bytes,
+                         core_spill_mask,
+                         fp_spill_mask,
+                         mapping_table_offset,
+                         vmap_table_offset,
+                         gc_map_offset);
+    (*method_offsets_index)++;
   }
 
-  oat_class->method_offsets_[class_def_method_index] =
-      OatMethodOffsets(code_offset,
-                       frame_size_in_bytes,
-                       core_spill_mask,
-                       fp_spill_mask,
-                       mapping_table_offset,
-                       vmap_table_offset,
-                       gc_map_offset);
 
   if (compiler_driver_->IsImage()) {
     ClassLinker* linker = Runtime::Current()->GetClassLinker();
-    mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
+    mirror::DexCache* dex_cache = linker->FindDexCache(dex_file);
     // Unchecked as we hold mutator_lock_ on entry.
     ScopedObjectAccessUnchecked soa(Thread::Current());
-    mirror::ArtMethod* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache,
-                                                           NULL, NULL, invoke_type);
+    mirror::ArtMethod* method = linker->ResolveMethod(dex_file, method_idx, dex_cache,
+                                                      NULL, NULL, invoke_type);
     CHECK(method != NULL);
     method->SetFrameSizeInBytes(frame_size_in_bytes);
     method->SetCoreSpillMask(core_spill_mask);
@@ -491,7 +535,9 @@
     DO_STAT(size_oat_dex_file_location_checksum_);
     DO_STAT(size_oat_dex_file_offset_);
     DO_STAT(size_oat_dex_file_methods_offsets_);
+    DO_STAT(size_oat_class_type_);
     DO_STAT(size_oat_class_status_);
+    DO_STAT(size_oat_class_method_bitmaps_);
     DO_STAT(size_oat_class_method_offsets_);
     #undef DO_STAT
 
@@ -586,7 +632,7 @@
   for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
     const DexFile* dex_file = (*dex_files_)[i];
     CHECK(dex_file != NULL);
-    relative_offset = WriteCodeDexFile(out, file_offset, relative_offset, oat_class_index,
+    relative_offset = WriteCodeDexFile(out, file_offset, relative_offset, &oat_class_index,
                                        *dex_file);
     if (relative_offset == 0) {
       return 0;
@@ -596,12 +642,12 @@
 }
 
 size_t OatWriter::WriteCodeDexFile(OutputStream& out, const size_t file_offset,
-                                   size_t relative_offset, size_t& oat_class_index,
+                                   size_t relative_offset, size_t* oat_class_index,
                                    const DexFile& dex_file) {
   for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs();
-      class_def_index++, oat_class_index++) {
+      class_def_index++, (*oat_class_index)++) {
     const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-    relative_offset = WriteCodeClassDef(out, file_offset, relative_offset, oat_class_index,
+    relative_offset = WriteCodeClassDef(out, file_offset, relative_offset, *oat_class_index,
                                         dex_file, class_def);
     if (relative_offset == 0) {
       return 0;
@@ -637,11 +683,12 @@
   }
   // Process methods
   size_t class_def_method_index = 0;
+  size_t method_offsets_index = 0;
   while (it.HasNextDirectMethod()) {
     bool is_static = (it.GetMemberAccessFlags() & kAccStatic) != 0;
     relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index,
-                                      class_def_method_index, is_static, it.GetMemberIndex(),
-                                      dex_file);
+                                      class_def_method_index, &method_offsets_index, is_static,
+                                      it.GetMemberIndex(), dex_file);
     if (relative_offset == 0) {
       return 0;
     }
@@ -650,28 +697,30 @@
   }
   while (it.HasNextVirtualMethod()) {
     relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index,
-                                      class_def_method_index, false, it.GetMemberIndex(), dex_file);
+                                      class_def_method_index, &method_offsets_index, false,
+                                      it.GetMemberIndex(), dex_file);
     if (relative_offset == 0) {
       return 0;
     }
     class_def_method_index++;
     it.Next();
   }
+  DCHECK(!it.HasNext());
+  CHECK_LE(method_offsets_index, class_def_method_index);
   return relative_offset;
 }
 
 size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
                                   size_t relative_offset, size_t oat_class_index,
-                                  size_t class_def_method_index, bool is_static,
-                                  uint32_t method_idx, const DexFile& dex_file) {
-  const CompiledMethod* compiled_method =
-      compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method_idx));
-
-  const OatMethodOffsets& method_offsets =
-      oat_classes_[oat_class_index]->method_offsets_[class_def_method_index];
-
+                                  size_t class_def_method_index, size_t* method_offsets_index,
+                                  bool is_static, uint32_t method_idx, const DexFile& dex_file) {
+  OatClass* oat_class = oat_classes_[oat_class_index];
+  const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
   if (compiled_method != NULL) {  // ie. not an abstract method
+    const OatMethodOffsets method_offsets = oat_class->method_offsets_[*method_offsets_index];
+    (*method_offsets_index)++;
+
 #if !defined(ART_USE_PORTABLE_COMPILER)
     uint32_t aligned_offset = compiled_method->AlignCode(relative_offset);
     uint32_t aligned_code_delta = aligned_offset - relative_offset;
@@ -854,29 +903,96 @@
   return true;
 }
 
-OatWriter::OatClass::OatClass(size_t offset, mirror::Class::Status status, uint32_t methods_count) {
+OatWriter::OatClass::OatClass(size_t offset,
+                              std::vector<CompiledMethod*>* compiled_methods,
+                              uint32_t num_non_null_compiled_methods,
+                              mirror::Class::Status status) {
+  CHECK(compiled_methods != NULL);
+  uint32_t num_methods = compiled_methods->size();
+  CHECK_LE(num_non_null_compiled_methods, num_methods);
+
   offset_ = offset;
+  compiled_methods_ = compiled_methods;
+  oat_method_offsets_offsets_from_oat_class_.resize(num_methods);
+
+  // Since both kOatClassNoneCompiled and kOatClassAllCompiled could
+  // apply when there are 0 methods, we just arbitrarily say that 0
+  // methods means kOatClassNoneCompiled and that we won't use
+  // kOatClassAllCompiled unless there is at least one compiled
+  // method. This means in an interpreter-only system, we can assert
+  // that all classes are kOatClassNoneCompiled.
+  if (num_non_null_compiled_methods == 0) {
+    type_ = kOatClassNoneCompiled;
+  } else if (num_non_null_compiled_methods == num_methods) {
+    type_ = kOatClassAllCompiled;
+  } else {
+    type_ = kOatClassSomeCompiled;
+  }
+
   status_ = status;
-  method_offsets_.resize(methods_count);
+  method_offsets_.resize(num_non_null_compiled_methods);
+
+  uint32_t oat_method_offsets_offset_from_oat_class = sizeof(type_) + sizeof(status_);
+  if (type_ == kOatClassSomeCompiled) {
+    method_bitmap_ = new BitVector(num_methods, false, Allocator::GetMallocAllocator());
+    method_bitmap_size_ = method_bitmap_->GetSizeOf();
+    oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
+    oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
+  } else {
+    method_bitmap_ = NULL;
+    method_bitmap_size_ = 0;
+  }
+
+  for (size_t i = 0; i < num_methods; i++) {
+    CompiledMethod* compiled_method = (*compiled_methods_)[i];
+    if (compiled_method == NULL) {
+      oat_method_offsets_offsets_from_oat_class_[i] = 0;
+    } else {
+      oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
+      oat_method_offsets_offset_from_oat_class += sizeof(OatMethodOffsets);
+      if (type_ == kOatClassSomeCompiled) {
+        method_bitmap_->SetBit(i);
+      }
+    }
+  }
 }
 
+OatWriter::OatClass::~OatClass() {
+  delete method_bitmap_;
+  delete compiled_methods_;
+}
+
+#if defined(ART_USE_PORTABLE_COMPILER)
 size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatHeader(
     size_t class_def_method_index_) const {
-  return offset_ + GetOatMethodOffsetsOffsetFromOatClass(class_def_method_index_);
+  uint32_t method_offset = GetOatMethodOffsetsOffsetFromOatClass(class_def_method_index_);
+  if (method_offset == 0) {
+    return 0;
+  }
+  return offset_ + method_offset;
 }
 
 size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatClass(
     size_t class_def_method_index_) const {
-  return sizeof(status_)
-          + (sizeof(method_offsets_[0]) * class_def_method_index_);
+  return oat_method_offsets_offsets_from_oat_class_[class_def_method_index_];
 }
+#endif
 
 size_t OatWriter::OatClass::SizeOf() const {
-  return GetOatMethodOffsetsOffsetFromOatClass(method_offsets_.size());
+  return sizeof(status_)
+          + sizeof(type_)
+          + ((method_bitmap_size_ == 0) ? 0 : sizeof(method_bitmap_size_))
+          + method_bitmap_size_
+          + (sizeof(method_offsets_[0]) * method_offsets_.size());
 }
 
 void OatWriter::OatClass::UpdateChecksum(OatHeader& oat_header) const {
   oat_header.UpdateChecksum(&status_, sizeof(status_));
+  oat_header.UpdateChecksum(&type_, sizeof(type_));
+  if (method_bitmap_size_ != 0) {
+    CHECK_EQ(kOatClassSomeCompiled, type_);
+    oat_header.UpdateChecksum(&method_bitmap_size_, sizeof(method_bitmap_size_));
+    oat_header.UpdateChecksum(method_bitmap_->GetRawStorage(), method_bitmap_size_);
+  }
   oat_header.UpdateChecksum(&method_offsets_[0],
                             sizeof(method_offsets_[0]) * method_offsets_.size());
 }
@@ -890,17 +1006,30 @@
     return false;
   }
   oat_writer->size_oat_class_status_ += sizeof(status_);
-  DCHECK_EQ(static_cast<off_t>(file_offset + GetOatMethodOffsetsOffsetFromOatHeader(0)),
-            out.Seek(0, kSeekCurrent));
+  if (!out.WriteFully(&type_, sizeof(type_))) {
+    PLOG(ERROR) << "Failed to write oat class type to " << out.GetLocation();
+    return false;
+  }
+  oat_writer->size_oat_class_type_ += sizeof(type_);
+  if (method_bitmap_size_ != 0) {
+    CHECK_EQ(kOatClassSomeCompiled, type_);
+    if (!out.WriteFully(&method_bitmap_size_, sizeof(method_bitmap_size_))) {
+      PLOG(ERROR) << "Failed to write method bitmap size to " << out.GetLocation();
+      return false;
+    }
+    oat_writer->size_oat_class_method_bitmaps_ += sizeof(method_bitmap_size_);
+    if (!out.WriteFully(method_bitmap_->GetRawStorage(), method_bitmap_size_)) {
+      PLOG(ERROR) << "Failed to write method bitmap to " << out.GetLocation();
+      return false;
+    }
+    oat_writer->size_oat_class_method_bitmaps_ += method_bitmap_size_;
+  }
   if (!out.WriteFully(&method_offsets_[0],
                       sizeof(method_offsets_[0]) * method_offsets_.size())) {
     PLOG(ERROR) << "Failed to write method offsets to " << out.GetLocation();
     return false;
   }
   oat_writer->size_oat_class_method_offsets_ += sizeof(method_offsets_[0]) * method_offsets_.size();
-  DCHECK_EQ(static_cast<off_t>(file_offset +
-                               GetOatMethodOffsetsOffsetFromOatHeader(method_offsets_.size())),
-            out.Seek(0, kSeekCurrent));
   return true;
 }
 
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index d5f7e21..e3cb0a8 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -30,6 +30,7 @@
 
 namespace art {
 
+class BitVector;
 class OutputStream;
 
 // OatHeader         variable length with count of D OatDexFiles
@@ -90,7 +91,7 @@
   size_t InitOatCodeDexFiles(size_t offset)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   size_t InitOatCodeDexFile(size_t offset,
-                            size_t& oat_class_index,
+                            size_t* oat_class_index,
                             const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   size_t InitOatCodeClassDef(size_t offset,
@@ -99,21 +100,22 @@
                              const DexFile::ClassDef& class_def)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   size_t InitOatCodeMethod(size_t offset, size_t oat_class_index, size_t class_def_index,
-                           size_t class_def_method_index, bool is_native, InvokeType type,
-                           uint32_t method_idx, const DexFile*)
+                           size_t class_def_method_index, size_t* method_offsets_index,
+                           bool is_native, InvokeType type, uint32_t method_idx, const DexFile&)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool WriteTables(OutputStream& out, const size_t file_offset);
   size_t WriteCode(OutputStream& out, const size_t file_offset);
   size_t WriteCodeDexFiles(OutputStream& out, const size_t file_offset, size_t relative_offset);
   size_t WriteCodeDexFile(OutputStream& out, const size_t file_offset, size_t relative_offset,
-                          size_t& oat_class_index, const DexFile& dex_file);
+                          size_t* oat_class_index, const DexFile& dex_file);
   size_t WriteCodeClassDef(OutputStream& out, const size_t file_offset, size_t relative_offset,
                            size_t oat_class_index, const DexFile& dex_file,
                            const DexFile::ClassDef& class_def);
   size_t WriteCodeMethod(OutputStream& out, const size_t file_offset, size_t relative_offset,
-                         size_t oat_class_index, size_t class_def_method_index, bool is_static,
-                         uint32_t method_idx, const DexFile& dex_file);
+                         size_t oat_class_index, size_t class_def_method_index,
+                         size_t* method_offsets_index, bool is_static, uint32_t method_idx,
+                         const DexFile& dex_file);
 
   void ReportWriteFailure(const char* what, uint32_t method_idx, const DexFile& dex_file,
                           OutputStream& out) const;
@@ -142,13 +144,24 @@
 
   class OatClass {
    public:
-    explicit OatClass(size_t offset, mirror::Class::Status status, uint32_t methods_count);
+    explicit OatClass(size_t offset,
+                      std::vector<CompiledMethod*>* compiled_methods,
+                      uint32_t num_non_null_compiled_methods,
+                      mirror::Class::Status status);
+    ~OatClass();
+#if defined(ART_USE_PORTABLE_COMPILER)
     size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
     size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
+#endif
     size_t SizeOf() const;
     void UpdateChecksum(OatHeader& oat_header) const;
     bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const;
 
+    CompiledMethod* GetCompiledMethod(size_t class_def_method_index) const {
+      DCHECK(compiled_methods_ != NULL);
+      return (*compiled_methods_)[class_def_method_index];
+    }
+
     // Offset of start of OatClass from beginning of OatHeader. It is
     // used to validate file position when writing. For Portable, it
     // is also used to calculate the position of the OatMethodOffsets
@@ -156,8 +169,37 @@
     // patched to point to code in the Portable .o ELF objects.
     size_t offset_;
 
+    // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+    std::vector<CompiledMethod*>* compiled_methods_;
+
+    // Offset from OatClass::offset_ to the OatMethodOffsets for the
+    // class_def_method_index. If 0, it means the corresponding
+    // CompiledMethod entry in OatClass::compiled_methods_ should be
+    // NULL and that the OatClass::type_ should not be kOatClassAllCompiled.
+    std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
+
     // data to write
-    mirror::Class::Status status_;
+
+    COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (1 << 16), class_status_wont_fit_in_16bits);
+    int16_t status_;
+
+    COMPILE_ASSERT(OatClassType::kOatClassMax < (1 << 16), oat_class_type_wont_fit_in_16bits);
+    uint16_t type_;
+
+    uint32_t method_bitmap_size_;
+
+    // Bit vector indexed by ClassDef method index. When
+    // OatClass::type_ is kOatClassSomeCompiled, a set bit indicates the
+    // method has an OatMethodOffsets in method_offsets_, otherwise
+    // the entry was omitted to save space. If OatClass::type_ is
+    // not kOatClassSomeCompiled, the bitmap will be NULL.
+    BitVector* method_bitmap_;
+
+    // OatMethodOffsets for each CompiledMethod present in the
+    // OatClass. Note that some may be missing if
+    // OatClass::compiled_methods_ contains NULL values (and
+    // oat_method_offsets_offsets_from_oat_class_ should contain 0
+    // values in this case).
     std::vector<OatMethodOffsets> method_offsets_;
 
    private:
@@ -214,7 +256,9 @@
   uint32_t size_oat_dex_file_location_checksum_;
   uint32_t size_oat_dex_file_offset_;
   uint32_t size_oat_dex_file_methods_offsets_;
+  uint32_t size_oat_class_type_;
   uint32_t size_oat_class_status_;
+  uint32_t size_oat_class_method_bitmaps_;
   uint32_t size_oat_class_method_offsets_;
 
   // Code mappings for deduplication. Deduplication is already done on a pointer basis by the
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index 53c1afa..638e0ec 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -22,6 +22,7 @@
 
 #include "base/mutex.h"
 #include "base/stl_util.h"
+#include "base/stringprintf.h"
 
 namespace art {
 
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 03d8b96..8abe6de 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -14,15 +14,12 @@
  * limitations under the License.
  */
 
-#include "common_test.h"
 #include "dedupe_set.h"
+#include "gtest/gtest.h"
+#include "thread-inl.h"
 
 namespace art {
 
-class DedupeSetTest : public testing::Test {
- public:
-};
-
 class DedupeHashFunc {
  public:
   size_t operator()(const std::vector<uint8_t>& array) const {
@@ -35,7 +32,7 @@
     return hash;
   }
 };
-TEST_F(DedupeSetTest, Test) {
+TEST(DedupeSetTest, Test) {
   Thread* self = Thread::Current();
   typedef std::vector<uint8_t> ByteArray;
   DedupeSet<ByteArray, size_t, DedupeHashFunc> deduplicator("test");
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 782c1f3..6239e9a 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -103,6 +103,10 @@
   "tst", "rsb", "cmp", "cmn", "orr", "mul", "bic", "mvn",
 };
 
+static const char* kThumbReverseOperations[] = {
+    "rev", "rev16", "rbit", "revsh"
+};
+
 struct ArmRegister {
   explicit ArmRegister(uint32_t r) : r(r) { CHECK_LE(r, 15U); }
   ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) { CHECK_LE(r, 15U); }
@@ -995,6 +999,31 @@
           }
           break;
         }
+        case 0x29: {  // 0101001
+          // |111|11|1000000|0000|1111|1100|00|0 0|0000|
+          // |5 3|21|0     4|3  0|5  2|1  8|76|5 4|3  0|
+          // |---|--|-------|----|----|----|--|---|----|
+          // |332|22|2222222|1111|1111|1100|00|0 0|0000|
+          // |1 9|87|6     0|9  6|5  2|1  8|76|5 4|3  0|
+          // |---|--|-------|----|----|----|--|---|----|
+          // |111|11|0101001| Rm |1111| Rd |10|op3| Rm |
+          // REV   - 111 11 0101001 mmmm 1111 dddd 1000 mmmm
+          // REV16 - 111 11 0101001 mmmm 1111 dddd 1001 mmmm
+          // RBIT  - 111 11 0101001 mmmm 1111 dddd 1010 mmmm
+          // REVSH - 111 11 0101001 mmmm 1111 dddd 1011 mmmm
+          if ((instr & 0xf0c0) == 0xf080) {
+            uint32_t op3 = (instr >> 4) & 3;
+            opcode << kThumbReverseOperations[op3];
+            ArmRegister Rm(instr, 0);
+            ArmRegister Rd(instr, 8);
+            args << Rd << ", " << Rm;
+            ArmRegister Rm2(instr, 16);
+            if (Rm.r != Rm2.r || Rm.r == 13 || Rm.r == 15 || Rd.r == 13 || Rd.r == 15) {
+              args << " (UNPREDICTABLE)";
+            }
+          }  // else unknown instruction
+          break;
+        }
         case 0x05: case 0x0D: case 0x15: case 0x1D: {  // 00xx101
           // Load word
           // |111|11|10|0 0|00|0|0000|1111|110000|000000|
@@ -1285,6 +1314,16 @@
           DumpBranchTarget(args, instr_ptr + 4, imm32);
           break;
         }
+        case 0x50: case 0x51:    // 101000x
+        case 0x52: case 0x53:    // 101001x
+        case 0x56: case 0x57: {  // 101011x
+          uint16_t op = (instr >> 6) & 3;
+          opcode << kThumbReverseOperations[op];
+          ThumbRegister Rm(instr, 3);
+          ThumbRegister Rd(instr, 0);
+          args << Rd << ", " << Rm;
+          break;
+        }
         case 0x78: case 0x79: case 0x7A: case 0x7B:  // 1111xxx
         case 0x7C: case 0x7D: case 0x7E: case 0x7F: {
           // If-Then, and hints
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index e5cdb7b..9ed65cd 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -520,6 +520,10 @@
       case 0xB7: opcode << "movzxw"; has_modrm = true; load = true; break;
       case 0xBE: opcode << "movsxb"; has_modrm = true; load = true; break;
       case 0xBF: opcode << "movsxw"; has_modrm = true; load = true; break;
+      case 0xC8: case 0xC9: case 0xCA: case 0xCB: case 0xCC: case 0xCD: case 0xCE: case 0xCF:
+        opcode << "bswap";
+        reg_in_opcode = true;
+        break;
       default:
         opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
         break;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index ea06b02..fdeeaec 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -263,8 +263,10 @@
       const char* descriptor = dex_file->GetClassDescriptor(class_def);
       UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file.GetOatClass(class_def_index));
       CHECK(oat_class.get() != NULL);
-      os << StringPrintf("%zd: %s (type_idx=%d) (", class_def_index, descriptor, class_def.class_idx_)
-         << oat_class->GetStatus() << ")\n";
+      os << StringPrintf("%zd: %s (type_idx=%d)", class_def_index, descriptor, class_def.class_idx_)
+         << " (" << oat_class->GetStatus() << ")"
+         << " (" << oat_class->GetType() << ")\n";
+      // TODO: include bitmap here if type is kOatClassSomeCompiled?
       Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
       std::ostream indented_os(&indent_filter);
       DumpOatClass(indented_os, *oat_class.get(), *(dex_file.get()), class_def);
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 8579222..e4b7e47 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -21,6 +21,8 @@
 LIBART_COMMON_SRC_FILES := \
 	atomic.cc.arm \
 	barrier.cc \
+	base/allocator.cc \
+	base/bit_vector.cc \
 	base/logging.cc \
 	base/mutex.cc \
 	base/stringpiece.cc \
@@ -247,6 +249,7 @@
 	locks.h \
 	lock_word.h \
 	mirror/class.h \
+	oat.h \
 	thread.h \
 	thread_state.h \
 	verifier/method_verifier.h
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
new file mode 100644
index 0000000..4f7753d
--- /dev/null
+++ b/runtime/base/allocator.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "allocator.h"
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include "base/logging.h"
+
+namespace art {
+
+class MallocAllocator : public Allocator {
+ public:
+  explicit MallocAllocator() {}
+  ~MallocAllocator() {}
+
+  virtual void* Alloc(size_t size) {
+    return calloc(sizeof(uint8_t), size);
+  }
+
+  virtual void Free(void* p) {
+    free(p);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MallocAllocator);
+};
+
+MallocAllocator g_malloc_allocator;
+
+class NoopAllocator : public Allocator {
+ public:
+  explicit NoopAllocator() {}
+  ~NoopAllocator() {}
+
+  virtual void* Alloc(size_t size) {
+    LOG(FATAL) << "NoopAllocator::Alloc should not be called";
+    return NULL;
+  }
+
+  virtual void Free(void* p) {
+    // Noop.
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(NoopAllocator);
+};
+
+NoopAllocator g_noop_allocator;
+
+Allocator* Allocator::GetMallocAllocator() {
+  return &g_malloc_allocator;
+}
+
+Allocator* Allocator::GetNoopAllocator() {
+  return &g_noop_allocator;
+}
+
+
+}  // namespace art
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
new file mode 100644
index 0000000..917bf0b
--- /dev/null
+++ b/runtime/base/allocator.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ALLOCATOR_H_
+#define ART_RUNTIME_BASE_ALLOCATOR_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+class Allocator {
+ public:
+  static Allocator* GetMallocAllocator();
+  static Allocator* GetNoopAllocator();
+
+  Allocator() {}
+  virtual ~Allocator() {}
+
+  virtual void* Alloc(size_t) = 0;
+  virtual void Free(void*) = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Allocator);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_ALLOCATOR_H_
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
new file mode 100644
index 0000000..3b82651
--- /dev/null
+++ b/runtime/base/bit_vector.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_vector.h"
+
+namespace art {
+
+// TODO: profile to make sure this is still a win relative to just using shifted masks.
+static uint32_t check_masks[32] = {
+  0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+  0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
+  0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
+  0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+  0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
+  0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
+  0x40000000, 0x80000000 };
+
+static inline uint32_t BitsToWords(uint32_t bits) {
+  return (bits + 31) >> 5;
+}
+
+// TODO: replace excessive argument defaulting when we are at gcc 4.7
+// or later on host with delegating constructor support. Specifically,
+// start_bits and storage_size/storage are mutually exclusive.
+BitVector::BitVector(uint32_t start_bits,
+                     bool expandable,
+                     Allocator* allocator,
+                     uint32_t storage_size,
+                     uint32_t* storage)
+  : allocator_(allocator),
+    expandable_(expandable),
+    storage_size_(storage_size),
+    storage_(storage) {
+  DCHECK_EQ(sizeof(storage_[0]), 4U);  // Assuming 32-bit units.
+  if (storage_ == NULL) {
+    storage_size_ = BitsToWords(start_bits);
+    storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * sizeof(uint32_t)));
+  }
+}
+
+BitVector::~BitVector() {
+  allocator_->Free(storage_);
+}
+
+/*
+ * Determine whether or not the specified bit is set.
+ */
+bool BitVector::IsBitSet(uint32_t num) const {
+  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+
+  uint32_t val = storage_[num >> 5] & check_masks[num & 0x1f];
+  return (val != 0);
+}
+
+// Mark all bits as "clear".
+void BitVector::ClearAllBits() {
+  memset(storage_, 0, storage_size_ * sizeof(uint32_t));
+}
+
+// Mark the specified bit as "set".
+/*
+ * TUNING: this could have pathologically bad growth/expand behavior.  Make sure we're
+ * not using it badly or change resize mechanism.
+ */
+void BitVector::SetBit(uint32_t num) {
+  if (num >= storage_size_ * sizeof(uint32_t) * 8) {
+    DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
+
+    /* Round up to word boundaries for "num+1" bits */
+    uint32_t new_size = BitsToWords(num + 1);
+    DCHECK_GT(new_size, storage_size_);
+    uint32_t *new_storage =
+        static_cast<uint32_t*>(allocator_->Alloc(new_size * sizeof(uint32_t)));
+    memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
+    // Zero out the new storage words.
+    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
+    // TODO: collect stats on space wasted because of resize.
+    storage_ = new_storage;
+    storage_size_ = new_size;
+  }
+
+  storage_[num >> 5] |= check_masks[num & 0x1f];
+}
+
+// Mark the specified bit as "unset".
+void BitVector::ClearBit(uint32_t num) {
+  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+  storage_[num >> 5] &= ~check_masks[num & 0x1f];
+}
+
+// Intersect with another bit vector.  Sizes and expandability must be the same.
+void BitVector::Intersect(const BitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  DCHECK_EQ(expandable_, src->IsExpandable());
+  for (uint32_t idx = 0; idx < storage_size_; idx++) {
+    storage_[idx] &= src->GetRawStorageWord(idx);
+  }
+}
+
+/*
+ * Union with another bit vector.  Sizes and expandability must be the same.
+ */
+void BitVector::Union(const BitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  DCHECK_EQ(expandable_, src->IsExpandable());
+  for (uint32_t idx = 0; idx < storage_size_; idx++) {
+    storage_[idx] |= src->GetRawStorageWord(idx);
+  }
+}
+
+// Count the number of bits that are set.
+uint32_t BitVector::NumSetBits() const {
+  uint32_t count = 0;
+  for (uint32_t word = 0; word < storage_size_; word++) {
+    count += __builtin_popcount(storage_[word]);
+  }
+  return count;
+}
+
+// Count the number of bits that are set up through and including num.
+uint32_t BitVector::NumSetBits(uint32_t num) const {
+  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+  uint32_t last_word = num >> 5;
+  uint32_t partial_word_bits = num & 0x1f;
+
+  // partial_word_bits |  # |                         |                      | partial_word_mask
+  //             00000 |  0 | 0xffffffff >> (31 -  0) | (1 <<  (0 + 1)) - 1  | 0x00000001
+  //             00001 |  1 | 0xffffffff >> (31 -  1) | (1 <<  (1 + 1)) - 1  | 0x00000003
+  //             00010 |  2 | 0xffffffff >> (31 -  2) | (1 <<  (2 + 1)) - 1  | 0x00000007
+  //             ..... |
+  //             11110 | 30 | 0xffffffff >> (31 - 30) | (1 << (30 + 1)) - 1  | 0x7fffffff
+  //             11111 | 31 | 0xffffffff >> (31 - 31) | last_full_word++     | 0xffffffff
+  uint32_t partial_word_mask = 0xffffffff >> (0x1f - partial_word_bits);
+
+  uint32_t count = 0;
+  for (uint32_t word = 0; word < last_word; word++) {
+    count += __builtin_popcount(storage_[word]);
+  }
+  count += __builtin_popcount(storage_[last_word] & partial_word_mask);
+  return count;
+}
+
+BitVector::Iterator* BitVector::GetIterator() const {
+  return new (allocator_) Iterator(this);
+}
+
+/*
+ * Mark the specified number of bits as "set". Cannot set all bits, as
+ * ClearAllBits does for clearing, since there might be unused bits - setting
+ * those to one will confuse the iterator.
+ */
+void BitVector::SetInitialBits(uint32_t num_bits) {
+  DCHECK_LE(BitsToWords(num_bits), storage_size_);
+  uint32_t idx;
+  for (idx = 0; idx < (num_bits >> 5); idx++) {
+    storage_[idx] = -1;
+  }
+  uint32_t rem_num_bits = num_bits & 0x1f;
+  if (rem_num_bits) {
+    storage_[idx] = (1 << rem_num_bits) - 1;
+  }
+}
+
+}  // namespace art
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
new file mode 100644
index 0000000..74bec08
--- /dev/null
+++ b/runtime/base/bit_vector.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_VECTOR_H_
+#define ART_RUNTIME_BASE_BIT_VECTOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include "allocator.h"
+#include "base/logging.h"
+#include "utils.h"
+
+namespace art {
+
+/*
+ * Expanding bitmap, used for tracking resources.  Bits are numbered starting
+ * from zero.  All operations on a BitVector are unsynchronized.
+ */
+class BitVector {
+  public:
+    class Iterator {
+      public:
+        explicit Iterator(const BitVector* bit_vector)
+          : p_bits_(bit_vector),
+            bit_storage_(bit_vector->GetRawStorage()),
+            bit_index_(0),
+            bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
+
+        // Return the position of the next set bit.  -1 means end-of-element reached.
+        int32_t Next() {
+          // Did anything obviously change since we started?
+          DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
+          DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
+
+          if (UNLIKELY(bit_index_ >= bit_size_)) return -1;
+
+          uint32_t word_index = bit_index_ / 32;
+          uint32_t word = bit_storage_[word_index];
+          // Mask out any bits in the first word we've already considered.
+          word >>= bit_index_ & 0x1f;
+          if (word == 0) {
+            bit_index_ &= ~0x1f;
+            do {
+              word_index++;
+              if (UNLIKELY((word_index * 32) >= bit_size_)) {
+                bit_index_ = bit_size_;
+                return -1;
+              }
+              word = bit_storage_[word_index];
+              bit_index_ += 32;
+            } while (word == 0);
+          }
+          bit_index_ += CTZ(word) + 1;
+          return bit_index_ - 1;
+        }
+
+        static void* operator new(size_t size, Allocator* allocator) {
+          return allocator->Alloc(sizeof(BitVector::Iterator));
+        }
+        static void operator delete(void* p) {
+          Iterator* it = reinterpret_cast<Iterator*>(p);
+          it->p_bits_->allocator_->Free(p);
+        }
+
+      private:
+        const BitVector* const p_bits_;
+        const uint32_t* const bit_storage_;
+        uint32_t bit_index_;           // Current index (size in bits).
+        const uint32_t bit_size_;      // Size of vector in bits.
+
+        friend class BitVector;
+    };
+
+    BitVector(uint32_t start_bits,
+              bool expandable,
+              Allocator* allocator,
+              uint32_t storage_size = 0,
+              uint32_t* storage = NULL);
+
+    virtual ~BitVector();
+
+    void SetBit(uint32_t num);
+    void ClearBit(uint32_t num);
+    bool IsBitSet(uint32_t num) const;
+    void ClearAllBits();
+    void SetInitialBits(uint32_t num_bits);
+    void Copy(BitVector* src) {
+      memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
+    }
+    void Intersect(const BitVector* src2);
+    void Union(const BitVector* src);
+    // Are we equal to another bit vector?  Note: expandability attributes must also match.
+    bool Equal(const BitVector* src) {
+      return (storage_size_ == src->GetStorageSize()) &&
+        (expandable_ == src->IsExpandable()) &&
+        (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+    }
+    uint32_t NumSetBits() const;
+    uint32_t NumSetBits(uint32_t num) const;
+
+    Iterator* GetIterator() const;
+
+    uint32_t GetStorageSize() const { return storage_size_; }
+    bool IsExpandable() const { return expandable_; }
+    uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
+    uint32_t* GetRawStorage() { return storage_; }
+    const uint32_t* GetRawStorage() const { return storage_; }
+    size_t GetSizeOf() const { return storage_size_ * sizeof(uint32_t); }
+
+  private:
+    Allocator* const allocator_;
+    const bool expandable_;         // expand bitmap if we run out?
+    uint32_t   storage_size_;       // current size, in 32-bit words.
+    uint32_t*  storage_;
+};
+
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_BIT_VECTOR_H_
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
new file mode 100644
index 0000000..d99d059
--- /dev/null
+++ b/runtime/base/bit_vector_test.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "UniquePtr.h"
+#include "bit_vector.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(BitVector, Test) {
+  const size_t kBits = 32;
+
+  BitVector bv(kBits, false, Allocator::GetMallocAllocator());
+  EXPECT_EQ(1U, bv.GetStorageSize());
+  EXPECT_EQ(kWordSize, bv.GetSizeOf());
+  EXPECT_FALSE(bv.IsExpandable());
+
+  EXPECT_EQ(0U, bv.NumSetBits());
+  EXPECT_EQ(0U, bv.NumSetBits(0));
+  EXPECT_EQ(0U, bv.NumSetBits(kBits - 1));
+  for (size_t i = 0; i < kBits; i++) {
+    EXPECT_FALSE(bv.IsBitSet(i));
+  }
+  EXPECT_EQ(0U, bv.GetRawStorageWord(0));
+  EXPECT_EQ(0U, *bv.GetRawStorage());
+
+  BitVector::Iterator empty_iterator(&bv);
+  EXPECT_EQ(-1, empty_iterator.Next());
+
+  UniquePtr<BitVector::Iterator> empty_iterator_on_heap(bv.GetIterator());
+  EXPECT_EQ(-1, empty_iterator_on_heap->Next());
+
+  bv.SetBit(0);
+  bv.SetBit(kBits - 1);
+  EXPECT_EQ(2U, bv.NumSetBits());
+  EXPECT_EQ(1U, bv.NumSetBits(0));
+  EXPECT_EQ(2U, bv.NumSetBits(kBits - 1));
+  EXPECT_TRUE(bv.IsBitSet(0));
+  for (size_t i = 1; i < kBits - 1; i++) {
+    EXPECT_FALSE(bv.IsBitSet(i));
+  }
+  EXPECT_TRUE(bv.IsBitSet(kBits - 1));
+  EXPECT_EQ(0x80000001U, bv.GetRawStorageWord(0));
+  EXPECT_EQ(0x80000001U, *bv.GetRawStorage());
+
+  BitVector::Iterator iterator(&bv);
+  EXPECT_EQ(0, iterator.Next());
+  EXPECT_EQ(static_cast<int>(kBits - 1), iterator.Next());
+  EXPECT_EQ(-1, iterator.Next());
+}
+
+TEST(BitVector, NoopAllocator) {
+  const uint32_t kWords = 2;
+
+  uint32_t bits[kWords];
+  memset(bits, 0, sizeof(bits));
+
+  BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits);
+  EXPECT_EQ(kWords, bv.GetStorageSize());
+  EXPECT_EQ(kWords * kWordSize, bv.GetSizeOf());
+  EXPECT_EQ(bits, bv.GetRawStorage());
+  EXPECT_EQ(0U, bv.NumSetBits());
+
+  bv.SetBit(8);
+  EXPECT_EQ(1U, bv.NumSetBits());
+  EXPECT_EQ(0x00000100U, bv.GetRawStorageWord(0));
+  EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1));
+  EXPECT_EQ(1U, bv.NumSetBits());
+
+  bv.SetBit(16);
+  EXPECT_EQ(2U, bv.NumSetBits());
+  EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+  EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1));
+  EXPECT_EQ(2U, bv.NumSetBits());
+
+  bv.SetBit(32);
+  EXPECT_EQ(3U, bv.NumSetBits());
+  EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+  EXPECT_EQ(0x00000001U, bv.GetRawStorageWord(1));
+  EXPECT_EQ(3U, bv.NumSetBits());
+
+  bv.SetBit(48);
+  EXPECT_EQ(4U, bv.NumSetBits());
+  EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+  EXPECT_EQ(0x00010001U, bv.GetRawStorageWord(1));
+  EXPECT_EQ(4U, bv.NumSetBits());
+
+  EXPECT_EQ(0U, bv.NumSetBits(0));
+
+  EXPECT_EQ(0U, bv.NumSetBits(7));
+  EXPECT_EQ(1U, bv.NumSetBits(8));
+  EXPECT_EQ(1U, bv.NumSetBits(9));
+
+  EXPECT_EQ(1U, bv.NumSetBits(15));
+  EXPECT_EQ(2U, bv.NumSetBits(16));
+  EXPECT_EQ(2U, bv.NumSetBits(17));
+
+  EXPECT_EQ(2U, bv.NumSetBits(31));
+  EXPECT_EQ(3U, bv.NumSetBits(32));
+  EXPECT_EQ(3U, bv.NumSetBits(33));
+
+  EXPECT_EQ(3U, bv.NumSetBits(47));
+  EXPECT_EQ(4U, bv.NumSetBits(48));
+  EXPECT_EQ(4U, bv.NumSetBits(49));
+
+  EXPECT_EQ(4U, bv.NumSetBits(63));
+}
+
+}  // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index d9c9e31..24ab1ce 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -96,201 +96,6 @@
   return mirror::Array::AllocInstrumented(self, klass, component_count);
 }
 
-mirror::ArtField* FindFieldFromCode(uint32_t field_idx, const mirror::ArtMethod* referrer,
-                                 Thread* self, FindFieldType type, size_t expected_size,
-                                 bool access_check) {
-  bool is_primitive;
-  bool is_set;
-  bool is_static;
-  switch (type) {
-    case InstanceObjectRead:     is_primitive = false; is_set = false; is_static = false; break;
-    case InstanceObjectWrite:    is_primitive = false; is_set = true;  is_static = false; break;
-    case InstancePrimitiveRead:  is_primitive = true;  is_set = false; is_static = false; break;
-    case InstancePrimitiveWrite: is_primitive = true;  is_set = true;  is_static = false; break;
-    case StaticObjectRead:       is_primitive = false; is_set = false; is_static = true;  break;
-    case StaticObjectWrite:      is_primitive = false; is_set = true;  is_static = true;  break;
-    case StaticPrimitiveRead:    is_primitive = true;  is_set = false; is_static = true;  break;
-    case StaticPrimitiveWrite:   // Keep GCC happy by having a default handler, fall-through.
-    default:                     is_primitive = true;  is_set = true;  is_static = true;  break;
-  }
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  mirror::ArtField* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static);
-  if (UNLIKELY(resolved_field == NULL)) {
-    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
-    return NULL;  // Failure.
-  }
-  mirror::Class* fields_class = resolved_field->GetDeclaringClass();
-  if (access_check) {
-    if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
-      ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer);
-      return NULL;
-    }
-    mirror::Class* referring_class = referrer->GetDeclaringClass();
-    if (UNLIKELY(!referring_class->CanAccess(fields_class) ||
-                 !referring_class->CanAccessMember(fields_class,
-                                                   resolved_field->GetAccessFlags()))) {
-      // The referring class can't access the resolved field, this may occur as a result of a
-      // protected field being made public by a sub-class. Resort to the dex file to determine
-      // the correct class for the access check.
-      const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile();
-      fields_class = class_linker->ResolveType(dex_file,
-                                               dex_file.GetFieldId(field_idx).class_idx_,
-                                               referring_class);
-      if (UNLIKELY(!referring_class->CanAccess(fields_class))) {
-        ThrowIllegalAccessErrorClass(referring_class, fields_class);
-        return NULL;  // failure
-      } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class,
-                                                            resolved_field->GetAccessFlags()))) {
-        ThrowIllegalAccessErrorField(referring_class, resolved_field);
-        return NULL;  // failure
-      }
-    }
-    if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) {
-      ThrowIllegalAccessErrorFinalField(referrer, resolved_field);
-      return NULL;  // failure
-    } else {
-      FieldHelper fh(resolved_field);
-      if (UNLIKELY(fh.IsPrimitiveType() != is_primitive ||
-                   fh.FieldSize() != expected_size)) {
-        ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-        DCHECK(throw_location.GetMethod() == referrer);
-        self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
-                                 "Attempted read of %zd-bit %s on field '%s'",
-                                 expected_size * (32 / sizeof(int32_t)),
-                                 is_primitive ? "primitive" : "non-primitive",
-                                 PrettyField(resolved_field, true).c_str());
-        return NULL;  // failure
-      }
-    }
-  }
-  if (!is_static) {
-    // instance fields must be being accessed on an initialized class
-    return resolved_field;
-  } else {
-    // If the class is initialized we're done.
-    if (fields_class->IsInitialized()) {
-      return resolved_field;
-    } else if (Runtime::Current()->GetClassLinker()->EnsureInitialized(fields_class, true, true)) {
-      // Otherwise let's ensure the class is initialized before resolving the field.
-      return resolved_field;
-    } else {
-      DCHECK(self->IsExceptionPending());  // Throw exception and unwind
-      return NULL;  // failure
-    }
-  }
-}
-
-// Slow path method resolution
-mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object,
-                                           mirror::ArtMethod* referrer,
-                                           Thread* self, bool access_check, InvokeType type) {
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  bool is_direct = type == kStatic || type == kDirect;
-  mirror::ArtMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type);
-  if (UNLIKELY(resolved_method == NULL)) {
-    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
-    return NULL;  // Failure.
-  } else if (UNLIKELY(this_object == NULL && type != kStatic)) {
-    // Maintain interpreter-like semantics where NullPointerException is thrown
-    // after potential NoSuchMethodError from class linker.
-    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-    DCHECK(referrer == throw_location.GetMethod());
-    ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type);
-    return NULL;  // Failure.
-  } else {
-    if (!access_check) {
-      if (is_direct) {
-        return resolved_method;
-      } else if (type == kInterface) {
-        mirror::ArtMethod* interface_method =
-            this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
-        if (UNLIKELY(interface_method == NULL)) {
-          ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object,
-                                                                     referrer);
-          return NULL;  // Failure.
-        } else {
-          return interface_method;
-        }
-      } else {
-        mirror::ObjectArray<mirror::ArtMethod>* vtable;
-        uint16_t vtable_index = resolved_method->GetMethodIndex();
-        if (type == kSuper) {
-          vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable();
-        } else {
-          vtable = this_object->GetClass()->GetVTable();
-        }
-        // TODO: eliminate bounds check?
-        return vtable->Get(vtable_index);
-      }
-    } else {
-      // Incompatible class change should have been handled in resolve method.
-      if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
-        ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method,
-                                          referrer);
-        return NULL;  // Failure.
-      }
-      mirror::Class* methods_class = resolved_method->GetDeclaringClass();
-      mirror::Class* referring_class = referrer->GetDeclaringClass();
-      if (UNLIKELY(!referring_class->CanAccess(methods_class) ||
-                   !referring_class->CanAccessMember(methods_class,
-                                                     resolved_method->GetAccessFlags()))) {
-        // The referring class can't access the resolved method, this may occur as a result of a
-        // protected method being made public by implementing an interface that re-declares the
-        // method public. Resort to the dex file to determine the correct class for the access check
-        const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile();
-        methods_class = class_linker->ResolveType(dex_file,
-                                                  dex_file.GetMethodId(method_idx).class_idx_,
-                                                  referring_class);
-        if (UNLIKELY(!referring_class->CanAccess(methods_class))) {
-          ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
-                                                        referrer, resolved_method, type);
-          return NULL;  // Failure.
-        } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class,
-                                                              resolved_method->GetAccessFlags()))) {
-          ThrowIllegalAccessErrorMethod(referring_class, resolved_method);
-          return NULL;  // Failure.
-        }
-      }
-      if (is_direct) {
-        return resolved_method;
-      } else if (type == kInterface) {
-        mirror::ArtMethod* interface_method =
-            this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
-        if (UNLIKELY(interface_method == NULL)) {
-          ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object,
-                                                                     referrer);
-          return NULL;  // Failure.
-        } else {
-          return interface_method;
-        }
-      } else {
-        mirror::ObjectArray<mirror::ArtMethod>* vtable;
-        uint16_t vtable_index = resolved_method->GetMethodIndex();
-        if (type == kSuper) {
-          mirror::Class* super_class = referring_class->GetSuperClass();
-          if (LIKELY(super_class != NULL)) {
-            vtable = referring_class->GetSuperClass()->GetVTable();
-          } else {
-            vtable = NULL;
-          }
-        } else {
-          vtable = this_object->GetClass()->GetVTable();
-        }
-        if (LIKELY(vtable != NULL &&
-                   vtable_index < static_cast<uint32_t>(vtable->GetLength()))) {
-          return vtable->GetWithoutChecks(vtable_index);
-        } else {
-          // Behavior to agree with that of the verifier.
-          MethodHelper mh(resolved_method);
-          ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(),
-                                 mh.GetSignature());
-          return NULL;  // Failure.
-        }
-      }
-    }
-  }
-}
-
 void ThrowStackOverflowError(Thread* self) {
   if (self->IsHandlingStackOverflow()) {
       LOG(ERROR) << "Recursive stack overflow.";
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c32a661..2008604 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -178,11 +178,235 @@
   StaticPrimitiveWrite,
 };
 
-// Slow field find that can initialize classes and may throw exceptions.
-extern mirror::ArtField* FindFieldFromCode(uint32_t field_idx, const mirror::ArtMethod* referrer,
-                                           Thread* self, FindFieldType type, size_t expected_size,
-                                           bool access_check)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+template<FindFieldType type, bool access_check>
+static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, const mirror::ArtMethod* referrer,
+                                                  Thread* self, size_t expected_size) {
+  bool is_primitive;
+  bool is_set;
+  bool is_static;
+  switch (type) {
+    case InstanceObjectRead:     is_primitive = false; is_set = false; is_static = false; break;
+    case InstanceObjectWrite:    is_primitive = false; is_set = true;  is_static = false; break;
+    case InstancePrimitiveRead:  is_primitive = true;  is_set = false; is_static = false; break;
+    case InstancePrimitiveWrite: is_primitive = true;  is_set = true;  is_static = false; break;
+    case StaticObjectRead:       is_primitive = false; is_set = false; is_static = true;  break;
+    case StaticObjectWrite:      is_primitive = false; is_set = true;  is_static = true;  break;
+    case StaticPrimitiveRead:    is_primitive = true;  is_set = false; is_static = true;  break;
+    case StaticPrimitiveWrite:   // Keep GCC happy by having a default handler, fall-through.
+    default:                     is_primitive = true;  is_set = true;  is_static = true;  break;
+  }
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  mirror::ArtField* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static);
+  if (UNLIKELY(resolved_field == nullptr)) {
+    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
+    return nullptr;  // Failure.
+  }
+  mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+  if (access_check) {
+    if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+      ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer);
+      return nullptr;
+    }
+    mirror::Class* referring_class = referrer->GetDeclaringClass();
+    if (UNLIKELY(!referring_class->CanAccess(fields_class) ||
+                 !referring_class->CanAccessMember(fields_class,
+                                                   resolved_field->GetAccessFlags()))) {
+      // The referring class can't access the resolved field; this may occur as a result of a
+      // protected field being made public by a sub-class. Resort to the dex file to determine
+      // the correct class for the access check.
+      const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile();
+      fields_class = class_linker->ResolveType(dex_file,
+                                               dex_file.GetFieldId(field_idx).class_idx_,
+                                               referring_class);
+      if (UNLIKELY(!referring_class->CanAccess(fields_class))) {
+        ThrowIllegalAccessErrorClass(referring_class, fields_class);
+        return nullptr;  // failure
+      } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class,
+                                                            resolved_field->GetAccessFlags()))) {
+        ThrowIllegalAccessErrorField(referring_class, resolved_field);
+        return nullptr;  // failure
+      }
+    }
+    if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) {
+      ThrowIllegalAccessErrorFinalField(referrer, resolved_field);
+      return nullptr;  // failure
+    } else {
+      FieldHelper fh(resolved_field);
+      if (UNLIKELY(fh.IsPrimitiveType() != is_primitive ||
+                   fh.FieldSize() != expected_size)) {
+        ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+        DCHECK(throw_location.GetMethod() == referrer);
+        self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
+                                 "Attempted read of %zd-bit %s on field '%s'",
+                                 expected_size * (32 / sizeof(int32_t)),
+                                 is_primitive ? "primitive" : "non-primitive",
+                                 PrettyField(resolved_field, true).c_str());
+        return nullptr;  // failure
+      }
+    }
+  }
+  if (!is_static) {
+    // Instance fields are accessed through an instance, so the class is already initialized.
+    return resolved_field;
+  } else {
+    // If the class is initialized we're done.
+    if (LIKELY(fields_class->IsInitialized())) {
+      return resolved_field;
+    } else if (LIKELY(class_linker->EnsureInitialized(fields_class, true, true))) {
+      // Otherwise let's ensure the class is initialized before resolving the field.
+      return resolved_field;
+    } else {
+      DCHECK(self->IsExceptionPending());  // Throw exception and unwind
+      return nullptr;  // failure
+    }
+  }
+}
+
+// Explicit template declarations of FindFieldFromCode for all field access types.
+#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
+template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+static mirror::ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
+                                                                 const mirror::ArtMethod* referrer, \
+                                                                 Thread* self, size_t expected_size) \
+
+#define EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
+    EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, false); \
+    EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, true)
+
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstanceObjectRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstanceObjectWrite);
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstancePrimitiveRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstancePrimitiveWrite);
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticObjectRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticObjectWrite);
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite);
+
+#undef EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL
+#undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
+
+template<InvokeType type, bool access_check>
+static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object,
+                                                    mirror::ArtMethod* referrer, Thread* self) {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  mirror::ArtMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type);
+  if (UNLIKELY(resolved_method == nullptr)) {
+    DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
+    return nullptr;  // Failure.
+  } else if (UNLIKELY(this_object == nullptr && type != kStatic)) {
+    // Maintain interpreter-like semantics where NullPointerException is thrown
+    // after potential NoSuchMethodError from class linker.
+    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+    DCHECK(referrer == throw_location.GetMethod());
+    ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type);
+    return nullptr;  // Failure.
+  } else if (access_check) {
+    // Incompatible class change should have been handled in resolve method.
+    if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
+      ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method,
+                                        referrer);
+      return nullptr;  // Failure.
+    }
+    mirror::Class* methods_class = resolved_method->GetDeclaringClass();
+    mirror::Class* referring_class = referrer->GetDeclaringClass();
+    if (UNLIKELY(!referring_class->CanAccess(methods_class) ||
+                 !referring_class->CanAccessMember(methods_class,
+                                                   resolved_method->GetAccessFlags()))) {
+      // The referring class can't access the resolved method; this may occur as a result of a
+      // protected method being made public by implementing an interface that re-declares the
+      // method public. Resort to the dex file to determine the correct class for the access check.
+      const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile();
+      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+      methods_class = class_linker->ResolveType(dex_file,
+                                                dex_file.GetMethodId(method_idx).class_idx_,
+                                                referring_class);
+      if (UNLIKELY(!referring_class->CanAccess(methods_class))) {
+        ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
+                                                      referrer, resolved_method, type);
+        return nullptr;  // Failure.
+      } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class,
+                                                            resolved_method->GetAccessFlags()))) {
+        ThrowIllegalAccessErrorMethod(referring_class, resolved_method);
+        return nullptr;  // Failure.
+      }
+    }
+  }
+  switch (type) {
+    case kStatic:
+    case kDirect:
+      return resolved_method;
+    case kVirtual: {
+      mirror::ObjectArray<mirror::ArtMethod>* vtable = this_object->GetClass()->GetVTable();
+      uint16_t vtable_index = resolved_method->GetMethodIndex();
+      if (access_check &&
+          (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength()))) {
+        // Behavior to agree with that of the verifier.
+        MethodHelper mh(resolved_method);
+        ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(),
+                               mh.GetSignature());
+        return nullptr;  // Failure.
+      }
+      DCHECK(vtable != nullptr);
+      return vtable->GetWithoutChecks(vtable_index);
+    }
+    case kSuper: {
+      mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass();
+      uint16_t vtable_index = resolved_method->GetMethodIndex();
+      mirror::ObjectArray<mirror::ArtMethod>* vtable;
+      if (access_check) {
+        // Check existence of super class.
+        vtable = (super_class != nullptr) ? super_class->GetVTable() : nullptr;
+        if (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength())) {
+          // Behavior to agree with that of the verifier.
+          MethodHelper mh(resolved_method);
+          ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(),
+                                 mh.GetSignature());
+          return nullptr;  // Failure.
+        }
+      } else {
+        // Super class must exist.
+        DCHECK(super_class != nullptr);
+        vtable = super_class->GetVTable();
+      }
+      DCHECK(vtable != nullptr);
+      return vtable->GetWithoutChecks(vtable_index);
+    }
+    case kInterface: {
+      mirror::ArtMethod* interface_method =
+          this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
+      if (UNLIKELY(interface_method == nullptr)) {
+        ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object,
+                                                                   referrer);
+        return nullptr;  // Failure.
+      } else {
+        return interface_method;
+      }
+    }
+    default:
+      LOG(FATAL) << "Unknown invoke type " << type;
+      return nullptr;  // Failure.
+  }
+}
+
+// Explicit template declarations of FindMethodFromCode for all invoke types.
+#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)                        \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                              \
+  static mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx,         \
+                                                                     mirror::Object* this_object, \
+                                                                     mirror::ArtMethod* referrer, \
+                                                                     Thread* self)
+#define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
+    EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false);   \
+    EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, true)
+
+EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kStatic);
+EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kDirect);
+EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kVirtual);
+EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kSuper);
+EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface);
+
+#undef EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL
+#undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL
 
 // Fast path field resolution that can't initialize classes or throw exceptions.
 static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
@@ -282,11 +506,6 @@
   }
 }
 
-extern mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object,
-                                             mirror::ArtMethod* referrer,
-                                             Thread* self, bool access_check, InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
 static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
                                                     const mirror::ArtMethod* referrer,
                                                     Thread* self, bool can_run_clinit,
diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc
index bd6f795..095e99e 100644
--- a/runtime/entrypoints/portable/portable_field_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_field_entrypoints.cc
@@ -33,12 +33,8 @@
     field->Set32(field->GetDeclaringClass(), new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx,
-                            referrer,
-                            Thread::Current(),
-                            StaticPrimitiveWrite,
-                            sizeof(uint32_t),
-                            true);
+  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
+                                                        sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     field->Set32(field->GetDeclaringClass(), new_value);
     return 0;
@@ -55,12 +51,8 @@
     field->Set64(field->GetDeclaringClass(), new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx,
-                            referrer,
-                            Thread::Current(),
-                            StaticPrimitiveWrite,
-                            sizeof(uint64_t),
-                            true);
+  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
+                                                        sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     field->Set64(field->GetDeclaringClass(), new_value);
     return 0;
@@ -78,8 +70,8 @@
     field->SetObj(field->GetDeclaringClass(), new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticObjectWrite, sizeof(mirror::Object*), true);
+  field = FindFieldFromCode<StaticObjectWrite, true>(field_idx, referrer, Thread::Current(),
+                                                     sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     field->SetObj(field->GetDeclaringClass(), new_value);
     return 0;
@@ -94,8 +86,8 @@
   if (LIKELY(field != NULL)) {
     return field->Get32(field->GetDeclaringClass());
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticPrimitiveRead, sizeof(uint32_t), true);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, Thread::Current(),
+                                                       sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     return field->Get32(field->GetDeclaringClass());
   }
@@ -109,8 +101,8 @@
   if (LIKELY(field != NULL)) {
     return field->Get64(field->GetDeclaringClass());
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticPrimitiveRead, sizeof(uint64_t), true);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, Thread::Current(),
+                                                       sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     return field->Get64(field->GetDeclaringClass());
   }
@@ -125,8 +117,8 @@
   if (LIKELY(field != NULL)) {
     return field->GetObj(field->GetDeclaringClass());
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticObjectRead, sizeof(mirror::Object*), true);
+  field = FindFieldFromCode<StaticObjectRead, true>(field_idx, referrer, Thread::Current(),
+                                                    sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     return field->GetObj(field->GetDeclaringClass());
   }
@@ -142,8 +134,8 @@
     field->Set32(obj, new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstancePrimitiveWrite, sizeof(uint32_t), true);
+  field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
+                                                          sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     field->Set32(obj, new_value);
     return 0;
@@ -160,8 +152,8 @@
     field->Set64(obj, new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstancePrimitiveWrite, sizeof(uint64_t), true);
+  field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
+                                                          sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     field->Set64(obj, new_value);
     return 0;
@@ -180,8 +172,8 @@
     field->SetObj(obj, new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstanceObjectWrite, sizeof(mirror::Object*), true);
+  field = FindFieldFromCode<InstanceObjectWrite, true>(field_idx, referrer, Thread::Current(),
+                                                       sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     field->SetObj(obj, new_value);
     return 0;
@@ -197,8 +189,8 @@
   if (LIKELY(field != NULL)) {
     return field->Get32(obj);
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstancePrimitiveRead, sizeof(uint32_t), true);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, Thread::Current(),
+                                                         sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     return field->Get32(obj);
   }
@@ -213,8 +205,8 @@
   if (LIKELY(field != NULL)) {
     return field->Get64(obj);
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstancePrimitiveRead, sizeof(uint64_t), true);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, Thread::Current(),
+                                                         sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     return field->Get64(obj);
   }
@@ -230,8 +222,8 @@
   if (LIKELY(field != NULL)) {
     return field->GetObj(obj);
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstanceObjectRead, sizeof(mirror::Object*), true);
+  field = FindFieldFromCode<InstanceObjectRead, true>(field_idx, referrer, Thread::Current(),
+                                                      sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     return field->GetObj(obj);
   }
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
index 14cbd84..e2a0cc2 100644
--- a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
@@ -21,21 +21,13 @@
 
 namespace art {
 
-static mirror::ArtMethod* FindMethodHelper(uint32_t method_idx,
-                                                mirror::Object* this_object,
-                                                mirror::ArtMethod* caller_method,
-                                                bool access_check,
-                                                InvokeType type,
-                                                Thread* thread)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::ArtMethod* method = FindMethodFast(method_idx,
-                                                  this_object,
-                                                  caller_method,
-                                                  access_check,
-                                                  type);
+template<InvokeType type, bool access_check>
+static mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
+                                           mirror::ArtMethod* caller_method, Thread* thread) {
+  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
+                                             access_check, type);
   if (UNLIKELY(method == NULL)) {
-    method = FindMethodFromCode(method_idx, this_object, caller_method,
-                                thread, access_check, type);
+    method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, thread);
     if (UNLIKELY(method == NULL)) {
       CHECK(thread->IsExceptionPending());
       return 0;  // failure
@@ -53,12 +45,32 @@
   return method;
 }
 
+// Explicit template declarations of FindMethodHelper for all invoke types.
+#define EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, _access_check)                               \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                                \
+  static mirror::ArtMethod* FindMethodHelper<_type, _access_check>(uint32_t method_idx,               \
+                                                                   mirror::Object* this_object,       \
+                                                                   mirror::ArtMethod* caller_method,  \
+                                                                   Thread* thread)
+#define EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(_type) \
+    EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, false);   \
+    EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, true)
+
+EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kStatic);
+EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kDirect);
+EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kVirtual);
+EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kSuper);
+EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kInterface);
+
+#undef EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL
+#undef EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL
+
 extern "C" mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
                                                                                        mirror::Object* this_object,
                                                                                        mirror::ArtMethod* referrer,
                                                                                        Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread);
+  return FindMethodHelper<kStatic, true>(method_idx, this_object, referrer, thread);
 }
 
 extern "C" mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
@@ -66,7 +78,7 @@
                                                                                        mirror::ArtMethod* referrer,
                                                                                        Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread);
+  return FindMethodHelper<kDirect, true>(method_idx, this_object, referrer, thread);
 }
 
 extern "C" mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
@@ -74,7 +86,7 @@
                                                                                         mirror::ArtMethod* referrer,
                                                                                         Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread);
+  return FindMethodHelper<kVirtual, true>(method_idx, this_object, referrer, thread);
 }
 
 extern "C" mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
@@ -82,7 +94,7 @@
                                                                                       mirror::ArtMethod* referrer,
                                                                                       Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread);
+  return FindMethodHelper<kSuper, true>(method_idx, this_object, referrer, thread);
 }
 
 extern "C" mirror::Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
@@ -90,7 +102,7 @@
                                                                                           mirror::ArtMethod* referrer,
                                                                                           Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread);
+  return FindMethodHelper<kInterface, true>(method_idx, this_object, referrer, thread);
 }
 
 extern "C" mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
@@ -98,7 +110,7 @@
                                                                         mirror::ArtMethod* referrer,
                                                                         Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread);
+  return FindMethodHelper<kInterface, false>(method_idx, this_object, referrer, thread);
 }
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 0ec1eb7..0a533bd 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -35,7 +35,7 @@
     return field->Get32(field->GetDeclaringClass());
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int32_t), true);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int32_t));
   if (LIKELY(field != NULL)) {
     return field->Get32(field->GetDeclaringClass());
   }
@@ -52,7 +52,7 @@
     return field->Get64(field->GetDeclaringClass());
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int64_t), true);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int64_t));
   if (LIKELY(field != NULL)) {
     return field->Get64(field->GetDeclaringClass());
   }
@@ -69,8 +69,8 @@
     return field->GetObj(field->GetDeclaringClass());
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, StaticObjectRead, sizeof(mirror::Object*),
-                            true);
+  field = FindFieldFromCode<StaticObjectRead, true>(field_idx, referrer, self,
+                                                    sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     return field->GetObj(field->GetDeclaringClass());
   }
@@ -87,8 +87,8 @@
     return field->Get32(obj);
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int32_t),
-                            true);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
+                                                         sizeof(int32_t));
   if (LIKELY(field != NULL)) {
     if (UNLIKELY(obj == NULL)) {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -110,8 +110,8 @@
     return field->Get64(obj);
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int64_t),
-                            true);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
+                                                         sizeof(int64_t));
   if (LIKELY(field != NULL)) {
     if (UNLIKELY(obj == NULL)) {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -134,8 +134,8 @@
     return field->GetObj(obj);
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectRead, sizeof(mirror::Object*),
-                            true);
+  field = FindFieldFromCode<InstanceObjectRead, true>(field_idx, referrer, self,
+                                                      sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     if (UNLIKELY(obj == NULL)) {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -158,7 +158,7 @@
     return 0;  // success
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int32_t), true);
+  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int32_t));
   if (LIKELY(field != NULL)) {
     field->Set32(field->GetDeclaringClass(), new_value);
     return 0;  // success
@@ -176,7 +176,7 @@
     return 0;  // success
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int64_t), true);
+  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
   if (LIKELY(field != NULL)) {
     field->Set64(field->GetDeclaringClass(), new_value);
     return 0;  // success
@@ -197,7 +197,8 @@
     }
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, StaticObjectWrite, sizeof(mirror::Object*), true);
+  field = FindFieldFromCode<StaticObjectWrite, true>(field_idx, referrer, self,
+                                                     sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     field->SetObj(field->GetDeclaringClass(), new_value);
     return 0;  // success
@@ -216,8 +217,8 @@
     return 0;  // success
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int32_t),
-                            true);
+  field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
+                                                          sizeof(int32_t));
   if (LIKELY(field != NULL)) {
     if (UNLIKELY(obj == NULL)) {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -244,8 +245,8 @@
   }
   *sp = callee_save;
   self->SetTopOfStack(sp, 0);
-  field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int64_t),
-                            true);
+  field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
+                                                          sizeof(int64_t));
   if (LIKELY(field != NULL)) {
     if (UNLIKELY(obj == NULL)) {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -270,8 +271,8 @@
     return 0;  // success
   }
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectWrite,
-                            sizeof(mirror::Object*), true);
+  field = FindFieldFromCode<InstanceObjectWrite, true>(field_idx, referrer, self,
+                                                       sizeof(mirror::Object*));
   if (LIKELY(field != NULL)) {
     if (UNLIKELY(obj == NULL)) {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
index 07c1c01..b852a32 100644
--- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
@@ -118,8 +118,7 @@
       DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
       dex_method_idx = instr->VRegB_3rc();
     }
-    method = FindMethodFromCode(dex_method_idx, this_object, caller_method, self,
-                                false, kInterface);
+    method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method, self);
     if (UNLIKELY(method == NULL)) {
       CHECK(self->IsExceptionPending());
       return 0;  // Failure.
@@ -142,17 +141,15 @@
   return result;
 }
 
-
+template<InvokeType type, bool access_check>
 static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                 mirror::ArtMethod* caller_method,
-                                Thread* self, mirror::ArtMethod** sp, bool access_check,
-                                InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+                                Thread* self, mirror::ArtMethod** sp) {
   mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
                                                   access_check, type);
   if (UNLIKELY(method == NULL)) {
     FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
-    method = FindMethodFromCode(method_idx, this_object, caller_method, self, access_check, type);
+    method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
     if (UNLIKELY(method == NULL)) {
       CHECK(self->IsExceptionPending());
       return 0;  // failure
@@ -176,6 +173,27 @@
   return result;
 }
 
+// Explicit template declarations of artInvokeCommon for all invoke types.
+#define EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, _access_check)                        \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                        \
+  static uint64_t artInvokeCommon<_type, _access_check>(uint32_t method_idx,                  \
+                                                        mirror::Object* this_object,          \
+                                                        mirror::ArtMethod* caller_method,     \
+                                                        Thread* self, mirror::ArtMethod** sp)
+
+#define EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(_type) \
+    EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, false);   \
+    EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, true)
+
+EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kStatic);
+EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kDirect);
+EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kVirtual);
+EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kSuper);
+EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kInterface);
+
+#undef EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL
+#undef EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL
+
 // See comments in runtime_support_asm.S
 extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
                                                                 mirror::Object* this_object,
@@ -183,7 +201,7 @@
                                                                 Thread* self,
                                                                 mirror::ArtMethod** sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface);
+  return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 
@@ -193,7 +211,7 @@
                                                              Thread* self,
                                                              mirror::ArtMethod** sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect);
+  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
@@ -202,7 +220,7 @@
                                                              Thread* self,
                                                              mirror::ArtMethod** sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic);
+  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
@@ -211,7 +229,7 @@
                                                             Thread* self,
                                                             mirror::ArtMethod** sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper);
+  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
@@ -220,7 +238,7 @@
                                                               Thread* self,
                                                               mirror::ArtMethod** sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual);
+  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 12291c3..01d3549 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -19,6 +19,7 @@
 #include "dex_file-inl.h"
 #include "dex_instruction-inl.h"
 #include "entrypoints/entrypoint_utils.h"
+#include "gc/accounting/card_table-inl.h"
 #include "interpreter/interpreter.h"
 #include "invoke_arg_array_builder.h"
 #include "mirror/art_method-inl.h"
@@ -547,6 +548,21 @@
     } else if (invoke_type == kInterface) {
       called = receiver->GetClass()->FindVirtualMethodForInterface(called);
     }
+    if ((invoke_type == kVirtual) || (invoke_type == kInterface)) {
+      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
+      // of the sharpened method.
+      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
+        caller->GetDexCacheResolvedMethods()->Set(called->GetDexMethodIndex(), called);
+      } else {
+        // We are calling from one dex file to another; compute the method index appropriate to
+        // the caller's dex file.
+        uint32_t method_index =
+            MethodHelper(called).FindDexMethodIndexInOtherDexFile(MethodHelper(caller).GetDexFile());
+        if (method_index != DexFile::kDexNoIndex) {
+          caller->GetDexCacheResolvedMethods()->Set(method_index, called);
+        }
+      }
+    }
     // Ensure that the called method's class is initialized.
     mirror::Class* called_class = called->GetDeclaringClass();
     linker->EnsureInitialized(called_class, true, true);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 2e6b0a8..a5f9997 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -146,7 +146,7 @@
   ScopedObjectAccess soa(env);
 
   std::vector<uintptr_t> fake_stack;
-  ASSERT_EQ(kStackAlignment, 16);
+  ASSERT_EQ(kStackAlignment, 16U);
   ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
 
 #if !defined(ART_USE_PORTABLE_COMPILER)
diff --git a/runtime/globals.h b/runtime/globals.h
index c397494..31574ff 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -30,30 +30,30 @@
 const size_t MB = KB * KB;
 const size_t GB = KB * KB * KB;
 
-const int kWordSize = sizeof(word);
-const int kPointerSize = sizeof(void*);
+const size_t kWordSize = sizeof(word);
+const size_t kPointerSize = sizeof(void*);
 
-const int kBitsPerByte = 8;
-const int kBitsPerByteLog2 = 3;
+const size_t kBitsPerByte = 8;
+const size_t kBitsPerByteLog2 = 3;
 const int kBitsPerWord = kWordSize * kBitsPerByte;
-const int kWordHighBitMask = 1 << (kBitsPerWord - 1);
+const size_t kWordHighBitMask = 1 << (kBitsPerWord - 1);
 
 // Required stack alignment
-const int kStackAlignment = 16;
+const size_t kStackAlignment = 16;
 
 // Required object alignment
-const int kObjectAlignment = 8;
+const size_t kObjectAlignment = 8;
 
 // ARM instruction alignment. ARM processors require code to be 4-byte aligned,
 // but ARM ELF requires 8..
-const int kArmAlignment = 8;
+const size_t kArmAlignment = 8;
 
 // MIPS instruction alignment.  MIPS processors require code to be 4-byte aligned.
 // TODO: Can this be 4?
-const int kMipsAlignment = 8;
+const size_t kMipsAlignment = 8;
 
 // X86 instruction alignment. This is the recommended alignment for maximum performance.
-const int kX86Alignment = 16;
+const size_t kX86Alignment = 16;
 
 // System page size. We check this against sysconf(_SC_PAGE_SIZE) at runtime, but use a simple
 // compile-time constant so the compiler can generate better code.
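The exception_test.cc change above follows directly from these constants becoming size_t: gtest's ASSERT_EQ compares both operands with their real types, so matching an unsigned constant against a plain int literal can trip sign-compare warnings (promoted to errors in builds using -Werror). A minimal standalone illustration, using a hypothetical test that is not part of the patch:

#include "gtest/gtest.h"

namespace {

const size_t kStackAlignment = 16;  // mirrors the constant's type after the patch

TEST(GlobalsExample, StackAlignmentLiteral) {
  // With a size_t on the left, an unsigned literal keeps both sides of the
  // comparison the same signedness; a bare 16 would mix signed and unsigned.
  ASSERT_EQ(kStackAlignment, 16U);
}

}  // namespace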
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 29b00d2..0bc834c 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -90,7 +90,6 @@
 bool DoCall(ArtMethod* method, Object* receiver, Thread* self, ShadowFrame& shadow_frame,
             const Instruction* inst, uint16_t inst_data, JValue* result);
 
-
 // Handles invoke-XXX/range instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<InvokeType type, bool is_range, bool do_access_check>
@@ -99,14 +98,14 @@
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
   Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
-  ArtMethod* const method = FindMethodFromCode(method_idx, receiver, shadow_frame.GetMethod(), self,
-                                               do_access_check, type);
+  ArtMethod* const method = FindMethodFromCode<type, do_access_check>(method_idx, receiver,
+                                                                      shadow_frame.GetMethod(),
+                                                                      self);
   if (type != kStatic) {
     // Reload the vreg since the GC may have moved the object.
     receiver = shadow_frame.GetVRegReference(vregC);
   }
-
-  if (UNLIKELY(method == NULL)) {
+  if (UNLIKELY(method == nullptr)) {
     CHECK(self->IsExceptionPending());
     result->SetJ(0);
     return false;
@@ -128,7 +127,7 @@
                                         JValue* result) {
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
   Object* const receiver = shadow_frame.GetVRegReference(vregC);
-  if (UNLIKELY(receiver == NULL)) {
+  if (UNLIKELY(receiver == nullptr)) {
     // We lost the reference to the method index so we cannot get a more
     // precised exception message.
     ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
@@ -136,7 +135,7 @@
   }
   const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   ArtMethod* const method = receiver->GetClass()->GetVTable()->GetWithoutChecks(vtable_idx);
-  if (UNLIKELY(method == NULL)) {
+  if (UNLIKELY(method == nullptr)) {
     CHECK(self->IsExceptionPending());
     result->SetJ(0);
     return false;
@@ -155,12 +154,11 @@
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
 static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
                               const Instruction* inst, uint16_t inst_data) {
-  bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
-  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
-  ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
-                                  find_type, Primitive::FieldSize(field_type),
-                                  do_access_check);
-  if (UNLIKELY(f == NULL)) {
+  const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
+  const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+  ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+                                                              Primitive::FieldSize(field_type));
+  if (UNLIKELY(f == nullptr)) {
     CHECK(self->IsExceptionPending());
     return false;
   }
@@ -169,7 +167,7 @@
     obj = f->GetDeclaringClass();
   } else {
     obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-    if (UNLIKELY(obj == NULL)) {
+    if (UNLIKELY(obj == nullptr)) {
       ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
       return false;
     }
@@ -208,7 +206,7 @@
 template<Primitive::Type field_type>
 static inline bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
   Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-  if (UNLIKELY(obj == NULL)) {
+  if (UNLIKELY(obj == nullptr)) {
     // We lost the reference to the field index so we cannot get a more
     // precised exception message.
     ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
@@ -241,10 +239,9 @@
   bool do_assignability_check = do_access_check;
   bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
   uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
-  ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
-                                  find_type, Primitive::FieldSize(field_type),
-                                  do_access_check);
-  if (UNLIKELY(f == NULL)) {
+  ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+                                                              Primitive::FieldSize(field_type));
+  if (UNLIKELY(f == nullptr)) {
     CHECK(self->IsExceptionPending());
     return false;
   }
@@ -253,7 +250,7 @@
     obj = f->GetDeclaringClass();
   } else {
     obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-    if (UNLIKELY(obj == NULL)) {
+    if (UNLIKELY(obj == nullptr)) {
       ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
                                               f, false);
       return false;
@@ -281,7 +278,7 @@
       break;
     case Primitive::kPrimNot: {
       Object* reg = shadow_frame.GetVRegReference(vregA);
-      if (do_assignability_check && reg != NULL) {
+      if (do_assignability_check && reg != nullptr) {
         Class* field_class = FieldHelper(f).GetType();
         if (!reg->VerifierInstanceOf(field_class)) {
           // This should never happen.
@@ -308,7 +305,7 @@
 template<Primitive::Type field_type>
 static inline bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
   Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-  if (UNLIKELY(obj == NULL)) {
+  if (UNLIKELY(obj == nullptr)) {
     // We lost the reference to the field index so we cannot get a more
     // precised exception message.
     ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index dbc6f57..d15f337 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -122,6 +122,7 @@
     kStatusVerified = 7,  // Logically part of linking; done pre-init.
     kStatusInitializing = 8,  // Class init in progress.
     kStatusInitialized = 9,  // Ready to go.
+    kStatusMax = 10,
   };
 
   Status GetStatus() const {
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 4c5f90c..6c6d488 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -291,8 +291,8 @@
   ASSERT_TRUE(field_id != NULL);
   uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
 
-  ArtField* field = FindFieldFromCode(field_idx, clinit, Thread::Current(), StaticObjectRead,
-                                      sizeof(Object*), true);
+  ArtField* field = FindFieldFromCode<StaticObjectRead, true>(field_idx, clinit, Thread::Current(),
+                                                              sizeof(Object*));
   Object* s0 = field->GetObj(klass);
   EXPECT_TRUE(s0 != NULL);
 
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index af09a1c..55a56d6 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -242,9 +242,16 @@
                                               &error_msg)) {
         if (kVerboseLogging) {
           LOG(INFO) << "DexFile_isDexOptNeeded precompiled file " << odex_filename
-              << " is up-to-date checksum compared to " << filename.c_str();
+              << " has an up-to-date checksum compared to " << filename.c_str();
         }
         return JNI_FALSE;
+      } else {
+        if (kVerboseLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeeded found precompiled file " << odex_filename
+              << " with an out-of-date checksum compared to " << filename.c_str()
+              << ": " << error_msg;
+        }
+        error_msg.clear();
       }
     }
   }
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 486328c..4629dbd 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -135,7 +135,7 @@
   return env->NewStringUTF(kIsDebugBuild ? "libartd.so" : "libart.so");
 }
 
-static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVersion) {
+static void VMRuntime_setTargetSdkVersionNative(JNIEnv* env, jobject, jint targetSdkVersion) {
   // This is the target SDK version of the app we're about to run.
   // Note that targetSdkVersion may be CUR_DEVELOPMENT (10000).
   // Note that targetSdkVersion may be 0, meaning "current".
@@ -519,7 +519,7 @@
   NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"),
   NATIVE_METHOD(VMRuntime, newNonMovableArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"),
   NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
-  NATIVE_METHOD(VMRuntime, setTargetSdkVersion, "(I)V"),
+  NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
   NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
   NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
   NATIVE_METHOD(VMRuntime, startJitCompilation, "()V"),
diff --git a/runtime/oat.cc b/runtime/oat.cc
index c01f77c..6fe5d10 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '0', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '0', '8', '\0' };
 
 OatHeader::OatHeader() {
   memset(this, 0, sizeof(*this));
diff --git a/runtime/oat.h b/runtime/oat.h
index a653cf8..a9dc540 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -104,6 +104,19 @@
   DISALLOW_COPY_AND_ASSIGN(OatHeader);
 };
 
+// OatMethodOffsets are currently 7x32-bits=224-bits long, so if we can
+// save even one OatMethodOffsets struct, the more complicated encoding
+// using a bitmap pays for itself since few classes will have more
+// than 224 methods.
+enum OatClassType {
+  kOatClassAllCompiled = 0,   // OatClass is followed by an OatMethodOffsets for each method.
+  kOatClassSomeCompiled = 1,  // A bitmap of which OatMethodOffsets are present follows the OatClass.
+  kOatClassNoneCompiled = 2,  // All methods are interpreted so no OatMethodOffsets are necessary.
+  kOatClassMax = 3,
+};
+
+std::ostream& operator<<(std::ostream& os, const OatClassType& rhs);
+
 class PACKED(4) OatMethodOffsets {
  public:
   OatMethodOffsets();
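To make the size argument in the new comment concrete, here is a rough back-of-the-envelope sketch. The figures are assumptions lifted from that comment (an OatMethodOffsets entry of 7x32 bits, a one-bit-per-method bitmap rounded up to whole words), not measurements of the actual on-disk layout:

#include <cstdint>
#include <cstdio>

const uint32_t kOatMethodOffsetsBits = 7 * 32;  // 224 bits per entry (assumed)

// Encoding cost when every method gets an OatMethodOffsets entry.
uint32_t AllCompiledBits(uint32_t num_methods) {
  return num_methods * kOatMethodOffsetsBits;
}

// Encoding cost when only compiled methods get an entry, plus a bitmap.
uint32_t SomeCompiledBits(uint32_t num_methods, uint32_t num_compiled) {
  uint32_t bitmap_bits = ((num_methods + 31) / 32) * 32;  // word-aligned bitmap
  return bitmap_bits + num_compiled * kOatMethodOffsetsBits;
}

int main() {
  // Skipping a single method out of 40 already pays for the bitmap:
  // the 64-bit bitmap is cheaper than the 224-bit entry it removes.
  std::printf("all compiled:  %u bits\n", AllCompiledBits(40));      // 8960
  std::printf("some compiled: %u bits\n", SomeCompiledBits(40, 39)); // 8800
  return 0;
}

Under these assumptions the break-even point is a bitmap of 224 bits, so the bitmap encoding wins whenever a class has at most a couple of hundred methods and at least one of them lacks compiled code, which is what the comment is getting at.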
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 7553dcc..fa2b485 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -18,6 +18,7 @@
 
 #include <dlfcn.h>
 
+#include "base/bit_vector.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "elf_file.h"
@@ -384,29 +385,92 @@
 
   const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
   CHECK_LT(oat_class_pointer, oat_file_->End()) << oat_file_->GetLocation();
-  mirror::Class::Status status = *reinterpret_cast<const mirror::Class::Status*>(oat_class_pointer);
 
-  const byte* methods_pointer = oat_class_pointer + sizeof(status);
+  const byte* status_pointer = oat_class_pointer;
+  CHECK_LT(status_pointer, oat_file_->End()) << oat_file_->GetLocation();
+  mirror::Class::Status status =
+      static_cast<mirror::Class::Status>(*reinterpret_cast<const int16_t*>(status_pointer));
+  CHECK_LT(status, mirror::Class::kStatusMax);
+
+  const byte* type_pointer = status_pointer + sizeof(uint16_t);
+  CHECK_LT(type_pointer, oat_file_->End()) << oat_file_->GetLocation();
+  OatClassType type = static_cast<OatClassType>(*reinterpret_cast<const uint16_t*>(type_pointer));
+  CHECK_LT(type, kOatClassMax);
+
+  const byte* bitmap_pointer = type_pointer + sizeof(uint16_t);
+  CHECK_LT(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
+  uint32_t bitmap_size = 0;
+  if (type == kOatClassSomeCompiled) {
+    bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(bitmap_pointer));
+    bitmap_pointer += sizeof(bitmap_size);
+    CHECK_LT(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
+  }
+
+  const byte* methods_pointer = bitmap_pointer + bitmap_size;
   CHECK_LT(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
 
   return new OatClass(oat_file_,
                       status,
+                      type,
+                      bitmap_size,
+                      reinterpret_cast<const uint32_t*>(bitmap_pointer),
                       reinterpret_cast<const OatMethodOffsets*>(methods_pointer));
 }
 
 OatFile::OatClass::OatClass(const OatFile* oat_file,
                             mirror::Class::Status status,
+                            OatClassType type,
+                            uint32_t bitmap_size,
+                            const uint32_t* bitmap_pointer,
                             const OatMethodOffsets* methods_pointer)
-    : oat_file_(oat_file), status_(status), methods_pointer_(methods_pointer) {}
+    : oat_file_(oat_file), status_(status), type_(type),
+      bitmap_(NULL), methods_pointer_(methods_pointer) {
+    switch (type_) {
+      case kOatClassAllCompiled: {
+        CHECK_EQ(0U, bitmap_size);
+        break;
+      }
+      case kOatClassSomeCompiled: {
+        CHECK_NE(0U, bitmap_size);
+        bitmap_ = new BitVector(0, false, Allocator::GetNoopAllocator(), bitmap_size,
+                                const_cast<uint32_t*>(bitmap_pointer));
+        break;
+      }
+      case kOatClassNoneCompiled: {
+        CHECK_EQ(0U, bitmap_size);
+        methods_pointer_ = NULL;
+        break;
+      }
+      case kOatClassMax: {
+        LOG(FATAL) << "Invalid OatClassType " << type_;
+        break;
+      }
+    }
+}
 
-OatFile::OatClass::~OatClass() {}
-
-mirror::Class::Status OatFile::OatClass::GetStatus() const {
-  return status_;
+OatFile::OatClass::~OatClass() {
+  delete bitmap_;
 }
 
 const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
-  const OatMethodOffsets& oat_method_offsets = methods_pointer_[method_index];
+  if (methods_pointer_ == NULL) {
+    CHECK_EQ(kOatClassNoneCompiled, type_);
+    return OatMethod(NULL, 0, 0, 0, 0, 0, 0, 0);
+  }
+  size_t methods_pointer_index;
+  if (bitmap_ == NULL) {
+    CHECK_EQ(kOatClassAllCompiled, type_);
+    methods_pointer_index = method_index;
+  } else {
+    CHECK_EQ(kOatClassSomeCompiled, type_);
+    if (!bitmap_->IsBitSet(method_index)) {
+      return OatMethod(NULL, 0, 0, 0, 0, 0, 0, 0);
+    }
+    size_t num_set_bits = bitmap_->NumSetBits(method_index);
+    CHECK_NE(0U, num_set_bits);
+    methods_pointer_index = num_set_bits - 1;
+  }
+  const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
   return OatMethod(
       oat_file_->Begin(),
       oat_method_offsets.code_offset_,
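
For kOatClassSomeCompiled, GetOatMethod above maps a method index to a slot in the compacted OatMethodOffsets table by counting the set bits up to that index. The sketch below is a self-contained illustration of that mapping using plain popcount, not the ART BitVector API; __builtin_popcount is a GCC/Clang builtin.

    // Standalone illustration of the kOatClassSomeCompiled index mapping.
    #include <cassert>
    #include <cstdint>

    // Returns the slot of |method_index| in the compacted OatMethodOffsets
    // table, or -1 if the method has no entry (i.e. it is interpreted).
    int CompiledSlot(const uint32_t* bitmap, uint32_t method_index) {
      uint32_t word = method_index / 32;
      uint32_t bit = method_index % 32;
      if ((bitmap[word] & (1u << bit)) == 0) {
        return -1;  // bit clear: no OatMethodOffsets for this method
      }
      // The slot is the number of compiled methods preceding this one,
      // i.e. the count of set bits strictly below method_index.
      int slot = 0;
      for (uint32_t w = 0; w < word; ++w) {
        slot += __builtin_popcount(bitmap[w]);
      }
      slot += __builtin_popcount(bitmap[word] & ((1u << bit) - 1u));
      return slot;
    }

    int main() {
      uint32_t bitmap[1] = { 0x25 };  // methods 0, 2, 5 compiled; 1, 3, 4 interpreted
      assert(CompiledSlot(bitmap, 0) == 0);
      assert(CompiledSlot(bitmap, 2) == 1);
      assert(CompiledSlot(bitmap, 3) == -1);
      assert(CompiledSlot(bitmap, 5) == 2);
      return 0;
    }
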
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index af14760..887a9d1 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -29,6 +29,7 @@
 
 namespace art {
 
+class BitVector;
 class ElfFile;
 class MemMap;
 class OatMethodOffsets;
@@ -145,7 +146,13 @@
 
   class OatClass {
    public:
-    mirror::Class::Status GetStatus() const;
+    mirror::Class::Status GetStatus() const {
+      return status_;
+    }
+
+    OatClassType GetType() const {
+      return type_;
+    }
 
     // get the OatMethod entry based on its index into the class
     // definition. direct methods come first, followed by virtual
@@ -157,10 +164,21 @@
    private:
     OatClass(const OatFile* oat_file,
              mirror::Class::Status status,
+             OatClassType type,
+             uint32_t bitmap_size,
+             const uint32_t* bitmap_pointer,
              const OatMethodOffsets* methods_pointer);
 
     const OatFile* oat_file_;
+
     const mirror::Class::Status status_;
+    COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (1 << 16), class_status_wont_fit_in_16bits);
+
+    OatClassType type_;
+    COMPILE_ASSERT(OatClassType::kOatClassMax < (1 << 16), oat_class_type_wont_fit_in_16bits);
+
+    const BitVector* bitmap_;
+
     const OatMethodOffsets* methods_pointer_;
 
     friend class OatDexFile;
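
The two COMPILE_ASSERTs above guard the on-disk encoding: MakeOatClass in oat_file.cc reads the class status and the OatClassType back as 16-bit values, so both enums must stay below 2^16. Note the bound has to be spelled (1 << 16); in C++, 2 ^ 16 is XOR and evaluates to 18, which would make the assert far tighter than intended. A minimal standalone check (C++11 static_assert form, with a hypothetical stand-in enum):

    // Standalone equivalent of the asserts above.
    enum OatClassTypeSketch {  // hypothetical stand-in for OatClassType
      kAllCompiled = 0,
      kSomeCompiled = 1,
      kNoneCompiled = 2,
      kMaxSketch = 3,
    };

    static_assert((2 ^ 16) == 18, "operator^ is XOR, not exponentiation");
    static_assert(kMaxSketch < (1 << 16), "type must round-trip through a uint16_t");

    int main() { return 0; }
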
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index 8062a89..3ca3c0b 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -700,6 +700,46 @@
     return s;
   }
 
+  uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    const DexFile& dexfile = GetDexFile();
+    if (&dexfile == &other_dexfile) {
+      return method_->GetDexMethodIndex();
+    }
+    const DexFile::MethodId& mid = dexfile.GetMethodId(method_->GetDexMethodIndex());
+    const char* mid_declaring_class_descriptor = dexfile.StringByTypeIdx(mid.class_idx_);
+    const DexFile::StringId* other_descriptor =
+        other_dexfile.FindStringId(mid_declaring_class_descriptor);
+    if (other_descriptor != nullptr) {
+      const DexFile::TypeId* other_type_id =
+          other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor));
+      if (other_type_id != nullptr) {
+        const char* mid_name = dexfile.GetMethodName(mid);
+        const DexFile::StringId* other_name = other_dexfile.FindStringId(mid_name);
+        if (other_name != nullptr) {
+          uint16_t other_return_type_idx;
+          std::vector<uint16_t> other_param_type_idxs;
+          bool success = other_dexfile.CreateTypeList(dexfile.GetMethodSignature(mid).ToString(),
+                                                      &other_return_type_idx,
+                                                      &other_param_type_idxs);
+          if (success) {
+            const DexFile::ProtoId* other_sig =
+                other_dexfile.FindProtoId(other_return_type_idx, other_param_type_idxs);
+            if (other_sig != nullptr) {
+              const  DexFile::MethodId* other_mid = other_dexfile.FindMethodId(*other_type_id,
+                                                                               *other_name,
+                                                                               *other_sig);
+              if (other_mid != nullptr) {
+                return other_dexfile.GetIndexForMethodId(*other_mid);
+              }
+            }
+          }
+        }
+      }
+    }
+    return DexFile::kDexNoIndex;
+  }
+
  private:
   // Set the method_ field, for proxy methods looking up the interface method via the resolved
   // methods table.
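
FindDexMethodIndexInOtherDexFile above translates a method reference between dex files purely by its symbolic parts: the declaring class descriptor, the method name, and the signature are each re-resolved against the other file's string/type/proto tables, and only if every lookup succeeds is the MethodId index returned. The toy sketch below mirrors that fail-fast chain with ordinary standard-library maps (a hypothetical MiniDex type, not the ART DexFile API), returning a kDexNoIndex-style sentinel as soon as any step misses.

    // Toy model of the cross-dex lookup chain (hypothetical, not ART code).
    #include <cstdint>
    #include <map>
    #include <string>
    #include <tuple>

    constexpr uint32_t kNoIndex = 0xFFFFFFFFu;

    struct MiniDex {
      // descriptor -> type index, and (class, name, signature) -> method index.
      std::map<std::string, uint32_t> types;
      std::map<std::tuple<std::string, std::string, std::string>, uint32_t> methods;
    };

    // Re-resolve (class descriptor, name, signature) against |other|; every
    // step can fail, in which case the caller gets kNoIndex back.
    uint32_t FindInOther(const MiniDex& other, const std::string& descriptor,
                         const std::string& name, const std::string& signature) {
      if (other.types.count(descriptor) == 0) {
        return kNoIndex;  // declaring class not known to the other dex file
      }
      auto it = other.methods.find(std::make_tuple(descriptor, name, signature));
      return it == other.methods.end() ? kNoIndex : it->second;
    }

    int main() {
      MiniDex other;
      other.types["Ljava/lang/String;"] = 0;
      other.methods[std::make_tuple("Ljava/lang/String;", "length", "()I")] = 42;
      return FindInOther(other, "Ljava/lang/String;", "length", "()I") == 42 ? 0 : 1;
    }
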
diff --git a/test/055-enum-performance/run b/test/055-enum-performance/run
index 1436ce2..e27a622 100755
--- a/test/055-enum-performance/run
+++ b/test/055-enum-performance/run
@@ -14,5 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# As this is a performance test we always run -O
-exec ${RUN} -O "$@"
+# As this is a performance test we always use the non-debug build.
+exec ${RUN} "${@/#libartd.so/libart.so}"
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index f4d2dd1..b881b34 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -37,6 +37,10 @@
     test_String_length();
   }
 
+  private static String nullString() {
+    return null;
+  }
+
   public static void test_String_length() {
     String str0 = "";
     String str1 = "x";
@@ -46,7 +50,7 @@
     Assert.assertEquals(str1.length(), 1);
     Assert.assertEquals(str80.length(), 80);
 
-    String strNull = null;
+    String strNull = nullString();
     try {
       strNull.length();
       Assert.fail();
@@ -61,7 +65,7 @@
     Assert.assertTrue(str0.isEmpty());
     Assert.assertFalse(str1.isEmpty());
 
-    String strNull = null;
+    String strNull = nullString();
     try {
       strNull.isEmpty();
       Assert.fail();
@@ -88,7 +92,7 @@
     } catch (StringIndexOutOfBoundsException expected) {
     }
 
-    String strNull = null;
+    String strNull = nullString();
     try {
       strNull.charAt(0);
       Assert.fail();
@@ -133,7 +137,7 @@
     Assert.assertEquals(str40.indexOf('a',10), 10);
     Assert.assertEquals(str40.indexOf('b',40), -1);
 
-    String strNull = null;
+    String strNull = nullString();
     try {
       strNull.indexOf('a');
       Assert.fail();
diff --git a/test/etc/host-run-test-jar b/test/etc/host-run-test-jar
index 357fb5a..da74532 100755
--- a/test/etc/host-run-test-jar
+++ b/test/etc/host-run-test-jar
@@ -9,7 +9,6 @@
     fi
 }
 
-LIB="libartd.so"
 DEBUGGER="n"
 GDB="n"
 INTERPRETER="n"
@@ -23,11 +22,17 @@
     if [ "x$1" = "x--quiet" ]; then
         QUIET="y"
         shift
-    elif [ "x$1" = "x-lib" ]; then
+    elif [ "x$1" = "x--lib" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --lib" 1>&2
+            exit 1
+        fi
         LIB="$1"
-    elif [ "x$1" = "x-O" ]; then
-        LIB="libart.so"
+        shift
+    elif [ "x$1" = "x--boot" ]; then
+        shift
+        BOOT_OPT="$1"
         shift
     elif [ "x$1" = "x--debug" ]; then
         DEBUGGER="y"
@@ -38,6 +43,10 @@
         shift
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --invoke-with" 1>&2
+            exit 1
+        fi
         if [ "x$INVOKE_WITH" = "x" ]; then
             INVOKE_WITH="$1"
         else
@@ -106,7 +115,9 @@
 
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
 
+if [ "$DEV_MODE" = "y" ]; then
+  echo $cmdline "$@"
+fi
+
 cd $ANDROID_BUILD_TOP
-$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB -Ximage:$ANDROID_ROOT/framework/core.art \
-    $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS \
-    -cp $DEX_LOCATION/$TEST_NAME.jar Main "$@"
+$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main "$@"
diff --git a/test/etc/push-and-run-test-jar b/test/etc/push-and-run-test-jar
index cc28592..ff75d32 100755
--- a/test/etc/push-and-run-test-jar
+++ b/test/etc/push-and-run-test-jar
@@ -9,7 +9,6 @@
     fi
 }
 
-LIB="libartd.so"
 GDB="n"
 DEBUGGER="n"
 INTERPRETER="n"
@@ -24,11 +23,17 @@
     if [ "x$1" = "x--quiet" ]; then
         QUIET="y"
         shift
-    elif [ "x$1" = "x-lib" ]; then
+    elif [ "x$1" = "x--lib" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --lib" 1>&2
+            exit 1
+        fi
         LIB="$1"
-    elif [ "x$1" = "x-O" ]; then
-        LIB="libart.so"
+        shift
+    elif [ "x$1" = "x--boot" ]; then
+        shift
+        BOOT_OPT="$1"
         shift
     elif [ "x$1" = "x--debug" ]; then
         DEBUGGER="y"
@@ -49,6 +54,10 @@
         shift
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --invoke-with" 1>&2
+            exit 1
+        fi
         if [ "x$INVOKE_WITH" = "x" ]; then
             INVOKE_WITH="$1"
         else
@@ -132,7 +141,7 @@
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
 
 cmdline="cd $DEX_LOCATION && mkdir dalvik-cache && export ANDROID_DATA=$DEX_LOCATION && export DEX_LOCATION=$DEX_LOCATION && \
-    $INVOKE_WITH $gdb dalvikvm $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS -Ximage:/data/art-test/core.art -cp $DEX_LOCATION/$TEST_NAME.jar Main"
+    $INVOKE_WITH $gdb dalvikvm $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
 if [ "$DEV_MODE" = "y" ]; then
   echo $cmdline "$@"
 fi
diff --git a/test/run-test b/test/run-test
index c449e84..f706110 100755
--- a/test/run-test
+++ b/test/run-test
@@ -58,12 +58,14 @@
 expected="expected.txt"
 output="output.txt"
 build_output="build-output.txt"
+lib="libartd.so"
 run_args="--quiet"
 
 target_mode="yes"
 dev_mode="no"
 update_mode="no"
 debug_mode="no"
+dalvik_mode="no"
 usage="no"
 build_only="no"
 
@@ -79,7 +81,16 @@
         NEED_DEX="false"
         shift
     elif [ "x$1" = "x-O" ]; then
-        run_args="${run_args} -O"
+        lib="libart.so"
+        shift
+    elif [ "x$1" = "x--dalvik" ]; then
+        lib="libdvm.so"
+        dalvik_mode="yes"
+        shift
+    elif [ "x$1" = "x--image" ]; then
+        shift
+        image="$1"
+        run_args="${run_args} --image $image"
         shift
     elif [ "x$1" = "x--debug" ]; then
         run_args="${run_args} --debug"
@@ -106,6 +117,11 @@
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
         what="$1"
+        if [ "x$what" = "x" ]; then
+            echo "$0 missing argument to --invoke-with" 1>&2
+            usage="yes"
+            break
+        fi
         run_args="${run_args} --invoke-with ${what}"
         shift
     elif [ "x$1" = "x--dev" ]; then
@@ -118,6 +134,11 @@
     elif [ "x$1" = "x--output-path" ]; then
         shift
         tmp_dir=$1
+        if [ "x$tmp_dir" = "x" ]; then
+            echo "$0 missing argument to --output-path" 1>&2
+            usage="yes"
+            break
+        fi
         shift
     elif [ "x$1" = "x--update" ]; then
         update_mode="yes"
@@ -134,6 +155,24 @@
     fi
 done
 
+run_args="${run_args} --lib $lib"
+
+if [ "$dalvik_mode" = "no" ]; then
+    if [ "$target_mode" = "no" ]; then
+        run_args="${run_args} --boot -Ximage:${ANDROID_HOST_OUT}/framework/core.art"
+    else
+        run_args="${run_args} --boot -Ximage:/data/art-test/core.art"
+    fi
+else
+    if [ "$target_mode" = "no" ]; then
+        framework="${OUT}/system/framework"
+        bpath="${framework}/core.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/core-junit.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
+        run_args="${run_args} --boot -Xbootclasspath:${bpath}"
+    else
+        true # defaults to using target BOOTCLASSPATH
+    fi
+fi
+
 if [ "$dev_mode" = "yes" -a "$update_mode" = "yes" ]; then
     echo "--dev and --update are mutually exclusive" 1>&2
     usage="yes"
@@ -185,6 +224,7 @@
         echo "                   other runtime options are ignored."
         echo "    --host         Use the host-mode virtual machine."
         echo "    --invoke-with  Pass --invoke-with option to runtime."
+        echo "    --dalvik       Use Dalvik (off by default)."
         echo "    --jvm          Use a host-local RI virtual machine."
         echo "    --output-path [path] Location where to store the build" \
              "files."
@@ -237,7 +277,7 @@
     if [ "$build_exit" = '0' ]; then
         echo "${test_dir}: running..." 1>&2
         "./${run}" $run_args "$@" 2>&1
-	run_exit="$?"
+        run_exit="$?"
         echo "run exit status: $run_exit" 1>&2
         if [ "$run_exit" = "0" ]; then
             good="yes"