Cache field lowering info in mir_graph.

Change-Id: I9f9d76e3ae6c31e88bdf3f59820d31a625da020f
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 1286a8e..bd7c40b 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -23,6 +23,20 @@
 namespace art {
 
 /**
+ * @class CacheFieldLoweringInfo
+ * @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
+ */
+class CacheFieldLoweringInfo : public Pass {
+ public:
+  CacheFieldLoweringInfo() : Pass("CacheFieldLoweringInfo", kNoNodes) {
+  }
+
+  void Start(CompilationUnit* cUnit) const {
+    cUnit->mir_graph->DoCacheFieldLoweringInfo();
+  }
+};
+
+/**
  * @class CodeLayout
  * @brief Perform the code layout pass.
  */
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ff8fea0..b9f9437 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -208,21 +208,21 @@
     return;
   }
   uint32_t field_idx = inst->VRegC_22c();
-  int field_offset;
+  MemberOffset field_offset(0u);
   bool is_volatile;
   bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
                                                     &field_offset, &is_volatile);
-  if (fast_path && !is_volatile && IsUint(16, field_offset)) {
+  if (fast_path && !is_volatile && IsUint(16, field_offset.Int32Value())) {
     VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
                    << " to " << Instruction::Name(new_opcode)
                    << " by replacing field index " << field_idx
-                   << " by field offset " << field_offset
+                   << " by field offset " << field_offset.Int32Value()
                    << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
                    << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
     // We are modifying 4 consecutive bytes.
     inst->SetOpcode(new_opcode);
     // Replace field index by field offset.
-    inst->SetVRegC_22c(static_cast<uint16_t>(field_offset));
+    inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
   }
 }
 
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a3ea034..61c6767 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -16,6 +16,7 @@
 
 #include "local_value_numbering.h"
 
+#include "mir_field_info.h"
 #include "mir_graph.h"
 
 namespace art {
@@ -534,16 +535,24 @@
     case Instruction::IGET_BYTE:
     case Instruction::IGET_CHAR:
     case Instruction::IGET_SHORT: {
+        uint16_t type = opcode - Instruction::IGET;
         uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
         HandleNullCheck(mir, base);
+        const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
         uint16_t memory_version;
         uint16_t field_id;
-        // TODO: all gets treated as volatile.
-        // Volatile fields always get a new memory version; field id is irrelevant.
-        // Unresolved fields are always marked as volatile and handled the same way here.
-        field_id = 0u;
-        memory_version = next_memory_version_;
-        ++next_memory_version_;
+        if (!field_info.IsResolved() || field_info.IsVolatile()) {
+          // Volatile fields always get a new memory version; field id is irrelevant.
+          // Unresolved fields may be volatile, so handle them as such to be safe.
+          field_id = 0u;
+          memory_version = next_memory_version_;
+          ++next_memory_version_;
+        } else {
+          DCHECK(field_info.IsResolved());
+          field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+          memory_version = std::max(unresolved_ifield_version_[type],
+                                    GetMemoryVersion(base, field_id, type));
+        }
         if (opcode == Instruction::IGET_WIDE) {
           res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
           SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -567,10 +576,18 @@
         int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
         uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
         HandleNullCheck(mir, base);
-        // TODO: all puts treated as unresolved.
-        // Unresolved fields always alias with everything of the same type.
-        unresolved_ifield_version_[type] = next_memory_version_;
-        ++next_memory_version_;
+        const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+        if (!field_info.IsResolved()) {
+          // Unresolved fields always alias with everything of the same type.
+          unresolved_ifield_version_[type] = next_memory_version_;
+          ++next_memory_version_;
+        } else if (field_info.IsVolatile()) {
+          // Nothing to do, resolved volatile fields always get a new memory version anyway and
+          // can't alias with resolved non-volatile fields.
+        } else {
+          AdvanceMemoryVersion(base, GetFieldId(field_info.DeclaringDexFile(),
+                                                field_info.DeclaringFieldIndex()), type);
+        }
       }
       break;
 
@@ -581,14 +598,22 @@
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
     case Instruction::SGET_SHORT: {
+        uint16_t type = opcode - Instruction::SGET;
+        const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
         uint16_t memory_version;
         uint16_t field_id;
-        // TODO: all gets treated as volatile.
-        // Volatile fields always get a new memory version; field id is irrelevant.
-        // Unresolved fields are always marked as volatile and handled the same way here.
-        field_id = 0u;
-        memory_version = next_memory_version_;
-        ++next_memory_version_;
+        if (!field_info.IsResolved() || field_info.IsVolatile()) {
+          // Volatile fields always get a new memory version; field id is irrelevant.
+          // Unresolved fields may be volatile, so handle them as such to be safe.
+          field_id = 0u;
+          memory_version = next_memory_version_;
+          ++next_memory_version_;
+        } else {
+          DCHECK(field_info.IsResolved());
+          field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+          memory_version = std::max(unresolved_sfield_version_[type],
+                                    GetMemoryVersion(NO_VALUE, field_id, type));
+        }
         if (opcode == Instruction::SGET_WIDE) {
           res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
           SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -609,10 +634,18 @@
     case Instruction::SPUT_CHAR:
     case Instruction::SPUT_SHORT: {
         uint16_t type = opcode - Instruction::SPUT;
-        // TODO: all puts treated as unresolved.
-        // Unresolved fields always alias with everything of the same type.
-        unresolved_sfield_version_[type] = next_memory_version_;
-        ++next_memory_version_;
+        const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+        if (!field_info.IsResolved()) {
+          // Unresolved fields always alias with everything of the same type.
+          unresolved_sfield_version_[type] = next_memory_version_;
+          ++next_memory_version_;
+        } else if (field_info.IsVolatile()) {
+          // Nothing to do, resolved volatile fields always get a new memory version anyway and
+          // can't alias with resolved non-volatile fields.
+        } else {
+          AdvanceMemoryVersion(NO_VALUE, GetFieldId(field_info.DeclaringDexFile(),
+                                                    field_info.DeclaringFieldIndex()), type);
+        }
       }
       break;
   }
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 6ab6c51..4599612 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -44,7 +44,7 @@
 
     Instruction::Code opcode;
     int64_t value;
-    uint32_t field_annotation;
+    uint32_t field_info;
     size_t num_uses;
     int32_t uses[kMaxSsaUses];
     size_t num_defs;
@@ -55,28 +55,41 @@
     { opcode, value, 0u, 0, { }, 1, { reg } }
 #define DEF_CONST_WIDE(opcode, reg, value) \
     { opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_IGET(opcode, reg, obj, field_annotation) \
-    { opcode, 0u, field_annotation, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(opcode, reg, obj, field_annotation) \
-    { opcode, 0u, field_annotation, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(opcode, reg, obj, field_annotation) \
-    { opcode, 0u, field_annotation, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(opcode, reg, obj, field_annotation) \
-    { opcode, 0u, field_annotation, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(opcode, reg, field_annotation) \
-    { opcode, 0u, field_annotation, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(opcode, reg, field_annotation) \
-    { opcode, 0u, field_annotation, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(opcode, reg, field_annotation) \
-    { opcode, 0u, field_annotation, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(opcode, reg, field_annotation) \
-    { opcode, 0u, field_annotation, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_IGET(opcode, reg, obj, field_info) \
+    { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
+    { opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(opcode, reg, obj, field_info) \
+    { opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(opcode, reg, obj, field_info) \
+    { opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(opcode, reg, field_info) \
+    { opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(opcode, reg, field_info) \
+    { opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(opcode, reg, field_info) \
+    { opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(opcode, reg, field_info) \
+    { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
 #define DEF_INVOKE1(opcode, reg) \
     { opcode, 0u, 0u, 1, { reg }, 0, { } }
 #define DEF_UNIQUE_REF(opcode, reg) \
     { opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
 
   void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+    cu_.mir_graph->ifield_lowering_infos_.Reset();
+    cu_.mir_graph->ifield_lowering_infos_.Resize(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const IFieldDef* def = &defs[i];
+      MirIFieldLoweringInfo field_info(def->field_idx);
+      if (def->declaring_dex_file != 0u) {
+        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+        field_info.declaring_field_idx_ = def->declaring_field_idx;
+        field_info.flags_ = 0u |  // Without kFlagIsStatic.
+            (def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
+      }
+      cu_.mir_graph->ifield_lowering_infos_.Insert(field_info);
+    }
   }
 
   template <size_t count>
@@ -85,6 +98,19 @@
   }
 
   void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+    cu_.mir_graph->sfield_lowering_infos_.Reset();
+    cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const SFieldDef* def = &defs[i];
+      MirSFieldLoweringInfo field_info(def->field_idx);
+      if (def->declaring_dex_file != 0u) {
+        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+        field_info.declaring_field_idx_ = def->declaring_field_idx;
+        field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
+            (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
+      }
+      cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+    }
   }
 
   template <size_t count>
@@ -102,6 +128,13 @@
       mir->dalvikInsn.opcode = def->opcode;
       mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
       mir->dalvikInsn.vB_wide = def->value;
+      if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.Size());
+        mir->meta.ifield_lowering_info = def->field_info;
+      } else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
+        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+        mir->meta.sfield_lowering_info = def->field_info;
+      }
       mir->ssa_rep = &ssa_reps_[i];
       mir->ssa_rep->num_uses = def->num_uses;
       mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
@@ -146,7 +179,6 @@
   LocalValueNumbering lvn_;
 };
 
-#if 0  // TODO: re-enable when LVN is handling memory igets.
 TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false }
@@ -169,7 +201,6 @@
   EXPECT_EQ(mirs_[2].optimization_flags, 0u);
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
-#endif
 
 TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
   static const IFieldDef ifields[] = {
@@ -197,7 +228,6 @@
   EXPECT_EQ(mirs_[4].optimization_flags, 0u);
 }
 
-#if 0  // TODO: re-enable when LVN is handling memory igets.
 TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
@@ -218,9 +248,7 @@
   EXPECT_EQ(mirs_[2].optimization_flags, 0u);
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
-#endif
 
-#if 0  // TODO: re-enable when LVN is handling memory igets.
 TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
@@ -241,9 +269,7 @@
   EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
-#endif
 
-#if 0  // TODO: re-enable when LVN is handling memory igets.
 TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
@@ -267,7 +293,6 @@
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
   EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
-#endif
 
 TEST_F(LocalValueNumberingTest, TestVolatile) {
   static const IFieldDef ifields[] = {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 8ef80fa..d159f49 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -14,11 +14,15 @@
  * limitations under the License.
  */
 
+#include <algorithm>
 #include "compiler_internals.h"
 #include "dataflow_iterator-inl.h"
+#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
 #include "dex/quick/dex_file_method_inliner.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "driver/compiler_options.h"
+#include "UniquePtr.h"
 
 namespace art {
 
@@ -1090,4 +1094,109 @@
   return ComputeSkipCompilation(&stats, skip_compilation);
 }
 
+void MIRGraph::DoCacheFieldLoweringInfo() {
+  // Try to use stack-allocated array, resort to heap if we exceed the initial size.
+  static constexpr size_t kInitialSize = 32;
+  uint16_t stack_idxs[kInitialSize];
+  UniquePtr<uint16_t[]> allocated_idxs;
+  uint16_t* field_idxs = stack_idxs;
+  size_t size = kInitialSize;
+
+  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
+  size_t ifield_pos = 0u;
+  size_t sfield_pos = size;
+  AllNodesIterator iter(this);
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+    if (bb->block_type != kDalvikByteCode) {
+      continue;
+    }
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      if (mir->dalvikInsn.opcode >= Instruction::IGET &&
+          mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+        bool need_alloc = false;
+        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
+        uint16_t field_idx;
+        // Get field index and try to find it among existing indexes. If found, it's usually among
+        // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
+        // is a linear search, it actually performs much better than map based approach.
+        if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+          field_idx = insn->VRegC_22c();
+          size_t i = ifield_pos;
+          while (i != 0u && field_idxs[i - 1] != field_idx) {
+            --i;
+          }
+          if (i != 0u) {
+            mir->meta.ifield_lowering_info = i - 1;
+          } else {
+            mir->meta.ifield_lowering_info = ifield_pos;
+            if (UNLIKELY(ifield_pos == sfield_pos)) {
+              need_alloc = true;
+            } else {
+              field_idxs[ifield_pos++] = field_idx;
+            }
+          }
+        } else {
+          field_idx = insn->VRegB_21c();
+          size_t i = sfield_pos;
+          while (i != size && field_idxs[i] != field_idx) {
+            ++i;
+          }
+          if (i != size) {
+            mir->meta.sfield_lowering_info = size - i - 1u;
+          } else {
+            mir->meta.sfield_lowering_info = size - sfield_pos;
+            if (UNLIKELY(ifield_pos == sfield_pos)) {
+              need_alloc = true;
+            } else {
+              field_idxs[--sfield_pos] = field_idx;
+            }
+          }
+        }
+        if (UNLIKELY(need_alloc)) {
+          DCHECK(field_idxs == stack_idxs);
+          // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
+          uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
+          allocated_idxs.reset(new uint16_t[max_refs]);
+          field_idxs = allocated_idxs.get();
+          size_t sfield_count = size - sfield_pos;
+          sfield_pos = max_refs - sfield_count;
+          size = max_refs;
+          memcpy(field_idxs, stack_idxs, ifield_pos * sizeof(field_idxs[0]));
+          memcpy(field_idxs + sfield_pos, stack_idxs + ifield_pos,
+                 sfield_count * sizeof(field_idxs[0]));
+          if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+            field_idxs[ifield_pos++] = field_idx;
+          } else {
+            field_idxs[--sfield_pos] = field_idx;
+          }
+        }
+        DCHECK_LE(ifield_pos, sfield_pos);
+      }
+    }
+  }
+
+  if (ifield_pos != 0u) {
+    // Resolve instance field infos.
+    DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
+    ifield_lowering_infos_.Resize(ifield_pos);
+    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
+      ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
+    }
+    MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+                                ifield_lowering_infos_.GetRawStorage(), ifield_pos);
+  }
+
+  if (sfield_pos != size) {
+    // Resolve static field infos.
+    DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
+    sfield_lowering_infos_.Resize(size - sfield_pos);
+    for (size_t pos = size; pos != sfield_pos;) {
+      --pos;
+      sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
+    }
+    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+                                sfield_lowering_infos_.GetRawStorage(), size - sfield_pos);
+  }
+}
+
 }  // namespace art
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
new file mode 100644
index 0000000..3c76130
--- /dev/null
+++ b/compiler/dex/mir_field_info.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_field_info.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_driver-inl.h"
+#include "mirror/class_loader.h"  // Only to allow casts in SirtRef<ClassLoader>.
+#include "mirror/dex_cache.h"     // Only to allow casts in SirtRef<DexCache>.
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+
+namespace art {
+
+void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+                                    const DexCompilationUnit* mUnit,
+                                    MirIFieldLoweringInfo* field_infos, size_t count) {
+  if (kIsDebugBuild) {
+    DCHECK(field_infos != nullptr);
+    DCHECK_NE(count, 0u);
+    for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+      MirIFieldLoweringInfo unresolved(it->field_idx_);
+      DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+    }
+  }
+
+  // We're going to resolve fields and check access in a tight loop. It's better to hold
+  // the lock and needed references once than re-acquiring them again and again.
+  ScopedObjectAccess soa(Thread::Current());
+  SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+  SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+      compiler_driver->GetClassLoader(soa, mUnit));
+  SirtRef<mirror::Class> referrer_class(soa.Self(),
+      compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+  // Even if the referrer class is unresolved (i.e. we're compiling a method without class
+  // definition) we still want to resolve fields and record all available info.
+
+  for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+    uint32_t field_idx = it->field_idx_;
+    mirror::ArtField* resolved_field =
+        compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
+    if (UNLIKELY(resolved_field == nullptr)) {
+      continue;
+    }
+    compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+        &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+    bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
+
+    std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
+        dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_);
+    it->flags_ = 0u |  // Without kFlagIsStatic.
+        (is_volatile ? kFlagIsVolatile : 0u) |
+        (fast_path.first ? kFlagFastGet : 0u) |
+        (fast_path.second ? kFlagFastPut : 0u);
+  }
+}
+
+void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+                                    const DexCompilationUnit* mUnit,
+                                    MirSFieldLoweringInfo* field_infos, size_t count) {
+  if (kIsDebugBuild) {
+    DCHECK(field_infos != nullptr);
+    DCHECK_NE(count, 0u);
+    for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+      MirSFieldLoweringInfo unresolved(it->field_idx_);
+      DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+    }
+  }
+
+  // We're going to resolve fields and check access in a tight loop. It's better to hold
+  // the lock and needed references once than re-acquiring them again and again.
+  ScopedObjectAccess soa(Thread::Current());
+  SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+  SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+      compiler_driver->GetClassLoader(soa, mUnit));
+  SirtRef<mirror::Class> referrer_class(soa.Self(),
+      compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+  // Even if the referrer class is unresolved (i.e. we're compiling a method without class
+  // definition) we still want to resolve fields and record all available info.
+
+  for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+    uint32_t field_idx = it->field_idx_;
+    mirror::ArtField* resolved_field =
+        compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, true);
+    if (UNLIKELY(resolved_field == nullptr)) {
+      continue;
+    }
+    compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+        &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+    bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field) ? 1u : 0u;
+
+    bool is_referrers_class, is_initialized;
+    std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
+        dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_,
+        &it->storage_index_, &is_referrers_class, &is_initialized);
+    it->flags_ = kFlagIsStatic |
+        (is_volatile ? kFlagIsVolatile : 0u) |
+        (fast_path.first ? kFlagFastGet : 0u) |
+        (fast_path.second ? kFlagFastPut : 0u) |
+        (is_referrers_class ? kFlagIsReferrersClass : 0u) |
+        (is_initialized ? kFlagIsInitialized : 0u);
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
new file mode 100644
index 0000000..41cb4ce
--- /dev/null
+++ b/compiler/dex/mir_field_info.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+#define ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "offsets.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexCompilationUnit;
+
+/*
+ * Field info is calculated from the perspective of the compilation unit that accesses
+ * the field and stored in that unit's MIRGraph. Therefore it does not need to reference the
+ * dex file or method for which it has been calculated. However, we do store the declaring
+ * field index, class index and dex file of the resolved field to help distinguish between fields.
+ */
+
+class MirFieldInfo {
+ public:
+  uint16_t FieldIndex() const {
+    return field_idx_;
+  }
+
+  bool IsStatic() const {
+    return (flags_ & kFlagIsStatic) != 0u;
+  }
+
+  bool IsResolved() const {
+    return declaring_dex_file_ != nullptr;
+  }
+
+  const DexFile* DeclaringDexFile() const {
+    return declaring_dex_file_;
+  }
+
+  uint16_t DeclaringClassIndex() const {
+    return declaring_class_idx_;
+  }
+
+  uint16_t DeclaringFieldIndex() const {
+    return declaring_field_idx_;
+  }
+
+  bool IsVolatile() const {
+    return (flags_ & kFlagIsVolatile) != 0u;
+  }
+
+ protected:
+  enum {
+    kBitIsStatic = 0,
+    kBitIsVolatile,
+    kFieldInfoBitEnd
+  };
+  static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
+  static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+
+  MirFieldInfo(uint16_t field_idx, uint16_t flags)
+      : field_idx_(field_idx),
+        flags_(flags),
+        declaring_field_idx_(0u),
+        declaring_class_idx_(0u),
+        declaring_dex_file_(nullptr) {
+  }
+
+  // Make copy-ctor/assign/dtor protected to avoid slicing.
+  MirFieldInfo(const MirFieldInfo& other) = default;
+  MirFieldInfo& operator=(const MirFieldInfo& other) = default;
+  ~MirFieldInfo() = default;
+
+  // The field index in the compiling method's dex file.
+  uint16_t field_idx_;
+  // Flags, for volatility and derived class data.
+  uint16_t flags_;
+  // The field index in the dex file that defines the field, 0 if unresolved.
+  uint16_t declaring_field_idx_;
+  // The type index of the class declaring the field, 0 if unresolved.
+  uint16_t declaring_class_idx_;
+  // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+  const DexFile* declaring_dex_file_;
+};
+
+class MirIFieldLoweringInfo : public MirFieldInfo {
+ public:
+  // For each requested instance field retrieve the field's declaring location (dex file, class
+  // index and field index) and volatility and compute the whether we can fast path the access
+  // with IGET/IPUT. For fast path fields, retrieve the field offset.
+  static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+                      MirIFieldLoweringInfo* field_infos, size_t count)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Construct an unresolved instance field lowering info.
+  explicit MirIFieldLoweringInfo(uint16_t field_idx)
+      : MirFieldInfo(field_idx, kFlagIsVolatile),  // Without kFlagIsStatic.
+        field_offset_(0u) {
+  }
+
+  bool FastGet() const {
+    return (flags_ & kFlagFastGet) != 0u;
+  }
+
+  bool FastPut() const {
+    return (flags_ & kFlagFastPut) != 0u;
+  }
+
+  MemberOffset FieldOffset() const {
+    return field_offset_;
+  }
+
+ private:
+  enum {
+    kBitFastGet = kFieldInfoBitEnd,
+    kBitFastPut,
+    kIFieldLoweringInfoBitEnd
+  };
+  COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);
+  static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+  static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+
+  // The member offset of the field, 0u if unresolved.
+  MemberOffset field_offset_;
+
+  friend class LocalValueNumberingTest;
+};
+
+class MirSFieldLoweringInfo : public MirFieldInfo {
+ public:
+  // For each requested static field retrieve the field's declaring location (dex file, class
+  // index and field index) and volatility and compute the whether we can fast path the access with
+  // IGET/IPUT. For fast path fields (at least for IGET), retrieve the information needed for
+  // the field access, i.e. the field offset, whether the field is in the same class as the
+  // method being compiled, whether the declaring class can be safely assumed to be initialized
+  // and the type index of the declaring class in the compiled method's dex file.
+  static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+                      MirSFieldLoweringInfo* field_infos, size_t count)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+  // Construct an unresolved static field lowering info.
+  explicit MirSFieldLoweringInfo(uint16_t field_idx)
+      : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic),
+        field_offset_(0u),
+        storage_index_(DexFile::kDexNoIndex) {
+  }
+
+  bool FastGet() const {
+    return (flags_ & kFlagFastGet) != 0u;
+  }
+
+  bool FastPut() const {
+    return (flags_ & kFlagFastPut) != 0u;
+  }
+
+  bool IsReferrersClass() const {
+    return (flags_ & kFlagIsReferrersClass) != 0u;
+  }
+
+  bool IsInitialized() const {
+    return (flags_ & kFlagIsInitialized) != 0u;
+  }
+
+  MemberOffset FieldOffset() const {
+    return field_offset_;
+  }
+
+  uint32_t StorageIndex() const {
+    return storage_index_;
+  }
+
+ private:
+  enum {
+    kBitFastGet = kFieldInfoBitEnd,
+    kBitFastPut,
+    kBitIsReferrersClass,
+    kBitIsInitialized,
+    kSFieldLoweringInfoBitEnd
+  };
+  COMPILE_ASSERT(kSFieldLoweringInfoBitEnd <= 16, too_many_flags);
+  static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+  static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+  static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
+  static constexpr uint16_t kFlagIsInitialized = 1u << kBitIsInitialized;
+
+  // The member offset of the field, 0u if unresolved.
+  MemberOffset field_offset_;
+  // The type index of the declaring class in the compiling method's dex file,
+  // -1 if the field is unresolved or there's no appropriate TypeId in that dex file.
+  uint32_t storage_index_;
+
+  friend class LocalValueNumberingTest;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_MIR_FIELD_INFO_H_
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 2bfc154..46e854f 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -87,7 +87,9 @@
       compiler_temps_(arena, 6, kGrowableArrayMisc),
       num_non_special_compiler_temps_(0),
       max_available_non_special_compiler_temps_(0),
-      punt_to_interpreter_(false) {
+      punt_to_interpreter_(false),
+      ifield_lowering_infos_(arena, 0u),
+      sfield_lowering_infos_(arena, 0u) {
   try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
   max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
       - std::abs(static_cast<int>(kVRegTempBaseReg));
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 28e9470..d4aafbc 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -20,6 +20,8 @@
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "compiler_ir.h"
+#include "mir_field_info.h"
+#include "invoke_type.h"
 #include "utils/arena_bit_vector.h"
 #include "utils/growable_array.h"
 
@@ -258,6 +260,12 @@
     MIR* throw_insn;
     // Fused cmp branch condition.
     ConditionCode ccode;
+    // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to limit on
+    // the number of code points (64K) and size of IGET/IPUT insn (2), this will never exceed 32K.
+    uint32_t ifield_lowering_info;
+    // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
+    // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
+    uint32_t sfield_lowering_info;
   } meta;
 };
 
@@ -466,6 +474,18 @@
    */
   void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
 
+  void DoCacheFieldLoweringInfo();
+
+  const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) {
+    DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.Size());
+    return ifield_lowering_infos_.GetRawStorage()[mir->meta.ifield_lowering_info];
+  }
+
+  const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) {
+    DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.Size());
+    return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
+  }
+
   void InitRegLocations();
 
   void RemapRegLocations();
@@ -923,6 +943,8 @@
   size_t max_available_non_special_compiler_temps_;
   size_t max_available_special_compiler_temps_;
   bool punt_to_interpreter_;                    // Difficult or not worthwhile - just interpret.
+  GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
+  GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
 
   friend class LocalValueNumberingTest;
 };
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index b60f296..256bcb1 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -91,6 +91,7 @@
    *   - This is not yet an issue: no current pass would require it.
    */
   static const Pass* const passes[] = {
+      GetPassInstance<CacheFieldLoweringInfo>(),
       GetPassInstance<CodeLayout>(),
       GetPassInstance<SSATransformation>(),
       GetPassInstance<ConstantPropagation>(),
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index b0b8d1e..db7bdc8 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -78,11 +78,6 @@
   DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
 }
 
-bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
-  return cu_->compiler_driver->ComputeInstanceFieldInfo(
-      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
-}
-
 /* Remove a LIR from the list. */
 void Mir2Lir::UnlinkLIR(LIR* lir) {
   if (UNLIKELY(lir == first_lir_insn_)) {
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 0533fbf..49e3c6f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -381,20 +381,14 @@
   const int r_base_;
 };
 
-void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
+void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                       bool is_object) {
-  int field_offset;
-  int storage_index;
-  bool is_volatile;
-  bool is_referrers_class;
-  bool is_initialized;
-  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
-      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
-      &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
-  if (fast_path && !SLOW_FIELD_PATH) {
-    DCHECK_GE(field_offset, 0);
+  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
+  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
+    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     int r_base;
-    if (is_referrers_class) {
+    if (field_info.IsReferrersClass()) {
       // Fast path, static storage base is this method's class
       RegLocation rl_method  = LoadCurrMethod();
       r_base = AllocTemp();
@@ -407,7 +401,7 @@
       // Medium path, static storage base in a different class which requires checks that the other
       // class is initialized.
       // TODO: remove initialized check now that we are initializing classes in the compiler driver.
-      DCHECK_GE(storage_index, 0);
+      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
       // May do runtime call so everything to home locations.
       FlushAllRegs();
       // Using fixed register to sync with possible call to runtime support.
@@ -420,9 +414,9 @@
                    mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                    r_base);
       LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
-                   sizeof(int32_t*) * storage_index, r_base);
+                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
       // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
-      if (!is_initialized) {
+      if (!field_info.IsInitialized()) {
         // Check if r_base is NULL or a not yet initialized class.
 
         // The slow path is invoked if the r_base is NULL or the class pointed
@@ -437,7 +431,7 @@
 
         AddSlowPath(new (arena_) StaticFieldSlowPath(this,
                                                      unresolved_branch, uninit_branch, cont,
-                                                     storage_index, r_base));
+                                                     field_info.StorageIndex(), r_base));
 
         FreeTemp(r_tmp);
       }
@@ -449,16 +443,16 @@
     } else {
       rl_src = LoadValue(rl_src, kAnyReg);
     }
-    if (is_volatile) {
+    if (field_info.IsVolatile()) {
       GenMemBarrier(kStoreStore);
     }
     if (is_long_or_double) {
-      StoreBaseDispWide(r_base, field_offset, rl_src.low_reg,
+      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.low_reg,
                         rl_src.high_reg);
     } else {
-      StoreWordDisp(r_base, field_offset, rl_src.low_reg);
+      StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.low_reg);
     }
-    if (is_volatile) {
+    if (field_info.IsVolatile()) {
       GenMemBarrier(kStoreLoad);
     }
     if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
@@ -471,24 +465,18 @@
         is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
                           : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
                                        : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
-    CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
+    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
   }
 }
 
-void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
+void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                       bool is_long_or_double, bool is_object) {
-  int field_offset;
-  int storage_index;
-  bool is_volatile;
-  bool is_referrers_class;
-  bool is_initialized;
-  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
-      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
-      &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
-  if (fast_path && !SLOW_FIELD_PATH) {
-    DCHECK_GE(field_offset, 0);
+  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
+  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
+    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     int r_base;
-    if (is_referrers_class) {
+    if (field_info.IsReferrersClass()) {
       // Fast path, static storage base is this method's class
       RegLocation rl_method  = LoadCurrMethod();
       r_base = AllocTemp();
@@ -497,7 +485,7 @@
     } else {
       // Medium path, static storage base in a different class which requires checks that the other
       // class is initialized
-      DCHECK_GE(storage_index, 0);
+      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
       // May do runtime call so everything to home locations.
       FlushAllRegs();
       // Using fixed register to sync with possible call to runtime support.
@@ -510,9 +498,9 @@
                    mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                    r_base);
       LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
-                   sizeof(int32_t*) * storage_index, r_base);
+                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
       // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
-      if (!is_initialized) {
+      if (!field_info.IsInitialized()) {
         // Check if r_base is NULL or a not yet initialized class.
 
         // The slow path is invoked if the r_base is NULL or the class pointed
@@ -527,7 +515,7 @@
 
         AddSlowPath(new (arena_) StaticFieldSlowPath(this,
                                                      unresolved_branch, uninit_branch, cont,
-                                                     storage_index, r_base));
+                                                     field_info.StorageIndex(), r_base));
 
         FreeTemp(r_tmp);
       }
@@ -535,14 +523,14 @@
     }
     // r_base now holds static storage base
     RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
-    if (is_volatile) {
+    if (field_info.IsVolatile()) {
       GenMemBarrier(kLoadLoad);
     }
     if (is_long_or_double) {
-      LoadBaseDispWide(r_base, field_offset, rl_result.low_reg,
+      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.low_reg,
                        rl_result.high_reg, INVALID_SREG);
     } else {
-      LoadWordDisp(r_base, field_offset, rl_result.low_reg);
+      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.low_reg);
     }
     FreeTemp(r_base);
     if (is_long_or_double) {
@@ -556,7 +544,7 @@
         is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
                           :(is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
-    CallRuntimeHelperImm(getterOffset, field_idx, true);
+    CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(rl_dest.fp);
       StoreValueWide(rl_dest, rl_result);
@@ -698,18 +686,15 @@
   }
 }
 
-void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                       RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                       bool is_object) {
-  int field_offset;
-  bool is_volatile;
-
-  bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
-
-  if (fast_path && !SLOW_FIELD_PATH) {
+  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
+  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
     RegLocation rl_result;
     RegisterClass reg_class = oat_reg_class_by_size(size);
-    DCHECK_GE(field_offset, 0);
+    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kCoreReg);
     if (is_long_or_double) {
       DCHECK(rl_dest.wide);
@@ -717,17 +702,17 @@
       if (cu_->instruction_set == kX86) {
         rl_result = EvalLoc(rl_dest, reg_class, true);
         GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
-        LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
+        LoadBaseDispWide(rl_obj.low_reg, field_info.FieldOffset().Int32Value(), rl_result.low_reg,
                          rl_result.high_reg, rl_obj.s_reg_low);
-        if (is_volatile) {
+        if (field_info.IsVolatile()) {
           GenMemBarrier(kLoadLoad);
         }
       } else {
         int reg_ptr = AllocTemp();
-        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_info.FieldOffset().Int32Value());
         rl_result = EvalLoc(rl_dest, reg_class, true);
         LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
-        if (is_volatile) {
+        if (field_info.IsVolatile()) {
           GenMemBarrier(kLoadLoad);
         }
         FreeTemp(reg_ptr);
@@ -736,9 +721,9 @@
     } else {
       rl_result = EvalLoc(rl_dest, reg_class, true);
       GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
-      LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
+      LoadBaseDisp(rl_obj.low_reg, field_info.FieldOffset().Int32Value(), rl_result.low_reg,
                    kWord, rl_obj.s_reg_low);
-      if (is_volatile) {
+      if (field_info.IsVolatile()) {
         GenMemBarrier(kLoadLoad);
       }
       StoreValue(rl_dest, rl_result);
@@ -748,7 +733,7 @@
         is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
                           : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
                                        : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
-    CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
+    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(rl_dest.fp);
       StoreValueWide(rl_dest, rl_result);
@@ -759,39 +744,37 @@
   }
 }
 
-void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                       RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                       bool is_object) {
-  int field_offset;
-  bool is_volatile;
-
-  bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile);
-  if (fast_path && !SLOW_FIELD_PATH) {
+  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
+  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
     RegisterClass reg_class = oat_reg_class_by_size(size);
-    DCHECK_GE(field_offset, 0);
+    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kCoreReg);
     if (is_long_or_double) {
       int reg_ptr;
       rl_src = LoadValueWide(rl_src, kAnyReg);
       GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
       reg_ptr = AllocTemp();
-      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
-      if (is_volatile) {
+      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_info.FieldOffset().Int32Value());
+      if (field_info.IsVolatile()) {
         GenMemBarrier(kStoreStore);
       }
       StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
-      if (is_volatile) {
+      if (field_info.IsVolatile()) {
         GenMemBarrier(kLoadLoad);
       }
       FreeTemp(reg_ptr);
     } else {
       rl_src = LoadValue(rl_src, reg_class);
       GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
-      if (is_volatile) {
+      if (field_info.IsVolatile()) {
         GenMemBarrier(kStoreStore);
       }
-      StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
-      if (is_volatile) {
+      StoreBaseDisp(rl_obj.low_reg, field_info.FieldOffset().Int32Value(), rl_src.low_reg, kWord);
+      if (field_info.IsVolatile()) {
         GenMemBarrier(kLoadLoad);
       }
       if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
@@ -803,7 +786,8 @@
         is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
                           : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
                                        : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
-    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
+    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
+                                               rl_obj, rl_src, true);
   }
 }
 
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 8c2ed36..00518bd 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -596,72 +596,72 @@
       break;
 
     case Instruction::IGET_OBJECT:
-      GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+      GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
       break;
 
     case Instruction::IGET_WIDE:
-      GenIGet(vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+      GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
       break;
 
     case Instruction::IGET:
-      GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_CHAR:
-      GenIGet(vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_SHORT:
-      GenIGet(vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_BOOLEAN:
     case Instruction::IGET_BYTE:
-      GenIGet(vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IPUT_WIDE:
-      GenIPut(vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+      GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
       break;
 
     case Instruction::IPUT_OBJECT:
-      GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+      GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
       break;
 
     case Instruction::IPUT:
-      GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_BOOLEAN:
     case Instruction::IPUT_BYTE:
-      GenIPut(vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_CHAR:
-      GenIPut(vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_SHORT:
-      GenIPut(vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::SGET_OBJECT:
-      GenSget(vB, rl_dest, false, true);
+      GenSget(mir, rl_dest, false, true);
       break;
     case Instruction::SGET:
     case Instruction::SGET_BOOLEAN:
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
     case Instruction::SGET_SHORT:
-      GenSget(vB, rl_dest, false, false);
+      GenSget(mir, rl_dest, false, false);
       break;
 
     case Instruction::SGET_WIDE:
-      GenSget(vB, rl_dest, true, false);
+      GenSget(mir, rl_dest, true, false);
       break;
 
     case Instruction::SPUT_OBJECT:
-      GenSput(vB, rl_src[0], false, true);
+      GenSput(mir, rl_src[0], false, true);
       break;
 
     case Instruction::SPUT:
@@ -669,11 +669,11 @@
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
     case Instruction::SPUT_SHORT:
-      GenSput(vB, rl_src[0], false, false);
+      GenSput(mir, rl_src[0], false, false);
       break;
 
     case Instruction::SPUT_WIDE:
-      GenSput(vB, rl_src[0], true, false);
+      GenSput(mir, rl_src[0], true, false);
       break;
 
     case Instruction::INVOKE_STATIC_RANGE:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 8f199f8..e230c9d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -395,7 +395,6 @@
     virtual void Materialize();
     virtual CompiledMethod* GetCompiledMethod();
     void MarkSafepointPC(LIR* inst);
-    bool FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile);
     void SetupResourceMasks(LIR* lir);
     void SetMemRefType(LIR* lir, bool is_load, int mem_type);
     void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
@@ -558,13 +557,13 @@
     void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                      RegLocation rl_src);
     void GenFilledNewArray(CallInfo* info);
-    void GenSput(uint32_t field_idx, RegLocation rl_src,
+    void GenSput(MIR* mir, RegLocation rl_src,
                  bool is_long_or_double, bool is_object);
-    void GenSget(uint32_t field_idx, RegLocation rl_dest,
+    void GenSget(MIR* mir, RegLocation rl_dest,
                  bool is_long_or_double, bool is_object);
-    void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+    void GenIGet(MIR* mir, int opt_flags, OpSize size,
                  RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
-    void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+    void GenIPut(MIR* mir, int opt_flags, OpSize size,
                  RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
     void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                         RegLocation rl_src);