Global Value Numbering.

Implement Global Value Numbering for optimization purposes.
Use it for null check and range check elimination, as the
LVN used to do.

The order of evaluation of basic blocks needs improving as
we currently fail to recognize some obviously identical
values in methods with more than one loop. (There are three
disabled tests that check this. This is just a missed
optimization, not a correctness issue.)

Change-Id: I0d0ce16b2495b5a3b17ad1b2b32931cd69f5a25a
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 69a94a5..d5fd6fe 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -16,6 +16,7 @@
 
 #include "local_value_numbering.h"
 
+#include "global_value_numbering.h"
 #include "mir_field_info.h"
 #include "mir_graph.h"
 
@@ -24,89 +25,925 @@
 namespace {  // anonymous namespace
 
 // Operations used for value map keys instead of actual opcode.
-static constexpr uint16_t kInvokeMemoryVersionBumpOp = Instruction::INVOKE_DIRECT;
-static constexpr uint16_t kUnresolvedSFieldOp = Instruction::SPUT;
-static constexpr uint16_t kResolvedSFieldOp = Instruction::SGET;
-static constexpr uint16_t kUnresolvedIFieldOp = Instruction::IPUT;
-static constexpr uint16_t kNonAliasingIFieldOp = Instruction::IGET;
-static constexpr uint16_t kAliasingIFieldOp = Instruction::IGET_WIDE;
-static constexpr uint16_t kAliasingIFieldStartVersionOp = Instruction::IGET_WIDE;
-static constexpr uint16_t kAliasingIFieldBumpVersionOp = Instruction::IGET_OBJECT;
-static constexpr uint16_t kArrayAccessLocOp = Instruction::APUT;
+static constexpr uint16_t kInvokeMemoryVersionBumpOp = Instruction::INVOKE_VIRTUAL;
+static constexpr uint16_t kUnresolvedSFieldOp = Instruction::SGET;
+static constexpr uint16_t kResolvedSFieldOp = Instruction::SGET_WIDE;
+static constexpr uint16_t kUnresolvedIFieldOp = Instruction::IGET;
+static constexpr uint16_t kNonAliasingIFieldLocOp = Instruction::IGET_WIDE;
+static constexpr uint16_t kNonAliasingIFieldInitialOp = Instruction::IGET_OBJECT;
+static constexpr uint16_t kAliasingIFieldOp = Instruction::IGET_BOOLEAN;
+static constexpr uint16_t kAliasingIFieldStartVersionOp = Instruction::IGET_BYTE;
+static constexpr uint16_t kAliasingIFieldBumpVersionOp = Instruction::IGET_CHAR;
 static constexpr uint16_t kNonAliasingArrayOp = Instruction::AGET;
 static constexpr uint16_t kNonAliasingArrayStartVersionOp = Instruction::AGET_WIDE;
-static constexpr uint16_t kAliasingArrayOp = Instruction::AGET_OBJECT;
-static constexpr uint16_t kAliasingArrayMemoryVersionOp = Instruction::AGET_BOOLEAN;
-static constexpr uint16_t kAliasingArrayBumpVersionOp = Instruction::AGET_BYTE;
+static constexpr uint16_t kNonAliasingArrayBumpVersionOp = Instruction::AGET_OBJECT;
+static constexpr uint16_t kAliasingArrayOp = Instruction::AGET_BOOLEAN;
+static constexpr uint16_t kAliasingArrayStartVersionOp = Instruction::AGET_BYTE;
+static constexpr uint16_t kAliasingArrayBumpVersionOp = Instruction::AGET_CHAR;
+static constexpr uint16_t kMergeBlockMemoryVersionBumpOp = Instruction::INVOKE_VIRTUAL_RANGE;
+static constexpr uint16_t kMergeBlockAliasingIFieldVersionBumpOp = Instruction::IPUT;
+static constexpr uint16_t kMergeBlockAliasingIFieldMergeLocationOp = Instruction::IPUT_WIDE;
+static constexpr uint16_t kMergeBlockNonAliasingArrayVersionBumpOp = Instruction::APUT;
+static constexpr uint16_t kMergeBlockNonAliasingArrayMergeLocationOp = Instruction::APUT_WIDE;
+static constexpr uint16_t kMergeBlockAliasingArrayVersionBumpOp = Instruction::APUT_OBJECT;
+static constexpr uint16_t kMergeBlockAliasingArrayMergeLocationOp = Instruction::APUT_BOOLEAN;
+static constexpr uint16_t kMergeBlockNonAliasingIFieldVersionBumpOp = Instruction::APUT_BYTE;
+static constexpr uint16_t kMergeBlockSFieldVersionBumpOp = Instruction::APUT_CHAR;
 
 }  // anonymous namespace
 
-LocalValueNumbering::LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
-    : cu_(cu),
-      last_value_(0u),
-      sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      value_map_(std::less<uint64_t>(), allocator->Adapter()),
-      global_memory_version_(0u),
-      aliasing_ifield_version_map_(std::less<uint16_t>(), allocator->Adapter()),
-      non_aliasing_array_version_map_(std::less<uint16_t>(), allocator->Adapter()),
-      field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
-      non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
-      non_aliasing_ifields_(NonAliasingIFieldKeyComparator(), allocator->Adapter()),
-      escaped_array_refs_(EscapedArrayKeyComparator(), allocator->Adapter()),
-      range_checked_(RangeCheckKeyComparator() , allocator->Adapter()),
-      null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
-  std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
-  std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
-  std::fill_n(aliasing_array_version_, kFieldTypeCount, 0u);
+class LocalValueNumbering::AliasingIFieldVersions {
+ public:
+  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                     uint16_t field_id) {
+    uint16_t type = gvn->GetFieldType(field_id);
+    return gvn->LookupValue(kAliasingIFieldStartVersionOp, field_id,
+                            lvn->global_memory_version_, lvn->unresolved_ifield_version_[type]);
+  }
+
+  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
+                                    uint16_t store_ref_set_id, uint16_t stored_value) {
+    return gvn->LookupValue(kAliasingIFieldBumpVersionOp, old_version,
+                            store_ref_set_id, stored_value);
+  }
+
+  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
+                                    uint16_t field_id, uint16_t base, uint16_t memory_version) {
+    return gvn->LookupValue(kAliasingIFieldOp, field_id, base, memory_version);
+  }
+
+  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                   uint16_t field_id, uint16_t base) {
+    // If the base/field_id is non-aliasing in lvn, use the non-aliasing value.
+    uint16_t type = gvn->GetFieldType(field_id);
+    if (lvn->IsNonAliasingIField(base, field_id, type)) {
+      uint16_t loc = gvn->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
+      auto lb = lvn->non_aliasing_ifield_value_map_.find(loc);
+      return (lb != lvn->non_aliasing_ifield_value_map_.end())
+          ? lb->second
+          : gvn->LookupValue(kNonAliasingIFieldInitialOp, loc, kNoValue, kNoValue);
+    }
+    return AliasingValuesMergeGet<AliasingIFieldVersions>(
+        gvn, lvn, &lvn->aliasing_ifield_value_map_, field_id, base);
+  }
+
+  static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                uint16_t field_id) {
+    uint16_t type = gvn->GetFieldType(field_id);
+    return lvn->unresolved_ifield_version_[type] == lvn->merge_new_memory_version_ ||
+        lvn->global_memory_version_ == lvn->merge_new_memory_version_;
+  }
+
+  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
+                                        uint16_t field_id) {
+    return gvn->LookupValue(kMergeBlockAliasingIFieldVersionBumpOp, field_id, kNoValue, lvn_id);
+  }
+
+  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
+                                           uint16_t field_id, uint16_t base) {
+    return gvn->LookupValue(kMergeBlockAliasingIFieldMergeLocationOp, field_id, base, lvn_id);
+  }
+};
+
+class LocalValueNumbering::NonAliasingArrayVersions {
+ public:
+  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                     uint16_t array) {
+    return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
+  }
+
+  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
+                                    uint16_t store_ref_set_id, uint16_t stored_value) {
+    return gvn->LookupValue(kNonAliasingArrayBumpVersionOp, old_version,
+                            store_ref_set_id, stored_value);
+  }
+
+  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
+                                    uint16_t array, uint16_t index, uint16_t memory_version) {
+    return gvn->LookupValue(kNonAliasingArrayOp, array, index, memory_version);
+  }
+
+  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                   uint16_t array, uint16_t index) {
+    return AliasingValuesMergeGet<NonAliasingArrayVersions>(
+        gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
+  }
+
+  static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                uint16_t array) {
+    return false;  // Not affected by global_memory_version_.
+  }
+
+  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
+                                        uint16_t array) {
+    return gvn->LookupValue(kMergeBlockNonAliasingArrayVersionBumpOp, array, kNoValue, lvn_id);
+  }
+
+  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
+                                           uint16_t array, uint16_t index) {
+    return gvn->LookupValue(kMergeBlockNonAliasingArrayMergeLocationOp, array, index, lvn_id);
+  }
+};
+
+class LocalValueNumbering::AliasingArrayVersions {
+ public:
+  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                     uint16_t type) {
+    return gvn->LookupValue(kAliasingArrayStartVersionOp, type, lvn->global_memory_version_,
+                            kNoValue);
+  }
+
+  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
+                                    uint16_t store_ref_set_id, uint16_t stored_value) {
+    return gvn->LookupValue(kAliasingArrayBumpVersionOp, old_version,
+                            store_ref_set_id, stored_value);
+  }
+
+  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
+                                    uint16_t type, uint16_t location, uint16_t memory_version) {
+    return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
+  }
+
+  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                   uint16_t type, uint16_t location) {
+    // If the location is non-aliasing in lvn, use the non-aliasing value.
+    uint16_t array = gvn->GetArrayLocationBase(location);
+    if (lvn->IsNonAliasingArray(array, type)) {
+      uint16_t index = gvn->GetArrayLocationIndex(location);
+      return NonAliasingArrayVersions::LookupMergeValue(gvn, lvn, array, index);
+    }
+    return AliasingValuesMergeGet<AliasingArrayVersions>(
+        gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
+  }
+
+  static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+                                uint16_t type) {
+    return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
+  }
+
+  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
+                                        uint16_t type) {
+    return gvn->LookupValue(kMergeBlockAliasingArrayVersionBumpOp, type, kNoValue, lvn_id);
+  }
+
+  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
+                                           uint16_t type, uint16_t location) {
+    return gvn->LookupValue(kMergeBlockAliasingArrayMergeLocationOp, type, location, lvn_id);
+  }
+};
+
+template <typename Map>
+LocalValueNumbering::AliasingValues* LocalValueNumbering::GetAliasingValues(
+    Map* map, const typename Map::key_type& key) {
+  auto lb = map->lower_bound(key);
+  if (lb == map->end() || map->key_comp()(key, lb->first)) {
+    map->PutBefore(lb, key, AliasingValues(gvn_->allocator_));
+    // The new entry was inserted before lb.
+    DCHECK(lb != map->begin());
+    --lb;
+    DCHECK(!map->key_comp()(lb->first, key) && !map->key_comp()(key, lb->first));
+  }
+  return &lb->second;
 }
 
-uint16_t LocalValueNumbering::GetFieldId(const MirFieldInfo& field_info) {
-  FieldReference key = { field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex() };
-  auto it = field_index_map_.find(key);
-  if (it != field_index_map_.end()) {
-    return it->second;
+template <typename Versions, typename KeyType>
+void LocalValueNumbering::UpdateAliasingValuesLoadVersion(const KeyType& key,
+                                                          AliasingValues* values) {
+  if (values->last_load_memory_version == kNoValue) {
+    // Get the start version that accounts for aliasing with unresolved fields of the same
+    // type and make it unique for the field by including the field_id.
+    uint16_t memory_version = values->memory_version_before_stores;
+    if (memory_version == kNoValue) {
+      memory_version = Versions::StartMemoryVersion(gvn_, this, key);
+    }
+    if (!values->store_loc_set.empty()) {
+      uint16_t ref_set_id = gvn_->GetRefSetId(values->store_loc_set);
+      memory_version = Versions::BumpMemoryVersion(gvn_, memory_version, ref_set_id,
+                                                   values->last_stored_value);
+    }
+    values->last_load_memory_version = memory_version;
   }
-  uint16_t id = field_index_map_.size();
-  field_index_map_.Put(key, id);
-  return id;
+}
+
+template <typename Versions, typename Map>
+uint16_t LocalValueNumbering::AliasingValuesMergeGet(GlobalValueNumbering* gvn,
+                                                     const LocalValueNumbering* lvn,
+                                                     Map* map, const typename Map::key_type& key,
+                                                     uint16_t location) {
+  // Retrieve the value name that we would get from
+  //   const_cast<LocalValueNumbering*>(lvn)->HandleAliasingValuesGet(map, key, location)
+  // but don't modify the map.
+  uint16_t value_name;
+  auto it = map->find(key);
+  if (it == map->end()) {
+    uint16_t start_version = Versions::StartMemoryVersion(gvn, lvn, key);
+    value_name = Versions::LookupGlobalValue(gvn, key, location, start_version);
+  } else if (it->second.store_loc_set.count(location) != 0u) {
+    value_name = it->second.last_stored_value;
+  } else {
+    auto load_it = it->second.load_value_map.find(location);
+    if (load_it != it->second.load_value_map.end()) {
+      value_name = load_it->second;
+    } else {
+      value_name = Versions::LookupGlobalValue(gvn, key, location, it->second.last_load_memory_version);
+    }
+  }
+  return value_name;
+}
+
+template <typename Versions, typename Map>
+uint16_t LocalValueNumbering::HandleAliasingValuesGet(Map* map, const typename Map::key_type& key,
+                                                      uint16_t location) {
+  // Retrieve the value name for IGET/SGET/AGET, update the map with new value if any.
+  uint16_t res;
+  AliasingValues* values = GetAliasingValues(map, key);
+  if (values->store_loc_set.count(location) != 0u) {
+    res = values->last_stored_value;
+  } else {
+    UpdateAliasingValuesLoadVersion<Versions>(key, values);
+    auto lb = values->load_value_map.lower_bound(location);
+    if (lb != values->load_value_map.end() && lb->first == location) {
+      res = lb->second;
+    } else {
+      res = Versions::LookupGlobalValue(gvn_, key, location, values->last_load_memory_version);
+      values->load_value_map.PutBefore(lb, location, res);
+    }
+  }
+  return res;
+}
+
+template <typename Versions, typename Map>
+bool LocalValueNumbering::HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
+                                                  uint16_t location, uint16_t value) {
+  AliasingValues* values = GetAliasingValues(map, key);
+  auto load_values_it = values->load_value_map.find(location);
+  if (load_values_it != values->load_value_map.end() && load_values_it->second == value) {
+    // This insn can be eliminated, it stores the same value that's already in the field.
+    return false;
+  }
+  if (value == values->last_stored_value) {
+    auto store_loc_lb = values->store_loc_set.lower_bound(location);
+    if (store_loc_lb != values->store_loc_set.end() && *store_loc_lb == location) {
+      // This insn can be eliminated, it stores the same value that's already in the field.
+      return false;
+    }
+    values->store_loc_set.emplace_hint(store_loc_lb, location);
+  } else {
+    UpdateAliasingValuesLoadVersion<Versions>(key, values);
+    values->memory_version_before_stores = values->last_load_memory_version;
+    values->last_stored_value = value;
+    values->store_loc_set.clear();
+    values->store_loc_set.insert(location);
+  }
+  // Clear the last load memory version and remove all potentially overwritten values.
+  values->last_load_memory_version = kNoValue;
+  auto it = values->load_value_map.begin(), end = values->load_value_map.end();
+  while (it != end) {
+    if (it->second == value) {
+      ++it;
+    } else {
+      it = values->load_value_map.erase(it);
+    }
+  }
+  return true;
+}
+
+LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id)
+    : gvn_(gvn),
+      id_(id),
+      sreg_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      sreg_wide_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      sfield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      non_aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      non_aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      global_memory_version_(0u),
+      non_aliasing_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      escaped_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), gvn->Allocator()->Adapter()),
+      escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), gvn->Allocator()->Adapter()),
+      range_checked_(RangeCheckKeyComparator() , gvn->Allocator()->Adapter()),
+      null_checked_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      merge_names_(gvn->Allocator()->Adapter()),
+      merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), gvn->Allocator()->Adapter()),
+      merge_new_memory_version_(kNoValue) {
+  std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
+  std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+}
+
+bool LocalValueNumbering::Equals(const LocalValueNumbering& other) const {
+  DCHECK(gvn_ == other.gvn_);
+  // Compare the maps/sets and memory versions.
+  return sreg_value_map_ == other.sreg_value_map_ &&
+      sreg_wide_value_map_ == other.sreg_wide_value_map_ &&
+      sfield_value_map_ == other.sfield_value_map_ &&
+      non_aliasing_ifield_value_map_ == other.non_aliasing_ifield_value_map_ &&
+      aliasing_ifield_value_map_ == other.aliasing_ifield_value_map_ &&
+      non_aliasing_array_value_map_ == other.non_aliasing_array_value_map_ &&
+      aliasing_array_value_map_ == other.aliasing_array_value_map_ &&
+      SameMemoryVersion(other) &&
+      non_aliasing_refs_ == other.non_aliasing_refs_ &&
+      escaped_refs_ == other.escaped_refs_ &&
+      escaped_ifield_clobber_set_ == other.escaped_ifield_clobber_set_ &&
+      escaped_array_clobber_set_ == other.escaped_array_clobber_set_ &&
+      range_checked_ == other.range_checked_ &&
+      null_checked_ == other.null_checked_;
+}
+
+void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType merge_type) {
+  sreg_value_map_ = other.sreg_value_map_;
+  sreg_wide_value_map_ = other.sreg_wide_value_map_;
+
+  if (merge_type == kReturnMerge) {
+    // RETURN or PHI+RETURN. We need only sreg value maps.
+    return;
+  }
+
+  non_aliasing_ifield_value_map_ = other.non_aliasing_ifield_value_map_;
+  non_aliasing_array_value_map_ = other.non_aliasing_array_value_map_;
+  non_aliasing_refs_ = other.non_aliasing_refs_;
+  range_checked_ = other.range_checked_;
+  null_checked_ = other.null_checked_;
+
+  if (merge_type == kCatchMerge) {
+    // Memory is clobbered. Use new memory version and don't merge aliasing locations.
+    global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
+    std::fill_n(unresolved_sfield_version_, kFieldTypeCount, global_memory_version_);
+    std::fill_n(unresolved_ifield_version_, kFieldTypeCount, global_memory_version_);
+    PruneNonAliasingRefsForCatch();
+    return;
+  }
+
+  DCHECK(merge_type == kNormalMerge);
+  global_memory_version_ = other.global_memory_version_;
+  std::copy_n(other.unresolved_ifield_version_, kFieldTypeCount, unresolved_ifield_version_);
+  std::copy_n(other.unresolved_sfield_version_, kFieldTypeCount, unresolved_sfield_version_);
+  sfield_value_map_ = other.sfield_value_map_;
+  aliasing_ifield_value_map_ = other.aliasing_ifield_value_map_;
+  aliasing_array_value_map_ = other.aliasing_array_value_map_;
+  escaped_refs_ = other.escaped_refs_;
+  escaped_ifield_clobber_set_ = other.escaped_ifield_clobber_set_;
+  escaped_array_clobber_set_ = other.escaped_array_clobber_set_;
+}
+
+bool LocalValueNumbering::SameMemoryVersion(const LocalValueNumbering& other) const {
+  return
+      global_memory_version_ == other.global_memory_version_ &&
+      std::equal(unresolved_ifield_version_, unresolved_ifield_version_ + kFieldTypeCount,
+                 other.unresolved_ifield_version_) &&
+      std::equal(unresolved_sfield_version_, unresolved_sfield_version_ + kFieldTypeCount,
+                 other.unresolved_sfield_version_);
+}
+
+uint16_t LocalValueNumbering::NewMemoryVersion(uint16_t* new_version) {
+  if (*new_version == kNoValue) {
+    *new_version = gvn_->LookupValue(kMergeBlockMemoryVersionBumpOp, 0u, 0u, id_);
+  }
+  return *new_version;
+}
+
+void LocalValueNumbering::MergeMemoryVersions(bool clobbered_catch) {
+  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
+  const LocalValueNumbering* cmp = gvn_->merge_lvns_[0];
+  // Check if the global version has changed.
+  bool new_global_version = clobbered_catch;
+  if (!new_global_version) {
+    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+      if (lvn->global_memory_version_ != cmp->global_memory_version_) {
+        // Use a new version for everything.
+        new_global_version = true;
+        break;
+      }
+    }
+  }
+  if (new_global_version) {
+    global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
+    std::fill_n(unresolved_sfield_version_, kFieldTypeCount, merge_new_memory_version_);
+    std::fill_n(unresolved_ifield_version_, kFieldTypeCount, merge_new_memory_version_);
+  } else {
+    // Initialize with a copy of memory versions from the comparison LVN.
+    global_memory_version_ = cmp->global_memory_version_;
+    std::copy_n(cmp->unresolved_ifield_version_, kFieldTypeCount, unresolved_ifield_version_);
+    std::copy_n(cmp->unresolved_sfield_version_, kFieldTypeCount, unresolved_sfield_version_);
+    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+      if (lvn == cmp) {
+        continue;
+      }
+      for (size_t i = 0; i != kFieldTypeCount; ++i) {
+        if (lvn->unresolved_ifield_version_[i] != cmp->unresolved_ifield_version_[i]) {
+          unresolved_ifield_version_[i] = NewMemoryVersion(&merge_new_memory_version_);
+        }
+        if (lvn->unresolved_sfield_version_[i] != cmp->unresolved_sfield_version_[i]) {
+          unresolved_sfield_version_[i] = NewMemoryVersion(&merge_new_memory_version_);
+        }
+      }
+    }
+  }
+}
+
+void LocalValueNumbering::PruneNonAliasingRefsForCatch() {
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    const BasicBlock* bb = gvn_->GetBasicBlock(lvn->Id());
+    DCHECK_EQ(bb->taken, kNullBlock);
+    DCHECK_NE(bb->fall_through, kNullBlock);
+    const BasicBlock* fall_through_bb = gvn_->GetBasicBlock(bb->fall_through);
+    const MIR* mir = fall_through_bb->first_mir_insn;
+    DCHECK(mir != nullptr);
+    // Only INVOKEs can leak and clobber non-aliasing references if they throw.
+    if ((Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke) != 0) {
+      for (uint16_t i = 0u; i != mir->ssa_rep->num_uses; ++i) {
+        uint16_t value_name = lvn->GetOperandValue(mir->ssa_rep->uses[i]);
+        non_aliasing_refs_.erase(value_name);
+      }
+    }
+  }
+}
+
+
+template <typename Set, Set LocalValueNumbering::* set_ptr>
+void LocalValueNumbering::IntersectSets() {
+  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
+
+  // Find the LVN with the least entries in the set.
+  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    if ((lvn->*set_ptr).size() < (least_entries_lvn->*set_ptr).size()) {
+      least_entries_lvn = lvn;
+    }
+  }
+
+  // For each key check if it's in all the LVNs.
+  for (const auto& key : least_entries_lvn->*set_ptr) {
+    bool checked = true;
+    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+      if (lvn != least_entries_lvn && (lvn->*set_ptr).count(key) == 0u) {
+        checked = false;
+        break;
+      }
+    }
+    if (checked) {
+      (this->*set_ptr).emplace_hint((this->*set_ptr).end(), key);
+    }
+  }
+}
+
+template <typename Map, Map LocalValueNumbering::* map_ptr>
+void LocalValueNumbering::IntersectMaps() {
+  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
+
+  // Find the LVN with the least entries in the set.
+  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    if ((lvn->*map_ptr).size() < (least_entries_lvn->*map_ptr).size()) {
+      least_entries_lvn = lvn;
+    }
+  }
+
+  // For each key check if it's in all the LVNs.
+  for (const auto& entry : least_entries_lvn->*map_ptr) {
+    bool checked = true;
+    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+      if (lvn != least_entries_lvn) {
+        auto it = (lvn->*map_ptr).find(entry.first);
+        if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
+          checked = false;
+          break;
+        }
+      }
+    }
+    if (checked) {
+      (this->*map_ptr).PutBefore((this->*map_ptr).end(), entry.first, entry.second);
+    }
+  }
+}
+
+// Intersect maps as sets. The value type must be equality-comparable.
+template <typename Map>
+void LocalValueNumbering::InPlaceIntersectMaps(Map* work_map, const Map& other_map) {
+  auto work_it = work_map->begin(), work_end = work_map->end();
+  auto cmp = work_map->value_comp();
+  for (const auto& entry : other_map) {
+    // Erase work entries that precede `entry` or share its key but hold a different value.
+    while (work_it != work_end &&
+        (cmp(*work_it, entry) ||
+         (!cmp(entry, *work_it) && !(work_it->second == entry.second)))) {
+      work_it = work_map->erase(work_it);
+    }
+    if (work_it != work_end && !cmp(entry, *work_it)) {
+      ++work_it;  // Keep the matching entry; without this it would be erased next iteration.
+    }
+  }
+  // Entries past the last other_map key cannot be in the intersection.
+  work_map->erase(work_it, work_end);
+}
+
+template <typename Set, Set LocalValueNumbering::*set_ptr, void (LocalValueNumbering::*MergeFn)(
+    const typename Set::value_type& entry, typename Set::iterator hint)>
+void LocalValueNumbering::MergeSets() {
+  auto cmp = (this->*set_ptr).value_comp();
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    auto my_it = (this->*set_ptr).begin(), my_end = (this->*set_ptr).end();
+    for (const auto& entry : lvn->*set_ptr) {
+      while (my_it != my_end && cmp(*my_it, entry)) {
+        ++my_it;
+      }
+      if (my_it != my_end && !cmp(entry, *my_it)) {
+        // Already handled.
+        ++my_it;
+      } else {
+        // Merge values for this field_id.
+        (this->*MergeFn)(entry, my_it);  // my_it remains valid across inserts to std::set/SafeMap.
+      }
+    }
+  }
+}
+
+void LocalValueNumbering::IntersectAliasingValueLocations(AliasingValues* work_values,
+                                                          const AliasingValues* values) {
+  auto cmp = work_values->load_value_map.key_comp();
+  auto work_it = work_values->load_value_map.begin(), work_end = work_values->load_value_map.end();
+  auto store_it = values->store_loc_set.begin(), store_end = values->store_loc_set.end();
+  auto load_it = values->load_value_map.begin(), load_end = values->load_value_map.end();
+  while (store_it != store_end || load_it != load_end) {
+    uint16_t loc;
+    if (store_it != store_end && (load_it == load_end || *store_it < load_it->first)) {
+      loc = *store_it;
+      ++store_it;
+    } else {
+      loc = load_it->first;
+      ++load_it;
+      DCHECK(store_it == store_end || cmp(loc, *store_it));
+    }
+    while (work_it != work_end && cmp(work_it->first, loc)) {
+      work_it = work_values->load_value_map.erase(work_it);
+    }
+    if (work_it != work_end && !cmp(loc, work_it->first)) {
+      // The location matches, keep it.
+      ++work_it;
+    }
+  }
+  while (work_it != work_end) {
+    work_it = work_values->load_value_map.erase(work_it);
+  }
+}
+
+void LocalValueNumbering::MergeEscapedRefs(const ValueNameSet::value_type& entry,
+                                           ValueNameSet::iterator hint) {
+  // See if the ref is either escaped or non-aliasing in each predecessor.
+  bool is_escaped = true;
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    if (lvn->non_aliasing_refs_.count(entry) == 0u &&
+        lvn->escaped_refs_.count(entry) == 0u) {
+      is_escaped = false;
+      break;
+    }
+  }
+  if (is_escaped) {
+    escaped_refs_.emplace_hint(hint, entry);
+  }
+}
+
+void LocalValueNumbering::MergeEscapedIFieldTypeClobberSets(
+    const EscapedIFieldClobberSet::value_type& entry, EscapedIFieldClobberSet::iterator hint) {
+  // Insert only type-clobber entries (field_id == kNoValue) of escaped refs.
+  if (entry.field_id == kNoValue && escaped_refs_.count(entry.base) != 0u) {
+    escaped_ifield_clobber_set_.emplace_hint(hint, entry);
+  }
+}
+
+void LocalValueNumbering::MergeEscapedIFieldClobberSets(
+    const EscapedIFieldClobberSet::value_type& entry, EscapedIFieldClobberSet::iterator hint) {
+  // Insert only those entries of escaped refs that are not overridden by a type clobber.
+  // Note: `hint != end()` guards the dereference; `==` would deref end() and never match.
+  if (!(hint != escaped_ifield_clobber_set_.end() &&
+        hint->base == entry.base && hint->type == entry.type) &&
+      escaped_refs_.count(entry.base) != 0u) {
+    escaped_ifield_clobber_set_.emplace_hint(hint, entry);
+  }
+}
+
+void LocalValueNumbering::MergeEscapedArrayClobberSets(
+    const EscapedArrayClobberSet::value_type& entry, EscapedArrayClobberSet::iterator hint) {
+  if (escaped_refs_.count(entry.base) != 0u) {
+    escaped_array_clobber_set_.emplace_hint(hint, entry);
+  }
+}
+
+void LocalValueNumbering::MergeNullChecked(const ValueNameSet::value_type& entry,
+                                           ValueNameSet::iterator hint) {
+  // Merge null_checked_ for this ref.
+  merge_names_.clear();
+  merge_names_.resize(gvn_->merge_lvns_.size(), entry);
+  if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+    null_checked_.insert(hint, entry);
+  }
+}
+
+void LocalValueNumbering::MergeSFieldValues(const SFieldToValueMap::value_type& entry,
+                                            SFieldToValueMap::iterator hint) {  // Merge one static field's value across predecessor LVNs.
+  uint16_t field_id = entry.first;
+  merge_names_.clear();
+  uint16_t value_name = kNoValue;
+  bool same_values = true;
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    // Get the value name as in HandleSGet() but don't modify *lvn.
+    auto it = lvn->sfield_value_map_.find(field_id);
+    if (it != lvn->sfield_value_map_.end()) {
+      value_name = it->second;
+    } else {
+      uint16_t type = gvn_->GetFieldType(field_id);
+      value_name = gvn_->LookupValue(kResolvedSFieldOp, field_id,
+                                     lvn->unresolved_sfield_version_[type],
+                                     lvn->global_memory_version_);
+    }
+
+    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
+    merge_names_.push_back(value_name);
+  }
+  if (same_values) {
+    // value_name already contains the result.
+  } else {
+    auto lb = merge_map_.lower_bound(merge_names_);
+    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
+      value_name = lb->second;  // Identical tuple of incoming names merged before; reuse its name.
+    } else {
+      value_name = gvn_->LookupValue(kMergeBlockSFieldVersionBumpOp, field_id, id_, kNoValue);
+      merge_map_.PutBefore(lb, merge_names_, value_name);
+      if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+        null_checked_.insert(value_name);  // Merged ref is null-checked if all inputs were.
+      }
+    }
+  }
+  sfield_value_map_.PutBefore(hint, field_id, value_name);
+}
+
+void LocalValueNumbering::MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
+                                                       IFieldLocToValueMap::iterator hint) {  // Merge one non-aliasing instance field location.
+  uint16_t field_loc = entry.first;
+  merge_names_.clear();
+  uint16_t value_name = kNoValue;
+  bool same_values = true;
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    // Get the value name as in HandleIGet() but don't modify *lvn.
+    auto it = lvn->non_aliasing_ifield_value_map_.find(field_loc);
+    if (it != lvn->non_aliasing_ifield_value_map_.end()) {
+      value_name = it->second;
+    } else {
+      value_name = gvn_->LookupValue(kNonAliasingIFieldInitialOp, field_loc, kNoValue, kNoValue);
+    }
+
+    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
+    merge_names_.push_back(value_name);
+  }
+  if (same_values) {
+    // value_name already contains the result.
+  } else {
+    auto lb = merge_map_.lower_bound(merge_names_);
+    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
+      value_name = lb->second;  // Identical tuple of incoming names merged before; reuse its name.
+    } else {
+      value_name = gvn_->LookupValue(kMergeBlockNonAliasingIFieldVersionBumpOp, field_loc,
+                                     id_, kNoValue);
+      merge_map_.PutBefore(lb, merge_names_, value_name);
+      if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+        null_checked_.insert(value_name);  // Merged ref is null-checked if all inputs were.
+      }
+    }
+  }
+  non_aliasing_ifield_value_map_.PutBefore(hint, field_loc, value_name);
+}
+
+template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
+void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& entry,
+                                              typename Map::iterator hint) {  // Merge one aliasing-values entry across predecessor LVNs.
+  const typename Map::key_type& key = entry.first;
+
+  (this->*map_ptr).PutBefore(hint, key, AliasingValues(gvn_->allocator_));
+  DCHECK(hint != (this->*map_ptr).begin());
+  AliasingIFieldValuesMap::iterator it = hint;  // NOTE(review): assumes all aliasing maps share this iterator type - confirm.
+  --it;  // Step back to the element just inserted before the hint.
+  DCHECK_EQ(it->first, key);
+  AliasingValues* my_values = &it->second;
+
+  const AliasingValues* cmp_values = nullptr;
+  bool same_version = !Versions::HasNewBaseVersion(gvn_, this, key);
+  uint16_t load_memory_version_for_same_version = kNoValue;
+  if (same_version) {
+    // Find the first non-null values.
+    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+      auto it = (lvn->*map_ptr).find(key);
+      if (it != (lvn->*map_ptr).end()) {
+        cmp_values = &it->second;
+        break;
+      }
+    }
+    DCHECK(cmp_values != nullptr);  // There must be at least one non-null values.
+
+    // Check if we have identical memory versions, i.e. the global memory version, unresolved
+    // field version and the values' memory_version_before_stores, last_stored_value
+    // and store_loc_set are identical.
+    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+      auto it = (lvn->*map_ptr).find(key);
+      if (it == (lvn->*map_ptr).end()) {
+        if (cmp_values->memory_version_before_stores != kNoValue) {
+          same_version = false;
+          break;
+        }
+      } else if (cmp_values->last_stored_value != it->second.last_stored_value ||
+          cmp_values->memory_version_before_stores != it->second.memory_version_before_stores ||
+          cmp_values->store_loc_set != it->second.store_loc_set) {
+        same_version = false;
+        break;
+      } else if (it->second.last_load_memory_version != kNoValue) {
+        DCHECK(load_memory_version_for_same_version == kNoValue ||
+               load_memory_version_for_same_version == it->second.last_load_memory_version);
+        load_memory_version_for_same_version = it->second.last_load_memory_version;
+      }
+    }
+  }
+
+  if (same_version) {
+    // Copy the identical values.
+    my_values->memory_version_before_stores = cmp_values->memory_version_before_stores;
+    my_values->last_stored_value = cmp_values->last_stored_value;
+    my_values->store_loc_set = cmp_values->store_loc_set;
+    my_values->last_load_memory_version = load_memory_version_for_same_version;
+    // Merge load values seen in all incoming arcs (i.e. an intersection).
+    if (!cmp_values->load_value_map.empty()) {
+      my_values->load_value_map = cmp_values->load_value_map;
+      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+        auto it = (lvn->*map_ptr).find(key);
+        if (it == (lvn->*map_ptr).end() || it->second.load_value_map.empty()) {
+          my_values->load_value_map.clear();
+          break;
+        }
+        InPlaceIntersectMaps(&my_values->load_value_map, it->second.load_value_map);
+        if (my_values->load_value_map.empty()) {
+          break;  // Intersection already empty; no point continuing.
+        }
+      }
+    }
+  } else {
+    // Bump version number for the merge.
+    my_values->memory_version_before_stores = my_values->last_load_memory_version =
+        Versions::LookupMergeBlockValue(gvn_, id_, key);
+
+    // Calculate the locations that have been either read from or written to in each incoming LVN.
+    bool first_lvn = true;
+    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+      auto it = (lvn->*map_ptr).find(key);
+      if (it == (lvn->*map_ptr).end()) {
+        my_values->load_value_map.clear();
+        break;
+      }
+      if (first_lvn) {
+        first_lvn = false;
+        // Copy the first LVN's locations. Values will be overwritten later.
+        my_values->load_value_map = it->second.load_value_map;
+        for (uint16_t location : it->second.store_loc_set) {
+          my_values->load_value_map.Put(location, 0u);
+        }
+      } else {
+        IntersectAliasingValueLocations(my_values, &it->second);
+      }
+    }
+    // Calculate merged values for the intersection.
+    for (auto& load_value_entry : my_values->load_value_map) {
+      uint16_t location = load_value_entry.first;
+      bool same_values = true;
+      uint16_t value_name = kNoValue;
+      merge_names_.clear();
+      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+        value_name = Versions::LookupMergeValue(gvn_, lvn, key, location);
+        same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
+        merge_names_.push_back(value_name);
+      }
+      if (same_values) {
+        // value_name already contains the result.
+      } else {
+        auto lb = merge_map_.lower_bound(merge_names_);
+        if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
+          value_name = lb->second;
+        } else {
+          // NOTE: In addition to the key and id_ which don't change on an LVN recalculation
+          // during GVN, we also add location which can actually change on recalculation, so the
+          // value_name below may change. This could lead to an infinite loop if the location
+          // value name always changed when the refereced value name changes. However, given that
+          // we assign unique value names for other merges, such as Phis, such a dependency is
+          // not possible in a well-formed SSA graph.
+          value_name = Versions::LookupMergeLocationValue(gvn_, id_, key, location);
+          merge_map_.PutBefore(lb, merge_names_, value_name);
+          if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+            null_checked_.insert(value_name);
+          }
+        }
+      }
+      load_value_entry.second = value_name;
+    }
+  }
+}
+
+void LocalValueNumbering::Merge(MergeType merge_type) {  // Merge the states of all predecessor LVNs into this one.
+  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);  // Caller guarantees at least two LVNs to merge.
+
+  IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_value_map_>();
+  IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_wide_value_map_>();
+  if (merge_type == kReturnMerge) {
+    // RETURN or PHI+RETURN. We need only sreg value maps.
+    return;
+  }
+
+  MergeMemoryVersions(merge_type == kCatchMerge);
+
+  // Merge non-aliasing maps/sets.
+  MergeSets<IFieldLocToValueMap, &LocalValueNumbering::non_aliasing_ifield_value_map_,
+            &LocalValueNumbering::MergeNonAliasingIFieldValues>();
+  MergeSets<NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
+            &LocalValueNumbering::MergeAliasingValues<
+                NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
+                NonAliasingArrayVersions>>();
+  IntersectSets<ValueNameSet, &LocalValueNumbering::non_aliasing_refs_>();
+
+  // We won't do anything complicated for range checks, just calculate the intersection.
+  IntersectSets<RangeCheckSet, &LocalValueNumbering::range_checked_>();
+
+  // Merge null_checked_. We may later insert more, such as merged object field values.
+  MergeSets<ValueNameSet, &LocalValueNumbering::null_checked_,
+            &LocalValueNumbering::MergeNullChecked>();
+
+  if (merge_type == kCatchMerge) {
+    // Memory is clobbered. New memory version already created, don't merge aliasing locations.
+    PruneNonAliasingRefsForCatch();
+    return;
+  }
+
+  DCHECK(merge_type == kNormalMerge);
+
+  // Merge escaped refs and clobber sets.
+  MergeSets<ValueNameSet, &LocalValueNumbering::escaped_refs_,
+            &LocalValueNumbering::MergeEscapedRefs>();
+  if (!escaped_refs_.empty()) {
+    // Type clobbers must be merged before field clobbers so the latter can detect overrides.
+    MergeSets<EscapedIFieldClobberSet, &LocalValueNumbering::escaped_ifield_clobber_set_,
+              &LocalValueNumbering::MergeEscapedIFieldTypeClobberSets>();
+    MergeSets<EscapedIFieldClobberSet, &LocalValueNumbering::escaped_ifield_clobber_set_,
+              &LocalValueNumbering::MergeEscapedIFieldClobberSets>();
+    MergeSets<EscapedArrayClobberSet, &LocalValueNumbering::escaped_array_clobber_set_,
+              &LocalValueNumbering::MergeEscapedArrayClobberSets>();
+  }
+
+  MergeSets<SFieldToValueMap, &LocalValueNumbering::sfield_value_map_,
+            &LocalValueNumbering::MergeSFieldValues>();
+  MergeSets<AliasingIFieldValuesMap, &LocalValueNumbering::aliasing_ifield_value_map_,
+            &LocalValueNumbering::MergeAliasingValues<
+                AliasingIFieldValuesMap, &LocalValueNumbering::aliasing_ifield_value_map_,
+                AliasingIFieldVersions>>();
+  MergeSets<AliasingArrayValuesMap, &LocalValueNumbering::aliasing_array_value_map_,
+            &LocalValueNumbering::MergeAliasingValues<
+                AliasingArrayValuesMap, &LocalValueNumbering::aliasing_array_value_map_,
+                AliasingArrayVersions>>();
+}
 
 uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
   uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
-  SetOperandValue(mir->ssa_rep->defs[0], res);
   DCHECK(null_checked_.find(res) == null_checked_.end());
   null_checked_.insert(res);  // Mark the def's value name as known non-null.
   non_aliasing_refs_.insert(res);  // And as a non-aliasing reference.
   return res;
 }
 
-bool LocalValueNumbering::IsNonAliasing(uint16_t reg) {
+bool LocalValueNumbering::IsNonAliasing(uint16_t reg) const {
   return non_aliasing_refs_.find(reg) != non_aliasing_refs_.end();  // Set membership is the definition of non-aliasing.
 }
 
-bool LocalValueNumbering::IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) {
+bool LocalValueNumbering::IsNonAliasingIField(uint16_t reg, uint16_t field_id,
+                                              uint16_t type) const {
   if (IsNonAliasing(reg)) {
     return true;
   }
+  if (escaped_refs_.find(reg) == escaped_refs_.end()) {
+    return false;  // Neither non-aliasing nor tracked as escaped.
+  }
+  // Check for IPUTs to unresolved fields.
+  EscapedIFieldClobberKey key1 = { reg, type, kNoValue };  // field_id == kNoValue is the type-clobber marker.
+  if (escaped_ifield_clobber_set_.find(key1) != escaped_ifield_clobber_set_.end()) {
+    return false;
+  }
+  // Check for aliased IPUTs to the same field.
+  EscapedIFieldClobberKey key2 = { reg, type, field_id };
+  return escaped_ifield_clobber_set_.find(key2) == escaped_ifield_clobber_set_.end();
 }
 
-bool LocalValueNumbering::IsNonAliasingArray(uint16_t reg, uint16_t type) {
+bool LocalValueNumbering::IsNonAliasingArray(uint16_t reg, uint16_t type) const {
   if (IsNonAliasing(reg)) {
     return true;
   }
-  EscapedArrayKey key = { reg, type };
-  return escaped_array_refs_.count(key) != 0u;
+  if (escaped_refs_.count(reg) == 0u) {
+    return false;  // Neither non-aliasing nor tracked as escaped.
+  }
+  // Check for aliased APUTs.
+  EscapedArrayClobberKey key = { reg, type };
+  return escaped_array_clobber_set_.find(key) == escaped_array_clobber_set_.end();
 }
 
-
 void LocalValueNumbering::HandleNullCheck(MIR* mir, uint16_t reg) {
   auto lb = null_checked_.lower_bound(reg);
   if (lb != null_checked_.end() && *lb == reg) {
-    if (LIKELY(Good())) {
-      if (cu_->verbose) {
+    if (LIKELY(gvn_->CanModify())) {
+      if (gvn_->GetCompilationUnit()->verbose) {
         LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
       }
       mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
@@ -120,8 +957,8 @@
   RangeCheckKey key = { array, index };
   auto lb = range_checked_.lower_bound(key);
   if (lb != range_checked_.end() && !RangeCheckKeyComparator()(key, *lb)) {
-    if (LIKELY(Good())) {
-      if (cu_->verbose) {
+    if (LIKELY(gvn_->CanModify())) {
+      if (gvn_->GetCompilationUnit()->verbose) {
         LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
       }
       mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
@@ -141,26 +978,72 @@
 void LocalValueNumbering::HandleEscapingRef(uint16_t base) {
   auto it = non_aliasing_refs_.find(base);
   if (it != non_aliasing_refs_.end()) {
-    uint64_t iget_key = BuildKey(Instruction::IGET, base, 0u, 0u);
-    for (auto iget_it = value_map_.lower_bound(iget_key), iget_end = value_map_.end();
-        iget_it != iget_end && EqualOpAndOperand1(iget_it->first, iget_key); ++iget_it) {
-      uint16_t field_id = ExtractOperand2(iget_it->first);
-      uint16_t type = ExtractModifier(iget_it->first);
-      NonAliasingIFieldKey key = { base, field_id, type };
-      non_aliasing_ifields_.insert(key);
-    }
-    uint64_t aget_key = BuildKey(kNonAliasingArrayStartVersionOp, base, 0u, 0u);
-    auto aget_it = value_map_.lower_bound(aget_key);
-    if (aget_it != value_map_.end() && EqualOpAndOperand1(aget_key, aget_it->first)) {
-      DCHECK_EQ(ExtractOperand2(aget_it->first), kNoValue);
-      uint16_t type = ExtractModifier(aget_it->first);
-      EscapedArrayKey key = { base, type };
-      escaped_array_refs_.insert(key);
-    }
     non_aliasing_refs_.erase(it);
+    escaped_refs_.insert(base);  // Move the ref from non-aliasing to escaped tracking.
   }
 }
 
+uint16_t LocalValueNumbering::HandlePhi(MIR* mir) {  // Assign a value name to a Phi from its predecessors' operand names.
+  if (gvn_->merge_lvns_.empty()) {
+    // Running LVN without a full GVN?
+    return kNoValue;
+  }
+  int16_t num_uses = mir->ssa_rep->num_uses;
+  int32_t* uses = mir->ssa_rep->uses;
+  // Try to find out if this is merging wide regs.
+  if (mir->ssa_rep->defs[0] != 0 &&
+      sreg_wide_value_map_.count(mir->ssa_rep->defs[0] - 1) != 0u) {
+    // This is the high part of a wide reg. Ignore the Phi.
+    return kNoValue;
+  }
+  bool wide = false;
+  for (int16_t i = 0; i != num_uses; ++i) {
+    if (sreg_wide_value_map_.count(uses[i]) != 0u) {  // Any wide use means this Phi merges wide regs.
+      wide = true;
+      break;
+    }
+  }
+  // Iterate over *merge_lvns_ and skip incoming sregs for BBs without associated LVN.
+  uint16_t value_name = kNoValue;
+  merge_names_.clear();
+  BasicBlockId* incoming = mir->meta.phi_incoming;
+  int16_t pos = 0;
+  bool same_values = true;
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    DCHECK_LT(pos, mir->ssa_rep->num_uses);
+    while (incoming[pos] != lvn->Id()) {  // Find this LVN's incoming sreg by matching BB ids.
+      ++pos;
+      DCHECK_LT(pos, mir->ssa_rep->num_uses);
+    }
+    int s_reg = uses[pos];
+    ++pos;
+    value_name = wide ? lvn->GetOperandValueWide(s_reg) : lvn->GetOperandValue(s_reg);
+
+    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
+    merge_names_.push_back(value_name);
+  }
+  if (same_values) {
+    // value_name already contains the result.
+  } else {
+    auto lb = merge_map_.lower_bound(merge_names_);
+    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
+      value_name = lb->second;  // Identical tuple of incoming names merged before; reuse its name.
+    } else {
+      value_name = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+      merge_map_.PutBefore(lb, merge_names_, value_name);
+      if (!wide && gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+        null_checked_.insert(value_name);  // Merged ref is null-checked if all inputs were.
+      }
+    }
+  }
+  if (wide) {
+    SetOperandValueWide(mir->ssa_rep->defs[0], value_name);
+  } else {
+    SetOperandValue(mir->ssa_rep->defs[0], value_name);
+  }
+  return value_name;
+}
+
 uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
   // uint16_t type = opcode - Instruction::AGET;
   uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
@@ -171,22 +1054,12 @@
   // Establish value number for loaded register.
   uint16_t res;
   if (IsNonAliasingArray(array, type)) {
-    // Get the start version that accounts for aliasing within the array (different index names).
-    uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
-    // Find the current version from the non_aliasing_array_version_map_.
-    uint16_t memory_version = start_version;
-    auto it = non_aliasing_array_version_map_.find(start_version);
-    if (it != non_aliasing_array_version_map_.end()) {
-      memory_version = it->second;
-    } else {
-      // Just use the start_version.
-    }
-    res = LookupValue(kNonAliasingArrayOp, array, index, memory_version);
+    res = HandleAliasingValuesGet<NonAliasingArrayVersions>(&non_aliasing_array_value_map_,
+                                                            array, index);
   } else {
-    // Get the memory version of aliased array accesses of this type.
-    uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
-                                          aliasing_array_version_[type], kNoValue);
-    res = LookupValue(kAliasingArrayOp, array, index, memory_version);
+    uint16_t location = gvn_->GetArrayLocation(array, index);
+    res = HandleAliasingValuesGet<AliasingArrayVersions>(&aliasing_array_value_map_,
+                                                         type, location);
   }
   if (opcode == Instruction::AGET_WIDE) {
     SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -209,46 +1082,27 @@
                    ? GetOperandValueWide(mir->ssa_rep->uses[0])
                    : GetOperandValue(mir->ssa_rep->uses[0]);
   if (IsNonAliasing(array)) {
-    // Get the start version that accounts for aliasing within the array (different index values).
-    uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
-    auto it = non_aliasing_array_version_map_.find(start_version);
-    uint16_t memory_version = start_version;
-    if (it != non_aliasing_array_version_map_.end()) {
-      memory_version = it->second;
-    }
-    // We need to take 4 values (array, index, memory_version, value) into account for bumping
-    // the memory version but the key can take only 3. Merge array and index into a location.
-    uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
-    // Bump the version, adding to the chain.
-    memory_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
-                                 array_access_location, value);
-    non_aliasing_array_version_map_.Overwrite(start_version, memory_version);
-    StoreValue(kNonAliasingArrayOp, array, index, memory_version, value);
-  } else {
-    // Get the memory version based on global_memory_version_ and aliasing_array_version_[type].
-    uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
-                                          aliasing_array_version_[type], kNoValue);
-    if (HasValue(kAliasingArrayOp, array, index, memory_version, value)) {
+    bool put_is_live = HandleAliasingValuesPut<NonAliasingArrayVersions>(
+        &non_aliasing_array_value_map_, array, index, value);
+    if (!put_is_live) {
       // This APUT can be eliminated, it stores the same value that's already in the field.
       // TODO: Eliminate the APUT.
       return;
     }
-    // We need to take 4 values (array, index, memory_version, value) into account for bumping
-    // the memory version but the key can take only 3. Merge array and index into a location.
-    uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
-    // Bump the version, adding to the chain.
-    uint16_t bumped_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
-                                          array_access_location, value);
-    aliasing_array_version_[type] = bumped_version;
-    memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
-                                 bumped_version, kNoValue);
-    StoreValue(kAliasingArrayOp, array, index, memory_version, value);
+  } else {
+    uint16_t location = gvn_->GetArrayLocation(array, index);
+    bool put_is_live = HandleAliasingValuesPut<AliasingArrayVersions>(
+        &aliasing_array_value_map_, type, location, value);
+    if (!put_is_live) {
+      // This APUT can be eliminated, it stores the same value that's already in the field.
+      // TODO: Eliminate the APUT.
+      return;
+    }
 
-    // Clear escaped array refs for this type.
-    EscapedArrayKey array_key = { type, 0u };
-    auto it = escaped_array_refs_.lower_bound(array_key), end = escaped_array_refs_.end();
-    while (it != end && it->type == type) {
-      it = escaped_array_refs_.erase(it);
+    // Clobber all escaped array refs for this type.
+    for (uint16_t escaped_array : escaped_refs_) {
+      EscapedArrayClobberKey clobber_key = { escaped_array, type };
+      escaped_array_clobber_set_.insert(clobber_key);
     }
   }
 }
@@ -256,32 +1110,28 @@
 uint16_t LocalValueNumbering::HandleIGet(MIR* mir, uint16_t opcode) {
   uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
   HandleNullCheck(mir, base);
-  const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+  const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
   uint16_t res;
   if (!field_info.IsResolved() || field_info.IsVolatile()) {
     // Volatile fields always get a new memory version; field id is irrelevant.
     // Unresolved fields may be volatile, so handle them as such to be safe.
     // Use result s_reg - will be unique.
-    res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+    res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
   } else {
     uint16_t type = opcode - Instruction::IGET;
-    uint16_t field_id = GetFieldId(field_info);
+    uint16_t field_id = gvn_->GetFieldId(field_info, type);
     if (IsNonAliasingIField(base, field_id, type)) {
-      res = LookupValue(kNonAliasingIFieldOp, base, field_id, type);
-    } else {
-      // Get the start version that accounts for aliasing with unresolved fields of the same type
-      // and make it unique for the field by including the field_id.
-      uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
-                                           unresolved_ifield_version_[type], field_id);
-      // Find the current version from the aliasing_ifield_version_map_.
-      uint16_t memory_version = start_version;
-      auto version_it = aliasing_ifield_version_map_.find(start_version);
-      if (version_it != aliasing_ifield_version_map_.end()) {
-        memory_version = version_it->second;
+      uint16_t loc = gvn_->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
+      auto lb = non_aliasing_ifield_value_map_.lower_bound(loc);
+      if (lb != non_aliasing_ifield_value_map_.end() && lb->first == loc) {
+        res = lb->second;
       } else {
-        // Just use the start_version.
+        res = gvn_->LookupValue(kNonAliasingIFieldInitialOp, loc, kNoValue, kNoValue);
+        non_aliasing_ifield_value_map_.PutBefore(lb, loc, res);
       }
-      res = LookupValue(kAliasingIFieldOp, base, field_id, memory_version);
+    } else {
+      res = HandleAliasingValuesGet<AliasingIFieldVersions>(&aliasing_ifield_value_map_,
+                                                            field_id, base);
     }
   }
   if (opcode == Instruction::IGET_WIDE) {
@@ -297,66 +1147,72 @@
   int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
   uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
   HandleNullCheck(mir, base);
-  const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+  const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
   if (!field_info.IsResolved()) {
     // Unresolved fields always alias with everything of the same type.
     // Use mir->offset as modifier; without elaborate inlining, it will be unique.
     unresolved_ifield_version_[type] =
-        LookupValue(kUnresolvedIFieldOp, kNoValue, kNoValue, mir->offset);
+        gvn_->LookupValue(kUnresolvedIFieldOp, kNoValue, kNoValue, mir->offset);
 
-    // Treat fields of escaped references of the same type as potentially modified.
-    NonAliasingIFieldKey key = { type, 0u, 0u };  // lowest possible key of this type.
-    auto it = non_aliasing_ifields_.lower_bound(key), end = non_aliasing_ifields_.end();
-    while (it != end && it->type == type) {
-      it = non_aliasing_ifields_.erase(it);
+    // For simplicity, treat base as escaped now.
+    HandleEscapingRef(base);
+
+    // Clobber all fields of escaped references of the same type.
+    for (uint16_t escaped_ref : escaped_refs_) {
+      EscapedIFieldClobberKey clobber_key = { escaped_ref, type, kNoValue };
+      escaped_ifield_clobber_set_.insert(clobber_key);
+    }
+
+    // Aliasing fields of the same type may have been overwritten.
+    auto it = aliasing_ifield_value_map_.begin(), end = aliasing_ifield_value_map_.end();
+    while (it != end) {
+      if (gvn_->GetFieldType(it->first) != type) {
+        ++it;
+      } else {
+        it = aliasing_ifield_value_map_.erase(it);
+      }
     }
   } else if (field_info.IsVolatile()) {
     // Nothing to do, resolved volatile fields always get a new memory version anyway and
     // can't alias with resolved non-volatile fields.
   } else {
-    uint16_t field_id = GetFieldId(field_info);
+    uint16_t field_id = gvn_->GetFieldId(field_info, type);
     uint16_t value = (opcode == Instruction::IPUT_WIDE)
                      ? GetOperandValueWide(mir->ssa_rep->uses[0])
                      : GetOperandValue(mir->ssa_rep->uses[0]);
     if (IsNonAliasing(base)) {
-      StoreValue(kNonAliasingIFieldOp, base, field_id, type, value);
-    } else {
-      // Get the start version that accounts for aliasing with unresolved fields of the same type
-      // and make it unique for the field by including the field_id.
-      uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
-                                           unresolved_ifield_version_[type], field_id);
-      // Find the old version from the aliasing_ifield_version_map_.
-      uint16_t old_version = start_version;
-      auto version_it = aliasing_ifield_version_map_.find(start_version);
-      if (version_it != aliasing_ifield_version_map_.end()) {
-        old_version = version_it->second;
+      uint16_t loc = gvn_->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
+      auto lb = non_aliasing_ifield_value_map_.lower_bound(loc);
+      if (lb != non_aliasing_ifield_value_map_.end() && lb->first == loc) {
+        if (lb->second == value) {
+          // This IPUT can be eliminated, it stores the same value that's already in the field.
+          // TODO: Eliminate the IPUT.
+          return;
+        }
+        lb->second = value;  // Overwrite.
+      } else {
+        non_aliasing_ifield_value_map_.PutBefore(lb, loc, value);
       }
-      // Check if the field currently contains the value, making this a NOP.
-      if (HasValue(kAliasingIFieldOp, base, field_id, old_version, value)) {
+    } else {
+      bool put_is_live = HandleAliasingValuesPut<AliasingIFieldVersions>(
+          &aliasing_ifield_value_map_, field_id, base, value);
+      if (!put_is_live) {
         // This IPUT can be eliminated, it stores the same value that's already in the field.
         // TODO: Eliminate the IPUT.
         return;
       }
-      // Bump the version, adding to the chain started by start_version.
-      uint16_t memory_version = LookupValue(kAliasingIFieldBumpVersionOp, old_version, base, value);
-      // Update the aliasing_ifield_version_map_ so that HandleIGet() can get the memory_version
-      // without knowing the values used to build the chain.
-      aliasing_ifield_version_map_.Overwrite(start_version, memory_version);
-      StoreValue(kAliasingIFieldOp, base, field_id, memory_version, value);
 
-      // Clear non-aliasing fields for this field_id.
-      NonAliasingIFieldKey field_key = { type, field_id, 0u };
-      auto it = non_aliasing_ifields_.lower_bound(field_key), end = non_aliasing_ifields_.end();
-      while (it != end && it->field_id == field_id) {
-        DCHECK_EQ(type, it->type);
-        it = non_aliasing_ifields_.erase(it);
+      // Clobber all fields of escaped references for this field.
+      for (uint16_t escaped_ref : escaped_refs_) {
+        EscapedIFieldClobberKey clobber_key = { escaped_ref, type, field_id };
+        escaped_ifield_clobber_set_.insert(clobber_key);
       }
     }
   }
 }
 
 uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
-  const MirSFieldLoweringInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+  const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
   if (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
     // Class initialization can call arbitrary functions, we need to wipe aliasing values.
     HandleInvokeOrClInit(mir);
@@ -366,15 +1222,21 @@
     // Volatile fields always get a new memory version; field id is irrelevant.
     // Unresolved fields may be volatile, so handle them as such to be safe.
     // Use result s_reg - will be unique.
-    res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+    res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
   } else {
-    uint16_t field_id = GetFieldId(field_info);
-    // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
-    // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
-    // to determine the version of the field.
     uint16_t type = opcode - Instruction::SGET;
-    res = LookupValue(kResolvedSFieldOp, field_id,
-                      unresolved_sfield_version_[type], global_memory_version_);
+    uint16_t field_id = gvn_->GetFieldId(field_info, type);
+    auto lb = sfield_value_map_.lower_bound(field_id);
+    if (lb != sfield_value_map_.end() && lb->first == field_id) {
+      res = lb->second;
+    } else {
+      // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
+      // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
+      // to determine the version of the field.
+      res = gvn_->LookupValue(kResolvedSFieldOp, field_id,
+                              unresolved_sfield_version_[type], global_memory_version_);
+      sfield_value_map_.PutBefore(lb, field_id, res);
+    }
   }
   if (opcode == Instruction::SGET_WIDE) {
     SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -385,7 +1247,7 @@
 }
 
 void LocalValueNumbering::HandleSPut(MIR* mir, uint16_t opcode) {
-  const MirSFieldLoweringInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+  const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
   if (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
     // Class initialization can call arbitrary functions, we need to wipe aliasing values.
     HandleInvokeOrClInit(mir);
@@ -395,31 +1257,56 @@
     // Unresolved fields always alias with everything of the same type.
     // Use mir->offset as modifier; without elaborate inlining, it will be unique.
     unresolved_sfield_version_[type] =
-        LookupValue(kUnresolvedSFieldOp, kNoValue, kNoValue, mir->offset);
+        gvn_->LookupValue(kUnresolvedSFieldOp, kNoValue, kNoValue, mir->offset);
+    RemoveSFieldsForType(type);
   } else if (field_info.IsVolatile()) {
     // Nothing to do, resolved volatile fields always get a new memory version anyway and
     // can't alias with resolved non-volatile fields.
   } else {
-    uint16_t field_id = GetFieldId(field_info);
+    uint16_t field_id = gvn_->GetFieldId(field_info, type);
     uint16_t value = (opcode == Instruction::SPUT_WIDE)
                      ? GetOperandValueWide(mir->ssa_rep->uses[0])
                      : GetOperandValue(mir->ssa_rep->uses[0]);
     // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
     // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
     // to determine the version of the field.
-    uint16_t type = opcode - Instruction::SGET;
-    StoreValue(kResolvedSFieldOp, field_id,
-               unresolved_sfield_version_[type], global_memory_version_, value);
+    auto lb = sfield_value_map_.lower_bound(field_id);
+    if (lb != sfield_value_map_.end() && lb->first == field_id) {
+      if (lb->second == value) {
+        // This SPUT can be eliminated, it stores the same value that's already in the field.
+        // TODO: Eliminate the SPUT.
+        return;
+      }
+      lb->second = value;  // Overwrite.
+    } else {
+      sfield_value_map_.PutBefore(lb, field_id, value);
+    }
+  }
+}
+
+void LocalValueNumbering::RemoveSFieldsForType(uint16_t type) {
+  // Erase all static fields of this type from the sfield_value_map_.
+  for (auto it = sfield_value_map_.begin(), end = sfield_value_map_.end(); it != end; ) {
+    if (gvn_->GetFieldType(it->first) == type) {
+      it = sfield_value_map_.erase(it);
+    } else {
+      ++it;
+    }
   }
 }
 
 void LocalValueNumbering::HandleInvokeOrClInit(MIR* mir) {
   // Use mir->offset as modifier; without elaborate inlining, it will be unique.
-  global_memory_version_ = LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
-  // All fields of escaped references need to be treated as potentially modified.
-  non_aliasing_ifields_.clear();
-  // Array elements may also have been modified via escaped array refs.
-  escaped_array_refs_.clear();
+  global_memory_version_ =
+      gvn_->LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
+  // All static fields, as well as instance fields and array elements of aliasing
+  // references (including escaped references), may have been modified.
+  sfield_value_map_.clear();
+  aliasing_ifield_value_map_.clear();
+  aliasing_array_value_map_.clear();
+  escaped_refs_.clear();
+  escaped_ifield_clobber_set_.clear();
+  escaped_array_clobber_set_.clear();
 }
 
 uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
@@ -431,8 +1318,6 @@
     case Instruction::RETURN:
     case Instruction::RETURN_OBJECT:
     case Instruction::RETURN_WIDE:
-    case Instruction::MONITOR_ENTER:
-    case Instruction::MONITOR_EXIT:
     case Instruction::GOTO:
     case Instruction::GOTO_16:
     case Instruction::GOTO_32:
@@ -461,12 +1346,42 @@
       // Nothing defined - take no action.
       break;
 
+    case Instruction::MONITOR_ENTER:
+      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
+      // NOTE: Keeping all aliasing values intact. Programs that rely on loads/stores of the
+      // same non-volatile locations outside and inside a synchronized block being different
+      // contain races that we cannot fix.
+      break;
+
+    case Instruction::MONITOR_EXIT:
+      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
+      // If we're running GVN and CanModify(), an uneliminated null check means a bytecode error.
+      if ((gvn_->cu_->disable_opt & (1 << kGlobalValueNumbering)) == 0 && gvn_->CanModify() &&
+          (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
+        LOG(WARNING) << "Bytecode error: MONITOR_EXIT is still null checked at 0x" << std::hex
+            << mir->offset << " in " << PrettyMethod(gvn_->cu_->method_idx, *gvn_->cu_->dex_file);
+      }
+      break;
+
     case Instruction::FILLED_NEW_ARRAY:
     case Instruction::FILLED_NEW_ARRAY_RANGE:
       // Nothing defined but the result will be unique and non-null.
       if (mir->next != nullptr && mir->next->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
-        MarkNonAliasingNonNull(mir->next);
-        // TUNING: We could track value names stored in the array.
+        uint16_t array = MarkNonAliasingNonNull(mir->next);
+        // Do not SetOperandValue(), we'll do that when we process the MOVE_RESULT_OBJECT.
+        if (kLocalValueNumberingEnableFilledNewArrayTracking && mir->ssa_rep->num_uses != 0u) {
+          AliasingValues* values = GetAliasingValues(&non_aliasing_array_value_map_, array);
+          // Clear the value if we got a merged version in a loop.
+          *values = AliasingValues(gvn_->allocator_);
+          for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+            DCHECK_EQ(High16Bits(i), 0u);
+            uint16_t index = gvn_->LookupValue(Instruction::CONST, i, 0u, 0);
+            uint16_t value = GetOperandValue(mir->ssa_rep->uses[i]);
+            values->load_value_map.Put(index, value);
+            RangeCheckKey key = { array, index };
+            range_checked_.insert(key);
+          }
+        }
         // The MOVE_RESULT_OBJECT will be processed next and we'll return the value name then.
       }
       // All args escaped (if references).
@@ -514,12 +1429,13 @@
     case Instruction::NEW_ARRAY:
       // 1 result, treat as unique each time, use result s_reg - will be unique.
       res = MarkNonAliasingNonNull(mir);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
     case Instruction::CONST_STRING:
     case Instruction::CONST_STRING_JUMBO:
       // These strings are internalized, so assign value based on the string pool index.
-      res = LookupValue(Instruction::CONST_STRING, Low16Bits(mir->dalvikInsn.vB),
-                        High16Bits(mir->dalvikInsn.vB), 0);
+      res = gvn_->LookupValue(Instruction::CONST_STRING, Low16Bits(mir->dalvikInsn.vB),
+                              High16Bits(mir->dalvikInsn.vB), 0);
       SetOperandValue(mir->ssa_rep->defs[0], res);
       null_checked_.insert(res);  // May already be there.
       // NOTE: Hacking the contents of an internalized string via reflection is possible
@@ -535,10 +1451,7 @@
       break;
 
     case kMirOpPhi:
-      /*
-       * Because we'll only see phi nodes at the beginning of an extended basic block,
-       * we can ignore them.  Revisit if we shift to global value numbering.
-       */
+      res = HandlePhi(mir);
       break;
 
     case Instruction::MOVE:
@@ -564,27 +1477,27 @@
     case Instruction::CONST:
     case Instruction::CONST_4:
     case Instruction::CONST_16:
-      res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
-                        High16Bits(mir->dalvikInsn.vB), 0);
+      res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+                              High16Bits(mir->dalvikInsn.vB), 0);
       SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
 
     case Instruction::CONST_HIGH16:
-      res = LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
+      res = gvn_->LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
       SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
 
     case Instruction::CONST_WIDE_16:
     case Instruction::CONST_WIDE_32: {
-        uint16_t low_res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
-                                       High16Bits(mir->dalvikInsn.vB >> 16), 1);
+        uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+                                             High16Bits(mir->dalvikInsn.vB >> 16), 1);
         uint16_t high_res;
         if (mir->dalvikInsn.vB & 0x80000000) {
-          high_res = LookupValue(Instruction::CONST, 0xffff, 0xffff, 2);
+          high_res = gvn_->LookupValue(Instruction::CONST, 0xffff, 0xffff, 2);
         } else {
-          high_res = LookupValue(Instruction::CONST, 0, 0, 2);
+          high_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 2);
         }
-        res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -592,24 +1505,30 @@
     case Instruction::CONST_WIDE: {
         uint32_t low_word = Low32Bits(mir->dalvikInsn.vB_wide);
         uint32_t high_word = High32Bits(mir->dalvikInsn.vB_wide);
-        uint16_t low_res = LookupValue(Instruction::CONST, Low16Bits(low_word),
-                                       High16Bits(low_word), 1);
-        uint16_t high_res = LookupValue(Instruction::CONST, Low16Bits(high_word),
-                                       High16Bits(high_word), 2);
-        res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(low_word),
+                                             High16Bits(low_word), 1);
+        uint16_t high_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(high_word),
+                                              High16Bits(high_word), 2);
+        res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
 
     case Instruction::CONST_WIDE_HIGH16: {
-        uint16_t low_res = LookupValue(Instruction::CONST, 0, 0, 1);
-        uint16_t high_res = LookupValue(Instruction::CONST, 0, Low16Bits(mir->dalvikInsn.vB), 2);
-        res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        uint16_t low_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 1);
+        uint16_t high_res = gvn_->LookupValue(Instruction::CONST, 0,
+                                              Low16Bits(mir->dalvikInsn.vB), 2);
+        res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
 
-    case Instruction::ARRAY_LENGTH:
+    case Instruction::ARRAY_LENGTH: {
+        // Handle the null check.
+        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
+        HandleNullCheck(mir, reg);
+      }
+      // Intentional fall-through.
     case Instruction::NEG_INT:
     case Instruction::NOT_INT:
     case Instruction::NEG_FLOAT:
@@ -620,7 +1539,7 @@
     case Instruction::FLOAT_TO_INT: {
         // res = op + 1 operand
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -631,7 +1550,7 @@
     case Instruction::DOUBLE_TO_INT: {
         // res = op + 1 wide operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -644,7 +1563,7 @@
     case Instruction::NEG_DOUBLE: {
         // wide res = op + 1 wide operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -655,7 +1574,7 @@
     case Instruction::INT_TO_LONG: {
         // wide res = op + 1 operand
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -666,7 +1585,7 @@
         // res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -698,7 +1617,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = LookupValue(opcode, operand1, operand2, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -732,7 +1651,7 @@
         // wide res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -746,7 +1665,7 @@
         // wide res = op + 1 wide operand + 1 operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -764,7 +1683,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = LookupValue(opcode, operand1, operand2, kNoValue);
+        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -790,8 +1709,8 @@
     case Instruction::USHR_INT_LIT8: {
         // Same as res = op + 2 operands, except use vC as operand 2
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = LookupValue(Instruction::CONST, mir->dalvikInsn.vC, 0, 0);
-        res = LookupValue(opcode, operand1, operand2, kNoValue);
+        uint16_t operand2 = gvn_->LookupValue(Instruction::CONST, mir->dalvikInsn.vC, 0, 0);
+        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;