Merge "ART: Make FastJNI annotation checks slow-debug"
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 28709a1..5b57718 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -62,23 +62,24 @@
     const char* shorty = "IIFII";
 
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     std::unique_ptr<JniCallingConvention> jni_conv(
-        JniCallingConvention::Create(&arena,
+        JniCallingConvention::Create(&allocator,
                                      is_static,
                                      is_synchronized,
                                      /*is_critical_native*/false,
                                      shorty,
                                      isa));
     std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
-        ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa));
+        ManagedRuntimeCallingConvention::Create(
+            &allocator, is_static, is_synchronized, shorty, isa));
     const int frame_size(jni_conv->FrameSize());
     ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
 
     // Assemble the method.
     std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm(
-        JNIMacroAssembler<kPointerSize>::Create(&arena, isa));
+        JNIMacroAssembler<kPointerSize>::Create(&allocator, isa));
     jni_asm->cfi().SetEnabled(true);
     jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(),
                         callee_save_regs, mr_conv->EntrySpills());
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 92b5c4d..e32b681 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -179,11 +179,11 @@
   }
 
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
+  ArenaAllocator allocator(&pool);
 
   // Calling conventions used to iterate over parameters to method
   std::unique_ptr<JniCallingConvention> main_jni_conv =
-      JniCallingConvention::Create(&arena,
+      JniCallingConvention::Create(&allocator,
                                    is_static,
                                    is_synchronized,
                                    is_critical_native,
@@ -193,7 +193,7 @@
 
   std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
       ManagedRuntimeCallingConvention::Create(
-          &arena, is_static, is_synchronized, shorty, instruction_set));
+          &allocator, is_static, is_synchronized, shorty, instruction_set));
 
   // Calling conventions to call into JNI method "end" possibly passing a returned reference, the
   //     method and the current thread.
@@ -209,7 +209,7 @@
   }
 
   std::unique_ptr<JniCallingConvention> end_jni_conv(
-      JniCallingConvention::Create(&arena,
+      JniCallingConvention::Create(&allocator,
                                    is_static,
                                    is_synchronized,
                                    is_critical_native,
@@ -218,7 +218,7 @@
 
   // Assembler that holds generated instructions
   std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm =
-      GetMacroAssembler<kPointerSize>(&arena, instruction_set, instruction_set_features);
+      GetMacroAssembler<kPointerSize>(&allocator, instruction_set, instruction_set_features);
   const CompilerOptions& compiler_options = driver->GetCompilerOptions();
   jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
   jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index f84fea3..3d56833 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -354,8 +354,8 @@
 
 std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  arm::ArmVIXLAssembler assembler(&arena);
+  ArenaAllocator allocator(&pool);
+  arm::ArmVIXLAssembler assembler(&allocator);
 
   switch (key.GetType()) {
     case ThunkType::kMethodCall:
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 828c99b..663e43b 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -511,8 +511,8 @@
 
 std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  arm64::Arm64Assembler assembler(&arena);
+  ArenaAllocator allocator(&pool);
+  arm64::Arm64Assembler assembler(&allocator);
 
   switch (key.GetType()) {
     case ThunkType::kMethodCall: {
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index fe7ecd1..d7def77 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -29,7 +29,7 @@
                                                     uint32_t store_dex_pc) {
   HBasicBlock* block = branch_targets_[store_dex_pc];
   if (block == nullptr) {
-    block = new (arena_) HBasicBlock(graph_, semantic_dex_pc);
+    block = new (allocator_) HBasicBlock(graph_, semantic_dex_pc);
     branch_targets_[store_dex_pc] = block;
   }
   DCHECK_EQ(block->GetDexPc(), semantic_dex_pc);
@@ -200,7 +200,7 @@
 // Returns the TryItem stored for `block` or nullptr if there is no info for it.
 static const DexFile::TryItem* GetTryItem(
     HBasicBlock* block,
-    const ArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+    const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
   auto iterator = try_block_info.find(block->GetBlockId());
   return (iterator == try_block_info.end()) ? nullptr : iterator->second;
 }
@@ -212,7 +212,7 @@
 static void LinkToCatchBlocks(HTryBoundary* try_boundary,
                               const DexFile::CodeItem& code_item,
                               const DexFile::TryItem* try_item,
-                              const ArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
+                              const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
   for (CatchHandlerIterator it(code_item, *try_item); it.HasNext(); it.Next()) {
     try_boundary->AddExceptionHandler(catch_blocks.Get(it.GetHandlerAddress()));
   }
@@ -253,8 +253,8 @@
 
   // Keep a map of all try blocks and their respective TryItems. We do not use
   // the block's pointer but rather its id to ensure deterministic iteration.
-  ArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
-      std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+      std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
 
   // Obtain TryItem information for blocks with throwing instructions, and split
   // blocks which are both try & catch to simplify the graph.
@@ -278,8 +278,8 @@
   }
 
   // Map from a handler dex_pc to the corresponding catch block.
-  ArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
-      std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
+      std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
 
   // Iterate over catch blocks, create artificial landing pads if necessary to
   // simplify the CFG, and set metadata.
@@ -302,8 +302,8 @@
       HBasicBlock* catch_block = GetBlockAt(address);
       bool is_try_block = (try_block_info.find(catch_block->GetBlockId()) != try_block_info.end());
       if (is_try_block || MightHaveLiveNormalPredecessors(catch_block)) {
-        HBasicBlock* new_catch_block = new (arena_) HBasicBlock(graph_, address);
-        new_catch_block->AddInstruction(new (arena_) HGoto(address));
+        HBasicBlock* new_catch_block = new (allocator_) HBasicBlock(graph_, address);
+        new_catch_block->AddInstruction(new (allocator_) HGoto(address));
         new_catch_block->AddSuccessor(catch_block);
         graph_->AddBlock(new_catch_block);
         catch_block = new_catch_block;
@@ -311,7 +311,7 @@
 
       catch_blocks.Put(address, catch_block);
       catch_block->SetTryCatchInformation(
-        new (arena_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
+        new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
     }
     handlers_ptr = iterator.EndDataPointer();
   }
@@ -328,8 +328,8 @@
       if (GetTryItem(predecessor, try_block_info) != try_item) {
         // Found a predecessor not covered by the same TryItem. Insert entering
         // boundary block.
-        HTryBoundary* try_entry =
-            new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
+        HTryBoundary* try_entry = new (allocator_) HTryBoundary(
+            HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
         try_block->CreateImmediateDominator()->AddInstruction(try_entry);
         LinkToCatchBlocks(try_entry, code_item_, try_item, catch_blocks);
         break;
@@ -357,7 +357,7 @@
 
       // Insert TryBoundary and link to catch blocks.
       HTryBoundary* try_exit =
-          new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
+          new (allocator_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
       graph_->SplitEdge(try_block, successor)->AddInstruction(try_exit);
       LinkToCatchBlocks(try_exit, code_item_, try_item, catch_blocks);
     }
@@ -367,8 +367,8 @@
 bool HBasicBlockBuilder::Build() {
   DCHECK(graph_->GetBlocks().empty());
 
-  graph_->SetEntryBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
-  graph_->SetExitBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
+  graph_->SetEntryBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));
+  graph_->SetExitBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));
 
   // TODO(dbrazdil): Do CreateBranchTargets and ConnectBasicBlocks in one pass.
   if (!CreateBranchTargets()) {
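
The hunks above follow a two-allocator convention: HIR objects that become part of
the graph (HBasicBlock, HGoto, HTryBoundary, TryCatchInformation) still come from the
graph's ArenaAllocator via allocator_, while bookkeeping that dies with the builder
(try_block_info, catch_blocks, branch_targets_) moves to the pass-local
ScopedArenaAllocator. A rough sketch of the split, reusing names from this diff (a
sketch only, not part of the patch):

    // Graph-lifetime objects use the graph's arena; transient builder state
    // uses the caller-provided scoped arena and is reclaimed after the pass.
    HBasicBlock* AddBlockSketch(HGraph* graph, ScopedArenaAllocator* local_allocator) {
      HBasicBlock* block = new (graph->GetAllocator()) HBasicBlock(graph, kNoDexPc);
      graph->AddBlock(block);  // lives as long as the HGraph
      ScopedArenaSafeMap<uint32_t, HBasicBlock*> targets(
          std::less<uint32_t>(), local_allocator->Adapter(kArenaAllocGraphBuilder));
      targets.Put(0u, block);  // lives only until this function returns
      return block;
    }
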
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 4a0f78c..79f7a7b 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -17,8 +17,8 @@
 #ifndef ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_
 
-#include "base/arena_containers.h"
-#include "base/arena_object.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "dex_file.h"
 #include "nodes.h"
 
@@ -28,17 +28,21 @@
  public:
   HBasicBlockBuilder(HGraph* graph,
                      const DexFile* const dex_file,
-                     const DexFile::CodeItem& code_item)
-      : arena_(graph->GetAllocator()),
+                     const DexFile::CodeItem& code_item,
+                     ScopedArenaAllocator* local_allocator)
+      : allocator_(graph->GetAllocator()),
         graph_(graph),
         dex_file_(dex_file),
         code_item_(code_item),
+        local_allocator_(local_allocator),
         branch_targets_(code_item.insns_size_in_code_units_,
                         nullptr,
-                        arena_->Adapter(kArenaAllocGraphBuilder)),
-        throwing_blocks_(kDefaultNumberOfThrowingBlocks, arena_->Adapter(kArenaAllocGraphBuilder)),
+                        local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        throwing_blocks_(kDefaultNumberOfThrowingBlocks,
+                         local_allocator->Adapter(kArenaAllocGraphBuilder)),
         number_of_branches_(0u),
-        quicken_index_for_dex_pc_(std::less<uint32_t>(), arena_->Adapter()) {}
+        quicken_index_for_dex_pc_(std::less<uint32_t>(),
+                                  local_allocator->Adapter(kArenaAllocGraphBuilder)) {}
 
   // Creates basic blocks in `graph_` at branch target dex_pc positions of the
   // `code_item_`. Blocks are connected but left unpopulated with instructions.
@@ -71,18 +75,19 @@
   // handler dex_pcs.
   bool MightHaveLiveNormalPredecessors(HBasicBlock* catch_block);
 
-  ArenaAllocator* const arena_;
+  ArenaAllocator* const allocator_;
   HGraph* const graph_;
 
   const DexFile* const dex_file_;
   const DexFile::CodeItem& code_item_;
 
-  ArenaVector<HBasicBlock*> branch_targets_;
-  ArenaVector<HBasicBlock*> throwing_blocks_;
+  ScopedArenaAllocator* const local_allocator_;
+  ScopedArenaVector<HBasicBlock*> branch_targets_;
+  ScopedArenaVector<HBasicBlock*> throwing_blocks_;
   size_t number_of_branches_;
 
   // A table to quickly find the quicken index for the first instruction of a basic block.
-  ArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;
+  ScopedArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;
 
   static constexpr size_t kDefaultNumberOfThrowingBlocks = 2u;
 
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 0255e73..9c2068e 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -18,7 +18,8 @@
 
 #include <limits>
 
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "induction_var_range.h"
 #include "nodes.h"
 #include "side_effects_analysis.h"
@@ -287,7 +288,7 @@
  */
 class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
  public:
-  ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper)
+  ValueRange(ScopedArenaAllocator* allocator, ValueBound lower, ValueBound upper)
       : allocator_(allocator), lower_(lower), upper_(upper) {}
 
   virtual ~ValueRange() {}
@@ -297,7 +298,7 @@
     return AsMonotonicValueRange() != nullptr;
   }
 
-  ArenaAllocator* GetAllocator() const { return allocator_; }
+  ScopedArenaAllocator* GetAllocator() const { return allocator_; }
   ValueBound GetLower() const { return lower_; }
   ValueBound GetUpper() const { return upper_; }
 
@@ -350,7 +351,7 @@
   }
 
  private:
-  ArenaAllocator* const allocator_;
+  ScopedArenaAllocator* const allocator_;
   const ValueBound lower_;  // inclusive
   const ValueBound upper_;  // inclusive
 
@@ -365,7 +366,7 @@
  */
 class MonotonicValueRange : public ValueRange {
  public:
-  MonotonicValueRange(ArenaAllocator* allocator,
+  MonotonicValueRange(ScopedArenaAllocator* allocator,
                       HPhi* induction_variable,
                       HInstruction* initial,
                       int32_t increment,
@@ -510,21 +511,19 @@
              const SideEffectsAnalysis& side_effects,
              HInductionVarAnalysis* induction_analysis)
       : HGraphVisitor(graph),
+        allocator_(graph->GetArenaStack()),
         maps_(graph->GetBlocks().size(),
-              ArenaSafeMap<int, ValueRange*>(
+              ScopedArenaSafeMap<int, ValueRange*>(
                   std::less<int>(),
-                  graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
-              graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
-        first_index_bounds_check_map_(
-            std::less<int>(),
-            graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
-        early_exit_loop_(
-            std::less<uint32_t>(),
-            graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
-        taken_test_loop_(
-            std::less<uint32_t>(),
-            graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
-        finite_loop_(graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+                  allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+              allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+        first_index_bounds_check_map_(std::less<int>(),
+                                      allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+        early_exit_loop_(std::less<uint32_t>(),
+                         allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+        taken_test_loop_(std::less<uint32_t>(),
+                         allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+        finite_loop_(allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
         has_dom_based_dynamic_bce_(false),
         initial_block_size_(graph->GetBlocks().size()),
         side_effects_(side_effects),
@@ -569,7 +568,7 @@
 
  private:
   // Return the map of proven value ranges at the beginning of a basic block.
-  ArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
+  ScopedArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
     if (IsAddedBlock(basic_block)) {
       // Added blocks don't keep value ranges.
       return nullptr;
@@ -580,7 +579,7 @@
   // Traverse up the dominator tree to look for value range info.
   ValueRange* LookupValueRange(HInstruction* instruction, HBasicBlock* basic_block) {
     while (basic_block != nullptr) {
-      ArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
+      ScopedArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
       if (map != nullptr) {
         if (map->find(instruction->GetId()) != map->end()) {
           return map->Get(instruction->GetId());
@@ -668,8 +667,8 @@
       if (successor != nullptr) {
         bool overflow;
         bool underflow;
-        ValueRange* new_left_range = new (GetGraph()->GetAllocator()) ValueRange(
-            GetGraph()->GetAllocator(),
+        ValueRange* new_left_range = new (&allocator_) ValueRange(
+            &allocator_,
             left_range->GetBound(),
             right_range->GetBound().Add(left_compensation, &overflow, &underflow));
         if (!overflow && !underflow) {
@@ -677,8 +676,8 @@
                                    new_left_range);
         }
 
-        ValueRange* new_right_range = new (GetGraph()->GetAllocator()) ValueRange(
-            GetGraph()->GetAllocator(),
+        ValueRange* new_right_range = new (&allocator_) ValueRange(
+            &allocator_,
             left_range->GetBound().Add(right_compensation, &overflow, &underflow),
             right_range->GetBound());
         if (!overflow && !underflow) {
@@ -750,8 +749,8 @@
         if (overflow || underflow) {
           return;
         }
-        ValueRange* new_range = new (GetGraph()->GetAllocator())
-            ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
+        ValueRange* new_range = new (&allocator_) ValueRange(
+            &allocator_, ValueBound::Min(), new_upper);
         ApplyRangeFromComparison(left, block, true_successor, new_range);
       }
 
@@ -762,8 +761,8 @@
         if (overflow || underflow) {
           return;
         }
-        ValueRange* new_range = new (GetGraph()->GetAllocator())
-            ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
+        ValueRange* new_range = new (&allocator_) ValueRange(
+            &allocator_, new_lower, ValueBound::Max());
         ApplyRangeFromComparison(left, block, false_successor, new_range);
       }
     } else if (cond == kCondGT || cond == kCondGE) {
@@ -774,8 +773,8 @@
         if (overflow || underflow) {
           return;
         }
-        ValueRange* new_range = new (GetGraph()->GetAllocator())
-            ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
+        ValueRange* new_range = new (&allocator_) ValueRange(
+            &allocator_, new_lower, ValueBound::Max());
         ApplyRangeFromComparison(left, block, true_successor, new_range);
       }
 
@@ -785,8 +784,8 @@
         if (overflow || underflow) {
           return;
         }
-        ValueRange* new_range = new (GetGraph()->GetAllocator())
-            ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
+        ValueRange* new_range = new (&allocator_) ValueRange(
+            &allocator_, ValueBound::Min(), new_upper);
         ApplyRangeFromComparison(left, block, false_successor, new_range);
       }
     } else if (cond == kCondNE || cond == kCondEQ) {
@@ -795,8 +794,7 @@
         //   length == [c,d] yields [c, d] along true
         //   length != [c,d] yields [c, d] along false
         if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
-          ValueRange* new_range = new (GetGraph()->GetAllocator())
-              ValueRange(GetGraph()->GetAllocator(), lower, upper);
+          ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
           ApplyRangeFromComparison(
               left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
         }
@@ -804,8 +802,8 @@
         //   length == 0 yields [1, max] along false
         //   length != 0 yields [1, max] along true
         if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
-          ValueRange* new_range = new (GetGraph()->GetAllocator())
-              ValueRange(GetGraph()->GetAllocator(), ValueBound(nullptr, 1), ValueBound::Max());
+          ValueRange* new_range = new (&allocator_) ValueRange(
+              &allocator_, ValueBound(nullptr, 1), ValueBound::Max());
           ApplyRangeFromComparison(
               left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
         }
@@ -826,7 +824,7 @@
       // Non-constant index.
       ValueBound lower = ValueBound(nullptr, 0);        // constant 0
       ValueBound upper = ValueBound(array_length, -1);  // array_length - 1
-      ValueRange array_range(GetGraph()->GetAllocator(), lower, upper);
+      ValueRange array_range(&allocator_, lower, upper);
       // Try index range obtained by dominator-based analysis.
       ValueRange* index_range = LookupValueRange(index, block);
       if (index_range != nullptr && index_range->FitsIn(&array_range)) {
@@ -875,8 +873,7 @@
       } else {
         ValueBound lower = ValueBound(nullptr, constant + 1);
         ValueBound upper = ValueBound::Max();
-        ValueRange* range = new (GetGraph()->GetAllocator())
-            ValueRange(GetGraph()->GetAllocator(), lower, upper);
+        ValueRange* range = new (&allocator_) ValueRange(&allocator_, lower, upper);
         AssignRange(block, array_length, range);
       }
     }
@@ -938,8 +935,8 @@
           ValueRange* range = nullptr;
           if (increment == 0) {
             // Add constant 0. It's really a fixed value.
-            range = new (GetGraph()->GetAllocator()) ValueRange(
-                GetGraph()->GetAllocator(),
+            range = new (&allocator_) ValueRange(
+                &allocator_,
                 ValueBound(initial_value, 0),
                 ValueBound(initial_value, 0));
           } else {
@@ -959,8 +956,8 @@
                 bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
               }
             }
-            range = new (GetGraph()->GetAllocator()) MonotonicValueRange(
-                GetGraph()->GetAllocator(),
+            range = new (&allocator_) MonotonicValueRange(
+                &allocator_,
                 phi,
                 initial_value,
                 increment,
@@ -1039,8 +1036,8 @@
                 !ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
               if ((c0 - c1) <= 0) {
                 // array.length + (c0 - c1) won't overflow/underflow.
-                ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
-                    GetGraph()->GetAllocator(),
+                ValueRange* range = new (&allocator_) ValueRange(
+                    &allocator_,
                     ValueBound(nullptr, right_const - upper.GetConstant()),
                     ValueBound(array_length, right_const - lower.GetConstant()));
                 AssignRange(sub->GetBlock(), sub, range);
@@ -1087,8 +1084,8 @@
         // than array_length.
         return;
       }
-      ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
-          GetGraph()->GetAllocator(),
+      ValueRange* range = new (&allocator_) ValueRange(
+          &allocator_,
           ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
           ValueBound(left, 0));
       AssignRange(instruction->GetBlock(), instruction, range);
@@ -1113,8 +1110,8 @@
       if (constant > 0) {
         // constant serves as a mask so any number masked with it
         // gets a [0, constant] value range.
-        ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
-            GetGraph()->GetAllocator(),
+        ValueRange* range = new (&allocator_) ValueRange(
+            &allocator_,
             ValueBound(nullptr, 0),
             ValueBound(nullptr, constant));
         AssignRange(instruction->GetBlock(), instruction, range);
@@ -1139,8 +1136,8 @@
       //   array[i % 10];  // index value range [0, 9]
       //   array[i % -10]; // index value range [0, 9]
       // }
-      ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
-          GetGraph()->GetAllocator(),
+      ValueRange* right_range = new (&allocator_) ValueRange(
+          &allocator_,
           ValueBound(nullptr, 1 - right_const),
           ValueBound(nullptr, right_const - 1));
 
@@ -1169,8 +1166,8 @@
     if (right->IsArrayLength()) {
       ValueBound lower = ValueBound::Min();  // ideally, lower should be '1-array_length'.
       ValueBound upper = ValueBound(right, -1);  // array_length - 1
-      ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
-          GetGraph()->GetAllocator(),
+      ValueRange* right_range = new (&allocator_) ValueRange(
+          &allocator_,
           lower,
           upper);
       ValueRange* left_range = LookupValueRange(left, instruction->GetBlock());
@@ -1195,8 +1192,7 @@
         // which isn't available as an instruction yet. new_array will
         // be treated the same as new_array.length when it's used in a ValueBound.
         ValueBound upper = ValueBound(new_array, -right_const);
-        ValueRange* range = new (GetGraph()->GetAllocator())
-            ValueRange(GetGraph()->GetAllocator(), lower, upper);
+        ValueRange* range = new (&allocator_) ValueRange(&allocator_, lower, upper);
         ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
         if (existing_range != nullptr) {
           range = existing_range->Narrow(range);
@@ -1291,10 +1287,10 @@
       HInstruction* base = value.GetInstruction();
       int32_t min_c = base == nullptr ? 0 : value.GetConstant();
       int32_t max_c = value.GetConstant();
-      ArenaVector<HBoundsCheck*> candidates(
-          GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
-      ArenaVector<HBoundsCheck*> standby(
-          GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
+      ScopedArenaVector<HBoundsCheck*> candidates(
+          allocator_.Adapter(kArenaAllocBoundsCheckElimination));
+      ScopedArenaVector<HBoundsCheck*> standby(
+          allocator_.Adapter(kArenaAllocBoundsCheckElimination));
       for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
         // Another bounds check in same or dominated block?
         HInstruction* user = use.GetUser();
@@ -1378,7 +1374,7 @@
           v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
         DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
         DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
-        ValueRange index_range(GetGraph()->GetAllocator(),
+        ValueRange index_range(&allocator_,
                                ValueBound(v1.instruction, v1.b_constant),
                                ValueBound(v2.instruction, v2.b_constant));
         // If analysis reveals a certain OOB, disable dynamic BCE. Otherwise,
@@ -1410,10 +1406,10 @@
     HInstruction* base = value.GetInstruction();
     int32_t min_c = base == nullptr ? 0 : value.GetConstant();
     int32_t max_c = value.GetConstant();
-    ArenaVector<HBoundsCheck*> candidates(
-        GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
-    ArenaVector<HBoundsCheck*> standby(
-        GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
+    ScopedArenaVector<HBoundsCheck*> candidates(
+        allocator_.Adapter(kArenaAllocBoundsCheckElimination));
+    ScopedArenaVector<HBoundsCheck*> standby(
+        allocator_.Adapter(kArenaAllocBoundsCheckElimination));
     for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
       HInstruction* user = use.GetUser();
       if (user->IsBoundsCheck() && loop == user->GetBlock()->GetLoopInformation()) {
@@ -1882,21 +1878,24 @@
     instruction->GetBlock()->RemoveInstruction(instruction);
   }
 
+  // Pass-local arena used for all transient allocations in this visitor.
+  ScopedArenaAllocator allocator_;
+
   // A set of maps, one per basic block, from instruction to range.
-  ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;
+  ScopedArenaVector<ScopedArenaSafeMap<int, ValueRange*>> maps_;
 
   // Map an HArrayLength instruction's id to the first HBoundsCheck instruction
   // in a block that checks an index against that HArrayLength.
-  ArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
+  ScopedArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
 
   // Early-exit loop bookkeeping.
-  ArenaSafeMap<uint32_t, bool> early_exit_loop_;
+  ScopedArenaSafeMap<uint32_t, bool> early_exit_loop_;
 
   // Taken-test loop bookkeeping.
-  ArenaSafeMap<uint32_t, HBasicBlock*> taken_test_loop_;
+  ScopedArenaSafeMap<uint32_t, HBasicBlock*> taken_test_loop_;
 
   // Finite loop bookkeeping.
-  ArenaSet<uint32_t> finite_loop_;
+  ScopedArenaSet<uint32_t> finite_loop_;
 
   // Flag that denotes whether dominator-based dynamic elimination has occurred.
   bool has_dom_based_dynamic_bce_;
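
For reference, `new (&allocator_) ValueRange(...)` compiles because ValueRange derives
from ArenaObject<kArenaAllocBoundsCheckElimination>, which is assumed here to provide
placement operator new overloads for both allocator types. A simplified sketch of that
base-class pattern (the real one lives in base/arena_object.h):

    template <ArenaAllocKind kAllocKind>
    class ArenaObjectSketch {
     public:
      // Placement forms for the graph-lifetime arena and the pass-local arena.
      static void* operator new(size_t size, ArenaAllocator* allocator) {
        return allocator->Alloc(size, kAllocKind);
      }
      static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
        return allocator->Alloc(size, kAllocKind);
      }
      static void operator delete(void*, size_t) {}  // arena memory is never freed per-object
    };

    // With such a base class, both forms used in this file are valid:
    //   new (graph->GetAllocator()) ValueRange(...);  // graph-lifetime arena
    //   new (&allocator_) ValueRange(...);            // pass-local scoped arena
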
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 76350a6..4ed1612 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -20,12 +20,16 @@
 #include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/logging.h"
+#include "block_builder.h"
 #include "data_type-inl.h"
 #include "dex/verified_method.h"
 #include "driver/compiler_options.h"
+#include "instruction_builder.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
 #include "nodes.h"
+#include "optimizing_compiler_stats.h"
+#include "ssa_builder.h"
 #include "thread.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
 
@@ -43,27 +47,13 @@
       dex_file_(&graph->GetDexFile()),
       code_item_(*dex_compilation_unit->GetCodeItem()),
       dex_compilation_unit_(dex_compilation_unit),
+      outer_compilation_unit_(outer_compilation_unit),
       compiler_driver_(driver),
+      code_generator_(code_generator),
       compilation_stats_(compiler_stats),
-      block_builder_(graph, dex_file_, code_item_),
-      ssa_builder_(graph,
-                   dex_compilation_unit->GetClassLoader(),
-                   dex_compilation_unit->GetDexCache(),
-                   handles),
-      instruction_builder_(graph,
-                           &block_builder_,
-                           &ssa_builder_,
-                           dex_file_,
-                           code_item_,
-                           DataType::FromShorty(dex_compilation_unit_->GetShorty()[0]),
-                           dex_compilation_unit,
-                           outer_compilation_unit,
-                           driver,
-                           code_generator,
-                           interpreter_metadata,
-                           compiler_stats,
-                           dex_compilation_unit->GetDexCache(),
-                           handles) {}
+      interpreter_metadata_(interpreter_metadata),
+      handles_(handles),
+      return_type_(DataType::FromShorty(dex_compilation_unit_->GetShorty()[0])) {}
 
 bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
   if (compiler_driver_ == nullptr) {
@@ -108,15 +98,38 @@
   graph_->SetMaximumNumberOfOutVRegs(code_item_.outs_size_);
   graph_->SetHasTryCatch(code_item_.tries_size_ != 0);
 
+  // Use ScopedArenaAllocator for all local allocations.
+  ScopedArenaAllocator local_allocator(graph_->GetArenaStack());
+  HBasicBlockBuilder block_builder(graph_, dex_file_, code_item_, &local_allocator);
+  SsaBuilder ssa_builder(graph_,
+                         dex_compilation_unit_->GetClassLoader(),
+                         dex_compilation_unit_->GetDexCache(),
+                         handles_,
+                         &local_allocator);
+  HInstructionBuilder instruction_builder(graph_,
+                                          &block_builder,
+                                          &ssa_builder,
+                                          dex_file_,
+                                          code_item_,
+                                          return_type_,
+                                          dex_compilation_unit_,
+                                          outer_compilation_unit_,
+                                          compiler_driver_,
+                                          code_generator_,
+                                          interpreter_metadata_,
+                                          compilation_stats_,
+                                          handles_,
+                                          &local_allocator);
+
   // 1) Create basic blocks and link them together. Basic blocks are left
   //    unpopulated with the exception of synthetic blocks, e.g. HTryBoundaries.
-  if (!block_builder_.Build()) {
+  if (!block_builder.Build()) {
     return kAnalysisInvalidBytecode;
   }
 
   // 2) Decide whether to skip this method based on its code size and number
   //    of branches.
-  if (SkipCompilation(block_builder_.GetNumberOfBranches())) {
+  if (SkipCompilation(block_builder.GetNumberOfBranches())) {
     return kAnalysisSkipped;
   }
 
@@ -127,12 +140,12 @@
   }
 
   // 4) Populate basic blocks with instructions.
-  if (!instruction_builder_.Build()) {
+  if (!instruction_builder.Build()) {
     return kAnalysisInvalidBytecode;
   }
 
   // 5) Type the graph and eliminate dead/redundant phis.
-  return ssa_builder_.BuildSsa();
+  return ssa_builder.BuildSsa();
 }
 
 }  // namespace art
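
BuildGraph() now opens a single ScopedArenaAllocator frame covering every transient
allocation the three builders make; returning from the function pops the frame and
returns the memory to the ArenaStack. A small sketch of the assumed frame semantics
(based on base/scoped_arena_allocator.h; frames must be destroyed in LIFO order):

    void NestingSketch(HGraph* graph) {
      ScopedArenaAllocator outer(graph->GetArenaStack());
      void* a = outer.Alloc(64u, kArenaAllocGraphBuilder);
      {
        ScopedArenaAllocator inner(graph->GetArenaStack());  // nested frame
        void* b = inner.Alloc(128u, kArenaAllocGraphBuilder);
        static_cast<void>(b);
      }  // destroying `inner` reclaims `b` immediately
      static_cast<void>(a);
    }  // destroying `outer` reclaims `a`
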
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 6c5985a..5a860f1 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -17,21 +17,17 @@
 #ifndef ART_COMPILER_OPTIMIZING_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_BUILDER_H_
 
-#include "base/arena_containers.h"
 #include "base/arena_object.h"
-#include "block_builder.h"
 #include "dex_file-inl.h"
 #include "dex_file.h"
 #include "driver/compiler_driver.h"
 #include "driver/dex_compilation_unit.h"
-#include "instruction_builder.h"
 #include "nodes.h"
-#include "optimizing_compiler_stats.h"
-#include "ssa_builder.h"
 
 namespace art {
 
 class CodeGenerator;
+class OptimizingCompilerStats;
 
 class HGraphBuilder : public ValueObject {
  public:
@@ -46,34 +42,21 @@
 
   // Only for unit testing.
   HGraphBuilder(HGraph* graph,
+                const DexCompilationUnit* dex_compilation_unit,
                 const DexFile::CodeItem& code_item,
                 VariableSizedHandleScope* handles,
                 DataType::Type return_type = DataType::Type::kInt32)
       : graph_(graph),
-        dex_file_(nullptr),
+        dex_file_(dex_compilation_unit->GetDexFile()),
         code_item_(code_item),
-        dex_compilation_unit_(nullptr),
+        dex_compilation_unit_(dex_compilation_unit),
+        outer_compilation_unit_(nullptr),
         compiler_driver_(nullptr),
+        code_generator_(nullptr),
         compilation_stats_(nullptr),
-        block_builder_(graph, nullptr, code_item),
-        ssa_builder_(graph,
-                     handles->NewHandle<mirror::ClassLoader>(nullptr),
-                     handles->NewHandle<mirror::DexCache>(nullptr),
-                     handles),
-        instruction_builder_(graph,
-                             &block_builder_,
-                             &ssa_builder_,
-                             /* dex_file */ nullptr,
-                             code_item_,
-                             return_type,
-                             /* dex_compilation_unit */ nullptr,
-                             /* outer_compilation_unit */ nullptr,
-                             /* compiler_driver */ nullptr,
-                             /* code_generator */ nullptr,
-                             /* interpreter_metadata */ nullptr,
-                             /* compiler_stats */ nullptr,
-                             handles->NewHandle<mirror::DexCache>(nullptr),
-                             handles) {}
+        interpreter_metadata_(nullptr),
+        handles_(handles),
+        return_type_(return_type) {}
 
   GraphAnalysisResult BuildGraph();
 
@@ -90,13 +73,16 @@
   // it can be an inlined method.
   const DexCompilationUnit* const dex_compilation_unit_;
 
+  // The compilation unit of the enclosing method being compiled.
+  const DexCompilationUnit* const outer_compilation_unit_;
+
   CompilerDriver* const compiler_driver_;
+  CodeGenerator* const code_generator_;
 
-  OptimizingCompilerStats* compilation_stats_;
-
-  HBasicBlockBuilder block_builder_;
-  SsaBuilder ssa_builder_;
-  HInstructionBuilder instruction_builder_;
+  OptimizingCompilerStats* const compilation_stats_;
+  const uint8_t* const interpreter_metadata_;
+  VariableSizedHandleScope* const handles_;
+  const DataType::Type return_type_;
 
   DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
 };
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index dd8e3d2..84f0182 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -935,7 +935,7 @@
       if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
         stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
       } else {
-        Location location = current_phi->GetLiveInterval()->ToLocation();
+        Location location = current_phi->GetLocations()->Out();
         switch (location.GetKind()) {
           case Location::kStackSlot: {
             stack_map_stream_.AddDexRegisterEntry(
@@ -1202,22 +1202,21 @@
   }
 }
 
-void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
+void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
+                                                          HParallelMove* spills) const {
   LocationSummary* locations = suspend_check->GetLocations();
   HBasicBlock* block = suspend_check->GetBlock();
   DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
   DCHECK(block->IsLoopHeader());
+  DCHECK(block->GetFirstInstruction() == spills);
 
-  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
-    HInstruction* current = it.Current();
-    LiveInterval* interval = current->GetLiveInterval();
-    // We only need to clear bits of loop phis containing objects and allocated in register.
-    // Loop phis allocated on stack already have the object in the stack.
-    if (current->GetType() == DataType::Type::kReference
-        && interval->HasRegister()
-        && interval->HasSpillSlot()) {
-      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
-    }
+  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
+    Location dest = spills->MoveOperandsAt(i)->GetDestination();
+    // All parallel moves in loop headers are spills.
+    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
+    // Clear the stack bit marking a reference. There is no need to check whether the
+    // spill is actually a reference spill; clearing bits that are already zero is harmless.
+    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
   }
 }
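
For reference, the rewritten loop relies on a simple slot-to-bit mapping: a spill to
byte offset `off` covers vreg slot `off / kVRegSize`, and the stack map keeps one
reference bit per slot, so unconditionally clearing the bit of a non-reference spill
merely clears a zero bit. A hypothetical helper, for illustration only, that names
the mapping:

    // Not in this patch: the stack-map bit cleared for a given spill destination.
    static size_t StackMapBitForSpill(const Location& dest) {
      // Spills always target stack slots (plain, double, or SIMD).
      DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot());
      return static_cast<size_t>(dest.GetStackIndex()) / kVRegSize;
    }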
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2c3cf26..2904b71 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -380,7 +380,8 @@
   // for the suspend check at the back edge (instead of where the suspend check
   // is, which is the loop entry). At this point, the spill slots for the phis
   // have not been written to.
-  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;
+  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
+                                             HParallelMove* spills) const;
 
   bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
   bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9be9117..e6e6984 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2209,7 +2209,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
@@ -3560,7 +3559,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -5420,6 +5418,13 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
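
The same guard is added verbatim to VisitParallelMove in all six backends touched
below (arm64, arm_vixl, mips, mips64, x86, x86_64). A hypothetical shared helper,
shown only to make the control flow explicit (the patch itself keeps the code
per-backend):

    // Not in this patch: a parallel move immediately followed by a suspend check
    // inside a loop is the loop-header phi spill set, so its destinations can be
    // cleared from the suspend check's stack map before the moves are emitted.
    static void MaybeClearPhiSpillBits(CodeGenerator* codegen, HParallelMove* instruction) {
      if (instruction->GetNext()->IsSuspendCheck() &&
          instruction->GetBlock()->GetLoopInformation() != nullptr) {
        HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
        codegen->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
      }
    }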
 
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d7137a3..251f390 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2858,7 +2858,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -6741,6 +6740,13 @@
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
@@ -6776,7 +6782,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 7ea7b9c..e58f43e 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3967,7 +3967,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -8359,6 +8358,13 @@
 }
 
 void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index fad0fe7..11120cf 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3488,7 +3488,6 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -6490,6 +6489,13 @@
 }
 
 void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d8a47fa..39a07b8 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5680,6 +5680,13 @@
 }
 
 void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
@@ -5717,7 +5724,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b6aa110..c8032c2 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5125,6 +5125,13 @@
 }
 
 void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
@@ -5162,7 +5169,6 @@
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 5117e07..3cc7b0e 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -18,13 +18,18 @@
 
 #include "base/array_ref.h"
 #include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "base/stl_util.h"
 #include "ssa_phi_elimination.h"
 
 namespace art {
 
 static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
-  ArenaVector<HBasicBlock*> worklist(graph->GetAllocator()->Adapter(kArenaAllocDCE));
+  // Use a function-local scoped arena for the worklist memory.
+  ScopedArenaAllocator allocator(graph->GetArenaStack());
+
+  ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocDCE));
   constexpr size_t kDefaultWorklistSize = 8;
   worklist.reserve(kDefaultWorklistSize);
   visited->SetBit(graph->GetEntryBlock()->GetBlockId());
@@ -305,9 +310,12 @@
 }
 
 bool HDeadCodeElimination::RemoveDeadBlocks() {
+  // Use a function-local scoped arena for the liveness bit vector.
+  ScopedArenaAllocator allocator(graph_->GetArenaStack());
+
   // Classify blocks as reachable/unreachable.
-  ArenaAllocator* allocator = graph_->GetAllocator();
-  ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
+  ArenaBitVector live_blocks(&allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
+  live_blocks.ClearAllBits();
 
   MarkReachableBlocks(graph_, &live_blocks);
   bool removed_one_or_more_blocks = false;
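
These hunks show a rule the patch applies throughout: memory obtained from a
ScopedArenaAllocator is not assumed to be zero-initialized (unlike the pool-backed
ArenaAllocator memory relied on before), so bit vectors get an explicit ClearAllBits()
and the GVN bucket array below gets a std::fill_n. A minimal sketch under that
assumption:

    bool BitVectorSketch(HGraph* graph, size_t num_bits) {
      ScopedArenaAllocator allocator(graph->GetArenaStack());
      ArenaBitVector bits(&allocator, num_bits, /* expandable */ false, kArenaAllocDCE);
      bits.ClearAllBits();  // required: scoped-arena memory may contain stale data
      bits.SetBit(0u);
      return bits.IsBitSet(0u);
    }
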
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 1c7d1a0..b1ac027 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -22,8 +22,9 @@
 
 #include "android-base/stringprintf.h"
 
-#include "base/arena_containers.h"
 #include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 
 namespace art {
 
@@ -47,10 +48,13 @@
 void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
   current_block_ = block;
 
+  // Use a function-local scoped arena for transient storage.
+  ScopedArenaAllocator allocator(GetGraph()->GetArenaStack());
+
   // Check consistency with respect to predecessors of `block`.
   // Note: Counting duplicates with a sorted vector uses up to 6x less memory
   // than ArenaSafeMap<HBasicBlock*, size_t> and also allows storage reuse.
-  ArenaVector<HBasicBlock*>& sorted_predecessors = blocks_storage_;
+  ScopedArenaVector<HBasicBlock*> sorted_predecessors(allocator.Adapter(kArenaAllocGraphChecker));
   sorted_predecessors.assign(block->GetPredecessors().begin(), block->GetPredecessors().end());
   std::sort(sorted_predecessors.begin(), sorted_predecessors.end());
   for (auto it = sorted_predecessors.begin(), end = sorted_predecessors.end(); it != end; ) {
@@ -73,7 +77,7 @@
   // Check consistency with respect to successors of `block`.
   // Note: Counting duplicates with a sorted vector uses up to 6x less memory
   // than ArenaSafeMap<HBasicBlock*, size_t> and also allows storage reuse.
-  ArenaVector<HBasicBlock*>& sorted_successors = blocks_storage_;
+  ScopedArenaVector<HBasicBlock*> sorted_successors(allocator.Adapter(kArenaAllocGraphChecker));
   sorted_successors.assign(block->GetSuccessors().begin(), block->GetSuccessors().end());
   std::sort(sorted_successors.begin(), sorted_successors.end());
   for (auto it = sorted_successors.begin(), end = sorted_successors.end(); it != end; ) {
@@ -829,10 +833,14 @@
               phi->GetRegNumber(),
               type_str.str().c_str()));
         } else {
+          // Use a local scoped arena for the visited bit vector.
+          ScopedArenaAllocator allocator(GetGraph()->GetArenaStack());
           // If we get here, make sure we allocate all the necessary storage at once
           // because the BitVector reallocation strategy has very bad worst-case behavior.
-          ArenaBitVector& visited = visited_storage_;
-          visited.SetBit(GetGraph()->GetCurrentInstructionId());
+          ArenaBitVector visited(&allocator,
+                                 GetGraph()->GetCurrentInstructionId(),
+                                 /* expandable */ false,
+                                 kArenaAllocGraphChecker);
           visited.ClearAllBits();
           if (!IsConstantEquivalent(phi, other_phi, &visited)) {
             AddError(StringPrintf("Two phis (%d and %d) found for VReg %d but they "
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 6af7b42..0f0b49d 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -17,10 +17,13 @@
 #ifndef ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
 #define ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
 
-#include "nodes.h"
-
 #include <ostream>
 
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "nodes.h"
+
 namespace art {
 
 // A control-flow graph visitor performing various checks.
@@ -30,12 +33,10 @@
     : HGraphDelegateVisitor(graph),
       errors_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
       dump_prefix_(dump_prefix),
-      seen_ids_(graph->GetAllocator(),
-                graph->GetCurrentInstructionId(),
-                false,
-                kArenaAllocGraphChecker),
-      blocks_storage_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
-      visited_storage_(graph->GetAllocator(), 0u, true, kArenaAllocGraphChecker) {}
+      allocator_(graph->GetArenaStack()),
+      seen_ids_(&allocator_, graph->GetCurrentInstructionId(), false, kArenaAllocGraphChecker) {
+    seen_ids_.ClearAllBits();
+  }
 
   // Check the whole graph (in reverse post-order).
   void Run() {
@@ -104,12 +105,9 @@
  private:
   // String displayed before dumped errors.
   const char* const dump_prefix_;
+  ScopedArenaAllocator allocator_;
   ArenaBitVector seen_ids_;
 
-  // To reduce the total arena memory allocation, we reuse the same storage.
-  ArenaVector<HBasicBlock*> blocks_storage_;
-  ArenaBitVector visited_storage_;
-
   DISALLOW_COPY_AND_ASSIGN(GraphChecker);
 };
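
The removed blocks_storage_ and visited_storage_ members existed only to reuse their
allocations across checks; with a ScopedArenaAllocator created per visit, the
ArenaStack hands back the same underlying memory on the next call anyway (an
assumption based on its stack discipline), so the members become unnecessary. A
sketch of the per-call pattern using names from this diff:

    void CheckPredecessorsSketch(HGraph* graph, HBasicBlock* block) {
      ScopedArenaAllocator allocator(graph->GetArenaStack());
      ScopedArenaVector<HBasicBlock*> sorted(allocator.Adapter(kArenaAllocGraphChecker));
      sorted.assign(block->GetPredecessors().begin(), block->GetPredecessors().end());
      std::sort(sorted.begin(), sorted.end());
    }  // storage is returned to the arena stack here and reused by the next call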
 
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index eccdccf..3851877 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -21,6 +21,7 @@
 #include <cctype>
 #include <sstream>
 
+#include "art_method.h"
 #include "bounds_check_elimination.h"
 #include "builder.h"
 #include "code_generator.h"
@@ -33,6 +34,7 @@
 #include "optimization.h"
 #include "reference_type_propagation.h"
 #include "register_allocator_linear_scan.h"
+#include "scoped_thread_state_change-inl.h"
 #include "ssa_liveness_analysis.h"
 #include "utils/assembler.h"
 #include "utils/intrusive_forward_list.h"
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index c09e5df..813772e 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -17,7 +17,8 @@
 #include "gvn.h"
 
 #include "base/arena_bit_vector.h"
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "base/bit_vector-inl.h"
 #include "side_effects_analysis.h"
 #include "utils.h"
@@ -36,7 +37,7 @@
 class ValueSet : public ArenaObject<kArenaAllocGvn> {
  public:
   // Constructs an empty ValueSet which owns all its buckets.
-  explicit ValueSet(ArenaAllocator* allocator)
+  explicit ValueSet(ScopedArenaAllocator* allocator)
       : allocator_(allocator),
         num_buckets_(kMinimumNumberOfBuckets),
         buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
@@ -44,12 +45,12 @@
         num_entries_(0u) {
-    // ArenaAllocator returns zeroed memory, so no need to set buckets to null.
     DCHECK(IsPowerOfTwo(num_buckets_));
+    std::fill_n(buckets_, num_buckets_, nullptr);
     buckets_owned_.SetInitialBits(num_buckets_);
   }
 
   // Copy constructor. Depending on the load factor, it will either make a deep
   // copy (all buckets owned) or a shallow one (buckets pointing to the parent).
-  ValueSet(ArenaAllocator* allocator, const ValueSet& other)
+  ValueSet(ScopedArenaAllocator* allocator, const ValueSet& other)
       : allocator_(allocator),
         num_buckets_(other.IdealBucketCount()),
         buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
@@ -58,7 +60,5 @@
-    // ArenaAllocator returns zeroed memory, so entries of buckets_ and
-    // buckets_owned_ are initialized to null and false, respectively.
     DCHECK(IsPowerOfTwo(num_buckets_));
-    PopulateFromInternal(other, /* is_dirty */ false);
+    PopulateFromInternal(other);
   }
 
   // Erases all values in this set and populates it with values from `other`.
@@ -66,7 +68,7 @@
     if (this == &other) {
       return;
     }
-    PopulateFromInternal(other, /* is_dirty */ true);
+    PopulateFromInternal(other);
   }
 
   // Returns true if `this` has enough buckets so that if `other` is copied into
@@ -159,33 +161,19 @@
 
  private:
   // Copies all entries from `other` to `this`.
-  // If `is_dirty` is set to true, existing data will be wiped first. It is
-  // assumed that `buckets_` and `buckets_owned_` are zero-allocated otherwise.
-  void PopulateFromInternal(const ValueSet& other, bool is_dirty) {
+  void PopulateFromInternal(const ValueSet& other) {
     DCHECK_NE(this, &other);
     DCHECK_GE(num_buckets_, other.IdealBucketCount());
 
     if (num_buckets_ == other.num_buckets_) {
       // Hash table remains the same size. We copy the bucket pointers and leave
       // all buckets_owned_ bits false.
-      if (is_dirty) {
-        buckets_owned_.ClearAllBits();
-      } else {
-        DCHECK_EQ(buckets_owned_.NumSetBits(), 0u);
-      }
+      buckets_owned_.ClearAllBits();
       memcpy(buckets_, other.buckets_, num_buckets_ * sizeof(Node*));
     } else {
       // Hash table size changes. We copy and rehash all entries, and set all
       // buckets_owned_ bits to true.
-      if (is_dirty) {
-        memset(buckets_, 0, num_buckets_ * sizeof(Node*));
-      } else {
-        if (kIsDebugBuild) {
-          for (size_t i = 0; i < num_buckets_; ++i) {
-            DCHECK(buckets_[i] == nullptr) << i;
-          }
-        }
-      }
+      std::fill_n(buckets_, num_buckets_, nullptr);
       for (size_t i = 0; i < other.num_buckets_; ++i) {
         for (Node* node = other.buckets_[i]; node != nullptr; node = node->GetNext()) {
           size_t new_index = BucketIndex(node->GetHashCode());
@@ -208,7 +196,7 @@
     Node* GetNext() const { return next_; }
     void SetNext(Node* node) { next_ = node; }
 
-    Node* Dup(ArenaAllocator* allocator, Node* new_next = nullptr) {
+    Node* Dup(ScopedArenaAllocator* allocator, Node* new_next = nullptr) {
       return new (allocator) Node(instruction_, hash_code_, new_next);
     }
 
@@ -326,7 +314,7 @@
     return hash_code & (num_buckets_ - 1);
   }
 
-  ArenaAllocator* const allocator_;
+  ScopedArenaAllocator* const allocator_;
 
   // The internal bucket implementation of the set.
   size_t const num_buckets_;
@@ -350,15 +338,16 @@
  */
 class GlobalValueNumberer : public ValueObject {
  public:
-  GlobalValueNumberer(ArenaAllocator* allocator,
-                      HGraph* graph,
+  GlobalValueNumberer(HGraph* graph,
                       const SideEffectsAnalysis& side_effects)
       : graph_(graph),
-        allocator_(allocator),
+        allocator_(graph->GetArenaStack()),
         side_effects_(side_effects),
-        sets_(graph->GetBlocks().size(), nullptr, allocator->Adapter(kArenaAllocGvn)),
+        sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
         visited_blocks_(
-            allocator, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {}
+            &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {
+    visited_blocks_.ClearAllBits();
+  }
 
   void Run();
 
@@ -368,7 +357,7 @@
   void VisitBasicBlock(HBasicBlock* block);
 
   HGraph* graph_;
-  ArenaAllocator* const allocator_;
+  ScopedArenaAllocator allocator_;
   const SideEffectsAnalysis& side_effects_;
 
   ValueSet* FindSetFor(HBasicBlock* block) const {
@@ -396,7 +385,7 @@
   // ValueSet for blocks. Initially null, but for an individual block they
   // are allocated and populated by the dominator, and updated by all blocks
   // in the path from the dominator to the block.
-  ArenaVector<ValueSet*> sets_;
+  ScopedArenaVector<ValueSet*> sets_;
 
   // BitVector which serves as a fast-access map from block id to
   // visited/unvisited Boolean.
@@ -407,7 +396,7 @@
 
 void GlobalValueNumberer::Run() {
   DCHECK(side_effects_.HasRun());
-  sets_[graph_->GetEntryBlock()->GetBlockId()] = new (allocator_) ValueSet(allocator_);
+  sets_[graph_->GetEntryBlock()->GetBlockId()] = new (&allocator_) ValueSet(&allocator_);
 
   // Use the reverse post order to ensure the non back-edge predecessors of a block are
   // visited before the block itself.
@@ -424,7 +413,7 @@
     // The entry block should only accumulate constant instructions, and
     // the builder puts constants only in the entry block.
     // Therefore, there is no need to propagate the value set to the next block.
-    set = new (allocator_) ValueSet(allocator_);
+    set = new (&allocator_) ValueSet(&allocator_);
   } else {
     HBasicBlock* dominator = block->GetDominator();
     ValueSet* dominator_set = FindSetFor(dominator);
@@ -443,7 +432,7 @@
       if (recyclable == nullptr) {
         // No block with a suitable ValueSet found. Allocate a new one and
         // copy `dominator_set` into it.
-        set = new (allocator_) ValueSet(allocator_, *dominator_set);
+        set = new (&allocator_) ValueSet(&allocator_, *dominator_set);
       } else {
         // Block with a recyclable ValueSet found. Clone `dominator_set` into it.
         set = FindSetFor(recyclable);
@@ -566,7 +555,7 @@
 }
 
 void GVNOptimization::Run() {
-  GlobalValueNumberer gvn(graph_->GetAllocator(), graph_, side_effects_);
+  GlobalValueNumberer gvn(graph_, side_effects_);
   gvn.Run();
 }
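
Throughout this hunk, ValueSet instances are created with `new (&allocator_) ValueSet(&allocator_)`. That spelling relies on an arena-aware `operator new` overload (in ART, provided via ArenaObject); a minimal, self-contained model of the pattern, with hypothetical Arena/ValueSetLike types standing in for the real classes:

#include <cstddef>
#include <vector>

class Arena {
 public:
  void* Alloc(std::size_t bytes) {
    storage_.emplace_back(bytes);  // One chunk per request; a real arena bumps a pointer.
    return storage_.back().data();
  }

 private:
  std::vector<std::vector<char>> storage_;  // Everything is freed when the Arena dies.
};

class ValueSetLike {
 public:
  // Arena-placement operator new: storage comes from the arena, so there is
  // no matching delete; the arena reclaims all objects at once.
  static void* operator new(std::size_t size, Arena* arena) {
    return arena->Alloc(size);
  }

  explicit ValueSetLike(Arena* arena) : arena_(arena) {}

 private:
  Arena* const arena_;  // Kept so the set could allocate nodes from the same arena.
};

int main() {
  Arena arena;  // Pass-local lifetime, like GlobalValueNumberer's allocator_.
  ValueSetLike* set = new (&arena) ValueSetLike(&arena);
  (void)set;    // Never deleted: the arena owns the storage.
  return 0;
}   // `arena` is destroyed here, taking every ValueSetLike with it.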
 
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index b06d91c..902985e 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -17,15 +17,23 @@
 #include "instruction_builder.h"
 
 #include "art_method-inl.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "block_builder.h"
 #include "bytecode_utils.h"
 #include "class_linker.h"
 #include "data_type-inl.h"
 #include "dex_instruction-inl.h"
+#include "driver/compiler_driver-inl.h"
+#include "driver/dex_compilation_unit.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
+#include "mirror/dex_cache.h"
+#include "optimizing_compiler_stats.h"
 #include "quicken_info.h"
 #include "scoped_thread_state_change-inl.h"
 #include "sharpening.h"
+#include "ssa_builder.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -34,8 +42,8 @@
   return block_builder_->GetBlockAt(dex_pc);
 }
 
-inline ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
-  ArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
+inline ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
+  ScopedArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
   const size_t vregs = graph_->GetNumberOfVRegs();
   if (locals->size() == vregs) {
     return locals;
@@ -43,9 +51,9 @@
   return GetLocalsForWithAllocation(block, locals, vregs);
 }
 
-ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
+ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
     HBasicBlock* block,
-    ArenaVector<HInstruction*>* locals,
+    ScopedArenaVector<HInstruction*>* locals,
     const size_t vregs) {
   DCHECK_NE(locals->size(), vregs);
   locals->resize(vregs, nullptr);
@@ -73,7 +81,7 @@
 }
 
 inline HInstruction* HInstructionBuilder::ValueOfLocalAt(HBasicBlock* block, size_t local) {
-  ArenaVector<HInstruction*>* locals = GetLocalsFor(block);
+  ScopedArenaVector<HInstruction*>* locals = GetLocalsFor(block);
   return (*locals)[local];
 }
 
@@ -168,7 +176,7 @@
 void HInstructionBuilder::PropagateLocalsToCatchBlocks() {
   const HTryBoundary& try_entry = current_block_->GetTryCatchInformation()->GetTryEntry();
   for (HBasicBlock* catch_block : try_entry.GetExceptionHandlers()) {
-    ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
+    ScopedArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
     DCHECK_EQ(handler_locals->size(), current_locals_->size());
     for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
       HInstruction* handler_value = (*handler_locals)[vreg];
@@ -216,7 +224,7 @@
         graph_->GetArtMethod(),
         instruction->GetDexPc(),
         instruction);
-    environment->CopyFrom(*current_locals_);
+    environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals_));
     instruction->SetRawEnvironment(environment);
   }
 }
@@ -264,8 +272,9 @@
 }
 
 bool HInstructionBuilder::Build() {
-  locals_for_.resize(graph_->GetBlocks().size(),
-                     ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder)));
+  locals_for_.resize(
+      graph_->GetBlocks().size(),
+      ScopedArenaVector<HInstruction*>(local_allocator_->Adapter(kArenaAllocGraphBuilder)));
 
   // Find locations where we want to generate extra stackmaps for native debugging.
   // This allows us to generate the info only at interesting points (for example,
@@ -274,10 +283,7 @@
                                  compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
   ArenaBitVector* native_debug_info_locations = nullptr;
   if (native_debuggable) {
-    const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
-    native_debug_info_locations =
-        new (allocator_) ArenaBitVector (allocator_, num_instructions, false);
-    FindNativeDebugInfoLocations(native_debug_info_locations);
+    native_debug_info_locations = FindNativeDebugInfoLocations();
   }
 
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -358,7 +364,7 @@
   return true;
 }
 
-void HInstructionBuilder::FindNativeDebugInfoLocations(ArenaBitVector* locations) {
+ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
   // The callback gets called when the line number changes.
   // In other words, it marks the start of a new Java statement.
   struct Callback {
@@ -367,6 +373,12 @@
       return false;
     }
   };
+  const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
+  ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
+                                                     num_instructions,
+                                                     /* expandable */ false,
+                                                     kArenaAllocGraphBuilder);
+  locations->ClearAllBits();
   dex_file_->DecodeDebugPositionInfo(&code_item_, Callback::Position, locations);
   // Instruction-specific tweaks.
   IterationRange<DexInstructionIterator> instructions = code_item_.Instructions();
@@ -387,6 +399,7 @@
         break;
     }
   }
+  return locations;
 }
 
 HInstruction* HInstructionBuilder::LoadLocal(uint32_t reg_number, DataType::Type type) const {
@@ -439,8 +452,8 @@
 void HInstructionBuilder::InitializeParameters() {
   DCHECK(current_block_->IsEntryBlock());
 
-  // dex_compilation_unit_ is null only when unit testing.
-  if (dex_compilation_unit_ == nullptr) {
+  // outer_compilation_unit_ is null only when unit testing.
+  if (outer_compilation_unit_ == nullptr) {
     return;
   }
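
The GetLocalsFor()/GetLocalsForWithAllocation() pair touched above follows a common fast-path/slow-path split: the cheap size check stays small enough to inline at every call site, and the rare resizing work is kept out of line. A simplified, runnable model, with std::vector standing in for ScopedArenaVector:

#include <cstddef>
#include <vector>

class LocalsTable {
 public:
  explicit LocalsTable(std::size_t num_blocks) : locals_for_(num_blocks) {}

  // Fast path: cheap enough for the compiler to inline.
  std::vector<int>* GetLocalsFor(std::size_t block_id, std::size_t vregs) {
    std::vector<int>* locals = &locals_for_[block_id];
    if (locals->size() == vregs) {  // Common case: already sized.
      return locals;
    }
    return GetLocalsForWithAllocation(locals, vregs);  // Rare case, out of line.
  }

 private:
  // Slow path: performs the allocation; kept out of the inlined fast path.
  std::vector<int>* GetLocalsForWithAllocation(std::vector<int>* locals,
                                               std::size_t vregs) {
    locals->resize(vregs, 0);  // 0 stands in for a null HInstruction*.
    return locals;
  }

  std::vector<std::vector<int>> locals_for_;  // One entry per basic block.
};

int main() {
  LocalsTable table(/* num_blocks */ 4);
  std::vector<int>* locals = table.GetLocalsFor(/* block_id */ 0, /* vregs */ 8);
  return locals->size() == 8 ? 0 : 1;
}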
 
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 79d6ddc..058b711 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -17,23 +17,32 @@
 #ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
 
-#include "base/arena_containers.h"
-#include "base/arena_object.h"
-#include "block_builder.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
+#include "data_type.h"
+#include "dex_file.h"
 #include "dex_file_types.h"
-#include "driver/compiler_driver-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "mirror/dex_cache.h"
+#include "handle.h"
 #include "nodes.h"
-#include "optimizing_compiler_stats.h"
 #include "quicken_info.h"
-#include "ssa_builder.h"
 
 namespace art {
 
+class ArenaBitVector;
+class ArtField;
+class ArtMethod;
 class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+class HBasicBlockBuilder;
 class Instruction;
+class OptimizingCompilerStats;
+class SsaBuilder;
+class VariableSizedHandleScope;
+
+namespace mirror {
+class Class;
+}  // namespace mirror
 
 class HInstructionBuilder : public ValueObject {
  public:
@@ -45,12 +54,12 @@
                       DataType::Type return_type,
                       const DexCompilationUnit* dex_compilation_unit,
                       const DexCompilationUnit* outer_compilation_unit,
-                      CompilerDriver* driver,
+                      CompilerDriver* compiler_driver,
                       CodeGenerator* code_generator,
                       const uint8_t* interpreter_metadata,
                       OptimizingCompilerStats* compiler_stats,
-                      Handle<mirror::DexCache> dex_cache,
-                      VariableSizedHandleScope* handles)
+                      VariableSizedHandleScope* handles,
+                      ScopedArenaAllocator* local_allocator)
       : allocator_(graph->GetAllocator()),
         graph_(graph),
         handles_(handles),
@@ -59,19 +68,19 @@
         return_type_(return_type),
         block_builder_(block_builder),
         ssa_builder_(ssa_builder),
-        locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)),
-        current_block_(nullptr),
-        current_locals_(nullptr),
-        latest_result_(nullptr),
-        current_this_parameter_(nullptr),
-        compiler_driver_(driver),
+        compiler_driver_(compiler_driver),
         code_generator_(code_generator),
         dex_compilation_unit_(dex_compilation_unit),
         outer_compilation_unit_(outer_compilation_unit),
         quicken_info_(interpreter_metadata),
         compilation_stats_(compiler_stats),
-        dex_cache_(dex_cache),
-        loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) {
+        local_allocator_(local_allocator),
+        locals_for_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        current_block_(nullptr),
+        current_locals_(nullptr),
+        latest_result_(nullptr),
+        current_this_parameter_(nullptr),
+        loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
     loop_headers_.reserve(kDefaultNumberOfLoops);
   }
 
@@ -83,18 +92,18 @@
   void SetLoopHeaderPhiInputs();
 
   bool ProcessDexInstruction(const Instruction& instruction, uint32_t dex_pc, size_t quicken_index);
-  void FindNativeDebugInfoLocations(ArenaBitVector* locations);
+  ArenaBitVector* FindNativeDebugInfoLocations();
 
   bool CanDecodeQuickenedInfo() const;
   uint16_t LookupQuickenedInfo(uint32_t quicken_index);
 
   HBasicBlock* FindBlockStartingAt(uint32_t dex_pc) const;
 
-  ArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
+  ScopedArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
   // Out of line version of GetLocalsFor(), which has a fast path that is
   // beneficial to get inlined by callers.
-  ArenaVector<HInstruction*>* GetLocalsForWithAllocation(
-      HBasicBlock* block, ArenaVector<HInstruction*>* locals, const size_t vregs);
+  ScopedArenaVector<HInstruction*>* GetLocalsForWithAllocation(
+      HBasicBlock* block, ScopedArenaVector<HInstruction*>* locals, const size_t vregs);
   HInstruction* ValueOfLocalAt(HBasicBlock* block, size_t local);
   HInstruction* LoadLocal(uint32_t register_index, DataType::Type type) const;
   HInstruction* LoadNullCheckedLocal(uint32_t register_index, uint32_t dex_pc);
@@ -314,7 +323,7 @@
 
   ArenaAllocator* const allocator_;
   HGraph* const graph_;
-  VariableSizedHandleScope* handles_;
+  VariableSizedHandleScope* const handles_;
 
   // The dex file where the method being compiled is, and the bytecode data.
   const DexFile* const dex_file_;
@@ -323,18 +332,8 @@
   // The return type of the method being compiled.
   const DataType::Type return_type_;
 
-  HBasicBlockBuilder* block_builder_;
-  SsaBuilder* ssa_builder_;
-
-  ArenaVector<ArenaVector<HInstruction*>> locals_for_;
-  HBasicBlock* current_block_;
-  ArenaVector<HInstruction*>* current_locals_;
-  HInstruction* latest_result_;
-  // Current "this" parameter.
-  // Valid only after InitializeParameters() finishes.
-  // * Null for static methods.
-  // * Non-null for instance methods.
-  HParameterValue* current_this_parameter_;
+  HBasicBlockBuilder* const block_builder_;
+  SsaBuilder* const ssa_builder_;
 
   CompilerDriver* const compiler_driver_;
 
@@ -352,10 +351,20 @@
   // Original values kept after instruction quickening.
   QuickenInfoTable quicken_info_;
 
-  OptimizingCompilerStats* compilation_stats_;
-  Handle<mirror::DexCache> dex_cache_;
+  OptimizingCompilerStats* const compilation_stats_;
 
-  ArenaVector<HBasicBlock*> loop_headers_;
+  ScopedArenaAllocator* const local_allocator_;
+  ScopedArenaVector<ScopedArenaVector<HInstruction*>> locals_for_;
+  HBasicBlock* current_block_;
+  ScopedArenaVector<HInstruction*>* current_locals_;
+  HInstruction* latest_result_;
+  // Current "this" parameter.
+  // Valid only after InitializeParameters() finishes.
+  // * Null for static methods.
+  // * Non-null for instance methods.
+  HParameterValue* current_this_parameter_;
+
+  ScopedArenaVector<HBasicBlock*> loop_headers_;
 
   static constexpr int kDefaultNumberOfLoops = 2;
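
The header hunk above replaces several heavy #includes with forward declarations, which is safe because the class only names those types through pointers and references. A self-contained illustration of the rule, using hypothetical Widget types; the "header" and "source" halves are merged into one file so it compiles as-is:

#include <cstddef>

// "Header" half: a forward declaration suffices because the class only holds
// a pointer and never dereferences it here.
class Widget;

class WidgetUser {
 public:
  explicit WidgetUser(Widget* widget) : widget_(widget) {}
  std::size_t Describe() const;  // Defined below, after Widget is complete.

 private:
  Widget* const widget_;  // Pointer member: no full definition required.
};

// "Source" half: the full definition is visible from here on.
class Widget {
 public:
  std::size_t size() const { return 42u; }
};

std::size_t WidgetUser::Describe() const { return widget_->size(); }

int main() {
  Widget widget;
  WidgetUser user(&widget);
  return user.Describe() == 42u ? 0 : 1;
}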
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f39acab..afe7484 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1284,9 +1284,9 @@
         DCHECK(input_other->IsShr());  // For UShr, we would have taken the branch above.
         // Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24".
         HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(),
-                                                         input_other->InputAt(0),
-                                                         input_other->InputAt(1),
-                                                         input_other->GetDexPc());
+                                                             input_other->InputAt(0),
+                                                             input_other->InputAt(1),
+                                                             input_other->GetDexPc());
         instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, ushr);
         input_other->GetBlock()->RemoveInstruction(input_other);
         RecordSimplification();
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 3533c88..033a644 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -57,8 +57,8 @@
   bool TryDispatch(HInvoke* invoke);
 
  private:
-  ArenaAllocator* allocator_;
-  CodeGeneratorARM64* codegen_;
+  ArenaAllocator* const allocator_;
+  CodeGeneratorARM64* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
 };
@@ -81,7 +81,7 @@
 
   ArenaAllocator* GetAllocator();
 
-  CodeGeneratorARM64* codegen_;
+  CodeGeneratorARM64* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARM64);
 };
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 4f18ca3..9c02d0a 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -46,9 +46,9 @@
   bool TryDispatch(HInvoke* invoke);
 
  private:
-  ArenaAllocator* allocator_;
-  CodeGenerator* codegen_;
-  ArmVIXLAssembler* assembler_;
+  ArenaAllocator* const allocator_;
+  CodeGenerator* const codegen_;
+  ArmVIXLAssembler* const assembler_;
   const ArmInstructionSetFeatures& features_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
@@ -71,7 +71,7 @@
   ArenaAllocator* GetAllocator();
   ArmVIXLAssembler* GetAssembler();
 
-  CodeGeneratorARMVIXL* codegen_;
+  CodeGeneratorARMVIXL* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARMVIXL);
 };
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index afd9548..13397f1 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -49,8 +49,8 @@
   bool TryDispatch(HInvoke* invoke);
 
  private:
-  CodeGeneratorMIPS* codegen_;
-  ArenaAllocator* allocator_;
+  CodeGeneratorMIPS* const codegen_;
+  ArenaAllocator* const allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
 };
@@ -77,7 +77,7 @@
 
   ArenaAllocator* GetAllocator();
 
-  CodeGeneratorMIPS* codegen_;
+  CodeGeneratorMIPS* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS);
 };
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6085c7b..6f40d90 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -49,8 +49,8 @@
   bool TryDispatch(HInvoke* invoke);
 
  private:
-  CodeGeneratorMIPS64* codegen_;
-  ArenaAllocator* allocator_;
+  CodeGeneratorMIPS64* const codegen_;
+  ArenaAllocator* const allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
 };
@@ -73,7 +73,7 @@
 
   ArenaAllocator* GetAllocator();
 
-  CodeGeneratorMIPS64* codegen_;
+  CodeGeneratorMIPS64* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS64);
 };
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index ba3ca0a..e3555e7 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -49,8 +49,8 @@
   bool TryDispatch(HInvoke* invoke);
 
  private:
-  ArenaAllocator* allocator_;
-  CodeGeneratorX86* codegen_;
+  ArenaAllocator* const allocator_;
+  CodeGeneratorX86* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
 };
@@ -73,7 +73,7 @@
 
   ArenaAllocator* GetAllocator();
 
-  CodeGeneratorX86* codegen_;
+  CodeGeneratorX86* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86);
 };
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index b0fbe91..5cb601e 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -49,8 +49,8 @@
   bool TryDispatch(HInvoke* invoke);
 
  private:
-  ArenaAllocator* allocator_;
-  CodeGeneratorX86_64* codegen_;
+  ArenaAllocator* const allocator_;
+  CodeGeneratorX86_64* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
 };
@@ -73,7 +73,7 @@
 
   ArenaAllocator* GetAllocator();
 
-  CodeGeneratorX86_64* codegen_;
+  CodeGeneratorX86_64* const codegen_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86_64);
 };
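
The repeated change across these intrinsics headers is `T* foo_` becoming `T* const foo_`: the pointer itself can no longer be reseated after construction, while the pointee stays mutable. A minimal sketch with toy types:

class CodeGen {
 public:
  void Emit() {}
};

class IntrinsicBuilder {
 public:
  explicit IntrinsicBuilder(CodeGen* codegen) : codegen_(codegen) {}
  void Build() { codegen_->Emit(); }  // Fine: the pointee stays mutable.
  // void Rebind(CodeGen* other) { codegen_ = other; }  // Would not compile.

 private:
  CodeGen* const codegen_;  // Const pointer: set once, never reseated.
};

int main() {
  CodeGen codegen;
  IntrinsicBuilder builder(&codegen);
  builder.Build();
  return 0;
}
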
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 6a25da3..5940ee7 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -25,7 +25,7 @@
 
 // A ReferenceInfo contains additional info about a reference such as
 // whether it's a singleton, returned, etc.
-class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
+class ReferenceInfo : public ArenaObject<kArenaAllocLSA> {
  public:
   ReferenceInfo(HInstruction* reference, size_t pos)
       : reference_(reference),
@@ -99,7 +99,7 @@
 
 // A heap location is a reference-offset/index pair that a value can be loaded from
 // or stored to.
-class HeapLocation : public ArenaObject<kArenaAllocMisc> {
+class HeapLocation : public ArenaObject<kArenaAllocLSA> {
  public:
   static constexpr size_t kInvalidFieldOffset = -1;
 
@@ -172,12 +172,12 @@
 
   explicit HeapLocationCollector(HGraph* graph)
       : HGraphVisitor(graph),
-        ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
-        heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+        ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSA)),
+        heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSA)),
         aliasing_matrix_(graph->GetAllocator(),
                          kInitialAliasingMatrixBitVectorSize,
                          true,
-                         kArenaAllocLSE),
+                         kArenaAllocLSA),
         has_heap_stores_(false),
         has_volatile_(false),
         has_monitor_operations_(false) {}
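
The kArenaAllocMisc/kArenaAllocLSE to kArenaAllocLSA retagging matters only for accounting: each arena allocation carries a kind, so load-store-analysis memory shows up under its own bucket in the arena statistics (the MemStats dumps touched later in this change). A simplified model of kind-tagged allocation, with hypothetical names:

#include <cstddef>
#include <cstdio>

enum AllocKind { kAllocMisc, kAllocLSA, kAllocLSE, kNumKinds };

class CountingArena {
 public:
  void* Alloc(std::size_t bytes, AllocKind kind) {
    bytes_per_kind_[kind] += bytes;  // Attribution happens at the call site.
    void* result = &buffer_[pos_];
    pos_ += bytes;
    return result;
  }

  void DumpStats() const {
    std::printf("LSA: %zu bytes, LSE: %zu bytes, Misc: %zu bytes\n",
                bytes_per_kind_[kAllocLSA],
                bytes_per_kind_[kAllocLSE],
                bytes_per_kind_[kAllocMisc]);
  }

 private:
  std::size_t bytes_per_kind_[kNumKinds] = {};
  unsigned char buffer_[4096];
  std::size_t pos_ = 0;
};

int main() {
  CountingArena arena;
  arena.Alloc(128, kAllocLSA);  // Analysis data is now attributed separately...
  arena.Alloc(64, kAllocLSE);   // ...from elimination-pass data.
  arena.DumpStats();
  return 0;
}
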
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 39bfc86..af5585e 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -16,6 +16,9 @@
 
 #include "load_store_elimination.h"
 
+#include "base/array_ref.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "escape.h"
 #include "load_store_analysis.h"
 #include "side_effects_analysis.h"
@@ -45,17 +48,18 @@
       : HGraphVisitor(graph, stats),
         heap_location_collector_(heap_locations_collector),
         side_effects_(side_effects),
+        allocator_(graph->GetArenaStack()),
         heap_values_for_(graph->GetBlocks().size(),
-                         ArenaVector<HInstruction*>(heap_locations_collector.
-                                                    GetNumberOfHeapLocations(),
-                                                    kUnknownHeapValue,
-                                                    graph->GetAllocator()->Adapter(kArenaAllocLSE)),
-                         graph->GetAllocator()->Adapter(kArenaAllocLSE)),
-        removed_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
-        substitute_instructions_for_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
-        possibly_removed_stores_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
-        singleton_new_instances_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
-        singleton_new_arrays_(graph->GetAllocator()->Adapter(kArenaAllocLSE)) {
+                         ScopedArenaVector<HInstruction*>(heap_locations_collector.
+                                                          GetNumberOfHeapLocations(),
+                                                          kUnknownHeapValue,
+                                                          allocator_.Adapter(kArenaAllocLSE)),
+                         allocator_.Adapter(kArenaAllocLSE)),
+        removed_loads_(allocator_.Adapter(kArenaAllocLSE)),
+        substitute_instructions_for_loads_(allocator_.Adapter(kArenaAllocLSE)),
+        possibly_removed_stores_(allocator_.Adapter(kArenaAllocLSE)),
+        singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)),
+        singleton_new_arrays_(allocator_.Adapter(kArenaAllocLSE)) {
   }
 
   void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -146,7 +150,7 @@
   void HandleLoopSideEffects(HBasicBlock* block) {
     DCHECK(block->IsLoopHeader());
     int block_id = block->GetBlockId();
-    ArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+    ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
 
     // Don't eliminate loads in irreducible loops. This is safe for singletons, because
     // they are always used by the non-eliminated loop-phi.
@@ -160,7 +164,7 @@
     }
 
     HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
-    ArenaVector<HInstruction*>& pre_header_heap_values =
+    ScopedArenaVector<HInstruction*>& pre_header_heap_values =
         heap_values_for_[pre_header->GetBlockId()];
 
     // Inherit the values from pre-header.
@@ -191,12 +195,12 @@
   }
 
   void MergePredecessorValues(HBasicBlock* block) {
-    const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
+    ArrayRef<HBasicBlock* const> predecessors(block->GetPredecessors());
     if (predecessors.size() == 0) {
       return;
     }
 
-    ArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
+    ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
     for (size_t i = 0; i < heap_values.size(); i++) {
       HInstruction* merged_value = nullptr;
       // Whether merged_value is a result that's merged from all predecessors.
@@ -234,7 +238,8 @@
         // or the heap value may be needed after method return or deoptimization.
         // Keep the last store in each predecessor since future loads cannot be eliminated.
         for (HBasicBlock* predecessor : predecessors) {
-          ArenaVector<HInstruction*>& pred_values = heap_values_for_[predecessor->GetBlockId()];
+          ScopedArenaVector<HInstruction*>& pred_values =
+              heap_values_for_[predecessor->GetBlockId()];
           KeepIfIsStore(pred_values[i]);
         }
       }
@@ -303,7 +308,7 @@
     size_t idx = heap_location_collector_.FindHeapLocationIndex(
         ref_info, offset, index, declaring_class_def_index);
     DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
-    ArenaVector<HInstruction*>& heap_values =
+    ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[instruction->GetBlock()->GetBlockId()];
     HInstruction* heap_value = heap_values[idx];
     if (heap_value == kDefaultHeapValue) {
@@ -369,7 +374,7 @@
     size_t idx = heap_location_collector_.FindHeapLocationIndex(
         ref_info, offset, index, declaring_class_def_index);
     DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
-    ArenaVector<HInstruction*>& heap_values =
+    ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[instruction->GetBlock()->GetBlockId()];
     HInstruction* heap_value = heap_values[idx];
     bool same_value = false;
@@ -496,7 +501,7 @@
   }
 
   void VisitDeoptimize(HDeoptimize* instruction) {
-    const ArenaVector<HInstruction*>& heap_values =
+    const ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[instruction->GetBlock()->GetBlockId()];
     for (HInstruction* heap_value : heap_values) {
       // Filter out fake instructions before checking instruction kind below.
@@ -523,7 +528,7 @@
   }
 
   void HandleInvoke(HInstruction* invoke) {
-    ArenaVector<HInstruction*>& heap_values =
+    ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[invoke->GetBlock()->GetBlockId()];
     for (size_t i = 0; i < heap_values.size(); i++) {
       ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
@@ -590,7 +595,7 @@
         !new_instance->NeedsChecks()) {
       singleton_new_instances_.push_back(new_instance);
     }
-    ArenaVector<HInstruction*>& heap_values =
+    ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[new_instance->GetBlock()->GetBlockId()];
     for (size_t i = 0; i < heap_values.size(); i++) {
       HInstruction* ref =
@@ -612,7 +617,7 @@
     if (ref_info->IsSingletonAndRemovable()) {
       singleton_new_arrays_.push_back(new_array);
     }
-    ArenaVector<HInstruction*>& heap_values =
+    ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[new_array->GetBlock()->GetBlockId()];
     for (size_t i = 0; i < heap_values.size(); i++) {
       HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
@@ -639,20 +644,23 @@
   const HeapLocationCollector& heap_location_collector_;
   const SideEffectsAnalysis& side_effects_;
 
+  // Phase-local allocator whose memory is reclaimed when the visitor is destroyed.
+  ScopedArenaAllocator allocator_;
+
   // One array of heap values for each block.
-  ArenaVector<ArenaVector<HInstruction*>> heap_values_for_;
+  ScopedArenaVector<ScopedArenaVector<HInstruction*>> heap_values_for_;
 
   // We record the instructions that should be eliminated but may be
   // used by heap locations. They'll be removed in the end.
-  ArenaVector<HInstruction*> removed_loads_;
-  ArenaVector<HInstruction*> substitute_instructions_for_loads_;
+  ScopedArenaVector<HInstruction*> removed_loads_;
+  ScopedArenaVector<HInstruction*> substitute_instructions_for_loads_;
 
   // Stores in this list may be removed from the list later when it's
   // found that the store cannot be eliminated.
-  ArenaVector<HInstruction*> possibly_removed_stores_;
+  ScopedArenaVector<HInstruction*> possibly_removed_stores_;
 
-  ArenaVector<HInstruction*> singleton_new_instances_;
-  ArenaVector<HInstruction*> singleton_new_arrays_;
+  ScopedArenaVector<HInstruction*> singleton_new_instances_;
+  ScopedArenaVector<HInstruction*> singleton_new_arrays_;
 
   DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
 };
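
After this hunk, heap_values_for_ is a per-block table of per-heap-location values in which every level draws from the same pass-local allocator. The C++17 std::pmr library offers a close analogue to ART's allocator-adapter pattern; a sketch under that substitution:

#include <cstddef>
#include <memory_resource>
#include <vector>

int main() {
  constexpr int kUnknownHeapValue = -1;  // Stand-in for LSE's sentinel value.
  const std::size_t num_blocks = 4;
  const std::size_t num_heap_locations = 3;

  // Pass-local arena: every container below is released when `arena` dies.
  std::pmr::monotonic_buffer_resource arena;

  // One vector of heap values per basic block, all levels arena-backed.
  std::pmr::vector<std::pmr::vector<int>> heap_values_for(
      num_blocks,
      std::pmr::vector<int>(num_heap_locations, kUnknownHeapValue, &arena),
      &arena);

  heap_values_for[0][2] = 7;  // Record a value for block 0, heap location 2.
  return heap_values_for[0][2] == 7 ? 0 : 1;
}
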
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1a537ca..f4f6434 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -55,14 +55,18 @@
   // "visited" must be empty on entry, it's an output argument for all visited (i.e. live) blocks.
   DCHECK_EQ(visited->GetHighestBitSet(), -1);
 
+  // Allocate memory from local ScopedArenaAllocator.
+  ScopedArenaAllocator allocator(GetArenaStack());
   // Nodes that we're currently visiting, indexed by block id.
-  ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+  ArenaBitVector visiting(
+      &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+  visiting.ClearAllBits();
   // Number of successors visited from a given node, indexed by block id.
-  ArenaVector<size_t> successors_visited(blocks_.size(),
-                                         0u,
-                                         allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+                                               0u,
+                                               allocator.Adapter(kArenaAllocGraphBuilder));
   // Stack of nodes that we're currently visiting (same as marked in "visiting" above).
-  ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
   constexpr size_t kDefaultWorklistSize = 8;
   worklist.reserve(kDefaultWorklistSize);
   visited->SetBit(entry_block_->GetBlockId());
@@ -173,7 +177,11 @@
 }
 
 GraphAnalysisResult HGraph::BuildDominatorTree() {
-  ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+  // Allocate memory from local ScopedArenaAllocator.
+  ScopedArenaAllocator allocator(GetArenaStack());
+
+  ArenaBitVector visited(&allocator, blocks_.size(), false, kArenaAllocGraphBuilder);
+  visited.ClearAllBits();
 
   // (1) Find the back edges in the graph doing a DFS traversal.
   FindBackEdges(&visited);
@@ -258,14 +266,16 @@
   reverse_post_order_.reserve(blocks_.size());
   reverse_post_order_.push_back(entry_block_);
 
+  // Allocate memory from local ScopedArenaAllocator.
+  ScopedArenaAllocator allocator(GetArenaStack());
   // Number of visits of a given node, indexed by block id.
-  ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<size_t> visits(blocks_.size(), 0u, allocator.Adapter(kArenaAllocGraphBuilder));
   // Number of successors visited from a given node, indexed by block id.
-  ArenaVector<size_t> successors_visited(blocks_.size(),
-                                         0u,
-                                         allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+                                               0u,
+                                               allocator.Adapter(kArenaAllocGraphBuilder));
   // Nodes for which we need to visit successors.
-  ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
   constexpr size_t kDefaultWorklistSize = 8;
   worklist.reserve(kDefaultWorklistSize);
   worklist.push_back(entry_block_);
@@ -710,10 +720,13 @@
   bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();
 
   if (is_irreducible_loop) {
-    ArenaBitVector visited(graph->GetAllocator(),
+    // Allocate memory from local ScopedArenaAllocator.
+    ScopedArenaAllocator allocator(graph->GetArenaStack());
+    ArenaBitVector visited(&allocator,
                            graph->GetBlocks().size(),
                            /* expandable */ false,
                            kArenaAllocGraphBuilder);
+    visited.ClearAllBits();
     // Stop marking blocks at the loop header.
     visited.SetBit(header_->GetBlockId());
 
@@ -942,7 +955,7 @@
   }
 }
 
-void HEnvironment::CopyFrom(const ArenaVector<HInstruction*>& locals) {
+void HEnvironment::CopyFrom(ArrayRef<HInstruction* const> locals) {
   for (size_t i = 0; i < locals.size(); i++) {
     HInstruction* instruction = locals[i];
     SetRawEnvAt(i, instruction);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 99fde75..75cdb3e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1839,7 +1839,7 @@
     }
   }
 
-  void CopyFrom(const ArenaVector<HInstruction*>& locals);
+  void CopyFrom(ArrayRef<HInstruction* const> locals);
   void CopyFrom(HEnvironment* environment);
 
   // Copy from `env`. If it's a loop phi for `loop_header`, copy the first
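
Switching CopyFrom() to take ArrayRef<HInstruction* const> lets callers pass an ArenaVector, a ScopedArenaVector, or a plain C array (as nodes_test.cc now does) through one non-owning view. std::span (C++20) is a close standard analogue; a sketch using it in place of ArrayRef:

#include <cstddef>
#include <span>
#include <vector>

struct Instr {};

// Accepts any contiguous sequence of Instr* without copying or owning it.
std::size_t CountNonNull(std::span<Instr* const> locals) {
  std::size_t count = 0;
  for (Instr* instr : locals) {
    if (instr != nullptr) {
      ++count;
    }
  }
  return count;
}

int main() {
  Instr a, b;
  std::vector<Instr*> from_vector = {&a, nullptr, &b};  // Container caller.
  Instr* const from_array[] = {&a, &b};                 // Plain-array caller.
  return (CountNonNull(from_vector) == 2 && CountNonNull(from_array) == 2) ? 0 : 1;
}
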
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index b2180d9..9bfd250 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -129,10 +129,9 @@
 
   HEnvironment* environment = new (GetAllocator()) HEnvironment(
       GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment);
-  ArenaVector<HInstruction*> array(GetAllocator()->Adapter());
-  array.push_back(parameter1);
+  HInstruction* const array[] = { parameter1 };
 
-  environment->CopyFrom(array);
+  environment->CopyFrom(ArrayRef<HInstruction* const>(array));
   with_environment->SetRawEnvironment(environment);
 
   ASSERT_TRUE(parameter1->HasEnvironmentUses());
@@ -140,13 +139,13 @@
 
   HEnvironment* parent1 = new (GetAllocator()) HEnvironment(
       GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
-  parent1->CopyFrom(array);
+  parent1->CopyFrom(ArrayRef<HInstruction* const>(array));
 
   ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
 
   HEnvironment* parent2 = new (GetAllocator()) HEnvironment(
       GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
-  parent2->CopyFrom(array);
+  parent2->CopyFrom(ArrayRef<HInstruction* const>(array));
   parent1->SetAndCopyParentChain(GetAllocator(), parent2);
 
   // One use for parent2, and one other use for the new parent of parent1.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9bfb7a5..42f32b7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1146,7 +1146,8 @@
         if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
           MemStats mem_stats(allocator.GetMemStats());
           MemStats peak_stats(arena_stack.GetPeakStats());
-          LOG(INFO) << dex_file.PrettyMethod(method_idx)
+          LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+                    << dex_file.PrettyMethod(method_idx)
                     << "\n" << Dumpable<MemStats>(mem_stats)
                     << "\n" << Dumpable<MemStats>(peak_stats);
         }
@@ -1256,7 +1257,8 @@
       if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
         MemStats mem_stats(allocator.GetMemStats());
         MemStats peak_stats(arena_stack.GetPeakStats());
-        LOG(INFO) << dex_file->PrettyMethod(method_idx)
+        LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+                  << dex_file->PrettyMethod(method_idx)
                   << "\n" << Dumpable<MemStats>(mem_stats)
                   << "\n" << Dumpable<MemStats>(peak_stats);
       }
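
The logging tweak above makes each over-threshold report self-describing by embedding the byte count in the message. A trimmed-down model of the pattern; the threshold constant and method names here are illustrative, not ART's values:

#include <cstddef>
#include <iostream>
#include <string>

// Hypothetical threshold; ART's kArenaAllocatorMemoryReportThreshold differs.
constexpr std::size_t kReportThresholdBytes = 8u * 1024 * 1024;

void MaybeReport(const std::string& method_name, std::size_t total_allocated) {
  if (total_allocated > kReportThresholdBytes) {
    // Embedding the count makes each log line meaningful on its own.
    std::cout << "Used " << total_allocated
              << " bytes of arena memory for compiling " << method_name << '\n';
  }
}

int main() {
  MaybeReport("void Foo.bar()", 16u * 1024 * 1024);  // Logged.
  MaybeReport("void Foo.baz()", 4u * 1024);          // Below threshold: silent.
  return 0;
}
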
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5632f9a..9aba912 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -22,7 +22,9 @@
 #include "common_compiler_test.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
-#include "handle_scope.h"
+#include "handle_scope-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
 #include "nodes.h"
 #include "scoped_thread_state_change.h"
 #include "ssa_builder.h"
@@ -123,8 +125,7 @@
 
   // Create a control-flow graph from Dex instructions.
   HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
-    const DexFile::CodeItem* item =
-      reinterpret_cast<const DexFile::CodeItem*>(data);
+    const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(data);
     HGraph* graph = CreateGraph();
 
     {
@@ -132,7 +133,19 @@
       if (handles_ == nullptr) {
         handles_.reset(new VariableSizedHandleScope(soa.Self()));
       }
-      HGraphBuilder builder(graph, *item, handles_.get(), return_type);
+      const DexFile* dex_file = graph->GetAllocator()->Alloc<DexFile>();
+      const DexCompilationUnit* dex_compilation_unit =
+          new (graph->GetAllocator()) DexCompilationUnit(
+              handles_->NewHandle<mirror::ClassLoader>(nullptr),
+              /* class_linker */ nullptr,
+              *dex_file,
+              code_item,
+              /* class_def_index */ DexFile::kDexNoIndex16,
+              /* method_idx */ dex::kDexNoIndex,
+              /* access_flags */ 0u,
+              /* verified_method */ nullptr,
+              handles_->NewHandle<mirror::DexCache>(nullptr));
+      HGraphBuilder builder(graph, dex_compilation_unit, *code_item, handles_.get(), return_type);
       bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
       return graph_built ? graph : nullptr;
     }
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 6d9ebc8..cb9dc42 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -18,8 +18,11 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "base/enums.h"
 #include "class_linker-inl.h"
+#include "handle_scope-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
 #include "scoped_thread_state_change-inl.h"
@@ -70,14 +73,16 @@
              Handle<mirror::ClassLoader> class_loader,
              Handle<mirror::DexCache> hint_dex_cache,
              HandleCache* handle_cache,
-             ArenaVector<HInstruction*>* worklist,
              bool is_first_run)
     : HGraphDelegateVisitor(graph),
       class_loader_(class_loader),
       hint_dex_cache_(hint_dex_cache),
       handle_cache_(handle_cache),
-      worklist_(worklist),
-      is_first_run_(is_first_run) {}
+      allocator_(graph->GetArenaStack()),
+      worklist_(allocator_.Adapter(kArenaAllocReferenceTypePropagation)),
+      is_first_run_(is_first_run) {
+    worklist_.reserve(kDefaultWorklistSize);
+  }
 
   void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
   void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
@@ -87,9 +92,6 @@
   void VisitLoadException(HLoadException* instr) OVERRIDE;
   void VisitNewArray(HNewArray* instr) OVERRIDE;
   void VisitParameterValue(HParameterValue* instr) OVERRIDE;
-  void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
-  void SetClassAsTypeInfo(HInstruction* instr, ObjPtr<mirror::Class> klass, bool is_exact)
-      REQUIRES_SHARED(Locks::mutator_lock_);
   void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
   void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
   void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
@@ -99,16 +101,39 @@
   void VisitCheckCast(HCheckCast* instr) OVERRIDE;
   void VisitBoundType(HBoundType* instr) OVERRIDE;
   void VisitNullCheck(HNullCheck* instr) OVERRIDE;
+  void VisitPhi(HPhi* phi);
+
+  void VisitBasicBlock(HBasicBlock* block);
+  void ProcessWorklist();
+
+ private:
+  void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
+  void SetClassAsTypeInfo(HInstruction* instr, ObjPtr<mirror::Class> klass, bool is_exact)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  void BoundTypeForIfNotNull(HBasicBlock* block);
+  static void BoundTypeForIfInstanceOf(HBasicBlock* block);
+  static bool UpdateNullability(HInstruction* instr);
+  static void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
+  void UpdateArrayGet(HArrayGet* instr) REQUIRES_SHARED(Locks::mutator_lock_);
+  void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool UpdateReferenceTypeInfo(HInstruction* instr);
   void UpdateReferenceTypeInfo(HInstruction* instr,
                                dex::TypeIndex type_idx,
                                const DexFile& dex_file,
                                bool is_exact);
 
- private:
+  void AddToWorklist(HInstruction* instruction);
+  void AddDependentInstructionsToWorklist(HInstruction* instruction);
+
+  static constexpr size_t kDefaultWorklistSize = 8;
+
   Handle<mirror::ClassLoader> class_loader_;
   Handle<mirror::DexCache> hint_dex_cache_;
-  HandleCache* handle_cache_;
-  ArenaVector<HInstruction*>* worklist_;
+  HandleCache* const handle_cache_;
+
+  // Phase-local allocator whose memory is reclaimed when the visitor is destroyed.
+  ScopedArenaAllocator allocator_;
+  ScopedArenaVector<HInstruction*> worklist_;
   const bool is_first_run_;
 };
 
@@ -122,7 +147,6 @@
       class_loader_(class_loader),
       hint_dex_cache_(hint_dex_cache),
       handle_cache_(handles),
-      worklist_(graph->GetAllocator()->Adapter(kArenaAllocReferenceTypePropagation)),
       is_first_run_(is_first_run) {
 }
 
@@ -158,7 +182,6 @@
                      class_loader_,
                      hint_dex_cache_,
                      &handle_cache_,
-                     &worklist_,
                      is_first_run_);
   instruction->Accept(&visitor);
 }
@@ -319,26 +342,20 @@
 }
 
 void ReferenceTypePropagation::Run() {
-  worklist_.reserve(kDefaultWorklistSize);
+  RTPVisitor visitor(graph_, class_loader_, hint_dex_cache_, &handle_cache_, is_first_run_);
 
   // To properly propagate type info we need to visit in the dominator-based order.
   // Reverse post order guarantees a node's dominators are visited first.
   // We take advantage of this order in `VisitBasicBlock`.
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
-    VisitBasicBlock(block);
+    visitor.VisitBasicBlock(block);
   }
 
-  ProcessWorklist();
+  visitor.ProcessWorklist();
   ValidateTypes();
 }
 
-void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
-  RTPVisitor visitor(graph_,
-                     class_loader_,
-                     hint_dex_cache_,
-                     &handle_cache_,
-                     &worklist_,
-                     is_first_run_);
+void ReferenceTypePropagation::RTPVisitor::VisitBasicBlock(HBasicBlock* block) {
   // Handle Phis first as there might be instructions in the same block who depend on them.
   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
     VisitPhi(it.Current()->AsPhi());
@@ -348,7 +365,7 @@
   // last visited instruction, use `HInstructionIteratorHandleChanges` iterator.
   for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) {
     HInstruction* instr = it.Current();
-    instr->Accept(&visitor);
+    instr->Accept(this);
   }
 
   // Add extra nodes to bound types.
@@ -357,7 +374,7 @@
   BoundTypeForClassCheck(block->GetLastInstruction());
 }
 
-void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
+void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfNotNull(HBasicBlock* block) {
   HIf* ifInstruction = block->GetLastInstruction()->AsIf();
   if (ifInstruction == nullptr) {
     return;
@@ -391,7 +408,7 @@
       : ifInstruction->IfFalseSuccessor();
 
   ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
-      handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
+      handle_cache_->GetObjectClassHandle(), /* is_exact */ false);
 
   BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
 }
@@ -469,7 +486,7 @@
 // `if (x instanceof ClassX) { }`
 // If that's the case insert an HBoundType instruction to bound the type of `x`
 // to `ClassX` in the scope of the dominated blocks.
-void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
+void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfInstanceOf(HBasicBlock* block) {
   HIf* ifInstruction = block->GetLastInstruction()->AsIf();
   if (ifInstruction == nullptr) {
     return;
@@ -728,7 +745,7 @@
   }
 }
 
-void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
+void ReferenceTypePropagation::RTPVisitor::VisitPhi(HPhi* phi) {
   if (phi->IsDead() || phi->GetType() != DataType::Type::kReference) {
     return;
   }
@@ -812,7 +829,7 @@
   return ReferenceTypeInfo::Create(result_type_handle, is_exact);
 }
 
-void ReferenceTypePropagation::UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache) {
+void ReferenceTypePropagation::RTPVisitor::UpdateArrayGet(HArrayGet* instr) {
   DCHECK_EQ(DataType::Type::kReference, instr->GetType());
 
   ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
@@ -823,7 +840,7 @@
   Handle<mirror::Class> handle = parent_rti.GetTypeHandle();
   if (handle->IsObjectArrayClass() && IsAdmissible(handle->GetComponentType())) {
     ReferenceTypeInfo::TypeHandle component_handle =
-        handle_cache->NewHandle(handle->GetComponentType());
+        handle_cache_->NewHandle(handle->GetComponentType());
     bool is_exact = component_handle->CannotBeAssignedFromOtherTypes();
     instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(component_handle, is_exact));
   } else {
@@ -832,7 +849,7 @@
   }
 }
 
-bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) {
+bool ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr) {
   ScopedObjectAccess soa(Thread::Current());
 
   ReferenceTypeInfo previous_rti = instr->GetReferenceTypeInfo();
@@ -848,7 +865,7 @@
   } else if (instr->IsArrayGet()) {
     // TODO: consider if it's worth "looking back" and binding the input object
     // to an array type.
-    UpdateArrayGet(instr->AsArrayGet(), &handle_cache_);
+    UpdateArrayGet(instr->AsArrayGet());
   } else {
     LOG(FATAL) << "Invalid instruction (should not get here)";
   }
@@ -873,13 +890,13 @@
   }
 
   ScopedObjectAccess soa(Thread::Current());
-  UpdateArrayGet(instr, handle_cache_);
+  UpdateArrayGet(instr);
   if (!instr->GetReferenceTypeInfo().IsValid()) {
-    worklist_->push_back(instr);
+    worklist_.push_back(instr);
   }
 }
 
-void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) {
+void ReferenceTypePropagation::RTPVisitor::UpdateBoundType(HBoundType* instr) {
   ReferenceTypeInfo input_rti = instr->InputAt(0)->GetReferenceTypeInfo();
   if (!input_rti.IsValid()) {
     return;  // No new info yet.
@@ -903,7 +920,7 @@
 
 // NullConstant inputs are ignored during merging as they do not provide any useful information.
 // If all the inputs are NullConstants then the type of the phi will be set to Object.
-void ReferenceTypePropagation::UpdatePhi(HPhi* instr) {
+void ReferenceTypePropagation::RTPVisitor::UpdatePhi(HPhi* instr) {
   DCHECK(instr->IsLive());
 
   HInputsRef inputs = instr->GetInputs();
@@ -931,7 +948,7 @@
     if (inputs[i]->IsNullConstant()) {
       continue;
     }
-    new_rti = MergeTypes(new_rti, inputs[i]->GetReferenceTypeInfo(), &handle_cache_);
+    new_rti = MergeTypes(new_rti, inputs[i]->GetReferenceTypeInfo(), handle_cache_);
     if (new_rti.IsValid() && new_rti.IsObjectClass()) {
       if (!new_rti.IsExact()) {
         break;
@@ -948,7 +965,7 @@
 
 // Re-computes and updates the nullability of the instruction. Returns whether or
 // not the nullability was changed.
-bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) {
+bool ReferenceTypePropagation::RTPVisitor::UpdateNullability(HInstruction* instr) {
   DCHECK((instr->IsPhi() && instr->AsPhi()->IsLive())
       || instr->IsBoundType()
       || instr->IsNullCheck()
@@ -976,7 +993,7 @@
   return existing_can_be_null != instr->CanBeNull();
 }
 
-void ReferenceTypePropagation::ProcessWorklist() {
+void ReferenceTypePropagation::RTPVisitor::ProcessWorklist() {
   while (!worklist_.empty()) {
     HInstruction* instruction = worklist_.back();
     worklist_.pop_back();
@@ -988,13 +1005,14 @@
   }
 }
 
-void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) {
+void ReferenceTypePropagation::RTPVisitor::AddToWorklist(HInstruction* instruction) {
   DCHECK_EQ(instruction->GetType(), DataType::Type::kReference)
       << instruction->DebugName() << ":" << instruction->GetType();
   worklist_.push_back(instruction);
 }
 
-void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
+void ReferenceTypePropagation::RTPVisitor::AddDependentInstructionsToWorklist(
+    HInstruction* instruction) {
   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
     HInstruction* user = use.GetUser();
     if ((user->IsPhi() && user->AsPhi()->IsLive())
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index c221282..fd4dad2 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -18,12 +18,10 @@
 #define ART_COMPILER_OPTIMIZING_REFERENCE_TYPE_PROPAGATION_H_
 
 #include "base/arena_containers.h"
-#include "driver/dex_compilation_unit.h"
-#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
 #include "nodes.h"
 #include "obj_ptr.h"
 #include "optimization.h"
-#include "optimizing_compiler_stats.h"
 
 namespace art {
 
@@ -91,22 +89,6 @@
 
   class RTPVisitor;
 
-  void VisitPhi(HPhi* phi);
-  void VisitBasicBlock(HBasicBlock* block);
-  void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
-  void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
-  void BoundTypeForIfNotNull(HBasicBlock* block);
-  void BoundTypeForIfInstanceOf(HBasicBlock* block);
-  void ProcessWorklist();
-  void AddToWorklist(HInstruction* instr);
-  void AddDependentInstructionsToWorklist(HInstruction* instr);
-
-  bool UpdateNullability(HInstruction* instr);
-  bool UpdateReferenceTypeInfo(HInstruction* instr);
-
-  static void UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   static ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a,
                                       const ReferenceTypeInfo& b,
                                       HandleCache* handle_cache)
@@ -122,13 +104,9 @@
   Handle<mirror::DexCache> hint_dex_cache_;
   HandleCache handle_cache_;
 
-  ArenaVector<HInstruction*> worklist_;
-
   // Whether this reference type propagation is the first run we are doing.
   const bool is_first_run_;
 
-  static constexpr size_t kDefaultWorklistSize = 8;
-
   friend class ReferenceTypePropagationTest;
 
   DISALLOW_COPY_AND_ASSIGN(ReferenceTypePropagation);
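
With the worklist moved into RTPVisitor, the pass keeps its original fixed-point shape: visit blocks in reverse post-order, then drain a worklist, re-enqueueing only instructions whose computed facts actually changed. A toy model of that loop, with plain ints standing in for reference-type info:

#include <vector>

struct Node {
  int value = 0;             // Stand-in for reference-type/nullability info.
  std::vector<Node*> users;  // Nodes whose value depends on this one.
};

void ProcessWorklist(std::vector<Node*>* worklist) {
  while (!worklist->empty()) {
    Node* node = worklist->back();
    worklist->pop_back();
    for (Node* user : node->users) {
      int updated = node->value + 1;  // Stand-in for the type-merge step.
      if (user->value != updated) {   // Re-enqueue only on an actual change,
        user->value = updated;        // otherwise the loop might not terminate.
        worklist->push_back(user);
      }
    }
  }
}

int main() {
  Node a, b, c;
  a.users = {&b};
  b.users = {&c};
  std::vector<Node*> worklist = {&a};
  worklist.reserve(8);  // Mirrors the kDefaultWorklistSize reservation above.
  ProcessWorklist(&worklist);
  return (b.value == 1 && c.value == 2) ? 0 : 1;
}
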
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index ece9904..86e9713 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -53,6 +53,21 @@
   }
 }
 
+RegisterAllocator::~RegisterAllocator() {
+  if (kIsDebugBuild) {
+    // Poison live interval pointers with "Error: BAD 71ve1nt3rval."
+    LiveInterval* bad_live_interval = reinterpret_cast<LiveInterval*>(0xebad7113u);
+    for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
+      for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+        it.Current()->SetLiveInterval(bad_live_interval);
+      }
+      for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+        it.Current()->SetLiveInterval(bad_live_interval);
+      }
+    }
+  }
+}
+
 bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
                                                 InstructionSet instruction_set) {
   return instruction_set == kArm
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index eaeec3b..18ef69f 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -50,7 +50,7 @@
                                                    const SsaLivenessAnalysis& analysis,
                                                    Strategy strategy = kRegisterAllocatorDefault);
 
-  virtual ~RegisterAllocator() = default;
+  virtual ~RegisterAllocator();
 
   // Main entry point for the register allocator. Given the liveness analysis,
   // allocates registers to live intervals.
@@ -87,7 +87,7 @@
   // to find an optimal split position.
   LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
 
-  ScopedArenaAllocator* allocator_;
+  ScopedArenaAllocator* const allocator_;
   CodeGenerator* const codegen_;
   const SsaLivenessAnalysis& liveness_;
 };
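
Several hunks in this change (here and in ssa_liveness_analysis.h, stack_map_stream.h, and assembler.h below) tighten raw-pointer members to `T* const`, documenting that the owner never reseats them after construction. A small sketch of what that buys, with hypothetical names:

class CodeGenerator;  // forward declaration only

class Pass {
 public:
  explicit Pass(CodeGenerator* codegen) : codegen_(codegen) {}
  void Reseat(CodeGenerator* other) {
    // codegen_ = other;  // would not compile: the member is a const pointer
    (void)other;
  }
 private:
  CodeGenerator* const codegen_;  // fixed at construction, like allocator_
};
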
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index c673d54..57eb762 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -781,7 +781,7 @@
 #if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
   // Phase-local allocator that allocates scheduler internal data structures like
   // scheduling nodes, internal nodes map, dependencies, etc.
-  ScopedArenaAllocator arena_allocator(graph_->GetArenaStack());
+  ScopedArenaAllocator allocator(graph_->GetArenaStack());
   CriticalPathSchedulingNodeSelector critical_path_selector;
   RandomSchedulingNodeSelector random_selector;
   SchedulingNodeSelector* selector = schedule_randomly
@@ -797,7 +797,7 @@
   switch (instruction_set_) {
 #ifdef ART_ENABLE_CODEGEN_arm64
     case kArm64: {
-      arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
+      arm64::HSchedulerARM64 scheduler(&allocator, selector);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
       break;
@@ -807,7 +807,7 @@
     case kThumb2:
     case kArm: {
       arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
-      arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
+      arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
       break;
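
The scheduler hunks rename arena_allocator to allocator, underscoring that it is a phase-local ScopedArenaAllocator: everything the scheduler builds (nodes, the nodes map, dependencies) dies with the pass. A much-simplified standalone sketch of a scope-bounded arena (not ART's ArenaStack-backed implementation):

#include <cstddef>
#include <memory>
#include <vector>

class ScopedArena {
 public:
  void* Alloc(size_t bytes) {
    blocks_.emplace_back(new char[bytes]);
    return blocks_.back().get();
  }
  // The destructor frees every block; ART instead returns the memory to an
  // ArenaStack so the next compiler phase can reuse it.
 private:
  std::vector<std::unique_ptr<char[]>> blocks_;
};

void RunSchedulingPhase() {
  ScopedArena allocator;                     // lives for this phase only
  void* node_storage = allocator.Alloc(64);  // scheduler-internal node
  (void)node_storage;
}  // all phase-local memory is reclaimed here
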
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 3efd26a..afdf6f1 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -253,14 +253,14 @@
  public:
   SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
       : scheduler_(scheduler),
-        arena_(allocator),
+        allocator_(allocator),
         contains_scheduling_barrier_(false),
-        nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
+        nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
         heap_location_collector_(nullptr) {}
 
   SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
     std::unique_ptr<SchedulingNode> node(
-        new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier));
+        new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
     SchedulingNode* result = node.get();
     nodes_map_.Insert(std::make_pair(instr, std::move(node)));
     contains_scheduling_barrier_ |= is_scheduling_barrier;
@@ -323,7 +323,7 @@
 
   const HScheduler* const scheduler_;
 
-  ScopedArenaAllocator* const arena_;
+  ScopedArenaAllocator* const allocator_;
 
   bool contains_scheduling_barrier_;
 
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 0e46aec..77ec9a6 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -16,6 +16,8 @@
 
 #include "select_generator.h"
 
+#include "reference_type_propagation.h"
+
 namespace art {
 
 static constexpr size_t kMaxInstructionsInBranch = 1u;
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index c060146..f8cf00e 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -58,7 +58,6 @@
 #define ART_COMPILER_OPTIMIZING_SELECT_GENERATOR_H_
 
 #include "optimization.h"
-#include "reference_type_propagation.h"
 
 namespace art {
 
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index f4a8a17..e4edbfd 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -105,7 +105,7 @@
 }
 
 static void AddDependentInstructionsToWorklist(HInstruction* instruction,
-                                               ArenaVector<HPhi*>* worklist) {
+                                               ScopedArenaVector<HPhi*>* worklist) {
   // If `instruction` is a dead phi, a type conflict was just identified. All its
   // live phi users, and transitively the users of those users, therefore need to be
   // marked dead/conflicting too, so we add them to the worklist. Otherwise we
@@ -167,7 +167,7 @@
 }
 
 // Replace inputs of `phi` to match its type. Return false if conflict is identified.
-bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
   DataType::Type common_type = phi->GetType();
   if (DataType::IsIntegralType(common_type)) {
     // We do not need to retype ambiguous inputs because they are always constructed
@@ -213,7 +213,7 @@
 
 // Attempt to set the primitive type of `phi` to match its inputs. Return whether
 // it was changed by the algorithm or not.
-bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
   DCHECK(phi->IsLive());
   DataType::Type original_type = phi->GetType();
 
@@ -233,7 +233,7 @@
 }
 
 void SsaBuilder::RunPrimitiveTypePropagation() {
-  ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
 
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     if (block->IsLoopHeader()) {
@@ -262,7 +262,7 @@
   EquivalentPhisCleanup();
 }
 
-void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist) {
+void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist) {
   // Process the worklist.
   while (!worklist->empty()) {
     HPhi* phi = worklist->back();
@@ -319,7 +319,7 @@
   // uses (because they are untyped) and environment uses (if --debuggable).
   // After resolving all ambiguous ArrayGets, we will re-run primitive type
   // propagation on the Phis which need to be updated.
-  ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+  ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
 
   {
     ScopedObjectAccess soa(Thread::Current());
@@ -623,8 +623,7 @@
       || (next->GetType() != type)) {
     ArenaAllocator* allocator = graph_->GetAllocator();
     HInputsRef inputs = phi->GetInputs();
-    HPhi* new_phi =
-        new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
+    HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
     // Copy the inputs. Note that the graph may not be correctly typed
     // by doing this copy, but the type propagation phase will fix it.
     ArrayRef<HUserRecord<HInstruction*>> new_input_records = new_phi->GetInputRecords();
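
The worklist signatures above switch from ArenaVector to ScopedArenaVector, but the fixed-point algorithm they feed is unchanged: retype a phi from its inputs, and requeue its users whenever the type changes. A standalone sketch of that loop over a hypothetical three-value type lattice (not ART's DataType):

#include <vector>

enum class Type { kNone, kInt, kFloat, kConflict };

struct Phi {
  Type type = Type::kNone;
  std::vector<Phi*> inputs;
  std::vector<Phi*> users;
};

Type Merge(Type a, Type b) {
  if (a == Type::kNone) return b;
  if (b == Type::kNone || a == b) return a;
  return Type::kConflict;  // e.g. int vs float cannot be reconciled
}

void Propagate(std::vector<Phi*>& all_phis) {
  std::vector<Phi*> worklist(all_phis.begin(), all_phis.end());
  while (!worklist.empty()) {
    Phi* phi = worklist.back();
    worklist.pop_back();
    Type merged = Type::kNone;
    for (Phi* input : phi->inputs) merged = Merge(merged, input->type);
    if (merged != phi->type) {
      phi->type = merged;
      // Re-examine users whenever a phi's type changes, until a fixed point.
      for (Phi* user : phi->users) worklist.push_back(user);
    }
  }
}
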
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 509cdc1..60831a9 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -17,7 +17,8 @@
 #ifndef ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
 
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "nodes.h"
 #include "optimization.h"
 
@@ -50,15 +51,17 @@
   SsaBuilder(HGraph* graph,
              Handle<mirror::ClassLoader> class_loader,
              Handle<mirror::DexCache> dex_cache,
-             VariableSizedHandleScope* handles)
+             VariableSizedHandleScope* handles,
+             ScopedArenaAllocator* local_allocator)
       : graph_(graph),
         class_loader_(class_loader),
         dex_cache_(dex_cache),
         handles_(handles),
         agets_fixed_(false),
-        ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
-        ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
-        uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) {
+        local_allocator_(local_allocator),
+        ambiguous_agets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        ambiguous_asets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+        uninitialized_strings_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
     graph_->InitializeInexactObjectRTI(handles);
   }
 
@@ -105,9 +108,9 @@
   // input. Returns false if the type of an array is unknown.
   bool FixAmbiguousArrayOps();
 
-  bool TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist);
-  bool UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist);
-  void ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist);
+  bool TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+  bool UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+  void ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist);
 
   HFloatConstant* GetFloatEquivalent(HIntConstant* constant);
   HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant);
@@ -116,7 +119,7 @@
 
   void RemoveRedundantUninitializedStrings();
 
-  HGraph* graph_;
+  HGraph* const graph_;
   Handle<mirror::ClassLoader> class_loader_;
   Handle<mirror::DexCache> dex_cache_;
   VariableSizedHandleScope* const handles_;
@@ -124,9 +127,10 @@
   // True if types of ambiguous ArrayGets have been resolved.
   bool agets_fixed_;
 
-  ArenaVector<HArrayGet*> ambiguous_agets_;
-  ArenaVector<HArraySet*> ambiguous_asets_;
-  ArenaVector<HNewInstance*> uninitialized_strings_;
+  ScopedArenaAllocator* const local_allocator_;
+  ScopedArenaVector<HArrayGet*> ambiguous_agets_;
+  ScopedArenaVector<HArraySet*> ambiguous_asets_;
+  ScopedArenaVector<HNewInstance*> uninitialized_strings_;
 
   DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
 };
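
The constructor change above threads a caller-owned ScopedArenaAllocator into SsaBuilder, so the builder's containers share the caller's pass-local arena instead of drawing from the graph's long-lived allocator. A sketch of the same injection pattern using std::pmr (C++17) as a stand-in; Builder and its members are illustrative, not ART types:

#include <memory_resource>
#include <vector>

class Builder {
 public:
  explicit Builder(std::pmr::memory_resource* local_allocator)
      : ambiguous_ops_(local_allocator) {}  // container draws from the arena
  void Record(int op) { ambiguous_ops_.push_back(op); }
 private:
  std::pmr::vector<int> ambiguous_ops_;
};

void RunBuild() {
  std::pmr::monotonic_buffer_resource arena;  // freed wholesale at scope exit
  Builder builder(&arena);
  builder.Record(42);
}
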
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 9800af7..f83bb52 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1292,7 +1292,7 @@
 
   // Use a local ScopedArenaAllocator for allocating memory.
   // This allocator must remain alive while doing register allocation.
-  ScopedArenaAllocator* allocator_;
+  ScopedArenaAllocator* const allocator_;
 
   ScopedArenaVector<BlockInfo*> block_infos_;
 
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 9b78e0e..b9bfbaa 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -95,8 +95,7 @@
       graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
   HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
       graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
-  ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
-                                  GetAllocator()->Adapter());
+  HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
   for (HInstruction* insn : args) {
     entry_->AddInstruction(insn);
   }
@@ -109,7 +108,7 @@
                                                                    /* method */ nullptr,
                                                                    /* dex_pc */ 0u,
                                                                    null_check);
-  null_check_env->CopyFrom(args);
+  null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
   HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
   block->AddInstruction(length);
@@ -120,7 +119,7 @@
                                                                      /* method */ nullptr,
                                                                      /* dex_pc */ 0u,
                                                                      bounds_check);
-  bounds_check_env->CopyFrom(args);
+  bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   bounds_check->SetRawEnvironment(bounds_check_env);
   HInstruction* array_set =
       new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -144,7 +143,7 @@
       // Environment uses keep the reference argument alive.
       "ranges: { [10,19) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
   };
-  ASSERT_EQ(arraysize(expected), args.size());
+  static_assert(arraysize(expected) == arraysize(args), "Array size check.");
   size_t arg_index = 0u;
   for (HInstruction* arg : args) {
     std::ostringstream arg_dump;
@@ -165,8 +164,7 @@
       graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
   HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
       graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
-  ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
-                                  GetAllocator()->Adapter());
+  HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
   for (HInstruction* insn : args) {
     entry_->AddInstruction(insn);
   }
@@ -179,7 +177,7 @@
                                                                    /* method */ nullptr,
                                                                    /* dex_pc */ 0u,
                                                                    null_check);
-  null_check_env->CopyFrom(args);
+  null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
   HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
   block->AddInstruction(length);
@@ -194,7 +192,7 @@
                                                                    /* method */ nullptr,
                                                                    /* dex_pc */ 0u,
                                                                    deoptimize);
-  deoptimize_env->CopyFrom(args);
+  deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   deoptimize->SetRawEnvironment(deoptimize_env);
   HInstruction* array_set =
       new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -217,7 +215,7 @@
       // Environment uses keep the reference argument alive.
       "ranges: { [10,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
   };
-  ASSERT_EQ(arraysize(expected), args.size());
+  static_assert(arraysize(expected) == arraysize(args), "Array size check.");
   size_t arg_index = 0u;
   for (HInstruction* arg : args) {
     std::ostringstream arg_dump;
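
The test simplification above replaces an arena-backed ArenaVector with a plain C array viewed through a non-owning ArrayRef, and upgrades the runtime size check to a compile-time static_assert. A standalone sketch of the same idea, using std::span (C++20) in place of ArrayRef and a hypothetical Instruction type:

#include <span>

struct Instruction {};

void CopyFrom(std::span<Instruction* const> args) {
  for (Instruction* insn : args) { (void)insn; }
}

void Test() {
  Instruction a, b, c;
  Instruction* const args[] = { &a, &b, &c };
  CopyFrom(args);  // the span deduces its extent from the array; no allocation
  static_assert(sizeof(args) / sizeof(args[0]) == 3, "Array size check.");
}
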
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 3b95b86..cb27ded 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -17,7 +17,8 @@
 #include "ssa_phi_elimination.h"
 
 #include "base/arena_bit_vector.h"
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "base/bit_vector-inl.h"
 
 namespace art {
@@ -28,10 +29,17 @@
 }
 
 void SsaDeadPhiElimination::MarkDeadPhis() {
+  // Use a local allocator for the memory used by this optimization.
+  ScopedArenaAllocator allocator(graph_->GetArenaStack());
+
+  static constexpr size_t kDefaultWorklistSize = 8;
+  ScopedArenaVector<HPhi*> worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
+  worklist.reserve(kDefaultWorklistSize);
+
   // Phis are constructed live and should not be revived if previously marked
   // dead. This algorithm temporarily breaks that invariant but we DCHECK that
   // only phis which were initially live are revived.
-  ArenaSet<HPhi*> initially_live(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
+  ScopedArenaSet<HPhi*> initially_live(allocator.Adapter(kArenaAllocSsaPhiElimination));
 
   // Add to the worklist phis referenced by non-phi instructions.
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -52,7 +60,7 @@
       }
 
       if (keep_alive) {
-        worklist_.push_back(phi);
+        worklist.push_back(phi);
       } else {
         phi->SetDead();
         if (kIsDebugBuild) {
@@ -63,9 +71,9 @@
   }
 
   // Process the worklist by propagating liveness to phi inputs.
-  while (!worklist_.empty()) {
-    HPhi* phi = worklist_.back();
-    worklist_.pop_back();
+  while (!worklist.empty()) {
+    HPhi* phi = worklist.back();
+    worklist.pop_back();
     for (HInstruction* raw_input : phi->GetInputs()) {
       HPhi* input = raw_input->AsPhi();
       if (input != nullptr && input->IsDead()) {
@@ -73,7 +81,7 @@
         // that the phi was not dead initially (see definition of `initially_live`).
         DCHECK(ContainsElement(initially_live, input));
         input->SetLive();
-        worklist_.push_back(input);
+        worklist.push_back(input);
       }
     }
   }
@@ -115,23 +123,31 @@
 }
 
 void SsaRedundantPhiElimination::Run() {
+  // Use a local allocator for the memory used by this optimization.
+  ScopedArenaAllocator allocator(graph_->GetArenaStack());
+
+  static constexpr size_t kDefaultWorklistSize = 8;
+  ScopedArenaVector<HPhi*> worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
+  worklist.reserve(kDefaultWorklistSize);
+
   // Add all phis to the worklist. The order does not matter for correctness,
   // and no particular order is guaranteed to converge faster.
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
-      worklist_.push_back(inst_it.Current()->AsPhi());
+      worklist.push_back(inst_it.Current()->AsPhi());
     }
   }
 
-  ArenaBitVector visited_phis_in_cycle(graph_->GetAllocator(),
+  ArenaBitVector visited_phis_in_cycle(&allocator,
                                        graph_->GetCurrentInstructionId(),
                                        /* expandable */ false,
                                        kArenaAllocSsaPhiElimination);
-  ArenaVector<HPhi*> cycle_worklist(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
+  visited_phis_in_cycle.ClearAllBits();
+  ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
 
-  while (!worklist_.empty()) {
-    HPhi* phi = worklist_.back();
-    worklist_.pop_back();
+  while (!worklist.empty()) {
+    HPhi* phi = worklist.back();
+    worklist.pop_back();
 
     // If the phi has already been processed, continue.
     if (!phi->IsInBlock()) {
@@ -231,7 +247,7 @@
       for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
         HInstruction* user = use.GetUser();
         if (user->IsPhi() && !visited_phis_in_cycle.IsBitSet(user->GetId())) {
-          worklist_.push_back(user->AsPhi());
+          worklist.push_back(user->AsPhi());
         }
       }
       DCHECK(candidate->StrictlyDominates(current));
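
One detail above is easy to miss: after moving visited_phis_in_cycle onto the local ScopedArenaAllocator, the code now calls ClearAllBits() explicitly, presumably because the scoped arena (unlike the graph's ArenaAllocator) does not hand back zero-initialized memory. A standalone sketch of why a bit vector over non-zeroed storage needs that reset; the BitVector here is hypothetical, not ART's ArenaBitVector:

#include <cstddef>
#include <cstdint>
#include <cstring>

class BitVector {
 public:
  BitVector(uint32_t* storage, size_t num_words)
      : storage_(storage), num_words_(num_words) {}
  // Required whenever `storage` comes from an allocator that does not zero it.
  void ClearAllBits() { std::memset(storage_, 0, num_words_ * sizeof(uint32_t)); }
  void SetBit(size_t idx) { storage_[idx / 32u] |= 1u << (idx % 32u); }
  bool IsBitSet(size_t idx) const { return (storage_[idx / 32u] >> (idx % 32u)) & 1u; }
 private:
  uint32_t* const storage_;
  const size_t num_words_;
};

void Example() {
  uint32_t words[4];          // stack memory: indeterminate, like a raw arena
  BitVector visited(words, 4);
  visited.ClearAllBits();     // without this, IsBitSet() could report garbage
  visited.SetBit(5);
}
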
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index e0cde07..11d5837 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -17,7 +17,6 @@
 #ifndef ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
 #define ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
 
-#include "base/arena_containers.h"
 #include "nodes.h"
 #include "optimization.h"
 
@@ -30,10 +29,7 @@
 class SsaDeadPhiElimination : public HOptimization {
  public:
   explicit SsaDeadPhiElimination(HGraph* graph)
-      : HOptimization(graph, kSsaDeadPhiEliminationPassName),
-        worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
-    worklist_.reserve(kDefaultWorklistSize);
-  }
+      : HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
 
   void Run() OVERRIDE;
 
@@ -43,10 +39,6 @@
   static constexpr const char* kSsaDeadPhiEliminationPassName = "dead_phi_elimination";
 
  private:
-  ArenaVector<HPhi*> worklist_;
-
-  static constexpr size_t kDefaultWorklistSize = 8;
-
   DISALLOW_COPY_AND_ASSIGN(SsaDeadPhiElimination);
 };
 
@@ -59,20 +51,13 @@
 class SsaRedundantPhiElimination : public HOptimization {
  public:
   explicit SsaRedundantPhiElimination(HGraph* graph)
-      : HOptimization(graph, kSsaRedundantPhiEliminationPassName),
-        worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
-    worklist_.reserve(kDefaultWorklistSize);
-  }
+      : HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
 
   void Run() OVERRIDE;
 
   static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
 
  private:
-  ArenaVector<HPhi*> worklist_;
-
-  static constexpr size_t kDefaultWorklistSize = 8;
-
   DISALLOW_COPY_AND_ASSIGN(SsaRedundantPhiElimination);
 };
 
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index a574566..62ed7ee 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -223,7 +223,7 @@
                            size_t dex_register_locations_index) const;
   void CheckCodeInfo(MemoryRegion region) const;
 
-  ArenaAllocator* allocator_;
+  ArenaAllocator* const allocator_;
   const InstructionSet instruction_set_;
   ArenaVector<StackMapEntry> stack_maps_;
 
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index a842c6e..96ac368 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,10 +47,10 @@
 
 TEST(StackMapTest, Test1) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
 
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   size_t number_of_dex_registers = 2;
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(Kind::kInStack, 0);         // Short location.
@@ -58,7 +58,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -128,11 +128,11 @@
 
 TEST(StackMapTest, Test2) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
 
-  ArenaBitVector sp_mask1(&arena, 0, true);
+  ArenaBitVector sp_mask1(&allocator, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
   size_t number_of_dex_registers = 2;
@@ -146,7 +146,7 @@
   stream.EndInlineInfoEntry();
   stream.EndStackMapEntry();
 
-  ArenaBitVector sp_mask2(&arena, 0, true);
+  ArenaBitVector sp_mask2(&allocator, 0, true);
   sp_mask2.SetBit(3);
   sp_mask2.SetBit(8);
   stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
@@ -154,7 +154,7 @@
   stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3);   // Short location.
   stream.EndStackMapEntry();
 
-  ArenaBitVector sp_mask3(&arena, 0, true);
+  ArenaBitVector sp_mask3(&allocator, 0, true);
   sp_mask3.SetBit(1);
   sp_mask3.SetBit(5);
   stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0);
@@ -162,7 +162,7 @@
   stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8);   // Short location.
   stream.EndStackMapEntry();
 
-  ArenaBitVector sp_mask4(&arena, 0, true);
+  ArenaBitVector sp_mask4(&allocator, 0, true);
   sp_mask4.SetBit(6);
   sp_mask4.SetBit(7);
   stream.BeginStackMapEntry(3, 256, 0xCD, &sp_mask4, number_of_dex_registers, 0);
@@ -171,7 +171,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -412,11 +412,11 @@
 
 TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
 
-  ArenaBitVector sp_mask1(&arena, 0, true);
+  ArenaBitVector sp_mask1(&allocator, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
   const size_t number_of_dex_registers = 2;
@@ -431,7 +431,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -506,10 +506,10 @@
 
 TEST(StackMapTest, TestNonLiveDexRegisters) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
 
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(Kind::kNone, 0);            // No location.
@@ -517,7 +517,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -585,10 +585,10 @@
 // not treat it as kNoDexRegisterMap.
 TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
 
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 1024;
   // Create the first stack map (and its Dex register map).
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
@@ -609,7 +609,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -648,10 +648,10 @@
 
 TEST(StackMapTest, TestShareDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
 
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
   // First stack map.
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
@@ -670,7 +670,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -706,10 +706,10 @@
 
 TEST(StackMapTest, TestNoDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
 
-  ArenaBitVector sp_mask(&arena, 0, false);
+  ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 0;
   stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.EndStackMapEntry();
@@ -719,7 +719,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -755,11 +755,11 @@
 
 TEST(StackMapTest, InlineTest) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
 
-  ArenaBitVector sp_mask1(&arena, 0, true);
+  ArenaBitVector sp_mask1(&allocator, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
 
@@ -821,7 +821,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -936,10 +936,10 @@
 
 TEST(StackMapTest, TestDeduplicateStackMask) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
 
-  ArenaBitVector sp_mask(&arena, 0, true);
+  ArenaBitVector sp_mask(&allocator, 0, true);
   sp_mask.SetBit(1);
   sp_mask.SetBit(4);
   stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
@@ -948,7 +948,7 @@
   stream.EndStackMapEntry();
 
   size_t size = stream.PrepareForFillIn();
-  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  void* memory = allocator.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
   stream.FillInCodeInfo(region);
 
@@ -964,10 +964,10 @@
 
 TEST(StackMapTest, TestInvokeInfo) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  StackMapStream stream(&arena, kRuntimeISA);
+  ArenaAllocator allocator(&pool);
+  StackMapStream stream(&allocator, kRuntimeISA);
 
-  ArenaBitVector sp_mask(&arena, 0, true);
+  ArenaBitVector sp_mask(&allocator, 0, true);
   sp_mask.SetBit(1);
   stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
   stream.AddInvoke(kSuper, 1);
@@ -980,11 +980,12 @@
   stream.EndStackMapEntry();
 
   const size_t code_info_size = stream.PrepareForFillIn();
-  MemoryRegion code_info_region(arena.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
+  MemoryRegion code_info_region(allocator.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
   stream.FillInCodeInfo(code_info_region);
 
   const size_t method_info_size = stream.ComputeMethodInfoSize();
-  MemoryRegion method_info_region(arena.Alloc(method_info_size, kArenaAllocMisc), method_info_size);
+  MemoryRegion method_info_region(allocator.Alloc(method_info_size, kArenaAllocMisc),
+                                  method_info_size);
   stream.FillInMethodInfo(method_info_region);
 
   CodeInfo code_info(code_info_region);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 1e9a521..9527a60 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -244,19 +244,19 @@
                                                                EntryPointCallingConvention abi,
                                                                ThreadOffset64 offset) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
+  ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm64
     case kArm64:
-      return arm64::CreateTrampoline(&arena, abi, offset);
+      return arm64::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
     case kMips64:
-      return mips64::CreateTrampoline(&arena, abi, offset);
+      return mips64::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
     case kX86_64:
-      return x86_64::CreateTrampoline(&arena, offset);
+      return x86_64::CreateTrampoline(&allocator, offset);
 #endif
     default:
       UNUSED(abi);
@@ -270,21 +270,21 @@
                                                                EntryPointCallingConvention abi,
                                                                ThreadOffset32 offset) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
+  ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm
     case kArm:
     case kThumb2:
-      return arm::CreateTrampoline(&arena, abi, offset);
+      return arm::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
     case kMips:
-      return mips::CreateTrampoline(&arena, abi, offset);
+      return mips::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case kX86:
       UNUSED(abi);
-      return x86::CreateTrampoline(&arena, offset);
+      return x86::CreateTrampoline(&allocator, offset);
 #endif
     default:
       LOG(FATAL) << "Unexpected InstructionSet: " << isa;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index dbd35ab..e0cef85 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -252,7 +252,7 @@
   // for a single, fast space check per instruction.
   static const int kMinimumGap = 32;
 
-  ArenaAllocator* allocator_;
+  ArenaAllocator* const allocator_;
   uint8_t* contents_;
   uint8_t* cursor_;
   uint8_t* limit_;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 11a9b91..ae7636b 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -719,8 +719,8 @@
   explicit AssemblerTest() {}
 
   void SetUp() OVERRIDE {
-    arena_.reset(new ArenaAllocator(&pool_));
-    assembler_.reset(CreateAssembler(arena_.get()));
+    allocator_.reset(new ArenaAllocator(&pool_));
+    assembler_.reset(CreateAssembler(allocator_.get()));
     test_helper_.reset(
         new AssemblerTestInfrastructure(GetArchitectureString(),
                                         GetAssemblerCmdName(),
@@ -737,7 +737,7 @@
   void TearDown() OVERRIDE {
     test_helper_.reset();  // Clean up the helper.
     assembler_.reset();
-    arena_.reset();
+    allocator_.reset();
   }
 
   // Override this to set up any architecture-specific things, e.g., CPU revision.
@@ -1589,7 +1589,7 @@
   static constexpr size_t kWarnManyCombinationsThreshold = 500;
 
   ArenaPool pool_;
-  std::unique_ptr<ArenaAllocator> arena_;
+  std::unique_ptr<ArenaAllocator> allocator_;
   std::unique_ptr<Ass> assembler_;
   std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5622f89..5307d17 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -167,10 +167,10 @@
 
 class ArmVIXLAssemblerTest : public ::testing::Test {
  public:
-  ArmVIXLAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
+  ArmVIXLAssemblerTest() : pool(), allocator(&pool), assembler(&allocator) { }
 
   ArenaPool pool;
-  ArenaAllocator arena;
+  ArenaAllocator allocator;
   ArmVIXLJNIMacroAssembler assembler;
 };
 
@@ -209,18 +209,16 @@
   const bool is_critical_native = false;
   const char* shorty = "IIFII";
 
-  ArenaPool pool;
-  ArenaAllocator arena(&pool);
-
   std::unique_ptr<JniCallingConvention> jni_conv(
-      JniCallingConvention::Create(&arena,
+      JniCallingConvention::Create(&allocator,
                                    is_static,
                                    is_synchronized,
                                    is_critical_native,
                                    shorty,
                                    kThumb2));
   std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
-      ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
+      ManagedRuntimeCallingConvention::Create(
+          &allocator, is_static, is_synchronized, shorty, kThumb2));
   const int frame_size(jni_conv->FrameSize());
   ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
 
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index ba95e21..34ab4c3 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -58,8 +58,8 @@
   explicit JNIMacroAssemblerTest() {}
 
   void SetUp() OVERRIDE {
-    arena_.reset(new ArenaAllocator(&pool_));
-    assembler_.reset(CreateAssembler(arena_.get()));
+    allocator_.reset(new ArenaAllocator(&pool_));
+    assembler_.reset(CreateAssembler(allocator_.get()));
     test_helper_.reset(
         new AssemblerTestInfrastructure(GetArchitectureString(),
                                         GetAssemblerCmdName(),
@@ -76,7 +76,7 @@
   void TearDown() OVERRIDE {
     test_helper_.reset();  // Clean up the helper.
     assembler_.reset();
-    arena_.reset();
+    allocator_.reset();
   }
 
   // Override this to set up any architecture-specific things, e.g., CPU revision.
@@ -140,7 +140,7 @@
   }
 
   ArenaPool pool_;
-  std::unique_ptr<ArenaAllocator> arena_;
+  std::unique_ptr<ArenaAllocator> allocator_;
   std::unique_ptr<Ass> assembler_;
   std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
 
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index cccde37..e232add 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -24,8 +24,8 @@
 
 TEST(AssemblerX86, CreateBuffer) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  AssemblerBuffer buffer(&arena);
+  ArenaAllocator allocator(&pool);
+  AssemblerBuffer buffer(&allocator);
   AssemblerBuffer::EnsureCapacity ensured(&buffer);
   buffer.Emit<uint8_t>(0x42);
   ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index b08ba4a..0cb3ffd 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -30,8 +30,8 @@
 
 TEST(AssemblerX86_64, CreateBuffer) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  AssemblerBuffer buffer(&arena);
+  ArenaAllocator allocator(&pool);
+  AssemblerBuffer buffer(&allocator);
   AssemblerBuffer::EnsureCapacity ensured(&buffer);
   buffer.Emit<uint8_t>(0x42);
   ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 73724b2..642d26e 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -35,7 +35,7 @@
 class ProfileAssistantTest : public CommonRuntimeTest {
  public:
   void PostRuntimeCreate() OVERRIDE {
-    arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+    allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
   }
 
  protected:
@@ -108,7 +108,7 @@
   // Creates an inline cache which will be destructed at the end of the test.
   ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
     used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
-        std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+        std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
     return used_inline_caches.back().get();
   }
 
@@ -122,13 +122,13 @@
 
     // Monomorphic
     for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.AddClass(0, dex::TypeIndex(0));
       ic_map->Put(dex_pc, dex_pc_data);
     }
     // Polymorphic
     for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.AddClass(0, dex::TypeIndex(0));
       dex_pc_data.AddClass(1, dex::TypeIndex(1));
 
@@ -136,13 +136,13 @@
     }
     // Megamorphic
     for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.SetIsMegamorphic();
       ic_map->Put(dex_pc, dex_pc_data);
     }
     // Missing types
     for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.SetIsMissingTypes();
       ic_map->Put(dex_pc, dex_pc_data);
     }
@@ -375,7 +375,7 @@
     return ProcessProfiles(profile_fds, reference_profile_fd);
   }
 
-  std::unique_ptr<ArenaAllocator> arena_;
+  std::unique_ptr<ArenaAllocator> allocator_;
 
   // Cache of inline caches generated during tests.
   // This makes it easier to pass data between different utilities and ensure that
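
The profman test above keeps one test-owned ArenaAllocator and builds every inline-cache map through its Adapter, so all map nodes are reclaimed together at test teardown. A sketch of the same pattern with std::pmr standing in for the arena adapter; the value type of InlineCacheMap is simplified here to an int:

#include <cstdint>
#include <functional>
#include <map>
#include <memory_resource>

// Simplified stand-in for ProfileCompilationInfo::InlineCacheMap.
using InlineCacheMap = std::pmr::map<uint16_t, int, std::less<uint16_t>>;

void BuildCaches(std::pmr::memory_resource* allocator) {
  // Mirrors InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(...)).
  InlineCacheMap ic_map(std::less<uint16_t>(), allocator);
  for (uint16_t dex_pc = 0; dex_pc < 11; ++dex_pc) {
    ic_map.emplace(dex_pc, /* monomorphic class count */ 1);
  }
}
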
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index c48e30f..2e35f8a 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -72,6 +72,7 @@
   "InductionVar ",
   "BCE          ",
   "DCE          ",
+  "LSA          ",
   "LSE          ",
   "CFRE         ",
   "LICM         ",
@@ -296,7 +297,7 @@
 
 void ArenaPool::ReclaimMemory() {
   while (free_arenas_ != nullptr) {
-    auto* arena = free_arenas_;
+    Arena* arena = free_arenas_;
     free_arenas_ = free_arenas_->next_;
     delete arena;
   }
@@ -330,7 +331,7 @@
     ScopedTrace trace(__PRETTY_FUNCTION__);
     // Doesn't work for malloc.
     MutexLock lock(Thread::Current(), lock_);
-    for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+    for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
       arena->Release();
     }
   }
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 212edfb..a327cb0 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -79,6 +79,7 @@
   kArenaAllocInductionVarAnalysis,
   kArenaAllocBoundsCheckElimination,
   kArenaAllocDCE,
+  kArenaAllocLSA,
   kArenaAllocLSE,
   kArenaAllocCFRE,
   kArenaAllocLICM,
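
The two LSA hunks are a matched pair: the new kArenaAllocLSA enum entry in arena_allocator.h must have a name string at the same position in the table in arena_allocator.cc, or every kind after it would report the wrong name in allocation statistics. A standalone sketch of that parallel-array invariant, with a static_assert to catch a missed update at compile time (the names here are illustrative, not the full ART table):

#include <cstddef>

enum ArenaAllocKind {
  kArenaAllocDCE,
  kArenaAllocLSA,   // new entry...
  kArenaAllocLSE,
  kNumArenaAllocKinds
};

static const char* const kAllocNames[] = {
  "DCE          ",
  "LSA          ",  // ...and its name at the matching index.
  "LSE          ",
};

static_assert(sizeof(kAllocNames) / sizeof(kAllocNames[0]) == kNumArenaAllocKinds,
              "Allocation names must match the enum.");
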
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index 6bf56c8..68e26af 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -34,8 +34,8 @@
 
 TEST_F(ArenaAllocatorTest, Test) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
-  ArenaBitVector bv(&arena, 10, true);
+  ArenaAllocator allocator(&pool);
+  ArenaBitVector bv(&allocator, 10, true);
   bv.SetBit(5);
   EXPECT_EQ(1U, bv.GetStorageSize());
   bv.SetBit(35);
@@ -50,14 +50,14 @@
   uint32_t* small_array;
   {
     // Allocate a small array from an arena and release it.
-    ArenaAllocator arena(&pool);
-    small_array = arena.AllocArray<uint32_t>(kSmallArraySize);
+    ArenaAllocator allocator(&pool);
+    small_array = allocator.AllocArray<uint32_t>(kSmallArraySize);
     ASSERT_EQ(0u, small_array[kSmallArraySize - 1u]);
   }
   {
     // Reuse the previous arena and allocate more than previous allocation including red zone.
-    ArenaAllocator arena(&pool);
-    uint32_t* large_array = arena.AllocArray<uint32_t>(kLargeArraySize);
+    ArenaAllocator allocator(&pool);
+    uint32_t* large_array = allocator.AllocArray<uint32_t>(kLargeArraySize);
     ASSERT_EQ(0u, large_array[kLargeArraySize - 1u]);
     // Verify that the allocation was made on the same arena.
     ASSERT_EQ(small_array, large_array);
@@ -72,70 +72,72 @@
 
   {
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
     // Note: Leaving some space for memory tool red zones.
-    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
-    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 2 / 8);
+    void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
+    void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 2 / 8);
     ASSERT_NE(alloc1, alloc2);
-    ASSERT_EQ(1u, NumberOfArenas(&arena));
+    ASSERT_EQ(1u, NumberOfArenas(&allocator));
   }
   {
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
-    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
-    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
+    ArenaAllocator allocator(&pool);
+    void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+    void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
     ASSERT_NE(alloc1, alloc2);
-    ASSERT_EQ(2u, NumberOfArenas(&arena));
-    void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 7 / 16);
+    ASSERT_EQ(2u, NumberOfArenas(&allocator));
+    void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 7 / 16);
     ASSERT_NE(alloc1, alloc3);
     ASSERT_NE(alloc2, alloc3);
-    ASSERT_EQ(3u, NumberOfArenas(&arena));
+    ASSERT_EQ(3u, NumberOfArenas(&allocator));
   }
   {
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
-    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
-    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
+    ArenaAllocator allocator(&pool);
+    void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+    void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
     ASSERT_NE(alloc1, alloc2);
-    ASSERT_EQ(2u, NumberOfArenas(&arena));
+    ASSERT_EQ(2u, NumberOfArenas(&allocator));
     // Note: Leaving some space for memory tool red zones.
-    void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
+    void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
     ASSERT_NE(alloc1, alloc3);
     ASSERT_NE(alloc2, alloc3);
-    ASSERT_EQ(2u, NumberOfArenas(&arena));
+    ASSERT_EQ(2u, NumberOfArenas(&allocator));
   }
   {
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
-    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
-    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+    ArenaAllocator allocator(&pool);
+    void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
+    void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
     ASSERT_NE(alloc1, alloc2);
-    ASSERT_EQ(2u, NumberOfArenas(&arena));
+    ASSERT_EQ(2u, NumberOfArenas(&allocator));
     // Note: Leaving some space for memory tool red zones.
-    void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
+    void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
     ASSERT_NE(alloc1, alloc3);
     ASSERT_NE(alloc2, alloc3);
-    ASSERT_EQ(2u, NumberOfArenas(&arena));
+    ASSERT_EQ(2u, NumberOfArenas(&allocator));
   }
   {
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
     // Note: Leaving some space for memory tool red zones.
     for (size_t i = 0; i != 15; ++i) {
-      arena.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16);    // Allocate 15 times from the same arena.
-      ASSERT_EQ(i + 1u, NumberOfArenas(&arena));
-      arena.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16);   // Allocate a separate arena.
-      ASSERT_EQ(i + 2u, NumberOfArenas(&arena));
+      // Allocate 15 times from the same arena.
+      allocator.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16);
+      ASSERT_EQ(i + 1u, NumberOfArenas(&allocator));
+      // Allocate a separate arena.
+      allocator.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16);
+      ASSERT_EQ(i + 2u, NumberOfArenas(&allocator));
     }
   }
 }
 
 TEST_F(ArenaAllocatorTest, AllocAlignment) {
   ArenaPool pool;
-  ArenaAllocator arena(&pool);
+  ArenaAllocator allocator(&pool);
   for (size_t iterations = 0; iterations <= 10; ++iterations) {
     for (size_t size = 1; size <= ArenaAllocator::kAlignment + 1; ++size) {
-      void* allocation = arena.Alloc(size);
+      void* allocation = allocator.Alloc(size);
       EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(allocation))
           << reinterpret_cast<uintptr_t>(allocation);
     }
@@ -152,52 +154,52 @@
   {
     // Case 1: small aligned allocation, aligned extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
 
     const size_t new_size = ArenaAllocator::kAlignment * 3;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_EQ(original_allocation, realloc_allocation);
   }
 
   {
     // Case 2: small aligned allocation, non-aligned extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
 
     const size_t new_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_EQ(original_allocation, realloc_allocation);
   }
 
   {
     // Case 3: small non-aligned allocation, aligned extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
 
     const size_t new_size = ArenaAllocator::kAlignment * 4;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_EQ(original_allocation, realloc_allocation);
   }
 
   {
     // Case 4: small non-aligned allocation, aligned non-extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
 
     const size_t new_size = ArenaAllocator::kAlignment * 3;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_EQ(original_allocation, realloc_allocation);
   }
 
@@ -207,31 +209,31 @@
   {
     // Case 5: large allocation, aligned extend into next arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
         ArenaAllocator::kAlignment * 5;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
 
     const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_NE(original_allocation, realloc_allocation);
   }
 
   {
     // Case 6: large allocation, non-aligned extend into next arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
         ArenaAllocator::kAlignment * 4 -
         ArenaAllocator::kAlignment / 2;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
 
     const size_t new_size = arena_allocator::kArenaDefaultSize +
         ArenaAllocator::kAlignment * 2 +
         ArenaAllocator::kAlignment / 2;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_NE(original_allocation, realloc_allocation);
   }
 }
@@ -240,68 +242,68 @@
   {
     // Case 1: small aligned allocation, aligned extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
     const size_t new_size = ArenaAllocator::kAlignment * 3;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
 
-    void* after_alloc = arena.Alloc(1);
+    void* after_alloc = allocator.Alloc(1);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
   }
 
   {
     // Case 2: small aligned allocation, non-aligned extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
     const size_t new_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
 
-    void* after_alloc = arena.Alloc(1);
+    void* after_alloc = allocator.Alloc(1);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
   }
 
   {
     // Case 3: small non-aligned allocation, aligned extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
     const size_t new_size = ArenaAllocator::kAlignment * 4;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
 
-    void* after_alloc = arena.Alloc(1);
+    void* after_alloc = allocator.Alloc(1);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
   }
 
   {
     // Case 4: small non-aligned allocation, aligned non-extend inside arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
     const size_t new_size = ArenaAllocator::kAlignment * 3;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
 
-    void* after_alloc = arena.Alloc(1);
+    void* after_alloc = allocator.Alloc(1);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
   }
 
@@ -311,39 +313,39 @@
   {
     // Case 5: large allocation, aligned extend into next arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
         ArenaAllocator::kAlignment * 5;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
     const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
 
-    void* after_alloc = arena.Alloc(1);
+    void* after_alloc = allocator.Alloc(1);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
   }
 
   {
     // Case 6: large allocation, non-aligned extend into next arena.
     ArenaPool pool;
-    ArenaAllocator arena(&pool);
+    ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
         ArenaAllocator::kAlignment * 4 -
         ArenaAllocator::kAlignment / 2;
-    void* original_allocation = arena.Alloc(original_size);
+    void* original_allocation = allocator.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
     const size_t new_size = arena_allocator::kArenaDefaultSize +
         ArenaAllocator::kAlignment * 2 +
         ArenaAllocator::kAlignment / 2;
-    void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+    void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
 
-    void* after_alloc = arena.Alloc(1);
+    void* after_alloc = allocator.Alloc(1);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
   }
 }
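
The six Realloc cases above pin down only the public contract: whatever Realloc returns stays kAlignment-aligned, whether the request is satisfied by extending the tail allocation in place (cases 1-4) or by moving into a fresh arena (cases 5-6). A condensed sketch of that contract, assuming it builds inside the ART tree with base/arena_allocator.h and the usual CHECK/IsAligned helpers:

    // Condensed sketch of the alignment contract exercised above; in-place
    // extension vs. copy is an implementation detail the tests do not assume.
    ArenaPool pool;
    ArenaAllocator allocator(&pool);

    const size_t original_size = ArenaAllocator::kAlignment * 2;
    void* p = allocator.Alloc(original_size);
    CHECK(IsAligned<ArenaAllocator::kAlignment>(p));

    // Grow the most recent allocation; the result may or may not alias p,
    // but it must be aligned, and so must the next allocation after it.
    void* q = allocator.Realloc(p, original_size,
                                original_size + ArenaAllocator::kAlignment / 2);
    CHECK(IsAligned<ArenaAllocator::kAlignment>(q));
    CHECK(IsAligned<ArenaAllocator::kAlignment>(allocator.Alloc(1)));
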
diff --git a/runtime/base/arena_bit_vector.cc b/runtime/base/arena_bit_vector.cc
index 5f8f5d2..1542e9d 100644
--- a/runtime/base/arena_bit_vector.cc
+++ b/runtime/base/arena_bit_vector.cc
@@ -52,9 +52,9 @@
 template <typename ArenaAlloc>
 class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
  public:
-  static ArenaBitVectorAllocator* Create(ArenaAlloc* arena, ArenaAllocKind kind) {
-    void* storage = arena->template Alloc<ArenaBitVectorAllocator>(kind);
-    return new (storage) ArenaBitVectorAllocator(arena, kind);
+  static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
+    void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
+    return new (storage) ArenaBitVectorAllocator(allocator, kind);
   }
 
   ~ArenaBitVectorAllocator() {
@@ -63,36 +63,36 @@
   }
 
   virtual void* Alloc(size_t size) {
-    return arena_->Alloc(size, this->Kind());
+    return allocator_->Alloc(size, this->Kind());
   }
 
   virtual void Free(void*) {}  // Nop.
 
  private:
-  ArenaBitVectorAllocator(ArenaAlloc* arena, ArenaAllocKind kind)
-      : ArenaBitVectorAllocatorKind(kind), arena_(arena) { }
+  ArenaBitVectorAllocator(ArenaAlloc* allocator, ArenaAllocKind kind)
+      : ArenaBitVectorAllocatorKind(kind), allocator_(allocator) { }
 
-  ArenaAlloc* const arena_;
+  ArenaAlloc* const allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
 };
 
-ArenaBitVector::ArenaBitVector(ArenaAllocator* arena,
+ArenaBitVector::ArenaBitVector(ArenaAllocator* allocator,
                                unsigned int start_bits,
                                bool expandable,
                                ArenaAllocKind kind)
   :  BitVector(start_bits,
                expandable,
-               ArenaBitVectorAllocator<ArenaAllocator>::Create(arena, kind)) {
+               ArenaBitVectorAllocator<ArenaAllocator>::Create(allocator, kind)) {
 }
 
-ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* arena,
+ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* allocator,
                                unsigned int start_bits,
                                bool expandable,
                                ArenaAllocKind kind)
   :  BitVector(start_bits,
                expandable,
-               ArenaBitVectorAllocator<ScopedArenaAllocator>::Create(arena, kind)) {
+               ArenaBitVectorAllocator<ScopedArenaAllocator>::Create(allocator, kind)) {
 }
 
 }  // namespace art
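
ArenaBitVectorAllocator::Create above is the arena idiom of a self-hosted adapter: the adapter's own storage is carved out of the arena it wraps, so its lifetime ends with the arena's and Free() can legitimately be a no-op. A generic, self-contained sketch of the idiom (Arena and SelfHostedAdapter are illustrative names, not ART API):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Arena {  // stand-in for ArenaAllocator / ScopedArenaAllocator
      void* Alloc(size_t n) { return std::malloc(n); }  // arena storage stand-in
    };

    class SelfHostedAdapter {
     public:
      static SelfHostedAdapter* Create(Arena* arena) {
        // Carve the adapter's own storage out of the arena, then placement-new it.
        void* storage = arena->Alloc(sizeof(SelfHostedAdapter));
        return new (storage) SelfHostedAdapter(arena);
      }
      void Free(void*) {}  // Nop: the arena reclaims everything at once.
     private:
      explicit SelfHostedAdapter(Arena* arena) : arena_(arena) {}
      Arena* const arena_;
    };
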
diff --git a/runtime/base/arena_bit_vector.h b/runtime/base/arena_bit_vector.h
index d86d622..ca1d5b1 100644
--- a/runtime/base/arena_bit_vector.h
+++ b/runtime/base/arena_bit_vector.h
@@ -31,19 +31,19 @@
 class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
  public:
   template <typename Allocator>
-  static ArenaBitVector* Create(Allocator* arena,
+  static ArenaBitVector* Create(Allocator* allocator,
                                 uint32_t start_bits,
                                 bool expandable,
                                 ArenaAllocKind kind = kArenaAllocGrowableBitMap) {
-    void* storage = arena->template Alloc<ArenaBitVector>(kind);
-    return new (storage) ArenaBitVector(arena, start_bits, expandable, kind);
+    void* storage = allocator->template Alloc<ArenaBitVector>(kind);
+    return new (storage) ArenaBitVector(allocator, start_bits, expandable, kind);
   }
 
-  ArenaBitVector(ArenaAllocator* arena,
+  ArenaBitVector(ArenaAllocator* allocator,
                  uint32_t start_bits,
                  bool expandable,
                  ArenaAllocKind kind = kArenaAllocGrowableBitMap);
-  ArenaBitVector(ScopedArenaAllocator* arena,
+  ArenaBitVector(ScopedArenaAllocator* allocator,
                  uint32_t start_bits,
                  bool expandable,
                  ArenaAllocKind kind = kArenaAllocGrowableBitMap);
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 1fad28d..19501de 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -1829,7 +1829,7 @@
 ProfileCompilationInfo::DexFileData::FindOrAddMethod(uint16_t method_index) {
   return &(method_map.FindOrAdd(
       method_index,
-      InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)))->second);
+      InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)))->second);
 }
 
 // Mark a method as executed at least once.
@@ -1848,7 +1848,7 @@
   if ((flags & MethodHotness::kFlagHot) != 0) {
     method_map.FindOrAdd(
         index,
-        InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+        InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
   }
   return true;
 }
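
allocator_->Adapter(kArenaAllocProfile) wraps the arena in an STL-compatible allocator, so the InlineCacheMap's nodes come out of the arena, and the kind tag feeds ART's per-kind allocation accounting. A sketch of the idiom, assuming the ART container headers; the value type here is illustrative:

    ArenaPool pool;
    ArenaAllocator allocator(&pool);

    // ArenaSafeMap is ART's SafeMap bound to an arena-backed allocator;
    // every node inserted below is arena storage, released with the arena.
    ArenaSafeMap<uint16_t, uint32_t> map(std::less<uint16_t>(),
                                         allocator.Adapter(kArenaAllocProfile));
    map.FindOrAdd(/* method_index */ 7u, /* default value */ 0u);
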
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 8889b34..8dbb43f 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -434,7 +434,7 @@
                 uint32_t location_checksum,
                 uint16_t index,
                 uint32_t num_methods)
-        : arena_(allocator),
+        : allocator_(allocator),
           profile_key(key),
           profile_index(index),
           checksum(location_checksum),
@@ -466,8 +466,8 @@
 
     MethodHotness GetHotnessInfo(uint32_t dex_method_index) const;
 
-    // The arena used to allocate new inline cache maps.
-    ArenaAllocator* arena_;
+    // The allocator used to allocate new inline cache maps.
+    ArenaAllocator* const allocator_;
     // The profile key this data belongs to.
     std::string profile_key;
     // The profile index of this dex file (matches ClassReference#dex_profile_index).
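
The hunk above also tightens the member from ArenaAllocator* to ArenaAllocator* const: the pointer is bound once in the constructor's initializer list and can never be reseated afterwards, while the allocator it designates stays fully usable. The distinction in plain C++:

    int x = 0;
    int* const p = &x;  // const pointer: cannot be reseated after initialization
    *p = 1;             // ...but the pointee remains mutable
    // p = nullptr;     // would not compile: assignment of read-only variable
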
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 2cb8294..f155d7e 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -39,7 +39,7 @@
 class ProfileCompilationInfoTest : public CommonRuntimeTest {
  public:
   void PostRuntimeCreate() OVERRIDE {
-    arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+    allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
   }
 
  protected:
@@ -176,7 +176,7 @@
   // Creates an inline cache which will be destructed at the end of the test.
   ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
     used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
-        std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+        std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
     return used_inline_caches.back().get();
   }
 
@@ -188,7 +188,7 @@
     for (const auto& inline_cache : pmi.inline_caches) {
       ProfileCompilationInfo::DexPcData& dex_pc_data =
           ic_map->FindOrAdd(
-              inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(arena_.get()))->second;
+              inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(allocator_.get()))->second;
       if (inline_cache.is_missing_types) {
         dex_pc_data.SetIsMissingTypes();
       }
@@ -215,13 +215,13 @@
 
     // Monomorphic
     for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.AddClass(0, dex::TypeIndex(0));
       ic_map->Put(dex_pc, dex_pc_data);
     }
     // Polymorphic
     for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.AddClass(0, dex::TypeIndex(0));
       dex_pc_data.AddClass(1, dex::TypeIndex(1));
       dex_pc_data.AddClass(2, dex::TypeIndex(2));
@@ -230,13 +230,13 @@
     }
     // Megamorphic
     for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.SetIsMegamorphic();
       ic_map->Put(dex_pc, dex_pc_data);
     }
     // Missing types
     for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
       dex_pc_data.SetIsMissingTypes();
       ic_map->Put(dex_pc, dex_pc_data);
     }
@@ -273,7 +273,7 @@
   static constexpr int kProfileMagicSize = 4;
   static constexpr int kProfileVersionSize = 4;
 
-  std::unique_ptr<ArenaAllocator> arena_;
+  std::unique_ptr<ArenaAllocator> allocator_;
 
   // Cache of inline caches generated during tests.
   // This makes it easier to pass data between different utilities and ensure that
@@ -730,7 +730,7 @@
   pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
   pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
   for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
-    ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+    ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
     dex_pc_data.AddClass(0, dex::TypeIndex(0));
     dex_pc_data.AddClass(1, dex::TypeIndex(1));
     ic_map->Put(dex_pc, dex_pc_data);
@@ -741,7 +741,7 @@
   pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
   pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
   for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
-    ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+    ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
     dex_pc_data.AddClass(1, dex::TypeIndex(0));
     dex_pc_data.AddClass(0, dex::TypeIndex(1));
     ic_map_reindexed->Put(dex_pc, dex_pc_data);
@@ -795,7 +795,7 @@
   ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
   pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
-  ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+  ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
   dex_pc_data.SetIsMegamorphic();
   ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
 
@@ -825,7 +825,7 @@
   ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
   ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
   pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
-  ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+  ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
   dex_pc_data.SetIsMissingTypes();
   ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
 
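
The test fixture builds its allocator in PostRuntimeCreate() rather than at construction because the ArenaPool it wraps belongs to the Runtime, which does not exist yet when the fixture object is created. A sketch of the dependency, assuming ART's CommonRuntimeTest harness:

    class ProfileInfoTestSketch : public CommonRuntimeTest {
     public:
      void PostRuntimeCreate() OVERRIDE {
        // Runtime::Current() is only valid once the runtime has been created.
        allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
      }

     protected:
      std::unique_ptr<ArenaAllocator> allocator_;
    };
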
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 7246bae..0033167 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -71,8 +71,8 @@
 // sure we only print this once.
 static bool gPrintedDxMonitorText = false;
 
-PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
-    : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& allocator)
+    : register_lines_(allocator.Adapter(kArenaAllocVerifier)) {}
 
 void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
                                  uint32_t insns_size, uint16_t registers_size,
@@ -552,9 +552,9 @@
                                bool allow_thread_suspension)
     : self_(self),
       arena_stack_(Runtime::Current()->GetArenaPool()),
-      arena_(&arena_stack_),
-      reg_types_(can_load_classes, arena_),
-      reg_table_(arena_),
+      allocator_(&arena_stack_),
+      reg_types_(can_load_classes, allocator_),
+      reg_table_(allocator_),
       work_insn_idx_(dex::kDexNoIndex),
       dex_method_idx_(dex_method_idx),
       mirror_method_(method),
@@ -868,7 +868,7 @@
   }
 
   // Allocate and initialize an array to hold instruction data.
-  insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+  insn_flags_.reset(allocator_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
   DCHECK(insn_flags_ != nullptr);
   std::uninitialized_fill_n(insn_flags_.get(),
                             code_item_->insns_size_in_code_units_,
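
AllocArray<T>() hands back raw arena storage with no constructors run, which is why the verifier follows it with std::uninitialized_fill_n to copy-construct each InstructionFlags slot in place. The same two-step idiom over a plain malloc'd buffer, for illustration (Flags is a stand-in type):

    #include <cstdint>
    #include <cstdlib>
    #include <memory>

    struct Flags { uint8_t bits = 0; };  // stand-in for InstructionFlags

    Flags* AllocAndInit(size_t n) {
      // Step 1: raw, uninitialized storage (AllocArray<T> in the arena case).
      Flags* raw = static_cast<Flags*>(std::malloc(n * sizeof(Flags)));
      // Step 2: copy-construct n objects into that storage.
      std::uninitialized_fill_n(raw, n, Flags());
      return raw;
    }
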
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 57ab56c..1f1d7c1 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -67,7 +67,7 @@
 // execution of that instruction.
 class PcToRegisterLineTable {
  public:
-  explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
+  explicit PcToRegisterLineTable(ScopedArenaAllocator& allocator);
   ~PcToRegisterLineTable();
 
   // Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -222,7 +222,7 @@
   }
 
   ScopedArenaAllocator& GetScopedAllocator() {
-    return arena_;
+    return allocator_;
   }
 
  private:
@@ -711,7 +711,7 @@
 
   // Arena allocator.
   ArenaStack arena_stack_;
-  ScopedArenaAllocator arena_;
+  ScopedArenaAllocator allocator_;
 
   RegTypeCache reg_types_;
 
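
Note the declaration order in the verifier: allocator_ is initialized from &arena_stack_, and C++ initializes members in declaration order, so arena_stack_ must be declared first for the allocator to borrow a live object. The constraint in miniature:

    struct Stack {};
    struct Allocator {
      explicit Allocator(Stack* stack) : stack_(stack) {}
      Stack* const stack_;
    };

    struct Verifierish {
      Stack arena_stack_;                    // declared first, so constructed first
      Allocator allocator_{&arena_stack_};   // safe: arena_stack_ already exists
    };
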
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 0c00868..4ebe151 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -164,7 +164,7 @@
 }
 
 StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
-  char* ptr = arena_.AllocArray<char>(string_piece.length());
+  char* ptr = allocator_.AllocArray<char>(string_piece.length());
   memcpy(ptr, string_piece.data(), string_piece.length());
   return StringPiece(ptr, string_piece.length());
 }
@@ -197,9 +197,10 @@
     if (klass->CannotBeAssignedFromOtherTypes() || precise) {
       DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
       DCHECK(!klass->IsInterface());
-      entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
+      entry =
+          new (&allocator_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
     } else {
-      entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
+      entry = new (&allocator_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
     }
     return AddEntry(entry);
   } else {  // Class not resolved.
@@ -213,7 +214,7 @@
     }
     if (IsValidDescriptor(descriptor)) {
       return AddEntry(
-          new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
+          new (&allocator_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
     } else {
       // The descriptor is broken: return the unknown type, as there's nothing
       // sensible that could be done at runtime.
@@ -224,7 +225,7 @@
 
 const RegType& RegTypeCache::MakeUnresolvedReference() {
   // The descriptor is intentionally invalid so nothing else will match this type.
-  return AddEntry(new (&arena_) UnresolvedReferenceType(AddString("a"), entries_.size()));
+  return AddEntry(new (&allocator_) UnresolvedReferenceType(AddString("a"), entries_.size()));
 }
 
 const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
@@ -253,8 +254,8 @@
   DCHECK(FindClass(klass, precise) == nullptr);
   RegType* const reg_type = precise
       ? static_cast<RegType*>(
-          new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
-      : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+          new (&allocator_) PreciseReferenceType(klass, descriptor, entries_.size()))
+      : new (&allocator_) ReferenceType(klass, descriptor, entries_.size());
   return &AddEntry(reg_type);
 }
 
@@ -267,11 +268,11 @@
   return *reg_type;
 }
 
-RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
-    : entries_(arena.Adapter(kArenaAllocVerifier)),
-      klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator)
+    : entries_(allocator.Adapter(kArenaAllocVerifier)),
+      klass_entries_(allocator.Adapter(kArenaAllocVerifier)),
       can_load_classes_(can_load_classes),
-      arena_(arena) {
+      allocator_(allocator) {
   if (kIsDebugBuild) {
     Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
   }
@@ -349,7 +350,7 @@
 const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left,
                                                  const RegType& right,
                                                  MethodVerifier* verifier) {
-  ArenaBitVector types(&arena_,
+  ArenaBitVector types(&allocator_,
                        kDefaultArenaBitVectorBytes * kBitsPerByte,  // Allocate at least 8 bytes.
                        true);                                       // Is expandable.
   const RegType* left_resolved;
@@ -426,10 +427,10 @@
       }
     }
   }
-  return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
-                                                     types,
-                                                     this,
-                                                     entries_.size()));
+  return AddEntry(new (&allocator_) UnresolvedMergedType(resolved_parts_merged,
+                                                         types,
+                                                         this,
+                                                         entries_.size()));
 }
 
 const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
@@ -446,7 +447,7 @@
       }
     }
   }
-  return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
+  return AddEntry(new (&allocator_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
 }
 
 const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
@@ -462,9 +463,9 @@
         return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
       }
     }
-    entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
-                                                         allocation_pc,
-                                                         entries_.size());
+    entry = new (&allocator_) UnresolvedUninitializedRefType(descriptor,
+                                                             allocation_pc,
+                                                             entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -476,10 +477,10 @@
         return *down_cast<const UninitializedReferenceType*>(cur_entry);
       }
     }
-    entry = new (&arena_) UninitializedReferenceType(klass,
-                                                     descriptor,
-                                                     allocation_pc,
-                                                     entries_.size());
+    entry = new (&allocator_) UninitializedReferenceType(klass,
+                                                         descriptor,
+                                                         allocation_pc,
+                                                         entries_.size());
   }
   return AddEntry(entry);
 }
@@ -496,7 +497,7 @@
         return *cur_entry;
       }
     }
-    entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
+    entry = new (&allocator_) UnresolvedReferenceType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = uninit_type.GetClass();
     if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -507,7 +508,7 @@
           return *cur_entry;
         }
       }
-      entry = new (&arena_) ReferenceType(klass, "", entries_.size());
+      entry = new (&allocator_) ReferenceType(klass, "", entries_.size());
     } else if (!klass->IsPrimitive()) {
       // We're uninitialized because of allocation; look up or create a precise type,
       // as allocations may only create objects of that type.
@@ -526,9 +527,9 @@
           return *cur_entry;
         }
       }
-      entry = new (&arena_) PreciseReferenceType(klass,
-                                                 uninit_type.GetDescriptor(),
-                                                 entries_.size());
+      entry = new (&allocator_) PreciseReferenceType(klass,
+                                                     uninit_type.GetDescriptor(),
+                                                     entries_.size());
     } else {
       return Conflict();
     }
@@ -547,7 +548,7 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+    entry = new (&allocator_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -556,7 +557,7 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
+    entry = new (&allocator_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
   }
   return AddEntry(entry);
 }
@@ -572,9 +573,9 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new (&arena_) PreciseConstType(value, entries_.size());
+    entry = new (&allocator_) PreciseConstType(value, entries_.size());
   } else {
-    entry = new (&arena_) ImpreciseConstType(value, entries_.size());
+    entry = new (&allocator_) ImpreciseConstType(value, entries_.size());
   }
   return AddEntry(entry);
 }
@@ -589,9 +590,9 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new (&arena_) PreciseConstLoType(value, entries_.size());
+    entry = new (&allocator_) PreciseConstLoType(value, entries_.size());
   } else {
-    entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
+    entry = new (&allocator_) ImpreciseConstLoType(value, entries_.size());
   }
   return AddEntry(entry);
 }
@@ -606,9 +607,9 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new (&arena_) PreciseConstHiType(value, entries_.size());
+    entry = new (&allocator_) PreciseConstHiType(value, entries_.size());
   } else {
-    entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
+    entry = new (&allocator_) ImpreciseConstHiType(value, entries_.size());
   }
   return AddEntry(entry);
 }
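
Every "new (&allocator_) SomeRegType(...)" above is class-level placement new: the RegType hierarchy routes allocation through an operator new that takes the allocator, so entries live in the arena and are never individually deleted. A minimal, self-contained sketch of the mechanism (Arena and Node are illustrative, with malloc standing in for arena storage):

    #include <cstddef>
    #include <cstdlib>

    struct Arena {  // stand-in for ScopedArenaAllocator
      void* Alloc(size_t n) { return std::malloc(n); }
    };

    struct Node {
      // Class-level placement new: storage comes from the arena at the new-site.
      static void* operator new(size_t size, Arena* arena) { return arena->Alloc(size); }
      explicit Node(int id) : id_(id) {}
      int id_;
    };

    int main() {
      Arena arena;
      Node* n = new (&arena) Node(42);  // no matching delete; the arena owns it
      return n->id_ == 42 ? 0 : 1;
    }
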
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 96eca05..74d9e9d 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -61,7 +61,7 @@
 
 class RegTypeCache {
  public:
-  explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
+  explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator);
   ~RegTypeCache();
   static void Init() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!RegTypeCache::primitive_initialized_) {
@@ -201,7 +201,7 @@
   const bool can_load_classes_;
 
   // Arena allocator.
-  ScopedArenaAllocator& arena_;
+  ScopedArenaAllocator& allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
 };
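
Holding the allocator by reference pairs naturally with DISALLOW_COPY_AND_ASSIGN: a reference member is bound for the object's whole lifetime, and it would make any implicitly generated copy assignment ill-formed anyway. What the macro boils down to:

    struct Allocator {};

    class CacheSketch {
     public:
      explicit CacheSketch(Allocator& allocator) : allocator_(allocator) {}
      CacheSketch(const CacheSketch&) = delete;             // DISALLOW_COPY_AND_ASSIGN,
      CacheSketch& operator=(const CacheSketch&) = delete;  // spelled out
     private:
      Allocator& allocator_;  // reference member: bound once, at construction
    };
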
diff --git a/tools/ahat/src/main/com/android/ahat/Main.java b/tools/ahat/src/main/com/android/ahat/Main.java
index 31c485d..a0fbf77 100644
--- a/tools/ahat/src/main/com/android/ahat/Main.java
+++ b/tools/ahat/src/main/com/android/ahat/Main.java
@@ -48,6 +48,26 @@
     out.println("");
   }
 
+  /**
+   * Loads the given heap dump file.
+   * Prints an error message and exits the application if the heap dump
+   * cannot be loaded.
+   */
+  private static AhatSnapshot loadHeapDump(File hprof, ProguardMap map) {
+    System.out.println("Processing '" + hprof + "' ...");
+    try {
+      return Parser.parseHeapDump(hprof, map);
+    } catch (IOException e) {
+      System.err.println("Unable to load '" + hprof + "':");
+      e.printStackTrace();
+    } catch (HprofFormatException e) {
+      System.err.println("'" + hprof + "' does not appear to be a valid Java heap dump:");
+      e.printStackTrace();
+    }
+    System.exit(1);
+    throw new AssertionError("Unreachable");
+  }
+
   public static void main(String[] args) {
     int port = 7100;
     for (String arg : args) {
@@ -105,40 +125,39 @@
       return;
     }
 
+    // Launch the server before parsing the hprof file so we get
+    // BindExceptions quickly.
+    InetAddress loopback = InetAddress.getLoopbackAddress();
+    InetSocketAddress addr = new InetSocketAddress(loopback, port);
+    System.out.println("Preparing " + addr + " ...");
+    HttpServer server = null;
     try {
-      // Launch the server before parsing the hprof file so we get
-      // BindExceptions quickly.
-      InetAddress loopback = InetAddress.getLoopbackAddress();
-      InetSocketAddress addr = new InetSocketAddress(loopback, port);
-      System.out.println("Preparing " + addr + " ...");
-      HttpServer server = HttpServer.create(addr, 0);
-
-      System.out.println("Processing '" + hprof + "' ...");
-      AhatSnapshot ahat = Parser.parseHeapDump(hprof, map);
-
-      if (hprofbase != null) {
-        System.out.println("Processing '" + hprofbase + "' ...");
-        AhatSnapshot base = Parser.parseHeapDump(hprofbase, mapbase);
-
-        System.out.println("Diffing heap dumps ...");
-        Diff.snapshots(ahat, base);
-      }
-
-      server.createContext("/", new AhatHttpHandler(new OverviewHandler(ahat, hprof, hprofbase)));
-      server.createContext("/rooted", new AhatHttpHandler(new RootedHandler(ahat)));
-      server.createContext("/object", new AhatHttpHandler(new ObjectHandler(ahat)));
-      server.createContext("/objects", new AhatHttpHandler(new ObjectsHandler(ahat)));
-      server.createContext("/site", new AhatHttpHandler(new SiteHandler(ahat)));
-      server.createContext("/bitmap", new BitmapHandler(ahat));
-      server.createContext("/style.css", new StaticHandler("style.css", "text/css"));
-      server.setExecutor(Executors.newFixedThreadPool(1));
-      System.out.println("Server started on localhost:" + port);
-
-      server.start();
-    } catch (HprofFormatException|IOException e) {
-      System.err.println("Unable to launch ahat:");
+      server = HttpServer.create(addr, 0);
+    } catch (IOException e) {
+      System.err.println("Unable to setup ahat server:");
       e.printStackTrace();
+      System.exit(1);
     }
+
+    AhatSnapshot ahat = loadHeapDump(hprof, map);
+    if (hprofbase != null) {
+      AhatSnapshot base = loadHeapDump(hprofbase, mapbase);
+
+      System.out.println("Diffing heap dumps ...");
+      Diff.snapshots(ahat, base);
+    }
+
+    server.createContext("/", new AhatHttpHandler(new OverviewHandler(ahat, hprof, hprofbase)));
+    server.createContext("/rooted", new AhatHttpHandler(new RootedHandler(ahat)));
+    server.createContext("/object", new AhatHttpHandler(new ObjectHandler(ahat)));
+    server.createContext("/objects", new AhatHttpHandler(new ObjectsHandler(ahat)));
+    server.createContext("/site", new AhatHttpHandler(new SiteHandler(ahat)));
+    server.createContext("/bitmap", new BitmapHandler(ahat));
+    server.createContext("/style.css", new StaticHandler("style.css", "text/css"));
+    server.setExecutor(Executors.newFixedThreadPool(1));
+    System.out.println("Server started on localhost:" + port);
+
+    server.start();
   }
 }
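
The restructuring above moves HttpServer.create ahead of heap-dump parsing so a port conflict fails fast instead of surfacing only after minutes of parsing. The same ordering principle in a minimal POSIX sketch, kept in C++ like the other sketches here; port 7100 mirrors ahat's default:

    #include <arpa/inet.h>
    #include <cstdio>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main() {
      // Bind the port first: a conflict (EADDRINUSE) surfaces immediately,
      // before any expensive setup work begins.
      int fd = socket(AF_INET, SOCK_STREAM, 0);
      sockaddr_in addr{};
      addr.sin_family = AF_INET;
      addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
      addr.sin_port = htons(7100);
      if (bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) != 0) {
        perror("bind");
        return 1;
      }
      // ... only now do the expensive setup, then listen() and serve ...
      close(fd);
      return 0;
    }
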