Use ScopedArenaAllocator for code generation.

Reuse the memory previously allocated on the ArenaStack by
optimization passes.

This CL handles only the architecture-independent codegen and
slow paths; architecture-dependent codegen allocations will be
moved to the ScopedArenaAllocator in a follow-up.

Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
  BatteryStats.dumpCheckinLocked(): 19.6MiB -> 18.5MiB (-1189KiB)
  BatteryStats.dumpLocked(): 39.3MiB -> 37.0MiB (-2379KiB)
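
The savings come from the stack-like allocation discipline of
ArenaStack. A minimal sketch of the reuse pattern (simplified;
RunPass and kNumSlots are placeholders, not ART code):

  #include "base/scoped_arena_allocator.h"

  void RunPass(ArenaStack* arena_stack) {
    // Opens a new frame on the arena stack.
    ScopedArenaAllocator allocator(arena_stack);
    // Reuses chunks released by previously destroyed
    // ScopedArenaAllocators instead of growing the
    // method-lifetime ArenaAllocator.
    int* scratch =
        allocator.AllocArray<int>(kNumSlots, kArenaAllocCodeGenerator);
    // ... use scratch for the pass ...
  }  // Destructor pops the frame; the next pass can reuse the memory.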

Also move definitions of functions that use bit_vector-inl.h
from bit_vector.h to bit_vector-inl.h.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 64312607
Change-Id: I84688c3a5a95bf90f56bd3a150bc31fedc95f29c
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 84f0182..9d0b5c8 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -42,6 +42,7 @@
 
 #include "base/bit_utils.h"
 #include "base/bit_utils_iterator.h"
+#include "base/casts.h"
 #include "bytecode_utils.h"
 #include "class_linker.h"
 #include "compiled_method.h"
@@ -59,6 +60,7 @@
 #include "parallel_move_resolver.h"
 #include "scoped_thread_state_change-inl.h"
 #include "ssa_liveness_analysis.h"
+#include "stack_map_stream.h"
 #include "thread-current-inl.h"
 #include "utils/assembler.h"
 
@@ -141,6 +143,158 @@
   return true;
 }
 
+class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
+ public:
+  static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
+                                                    InstructionSet instruction_set) {
+    ScopedArenaAllocator allocator(arena_stack);
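+    // Allocate the CodeGenerationData itself from the scoped allocator, then
+    // move the allocator into the object so that both share the same lifetime.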
+    void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
+    return std::unique_ptr<CodeGenerationData>(
+        ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
+  }
+
+  ScopedArenaAllocator* GetScopedAllocator() {
+    return &allocator_;
+  }
+
+  void AddSlowPath(SlowPathCode* slow_path) {
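+    // Takes ownership; the slow path's destructor runs when this object is destroyed.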
+    slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
+  }
+
+  ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
+    return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
+  }
+
+  StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
+
+  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
+    jit_string_roots_.Overwrite(string_reference,
+                                reinterpret_cast64<uint64_t>(string.GetReference()));
+  }
+
+  uint64_t GetJitStringRootIndex(StringReference string_reference) const {
+    return jit_string_roots_.Get(string_reference);
+  }
+
+  size_t GetNumberOfJitStringRoots() const {
+    return jit_string_roots_.size();
+  }
+
+  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+    jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
+  }
+
+  uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
+    return jit_class_roots_.Get(type_reference);
+  }
+
+  size_t GetNumberOfJitClassRoots() const {
+    return jit_class_roots_.size();
+  }
+
+  size_t GetNumberOfJitRoots() const {
+    return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
+  }
+
+  void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
+      : allocator_(std::move(allocator)),
+        stack_map_stream_(&allocator_, instruction_set),
+        slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
+        jit_string_roots_(StringReferenceValueComparator(),
+                          allocator_.Adapter(kArenaAllocCodeGenerator)),
+        jit_class_roots_(TypeReferenceValueComparator(),
+                         allocator_.Adapter(kArenaAllocCodeGenerator)) {
+    slow_paths_.reserve(kDefaultSlowPathsCapacity);
+  }
+
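+  // Matches the capacity previously reserved for slow_paths_ in CodeGenerator.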
+  static constexpr size_t kDefaultSlowPathsCapacity = 8;
+
+  ScopedArenaAllocator allocator_;
+  StackMapStream stack_map_stream_;
+  ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
+
+  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
+  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+  // will compute all the indices.
+  ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
+
+  // Maps a TypeReference (dex_file, type_index) to the index in the literal table.
+  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+  // will compute all the indices.
+  ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
+};
+
+void CodeGenerator::CodeGenerationData::EmitJitRoots(
+    Handle<mirror::ObjectArray<mirror::Object>> roots) {
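+  // Each map value was seeded with the address of a handle's StackReference
+  // (see ReserveJitStringRoot/ReserveJitClassRoot); rewrite it here to the
+  // root's index in `roots` so that EmitJitRootPatches can look it up.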
+  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  size_t index = 0;
+  for (auto& entry : jit_string_roots_) {
+    // Update `roots` with the string, and replace the temporarily
+    // stored address with its index in the table.
+    uint64_t address = entry.second;
+    roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
+    DCHECK(roots->Get(index) != nullptr);
+    entry.second = index;
+    // Ensure the string is strongly interned. This is a requirement on how the JIT
+    // handles strings. b/32995596
+    class_linker->GetInternTable()->InternStrong(
+        reinterpret_cast<mirror::String*>(roots->Get(index)));
+    ++index;
+  }
+  for (auto& entry : jit_class_roots_) {
+    // Update `roots` with the class, and replace the temporarily
+    // stored address with its index in the table.
+    uint64_t address = entry.second;
+    roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
+    DCHECK(roots->Get(index) != nullptr);
+    entry.second = index;
+    ++index;
+  }
+}
+
+ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetScopedAllocator();
+}
+
+StackMapStream* CodeGenerator::GetStackMapStream() {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetStackMapStream();
+}
+
+void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
+                                         Handle<mirror::String> string) {
+  DCHECK(code_generation_data_ != nullptr);
+  code_generation_data_->ReserveJitStringRoot(string_reference, string);
+}
+
+uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetJitStringRootIndex(string_reference);
+}
+
+void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+  DCHECK(code_generation_data_ != nullptr);
+  code_generation_data_->ReserveJitClassRoot(type_reference, klass);
+}
+
+uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetJitClassRootIndex(type_reference);
+}
+
+void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
+                                       const uint8_t* roots_data ATTRIBUTE_UNUSED) {
+  DCHECK(code_generation_data_ != nullptr);
+  DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
+  DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
+}
+
 size_t CodeGenerator::GetCacheOffset(uint32_t index) {
   return sizeof(GcRoot<mirror::Object>) * index;
 }
@@ -210,9 +364,10 @@
 
 
 void CodeGenerator::GenerateSlowPaths() {
+  DCHECK(code_generation_data_ != nullptr);
   size_t code_start = 0;
-  for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
-    SlowPathCode* slow_path = slow_path_unique_ptr.get();
+  for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
+    SlowPathCode* slow_path = slow_path_ptr.get();
     current_slow_path_ = slow_path;
     if (disasm_info_ != nullptr) {
       code_start = GetAssembler()->CodeSize();
@@ -227,7 +382,14 @@
   current_slow_path_ = nullptr;
 }
 
+void CodeGenerator::InitializeCodeGenerationData() {
+  DCHECK(code_generation_data_ == nullptr);
+  code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
+}
+
 void CodeGenerator::Compile(CodeAllocator* allocator) {
+  InitializeCodeGenerationData();
+
   // The register allocator already called `InitializeCodeGeneration`,
   // where the frame size has been computed.
   DCHECK(block_order_ != nullptr);
@@ -667,12 +829,54 @@
   }
 }
 
+CodeGenerator::CodeGenerator(HGraph* graph,
+                             size_t number_of_core_registers,
+                             size_t number_of_fpu_registers,
+                             size_t number_of_register_pairs,
+                             uint32_t core_callee_save_mask,
+                             uint32_t fpu_callee_save_mask,
+                             const CompilerOptions& compiler_options,
+                             OptimizingCompilerStats* stats)
+    : frame_size_(0),
+      core_spill_mask_(0),
+      fpu_spill_mask_(0),
+      first_register_slot_in_slow_path_(0),
+      allocated_registers_(RegisterSet::Empty()),
+      blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
+                                                                      kArenaAllocCodeGenerator)),
+      blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
+                                                                     kArenaAllocCodeGenerator)),
+      number_of_core_registers_(number_of_core_registers),
+      number_of_fpu_registers_(number_of_fpu_registers),
+      number_of_register_pairs_(number_of_register_pairs),
+      core_callee_save_mask_(core_callee_save_mask),
+      fpu_callee_save_mask_(fpu_callee_save_mask),
+      block_order_(nullptr),
+      disasm_info_(nullptr),
+      stats_(stats),
+      graph_(graph),
+      compiler_options_(compiler_options),
+      current_slow_path_(nullptr),
+      current_block_index_(0),
+      is_leaf_(true),
+      requires_current_method_(false),
+      code_generation_data_() {
+}
+
+CodeGenerator::~CodeGenerator() {}
+
 void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
                                                      size_t* method_info_size) {
   DCHECK(stack_map_size != nullptr);
   DCHECK(method_info_size != nullptr);
-  *stack_map_size = stack_map_stream_.PrepareForFillIn();
-  *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  *stack_map_size = stack_map_stream->PrepareForFillIn();
+  *method_info_size = stack_map_stream->ComputeMethodInfoSize();
+}
+
+size_t CodeGenerator::GetNumberOfJitRoots() const {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetNumberOfJitRoots();
 }
 
 static void CheckCovers(uint32_t dex_pc,
@@ -740,8 +944,9 @@
 void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
                                    MemoryRegion method_info_region,
                                    const DexFile::CodeItem& code_item) {
-  stack_map_stream_.FillInCodeInfo(stack_map_region);
-  stack_map_stream_.FillInMethodInfo(method_info_region);
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  stack_map_stream->FillInCodeInfo(stack_map_region);
+  stack_map_stream->FillInMethodInfo(method_info_region);
   if (kIsDebugBuild) {
     CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
   }
@@ -791,11 +996,12 @@
   // Collect PC infos for the mapping table.
   uint32_t native_pc = GetAssembler()->CodePosition();
 
+  StackMapStream* stack_map_stream = GetStackMapStream();
   if (instruction == nullptr) {
     // For stack overflow checks and native-debug-info entries without dex register
     // mapping (i.e. start of basic block or start of slow path).
-    stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
-    stack_map_stream_.EndStackMapEntry();
+    stack_map_stream->BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
+    stack_map_stream->EndStackMapEntry();
     return;
   }
   LocationSummary* locations = instruction->GetLocations();
@@ -814,7 +1020,7 @@
     // The register mask must be a subset of callee-save registers.
     DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
   }
-  stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
+  stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                        native_pc,
                                        register_mask,
                                        locations->GetStackMask(),
@@ -830,10 +1036,10 @@
         instruction->IsInvoke() &&
         instruction->IsInvokeStaticOrDirect()) {
       HInvoke* const invoke = instruction->AsInvoke();
-      stack_map_stream_.AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
+      stack_map_stream->AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
     }
   }
-  stack_map_stream_.EndStackMapEntry();
+  stack_map_stream->EndStackMapEntry();
 
   HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
   if (instruction->IsSuspendCheck() &&
@@ -844,10 +1050,10 @@
     // We duplicate the stack map as a marker that this stack map can be an OSR entry.
     // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
     DCHECK(info->IsIrreducible());
-    stack_map_stream_.BeginStackMapEntry(
+    stack_map_stream->BeginStackMapEntry(
         dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
     EmitEnvironment(instruction->GetEnvironment(), slow_path);
-    stack_map_stream_.EndStackMapEntry();
+    stack_map_stream->EndStackMapEntry();
     if (kIsDebugBuild) {
       for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
         HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -867,21 +1073,22 @@
   } else if (kIsDebugBuild) {
     // Ensure stack maps are unique, by checking that the native pc in the stack map
     // last emitted is different than the native pc of the stack map just emitted.
-    size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
+    size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
     if (number_of_stack_maps > 1) {
-      DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
-                stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
+      DCHECK_NE(stack_map_stream->GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
+                stack_map_stream->GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
     }
   }
 }
 
 bool CodeGenerator::HasStackMapAtCurrentPc() {
   uint32_t pc = GetAssembler()->CodeSize();
-  size_t count = stack_map_stream_.GetNumberOfStackMaps();
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  size_t count = stack_map_stream->GetNumberOfStackMaps();
   if (count == 0) {
     return false;
   }
-  CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
+  CodeOffset native_pc_offset = stack_map_stream->GetStackMap(count - 1).native_pc_code_offset;
   return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
 }
 
@@ -899,6 +1106,7 @@
 
 void CodeGenerator::RecordCatchBlockInfo() {
   ArenaAllocator* allocator = graph_->GetAllocator();
+  StackMapStream* stack_map_stream = GetStackMapStream();
 
   for (HBasicBlock* block : *block_order_) {
     if (!block->IsCatchBlock()) {
@@ -915,7 +1123,7 @@
     ArenaBitVector* stack_mask =
         ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
 
-    stack_map_stream_.BeginStackMapEntry(dex_pc,
+    stack_map_stream->BeginStackMapEntry(dex_pc,
                                          native_pc,
                                          register_mask,
                                          stack_mask,
@@ -933,19 +1141,19 @@
     }
 
       if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
-        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
       } else {
         Location location = current_phi->GetLocations()->Out();
         switch (location.GetKind()) {
           case Location::kStackSlot: {
-            stack_map_stream_.AddDexRegisterEntry(
+            stack_map_stream->AddDexRegisterEntry(
                 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
             break;
           }
           case Location::kDoubleStackSlot: {
-            stack_map_stream_.AddDexRegisterEntry(
+            stack_map_stream->AddDexRegisterEntry(
                 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
-            stack_map_stream_.AddDexRegisterEntry(
+            stack_map_stream->AddDexRegisterEntry(
                 DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
             ++vreg;
             DCHECK_LT(vreg, num_vregs);
@@ -960,17 +1168,23 @@
       }
     }
 
-    stack_map_stream_.EndStackMapEntry();
+    stack_map_stream->EndStackMapEntry();
   }
 }
 
+void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
+  DCHECK(code_generation_data_ != nullptr);
+  code_generation_data_->AddSlowPath(slow_path);
+}
+
 void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
   if (environment == nullptr) return;
 
+  StackMapStream* stack_map_stream = GetStackMapStream();
   if (environment->GetParent() != nullptr) {
     // We emit the parent environment first.
     EmitEnvironment(environment->GetParent(), slow_path);
-    stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
+    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
                                            environment->GetDexPc(),
                                            environment->Size(),
                                            &graph_->GetDexFile());
@@ -980,7 +1194,7 @@
   for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
     HInstruction* current = environment->GetInstructionAt(i);
     if (current == nullptr) {
-      stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
       continue;
     }
 
@@ -990,43 +1204,43 @@
         DCHECK_EQ(current, location.GetConstant());
         if (current->IsLongConstant()) {
           int64_t value = current->AsLongConstant()->GetValue();
-          stack_map_stream_.AddDexRegisterEntry(
+          stack_map_stream->AddDexRegisterEntry(
               DexRegisterLocation::Kind::kConstant, Low32Bits(value));
-          stack_map_stream_.AddDexRegisterEntry(
+          stack_map_stream->AddDexRegisterEntry(
               DexRegisterLocation::Kind::kConstant, High32Bits(value));
           ++i;
           DCHECK_LT(i, environment_size);
         } else if (current->IsDoubleConstant()) {
           int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
-          stack_map_stream_.AddDexRegisterEntry(
+          stack_map_stream->AddDexRegisterEntry(
               DexRegisterLocation::Kind::kConstant, Low32Bits(value));
-          stack_map_stream_.AddDexRegisterEntry(
+          stack_map_stream->AddDexRegisterEntry(
               DexRegisterLocation::Kind::kConstant, High32Bits(value));
           ++i;
           DCHECK_LT(i, environment_size);
         } else if (current->IsIntConstant()) {
           int32_t value = current->AsIntConstant()->GetValue();
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
         } else if (current->IsNullConstant()) {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
         } else {
           DCHECK(current->IsFloatConstant()) << current->DebugName();
           int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
         }
         break;
       }
 
       case Location::kStackSlot: {
-        stack_map_stream_.AddDexRegisterEntry(
+        stack_map_stream->AddDexRegisterEntry(
             DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
         break;
       }
 
       case Location::kDoubleStackSlot: {
-        stack_map_stream_.AddDexRegisterEntry(
+        stack_map_stream->AddDexRegisterEntry(
             DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
-        stack_map_stream_.AddDexRegisterEntry(
+        stack_map_stream->AddDexRegisterEntry(
             DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
         ++i;
         DCHECK_LT(i, environment_size);
@@ -1037,17 +1251,17 @@
         int id = location.reg();
         if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
           uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
           if (current->GetType() == DataType::Type::kInt64) {
-            stack_map_stream_.AddDexRegisterEntry(
+            stack_map_stream->AddDexRegisterEntry(
                 DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
             ++i;
             DCHECK_LT(i, environment_size);
           }
         } else {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
           if (current->GetType() == DataType::Type::kInt64) {
-            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
+            stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
             ++i;
             DCHECK_LT(i, environment_size);
           }
@@ -1059,17 +1273,17 @@
         int id = location.reg();
         if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
           uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
           if (current->GetType() == DataType::Type::kFloat64) {
-            stack_map_stream_.AddDexRegisterEntry(
+            stack_map_stream->AddDexRegisterEntry(
                 DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
             ++i;
             DCHECK_LT(i, environment_size);
           }
         } else {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
           if (current->GetType() == DataType::Type::kFloat64) {
-            stack_map_stream_.AddDexRegisterEntry(
+            stack_map_stream->AddDexRegisterEntry(
                 DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
             ++i;
             DCHECK_LT(i, environment_size);
@@ -1083,16 +1297,16 @@
         int high = location.high();
         if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
           uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
         } else {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
         }
         if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
           uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
           ++i;
         } else {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
           ++i;
         }
         DCHECK_LT(i, environment_size);
@@ -1104,15 +1318,15 @@
         int high = location.high();
         if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
           uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
         } else {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
         }
         if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
           uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
         } else {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
         }
         ++i;
         DCHECK_LT(i, environment_size);
@@ -1120,7 +1334,7 @@
       }
 
       case Location::kInvalid: {
-        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
         break;
       }
 
@@ -1130,7 +1344,7 @@
   }
 
   if (environment->GetParent() != nullptr) {
-    stack_map_stream_.EndInlineInfoEntry();
+    stack_map_stream->EndInlineInfoEntry();
   }
 }
 
@@ -1408,31 +1622,7 @@
 void CodeGenerator::EmitJitRoots(uint8_t* code,
                                  Handle<mirror::ObjectArray<mirror::Object>> roots,
                                  const uint8_t* roots_data) {
-  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  size_t index = 0;
-  for (auto& entry : jit_string_roots_) {
-    // Update the `roots` with the string, and replace the address temporarily
-    // stored to the index in the table.
-    uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
-    entry.second = index;
-    // Ensure the string is strongly interned. This is a requirement on how the JIT
-    // handles strings. b/32995596
-    class_linker->GetInternTable()->InternStrong(
-        reinterpret_cast<mirror::String*>(roots->Get(index)));
-    ++index;
-  }
-  for (auto& entry : jit_class_roots_) {
-    // Update the `roots` with the class, and replace the address temporarily
-    // stored to the index in the table.
-    uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
-    entry.second = index;
-    ++index;
-  }
+  code_generation_data_->EmitJitRoots(roots);
   EmitJitRootPatches(code, roots_data);
 }
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2904b71..64c88eb 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -32,7 +32,7 @@
 #include "optimizing_compiler_stats.h"
 #include "read_barrier_option.h"
 #include "stack.h"
-#include "stack_map_stream.h"
+#include "stack_map.h"
 #include "string_reference.h"
 #include "type_reference.h"
 #include "utils/label.h"
@@ -61,6 +61,7 @@
 class CodeGenerator;
 class CompilerDriver;
 class CompilerOptions;
+class StackMapStream;
 class ParallelMoveResolver;
 
 namespace linker {
@@ -190,7 +191,7 @@
                                                const InstructionSetFeatures& isa_features,
                                                const CompilerOptions& compiler_options,
                                                OptimizingCompilerStats* stats = nullptr);
-  virtual ~CodeGenerator() {}
+  virtual ~CodeGenerator();
 
   // Get the graph. This is the outermost graph, never the graph of a method being inlined.
   HGraph* GetGraph() const { return graph_; }
@@ -338,18 +339,16 @@
   // TODO: Replace with a catch-entering instruction that records the environment.
   void RecordCatchBlockInfo();
 
-  // TODO: Avoid creating the `std::unique_ptr` here.
-  void AddSlowPath(SlowPathCode* slow_path) {
-    slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
-  }
+  // Get the ScopedArenaAllocator used for codegen memory allocation.
+  ScopedArenaAllocator* GetScopedAllocator();
+
+  void AddSlowPath(SlowPathCode* slow_path);
 
   void BuildStackMaps(MemoryRegion stack_map_region,
                       MemoryRegion method_info_region,
                       const DexFile::CodeItem& code_item);
   void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size);
-  size_t GetNumberOfJitRoots() const {
-    return jit_string_roots_.size() + jit_class_roots_.size();
-  }
+  size_t GetNumberOfJitRoots() const;
 
   // Fills the `literals` array with literals collected during code generation.
   // Also emits literal patches.
@@ -600,38 +599,7 @@
                 uint32_t core_callee_save_mask,
                 uint32_t fpu_callee_save_mask,
                 const CompilerOptions& compiler_options,
-                OptimizingCompilerStats* stats)
-      : frame_size_(0),
-        core_spill_mask_(0),
-        fpu_spill_mask_(0),
-        first_register_slot_in_slow_path_(0),
-        allocated_registers_(RegisterSet::Empty()),
-        blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
-                                                                        kArenaAllocCodeGenerator)),
-        blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
-                                                                       kArenaAllocCodeGenerator)),
-        number_of_core_registers_(number_of_core_registers),
-        number_of_fpu_registers_(number_of_fpu_registers),
-        number_of_register_pairs_(number_of_register_pairs),
-        core_callee_save_mask_(core_callee_save_mask),
-        fpu_callee_save_mask_(fpu_callee_save_mask),
-        stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()),
-        block_order_(nullptr),
-        jit_string_roots_(StringReferenceValueComparator(),
-                          graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-        jit_class_roots_(TypeReferenceValueComparator(),
-                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-        disasm_info_(nullptr),
-        stats_(stats),
-        graph_(graph),
-        compiler_options_(compiler_options),
-        slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-        current_slow_path_(nullptr),
-        current_block_index_(0),
-        is_leaf_(true),
-        requires_current_method_(false) {
-    slow_paths_.reserve(8);
-  }
+                OptimizingCompilerStats* stats);
 
   virtual HGraphVisitor* GetLocationBuilder() = 0;
   virtual HGraphVisitor* GetInstructionVisitor() = 0;
@@ -687,12 +655,15 @@
     return current_slow_path_;
   }
 
+  StackMapStream* GetStackMapStream();
+
+  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string);
+  uint64_t GetJitStringRootIndex(StringReference string_reference);
+  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass);
+  uint64_t GetJitClassRootIndex(TypeReference type_reference);
+
   // Emit the patches associated with JIT roots. Only applies to JIT compiled code.
-  virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
-                                  const uint8_t* roots_data ATTRIBUTE_UNUSED) {
-    DCHECK_EQ(jit_string_roots_.size(), 0u);
-    DCHECK_EQ(jit_class_roots_.size(), 0u);
-  }
+  virtual void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data);
 
   // Frame size required for this method.
   uint32_t frame_size_;
@@ -714,24 +685,15 @@
   const uint32_t core_callee_save_mask_;
   const uint32_t fpu_callee_save_mask_;
 
-  StackMapStream stack_map_stream_;
-
   // The order to use for code generation.
   const ArenaVector<HBasicBlock*>* block_order_;
 
-  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
-  // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
-  // will compute all the indices.
-  ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
-
-  // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
-  // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
-  // will compute all the indices.
-  ArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
-
   DisassemblyInformation* disasm_info_;
 
  private:
+  class CodeGenerationData;
+
+  void InitializeCodeGenerationData();
   size_t GetStackOffsetOfSavedRegister(size_t index);
   void GenerateSlowPaths();
   void BlockIfInRegister(Location location, bool is_out = false) const;
@@ -742,8 +704,6 @@
   HGraph* const graph_;
   const CompilerOptions& compiler_options_;
 
-  ArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
-
   // The current slow-path that we're generating code for.
   SlowPathCode* current_slow_path_;
 
@@ -759,6 +719,12 @@
   // needs the environment including a valid stack frame.
   bool requires_current_method_;
 
+  // The CodeGenerationData contains a ScopedArenaAllocator intended for reusing the
+  // ArenaStack memory allocated in previous passes instead of adding to the memory
+  // held by the ArenaAllocator. This ScopedArenaAllocator is created in
+  // CodeGenerator::Compile() and remains alive until the CodeGenerator is destroyed.
+  std::unique_ptr<CodeGenerationData> code_generation_data_;
+
   friend class OptimizingCFITest;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
@@ -863,7 +829,8 @@
                                 {{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}});
     }
     // Cannot share: create and add new slow-path for this particular dex-pc.
-    SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction);
+    SlowPathCodeType* slow_path =
+        new (codegen_->GetScopedAllocator()) SlowPathCodeType(instruction);
     iter->second.emplace_back(std::make_pair(instruction, slow_path));
     codegen_->AddSlowPath(slow_path);
     return slow_path;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e6e6984..0b65b41 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2204,7 +2204,8 @@
   SuspendCheckSlowPathARM64* slow_path =
       down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
   if (slow_path == nullptr) {
-    slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
     instruction->SetSlowPath(slow_path);
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
@@ -3011,7 +3012,7 @@
       uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
 
       if (may_need_runtime_call_for_type_check) {
-        slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction);
+        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARM64(instruction);
         codegen_->AddSlowPath(slow_path);
         if (instruction->GetValueCanBeNull()) {
           vixl::aarch64::Label non_zero;
@@ -3126,7 +3127,7 @@
 
 void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
   BoundsCheckSlowPathARM64* slow_path =
-      new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARM64(instruction);
+      new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction);
   codegen_->AddSlowPath(slow_path);
   __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
   __ B(slow_path->GetEntryLabel(), hs);
@@ -3143,7 +3144,7 @@
 
 void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
+  SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
       check->GetLoadClass(), check, check->GetDexPc(), true);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
@@ -3500,7 +3501,7 @@
 
 void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   SlowPathCodeARM64* slow_path =
-      new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction);
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARM64(instruction);
   codegen_->AddSlowPath(slow_path);
   Location value = instruction->GetLocations()->InAt(0);
 
@@ -4055,8 +4056,8 @@
                                         kWithoutReadBarrier);
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
-                                                                          /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(ne, slow_path->GetEntryLabel());
       __ Mov(out, 1);
@@ -4087,8 +4088,8 @@
       // call to the runtime not using a type checking slow path).
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
-                                                                          /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -4176,8 +4177,8 @@
         !instruction->CanThrowIntoCatchBlock();
   }
   SlowPathCodeARM64* type_check_slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
-                                                              is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(type_check_slow_path);
 
   vixl::aarch64::Label done;
@@ -4685,8 +4686,7 @@
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
     const DexFile& dex_file, dex::StringIndex string_index, Handle<mirror::String> handle) {
-  jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
-                              reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4694,8 +4694,7 @@
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
     const DexFile& dex_file, dex::TypeIndex type_index, Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
-                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -5010,7 +5009,7 @@
   bool do_clinit = cls->MustGenerateClinitCheck();
   if (generate_null_check || do_clinit) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
+    SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
         cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
@@ -5150,7 +5149,7 @@
                               ldr_label,
                               kCompilerReadBarrierOption);
       SlowPathCodeARM64* slow_path =
-          new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
+          new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
       codegen_->AddSlowPath(slow_path);
       __ Cbz(out.X(), slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -5391,8 +5390,7 @@
 }
 
 void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCodeARM64* slow_path =
-      new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction);
+  SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) NullCheckSlowPathARM64(instruction);
   AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -6034,7 +6032,7 @@
         // Slow path marking the GC root `root`. The entrypoint will
         // be loaded by the slow path code.
         SlowPathCodeARM64* slow_path =
-            new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
+            new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
         codegen_->AddSlowPath(slow_path);
 
         // /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -6293,7 +6291,7 @@
   // Slow path marking the object `ref` when the GC is marking. The
   // entrypoint will be loaded by the slow path code.
   SlowPathCodeARM64* slow_path =
-      new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
+      new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
           instruction,
           ref,
           obj,
@@ -6351,7 +6349,7 @@
   // Slow path updating the object reference at address `obj + field_offset`
   // when the GC is marking. The entrypoint will be loaded by the slow path code.
   SlowPathCodeARM64* slow_path =
-      new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
+      new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
           instruction,
           ref,
           obj,
@@ -6478,7 +6476,7 @@
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCodeARM64* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
 
@@ -6514,7 +6512,7 @@
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCodeARM64* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ B(slow_path->GetEntryLabel());
@@ -6560,17 +6558,13 @@
   for (const auto& entry : jit_string_patches_) {
     const StringReference& string_reference = entry.first;
     vixl::aarch64::Literal<uint32_t>* table_entry_literal = entry.second;
-    const auto it = jit_string_roots_.find(string_reference);
-    DCHECK(it != jit_string_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
   for (const auto& entry : jit_class_patches_) {
     const TypeReference& type_reference = entry.first;
     vixl::aarch64::Literal<uint32_t>* table_entry_literal = entry.second;
-    const auto it = jit_class_roots_.find(type_reference);
-    DCHECK(it != jit_class_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
 }
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 251f390..32acd66 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -4733,7 +4733,7 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   DivZeroCheckSlowPathARMVIXL* slow_path =
-      new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
   codegen_->AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -5959,7 +5959,7 @@
 
 void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) {
   NullCheckSlowPathARMVIXL* slow_path =
-      new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction);
+      new (GetScopedAllocator()) NullCheckSlowPathARMVIXL(instruction);
   AddSlowPath(slow_path);
   __ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
 }
@@ -6432,7 +6432,7 @@
       SlowPathCodeARMVIXL* slow_path = nullptr;
 
       if (may_need_runtime_call_for_type_check) {
-        slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction);
+        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARMVIXL(instruction);
         codegen_->AddSlowPath(slow_path);
         if (instruction->GetValueCanBeNull()) {
           vixl32::Label non_zero;
@@ -6693,7 +6693,7 @@
       int32_t index = Int32ConstantFrom(index_loc);
       if (index < 0 || index >= length) {
         SlowPathCodeARMVIXL* slow_path =
-            new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+            new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
         codegen_->AddSlowPath(slow_path);
         __ B(slow_path->GetEntryLabel());
       } else {
@@ -6704,13 +6704,13 @@
     }
 
     SlowPathCodeARMVIXL* slow_path =
-        new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+        new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
     __ Cmp(RegisterFrom(index_loc), length);
     codegen_->AddSlowPath(slow_path);
     __ B(hs, slow_path->GetEntryLabel());
   } else {
     SlowPathCodeARMVIXL* slow_path =
-        new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+        new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
     __ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0));
     codegen_->AddSlowPath(slow_path);
     __ B(ls, slow_path->GetEntryLabel());
@@ -6777,7 +6777,7 @@
       down_cast<SuspendCheckSlowPathARMVIXL*>(instruction->GetSlowPath());
   if (slow_path == nullptr) {
     slow_path =
-        new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
     instruction->SetSlowPath(slow_path);
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
@@ -7214,8 +7214,9 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(
-        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    LoadClassSlowPathARMVIXL* slow_path =
+        new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(
+            cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
       __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -7241,10 +7242,10 @@
 void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
   LoadClassSlowPathARMVIXL* slow_path =
-      new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
-                                                                check,
-                                                                check->GetDexPc(),
-                                                                /* do_clinit */ true);
+      new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+                                                                    check,
+                                                                    check->GetDexPc(),
+                                                                    /* do_clinit */ true);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
 }
@@ -7354,7 +7355,7 @@
       codegen_->EmitMovwMovtPlaceholder(labels, temp);
       GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
       LoadStringSlowPathARMVIXL* slow_path =
-          new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load);
+          new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
       codegen_->AddSlowPath(slow_path);
       __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -7681,8 +7682,8 @@
                                         kWithoutReadBarrier);
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
-                                                                            /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(ne, slow_path->GetEntryLabel());
       __ Mov(out, 1);
@@ -7710,8 +7711,8 @@
       // call to the runtime not using a type checking slow path).
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
-                                                                            /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       break;
@@ -7789,8 +7790,8 @@
         !instruction->CanThrowIntoCatchBlock();
   }
   SlowPathCodeARMVIXL* type_check_slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
-                                                                is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(type_check_slow_path);
 
   vixl32::Label done;
@@ -8451,7 +8452,7 @@
         // Slow path marking the GC root `root`. The entrypoint will
         // be loaded by the slow path code.
         SlowPathCodeARMVIXL* slow_path =
-            new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
+            new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
         codegen_->AddSlowPath(slow_path);
 
         // /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -8700,7 +8701,7 @@
   // Slow path marking the object `ref` when the GC is marking. The
   // entrypoint will be loaded by the slow path code.
   SlowPathCodeARMVIXL* slow_path =
-      new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
+      new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
           instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
   AddSlowPath(slow_path);
 
@@ -8746,8 +8747,8 @@
 
   // Slow path updating the object reference at address `obj + field_offset`
   // when the GC is marking. The entrypoint will be loaded by the slow path code.
-  SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
-      LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
+  SlowPathCodeARMVIXL* slow_path =
+      new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
           instruction,
           ref,
           obj,
@@ -8858,7 +8859,7 @@
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCodeARMVIXL* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
 
@@ -8894,7 +8895,7 @@
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCodeARMVIXL* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ B(slow_path->GetEntryLabel());
@@ -9108,8 +9109,7 @@
     const DexFile& dex_file,
     dex::StringIndex string_index,
     Handle<mirror::String> handle) {
-  jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
-                              reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() {
@@ -9120,8 +9120,7 @@
 VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                       dex::TypeIndex type_index,
                                                       Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
-                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() {
@@ -9401,17 +9400,13 @@
   for (const auto& entry : jit_string_patches_) {
     const StringReference& string_reference = entry.first;
     VIXLUInt32Literal* table_entry_literal = entry.second;
-    const auto it = jit_string_roots_.find(string_reference);
-    DCHECK(it != jit_string_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
   for (const auto& entry : jit_class_patches_) {
     const TypeReference& type_reference = entry.first;
     VIXLUInt32Literal* table_entry_literal = entry.second;
-    const auto it = jit_class_roots_.find(type_reference);
-    DCHECK(it != jit_class_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
 }
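
Note: every slow-path hunk above follows one pattern: the slow path is placement-new'ed on the ScopedArenaAllocator owned by the per-compilation CodeGenerationData (reached via GetScopedAllocator()) instead of on the HGraph's ArenaAllocator, so its memory is reclaimed when code generation finishes. Below is a minimal standalone sketch of that pattern; all names (ToyScopedAllocator, ToySlowPath) are hypothetical stand-ins, not the ART types.

#include <cstddef>
#include <vector>

// Toy stand-in for ScopedArenaAllocator: allocations live until the
// allocator itself goes out of scope, then are released in bulk.
class ToyScopedAllocator {
 public:
  void* Alloc(std::size_t size) {
    storage_.emplace_back(size);
    return storage_.back().data();
  }
 private:
  std::vector<std::vector<char>> storage_;
};

// Placement-new hook so `new (allocator) T(...)` draws from the arena,
// mirroring `new (codegen_->GetScopedAllocator()) FooSlowPath(...)` above.
inline void* operator new(std::size_t size, ToyScopedAllocator* allocator) {
  return allocator->Alloc(size);
}

struct ToySlowPath {  // Trivially destructible, like arena-owned objects.
  explicit ToySlowPath(bool is_fatal) : is_fatal_(is_fatal) {}
  bool is_fatal_;
};

int main() {
  ToyScopedAllocator allocator;  // In ART this wraps an ArenaStack frame.
  ToySlowPath* slow_path = new (&allocator) ToySlowPath(/* is_fatal= */ false);
  return slow_path->is_fatal_ ? 1 : 0;
}  // All slow-path memory is reclaimed here, with no per-object delete.
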
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index e58f43e..c6346eb 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -33,6 +33,7 @@
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "offsets.h"
+#include "stack_map_stream.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/mips/assembler_mips.h"
@@ -1128,12 +1129,13 @@
   __ FinalizeCode();
 
   // Adjust native pc offsets in stack maps.
-  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
     uint32_t old_position =
-        stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
+        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
     uint32_t new_position = __ GetAdjustedPosition(old_position);
     DCHECK_GE(new_position, old_position);
-    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+    stack_map_stream->SetStackMapNativePcOffset(i, new_position);
   }
 
   // Adjust pc offsets for the disassembly information.
@@ -1788,21 +1790,19 @@
 
 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
     const DexFile& dex_file,
-    dex::StringIndex dex_index,
+    dex::StringIndex string_index,
     Handle<mirror::String> handle) {
-  jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
-                              reinterpret_cast64<uint64_t>(handle.GetReference()));
-  jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
+  jit_string_patches_.emplace_back(dex_file, string_index.index_);
   return &jit_string_patches_.back();
 }
 
 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
     const DexFile& dex_file,
-    dex::TypeIndex dex_index,
+    dex::TypeIndex type_index,
     Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
-                             reinterpret_cast64<uint64_t>(handle.GetReference()));
-  jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
+  jit_class_patches_.emplace_back(dex_file, type_index.index_);
   return &jit_class_patches_.back();
 }
 
@@ -1834,17 +1834,13 @@
 
 void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const JitPatchInfo& info : jit_string_patches_) {
-    const auto it = jit_string_roots_.find(StringReference(&info.target_dex_file,
-                                                           dex::StringIndex(info.index)));
-    DCHECK(it != jit_string_roots_.end());
-    uint64_t index_in_table = it->second;
+    StringReference string_reference(&info.target_dex_file, dex::StringIndex(info.index));
+    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
     PatchJitRootUse(code, roots_data, info, index_in_table);
   }
   for (const JitPatchInfo& info : jit_class_patches_) {
-    const auto it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
-                                                        dex::TypeIndex(info.index)));
-    DCHECK(it != jit_class_roots_.end());
-    uint64_t index_in_table = it->second;
+    TypeReference type_reference(&info.target_dex_file, dex::TypeIndex(info.index));
+    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
     PatchJitRootUse(code, roots_data, info, index_in_table);
   }
 }
@@ -1998,7 +1994,7 @@
 void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
   SuspendCheckSlowPathMIPS* slow_path =
-    new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
+    new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
   codegen_->AddSlowPath(slow_path);
 
   __ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2986,7 +2982,7 @@
       SlowPathCodeMIPS* slow_path = nullptr;
 
       if (may_need_runtime_call_for_type_check) {
-        slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction);
+        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS(instruction);
         codegen_->AddSlowPath(slow_path);
         if (instruction->GetValueCanBeNull()) {
           MipsLabel non_zero;
@@ -3171,7 +3167,7 @@
 void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   BoundsCheckSlowPathMIPS* slow_path =
-      new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction);
+      new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
   codegen_->AddSlowPath(slow_path);
 
   Register index = locations->InAt(0).AsRegister<Register>();
@@ -3263,8 +3259,8 @@
         !instruction->CanThrowIntoCatchBlock();
   }
   SlowPathCodeMIPS* slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
-                                                             is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid this check if we know `obj` is not null.
@@ -3427,7 +3423,7 @@
 
 void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
       check->GetLoadClass(),
       check,
       check->GetDexPc(),
@@ -3884,7 +3880,7 @@
 
 void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   SlowPathCodeMIPS* slow_path =
-      new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction);
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS(instruction);
   codegen_->AddSlowPath(slow_path);
   Location value = instruction->GetLocations()->InAt(0);
   DataType::Type type = instruction->GetType();
@@ -6692,7 +6688,7 @@
         // Slow path marking the GC root `root`.
         Location temp = Location::RegisterLocation(T9);
         SlowPathCodeMIPS* slow_path =
-            new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(
+            new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(
                 instruction,
                 root,
                 /*entrypoint*/ temp);
@@ -7019,14 +7015,14 @@
     // to be null in this code path.
     DCHECK_EQ(offset, 0u);
     DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
-    slow_path = new (GetGraph()->GetAllocator())
+    slow_path = new (GetScopedAllocator())
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
                                                   ref,
                                                   obj,
                                                   /* field_offset */ index,
                                                   temp_reg);
   } else {
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
   }
   AddSlowPath(slow_path);
 
@@ -7062,7 +7058,7 @@
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCodeMIPS* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
 
@@ -7098,7 +7094,7 @@
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCodeMIPS* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ B(slow_path->GetEntryLabel());
@@ -7268,8 +7264,8 @@
                                         maybe_temp_loc,
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
-                                                                         /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bne(out, cls, slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -7297,8 +7293,8 @@
       // call to the runtime not using a type checking slow path).
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
-                                                                         /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       break;
@@ -7841,7 +7837,7 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+    SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
@@ -8006,7 +8002,7 @@
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS* slow_path =
-          new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high);
+          new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
       codegen_->AddSlowPath(slow_path);
       __ Beqz(out, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -8333,7 +8329,7 @@
 }
 
 void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
+  SlowPathCodeMIPS* slow_path = new (GetScopedAllocator()) NullCheckSlowPathMIPS(instruction);
   AddSlowPath(slow_path);
 
   Location obj = instruction->GetLocations()->InAt(0);
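
Note: the EmitJitRootPatches hunks (here and in the other backends) replace open-coded map lookups plus DCHECKs with the ReserveJitStringRoot()/GetJitStringRootIndex() helper pair now living in CodeGenerationData. A sketch of that reserve/look-up pair follows, using toy types; the real keys are StringReference/TypeReference and the values are handle bits or table indices.

#include <cassert>
#include <cstdint>
#include <map>

struct ToyStringRef {  // Toy key; ART uses StringReference(dex_file, index).
  const void* dex_file;
  uint32_t string_index;
  bool operator<(const ToyStringRef& other) const {
    if (dex_file != other.dex_file) return dex_file < other.dex_file;
    return string_index < other.string_index;
  }
};

class ToyJitRoots {
 public:
  // Mirrors ReserveJitStringRoot(): the last reservation wins (Overwrite()).
  void Reserve(const ToyStringRef& ref, uint64_t root) { roots_[ref] = root; }

  // Mirrors GetJitStringRootIndex(): the entry must have been reserved,
  // which is what the removed per-backend DCHECKs used to assert.
  uint64_t GetIndex(const ToyStringRef& ref) const {
    auto it = roots_.find(ref);
    assert(it != roots_.end());
    return it->second;
  }

 private:
  std::map<ToyStringRef, uint64_t> roots_;
};

int main() {
  ToyJitRoots roots;
  ToyStringRef ref{/* dex_file= */ nullptr, /* string_index= */ 7u};
  roots.Reserve(ref, /* root= */ 3u);
  return roots.GetIndex(ref) == 3u ? 0 : 1;
}
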
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5f2f900..7845e31 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -662,10 +662,10 @@
                        const JitPatchInfo& info,
                        uint64_t index_in_table) const;
   JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
-                                      dex::StringIndex dex_index,
+                                      dex::StringIndex string_index,
                                       Handle<mirror::String> handle);
   JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
-                                     dex::TypeIndex dex_index,
+                                     dex::TypeIndex type_index,
                                      Handle<mirror::Class> handle);
 
  private:
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 11120cf..557a1ec 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -31,6 +31,7 @@
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "offsets.h"
+#include "stack_map_stream.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/mips64/assembler_mips64.h"
@@ -1072,12 +1073,13 @@
   __ FinalizeCode();
 
   // Adjust native pc offsets in stack maps.
-  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
     uint32_t old_position =
-        stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
     uint32_t new_position = __ GetAdjustedPosition(old_position);
     DCHECK_GE(new_position, old_position);
-    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+    stack_map_stream->SetStackMapNativePcOffset(i, new_position);
   }
 
   // Adjust pc offsets for the disassembly information.
@@ -1681,8 +1683,7 @@
 Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                           dex::StringIndex string_index,
                                                           Handle<mirror::String> handle) {
-  jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
-                              reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1691,8 +1692,7 @@
 Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                          dex::TypeIndex type_index,
                                                          Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
-                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1712,17 +1712,13 @@
   for (const auto& entry : jit_string_patches_) {
     const StringReference& string_reference = entry.first;
     Literal* table_entry_literal = entry.second;
-    const auto it = jit_string_roots_.find(string_reference);
-    DCHECK(it != jit_string_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
   for (const auto& entry : jit_class_patches_) {
     const TypeReference& type_reference = entry.first;
     Literal* table_entry_literal = entry.second;
-    const auto it = jit_class_roots_.find(type_reference);
-    DCHECK(it != jit_class_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
 }
@@ -1835,7 +1831,7 @@
 void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                           HBasicBlock* successor) {
   SuspendCheckSlowPathMIPS64* slow_path =
-    new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
+    new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
   codegen_->AddSlowPath(slow_path);
 
   __ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2543,7 +2539,7 @@
       SlowPathCodeMIPS64* slow_path = nullptr;
 
       if (may_need_runtime_call_for_type_check) {
-        slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction);
+        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS64(instruction);
         codegen_->AddSlowPath(slow_path);
         if (instruction->GetValueCanBeNull()) {
           Mips64Label non_zero;
@@ -2700,7 +2696,7 @@
 void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   BoundsCheckSlowPathMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+      new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -2792,8 +2788,8 @@
         !instruction->CanThrowIntoCatchBlock();
   }
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
-                                                               is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid this check if we know `obj` is not null.
@@ -2956,7 +2952,7 @@
 
 void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+  SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
       check->GetLoadClass(),
       check,
       check->GetDexPc(),
@@ -3430,7 +3426,7 @@
 
 void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
   Location value = instruction->GetLocations()->InAt(0);
 
@@ -5050,7 +5046,7 @@
         // Slow path marking the GC root `root`.
         Location temp = Location::RegisterLocation(T9);
         SlowPathCodeMIPS64* slow_path =
-            new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(
+            new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(
                 instruction,
                 root,
                 /*entrypoint*/ temp);
@@ -5335,14 +5331,14 @@
     // above are expected to be null in this code path.
     DCHECK_EQ(offset, 0u);
     DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
-    slow_path = new (GetGraph()->GetAllocator())
+    slow_path = new (GetScopedAllocator())
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
                                                     ref,
                                                     obj,
                                                     /* field_offset */ index,
                                                     temp_reg);
   } else {
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
   }
   AddSlowPath(slow_path);
 
@@ -5378,7 +5374,7 @@
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCodeMIPS64* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
 
@@ -5414,7 +5410,7 @@
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ Bc(slow_path->GetEntryLabel());
@@ -5584,8 +5580,8 @@
                                         maybe_temp_loc,
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
-                                                                           /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bnec(out, cls, slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -5613,8 +5609,8 @@
       // call to the runtime not using a type checking slow path).
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
-                                                                           /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bc(slow_path->GetEntryLabel());
       break;
@@ -6082,7 +6078,7 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+    SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
@@ -6200,7 +6196,7 @@
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS64* slow_path =
-          new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+          new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
       codegen_->AddSlowPath(slow_path);
       __ Beqzc(out, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -6464,7 +6460,7 @@
 
 void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction);
+      new (GetScopedAllocator()) NullCheckSlowPathMIPS64(instruction);
   AddSlowPath(slow_path);
 
   Location obj = instruction->GetLocations()->InAt(0);
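
Note: the MIPS and MIPS64 Finalize hunks cache GetStackMapStream() in a local before the loop that rewrites native pc offsets after __ FinalizeCode(), since the stream now lives behind an accessor on CodeGenerationData. A toy sketch of that adjustment loop, with hypothetical stand-in types and a fake +4 adjustment:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy stand-in for StackMapStream.
struct ToyStackMapStream {
  std::vector<uint32_t> native_pc_offsets;
  size_t GetNumberOfStackMaps() const { return native_pc_offsets.size(); }
  uint32_t GetNativePcOffset(size_t i) const { return native_pc_offsets[i]; }
  void SetStackMapNativePcOffset(size_t i, uint32_t v) { native_pc_offsets[i] = v; }
};

// Toy stand-in for the assembler's position remapping after finalization.
uint32_t GetAdjustedPosition(uint32_t old_position) {
  return old_position + 4u;  // Pretend finalization inserted one 4-byte insn.
}

// Mirrors the loop above: fetch the stream once, then rewrite each offset.
void AdjustNativePcOffsets(ToyStackMapStream* stack_map_stream) {
  for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream->GetNativePcOffset(i);
    uint32_t new_position = GetAdjustedPosition(old_position);
    assert(new_position >= old_position);  // Mirrors the DCHECK_GE above.
    stack_map_stream->SetStackMapNativePcOffset(i, new_position);
  }
}

int main() {
  ToyStackMapStream stream{{0u, 8u, 16u}};
  AdjustNativePcOffsets(&stream);
  return stream.GetNativePcOffset(0) == 4u ? 0 : 1;
}
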
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 39a07b8..3bcd7b9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3581,7 +3581,7 @@
           GenerateDivRemWithAnyConstant(instruction);
         }
       } else {
-        SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86(
+        SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) DivRemMinusOneSlowPathX86(
             instruction, out.AsRegister<Register>(), is_div);
         codegen_->AddSlowPath(slow_path);
 
@@ -3817,7 +3817,8 @@
 }
 
 void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction);
+  SlowPathCode* slow_path =
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathX86(instruction);
   codegen_->AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -5149,7 +5150,7 @@
 }
 
 void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction);
+  SlowPathCode* slow_path = new (GetScopedAllocator()) NullCheckSlowPathX86(instruction);
   AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -5427,7 +5428,7 @@
       Location temp_loc = locations->GetTemp(0);
       Register temp = temp_loc.AsRegister<Register>();
       if (may_need_runtime_call_for_type_check) {
-        slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction);
+        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86(instruction);
         codegen_->AddSlowPath(slow_path);
         if (instruction->GetValueCanBeNull()) {
           __ testl(register_value, register_value);
@@ -5618,7 +5619,7 @@
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
   SlowPathCode* slow_path =
-    new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction);
+    new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathX86(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5719,7 +5720,8 @@
   SuspendCheckSlowPathX86* slow_path =
       down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
   if (slow_path == nullptr) {
-    slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor);
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathX86(instruction, successor);
     instruction->SetSlowPath(slow_path);
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
@@ -6076,12 +6078,11 @@
 }
 
 Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
-                                              dex::TypeIndex dex_index,
+                                              dex::TypeIndex type_index,
                                               Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
-                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   // Add a patch entry and return the label.
-  jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+  jit_class_patches_.emplace_back(dex_file, type_index.index_);
   PatchInfo<Label>* info = &jit_class_patches_.back();
   return &info->label;
 }
@@ -6171,7 +6172,7 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
+    SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
     codegen_->AddSlowPath(slow_path);
 
@@ -6199,7 +6200,7 @@
 
 void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
       check->GetLoadClass(), check, check->GetDexPc(), true);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path,
@@ -6261,12 +6262,11 @@
 }
 
 Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
-                                               dex::StringIndex dex_index,
+                                               dex::StringIndex string_index,
                                                Handle<mirror::String> handle) {
-  jit_string_roots_.Overwrite(
-      StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   // Add a patch entry and return the label.
-  jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+  jit_string_patches_.emplace_back(dex_file, string_index.index_);
   PatchInfo<Label>* info = &jit_string_patches_.back();
   return &info->label;
 }
@@ -6306,7 +6306,7 @@
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::String> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
-      SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86(load);
+      SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load);
       codegen_->AddSlowPath(slow_path);
       __ testl(out, out);
       __ j(kEqual, slow_path->GetEntryLabel());
@@ -6587,8 +6587,8 @@
         __ cmpl(out, Address(ESP, cls.GetStackIndex()));
       }
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
-                                                                        /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ movl(out, Immediate(1));
@@ -6619,8 +6619,8 @@
       // call to the runtime not using a type checking slow path).
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
-                                                                        /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -6712,8 +6712,8 @@
       IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
 
   SlowPathCode* type_check_slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
-                                                            is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(type_check_slow_path);
 
   NearLabel done;
@@ -7156,7 +7156,7 @@
                     "have different sizes.");
 
       // Slow path marking the GC root `root`.
-      SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
+      SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
           instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
@@ -7286,10 +7286,10 @@
   SlowPathCode* slow_path;
   if (always_update_field) {
     DCHECK(temp != nullptr);
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
         instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
   } else {
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
         instruction, ref, /* unpoison_ref_before_marking */ true);
   }
   AddSlowPath(slow_path);
@@ -7322,7 +7322,7 @@
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCode* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
 
@@ -7358,7 +7358,7 @@
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCode* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ jmp(slow_path->GetEntryLabel());
@@ -7810,18 +7810,14 @@
 
 void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const PatchInfo<Label>& info : jit_string_patches_) {
-    const auto it = jit_string_roots_.find(
-        StringReference(&info.dex_file, dex::StringIndex(info.index)));
-    DCHECK(it != jit_string_roots_.end());
-    uint64_t index_in_table = it->second;
+    StringReference string_reference(&info.dex_file, dex::StringIndex(info.index));
+    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
     PatchJitRootUse(code, roots_data, info, index_in_table);
   }
 
   for (const PatchInfo<Label>& info : jit_class_patches_) {
-    const auto it = jit_class_roots_.find(
-        TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
-    DCHECK(it != jit_class_roots_.end());
-    uint64_t index_in_table = it->second;
+    TypeReference type_reference(&info.dex_file, dex::TypeIndex(info.index));
+    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
     PatchJitRootUse(code, roots_data, info, index_in_table);
   }
 }
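
Note: the GenerateSuspendCheck hunk above keeps the existing caching scheme while switching allocators: the HSuspendCheck instruction memoizes its slow path via GetSlowPath()/SetSlowPath() so that back edges reuse one stub. A toy sketch of that lazily-created, cached slow path, using hypothetical names (real code placement-news onto the scoped arena instead of using plain new):

#include <cstdio>

struct ToySlowPath {
  int uses = 0;
};

class ToySuspendCheck {  // Toy HSuspendCheck: owns at most one slow path.
 public:
  ToySlowPath* GetSlowPath() const { return slow_path_; }
  void SetSlowPath(ToySlowPath* slow_path) { slow_path_ = slow_path; }
 private:
  ToySlowPath* slow_path_ = nullptr;
};

// Mirrors the hunk: create the slow path only on first use, then reuse it.
ToySlowPath* GetOrCreateSlowPath(ToySuspendCheck* check) {
  ToySlowPath* slow_path = check->GetSlowPath();
  if (slow_path == nullptr) {
    slow_path = new ToySlowPath();  // Real code: new (GetScopedAllocator()) ...
    check->SetSlowPath(slow_path);
  }
  ++slow_path->uses;
  return slow_path;
}

int main() {
  ToySuspendCheck check;
  GetOrCreateSlowPath(&check);
  ToySlowPath* reused = GetOrCreateSlowPath(&check);
  std::printf("uses=%d\n", reused->uses);  // 2: one stub shared by both paths.
  delete reused;  // Toy only; arena-owned slow paths are never deleted one by one.
  return 0;
}
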
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fb61e75..176e4df 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -422,10 +422,10 @@
   void RecordBootStringPatch(HLoadString* load_string);
   Label* NewStringBssEntryPatch(HLoadString* load_string);
   Label* NewJitRootStringPatch(const DexFile& dex_file,
-                               dex::StringIndex dex_index,
+                               dex::StringIndex string_index,
                                Handle<mirror::String> handle);
   Label* NewJitRootClassPatch(const DexFile& dex_file,
-                              dex::TypeIndex dex_index,
+                              dex::TypeIndex type_index,
                               Handle<mirror::Class> handle);
 
   void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index c8032c2..2bb7eda 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3649,7 +3649,7 @@
     }
   } else {
     SlowPathCode* slow_path =
-        new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86_64(
+        new (codegen_->GetScopedAllocator()) DivRemMinusOneSlowPathX86_64(
             instruction, out.AsRegister(), type, is_div);
     codegen_->AddSlowPath(slow_path);
 
@@ -3818,7 +3818,7 @@
 
 void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   SlowPathCode* slow_path =
-      new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86_64(instruction);
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathX86_64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -4602,7 +4602,7 @@
 }
 
 void CodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86_64(instruction);
+  SlowPathCode* slow_path = new (GetScopedAllocator()) NullCheckSlowPathX86_64(instruction);
   AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -4864,7 +4864,7 @@
       Location temp_loc = locations->GetTemp(0);
       CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
       if (may_need_runtime_call_for_type_check) {
-        slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86_64(instruction);
+        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86_64(instruction);
         codegen_->AddSlowPath(slow_path);
         if (instruction->GetValueCanBeNull()) {
           __ testl(register_value, register_value);
@@ -5043,7 +5043,8 @@
   LocationSummary* locations = instruction->GetLocations();
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86_64(instruction);
+  SlowPathCode* slow_path =
+      new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathX86_64(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5164,7 +5165,8 @@
   SuspendCheckSlowPathX86_64* slow_path =
       down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
   if (slow_path == nullptr) {
-    slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
     instruction->SetSlowPath(slow_path);
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
@@ -5468,12 +5470,11 @@
 }
 
 Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
-                                                 dex::TypeIndex dex_index,
+                                                 dex::TypeIndex type_index,
                                                  Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(
-      TypeReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   // Add a patch entry and return the label.
-  jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+  jit_class_patches_.emplace_back(dex_file, type_index.index_);
   PatchInfo<Label>* info = &jit_class_patches_.back();
   return &info->label;
 }
@@ -5561,7 +5562,7 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
+    SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
@@ -5587,7 +5588,7 @@
 
 void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
       check->GetLoadClass(), check, check->GetDexPc(), true);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path,
@@ -5634,12 +5635,11 @@
 }
 
 Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
-                                                  dex::StringIndex dex_index,
+                                                  dex::StringIndex string_index,
                                                   Handle<mirror::String> handle) {
-  jit_string_roots_.Overwrite(
-      StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   // Add a patch entry and return the label.
-  jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+  jit_string_patches_.emplace_back(dex_file, string_index.index_);
   PatchInfo<Label>* info = &jit_string_patches_.back();
   return &info->label;
 }
@@ -5677,7 +5677,7 @@
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::String> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
-      SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86_64(load);
+      SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load);
       codegen_->AddSlowPath(slow_path);
       __ testl(out, out);
       __ j(kEqual, slow_path->GetEntryLabel());
@@ -5967,8 +5967,8 @@
         __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
       }
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
-                                                                           /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ movl(out, Immediate(1));
@@ -5999,8 +5999,8 @@
       // call to the runtime not using a type checking slow path).
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
-                                                                           /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -6094,8 +6094,8 @@
   bool is_type_check_slow_path_fatal =
       IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
   SlowPathCode* type_check_slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
-                                                               is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(type_check_slow_path);
 
 
@@ -6520,7 +6520,7 @@
                     "have different sizes.");
 
       // Slow path marking the GC root `root`.
-      SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
+      SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
           instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
@@ -6652,10 +6652,10 @@
   if (always_update_field) {
     DCHECK(temp1 != nullptr);
     DCHECK(temp2 != nullptr);
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
         instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
   } else {
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
         instruction, ref, /* unpoison_ref_before_marking */ true);
   }
   AddSlowPath(slow_path);
@@ -6688,7 +6688,7 @@
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCode* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
 
@@ -6724,7 +6724,7 @@
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCode* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ jmp(slow_path->GetEntryLabel());
@@ -7113,18 +7113,14 @@
 
 void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const PatchInfo<Label>& info : jit_string_patches_) {
-    const auto it = jit_string_roots_.find(
-        StringReference(&info.dex_file, dex::StringIndex(info.index)));
-    DCHECK(it != jit_string_roots_.end());
-    uint64_t index_in_table = it->second;
+    StringReference string_reference(&info.dex_file, dex::StringIndex(info.index));
+    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
     PatchJitRootUse(code, roots_data, info, index_in_table);
   }
 
   for (const PatchInfo<Label>& info : jit_class_patches_) {
-    const auto it = jit_class_roots_.find(
-        TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
-    DCHECK(it != jit_class_roots_.end());
-    uint64_t index_in_table = it->second;
+    TypeReference type_reference(&info.dex_file, dex::TypeIndex(info.index));
+    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
     PatchJitRootUse(code, roots_data, info, index_in_table);
   }
 }
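
Note: the Baker read barrier hunks in x86 and x86-64 choose between two slow-path classes at code-generation time; this CL changes only where they are allocated. A toy sketch of that selection, with hypothetical names (real code placement-news these on GetScopedAllocator()):

#include <cstdio>

struct ToySlowPath {
  virtual const char* Name() const = 0;
  virtual ~ToySlowPath() = default;
};

struct ToyMarkSlowPath : ToySlowPath {
  const char* Name() const override { return "mark"; }
};

struct ToyMarkAndUpdateFieldSlowPath : ToySlowPath {
  const char* Name() const override { return "mark-and-update"; }
};

// Mirrors the hunk: `always_update_field` decides which slow path to emit.
ToySlowPath* MakeReadBarrierSlowPath(bool always_update_field) {
  if (always_update_field) {
    return new ToyMarkAndUpdateFieldSlowPath();
  }
  return new ToyMarkSlowPath();
}

int main() {
  ToySlowPath* slow_path = MakeReadBarrierSlowPath(/* always_update_field= */ true);
  std::printf("%s\n", slow_path->Name());
  delete slow_path;  // Toy only; arena-owned slow paths are not deleted individually.
  return 0;
}
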
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 6f67a45..00c5c27 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -416,10 +416,10 @@
   void RecordBootStringPatch(HLoadString* load_string);
   Label* NewStringBssEntryPatch(HLoadString* load_string);
   Label* NewJitRootStringPatch(const DexFile& dex_file,
-                               dex::StringIndex dex_index,
+                               dex::StringIndex string_index,
                                Handle<mirror::String> handle);
   Label* NewJitRootClassPatch(const DexFile& dex_file,
-                              dex::TypeIndex dex_index,
+                              dex::TypeIndex type_index,
                               Handle<mirror::Class> handle);
 
   void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ee07c4f..ef85f9c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1339,7 +1339,7 @@
   SlowPathCodeARM64* slow_path = nullptr;
   const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
   if (can_slow_path) {
-    slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+    slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
     codegen_->AddSlowPath(slow_path);
     __ Cbz(arg, slow_path->GetEntryLabel());
   }
@@ -1702,7 +1702,6 @@
 static void GenerateVisitStringIndexOf(HInvoke* invoke,
                                        MacroAssembler* masm,
                                        CodeGeneratorARM64* codegen,
-                                       ArenaAllocator* allocator,
                                        bool start_at_zero) {
   LocationSummary* locations = invoke->GetLocations();
 
@@ -1717,7 +1716,7 @@
     if (static_cast<uint32_t>(code_point->AsIntConstant()->GetValue()) > 0xFFFFU) {
       // Always needs the slow-path. We could directly dispatch to it, but this case should be
       // rare, so for simplicity just put the full slow-path down and branch unconditionally.
-      slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
       codegen->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -1726,7 +1725,7 @@
   } else if (code_point->GetType() != DataType::Type::kUint16) {
     Register char_reg = WRegisterFrom(locations->InAt(1));
     __ Tst(char_reg, 0xFFFF0000);
-    slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
     codegen->AddSlowPath(slow_path);
     __ B(ne, slow_path->GetEntryLabel());
   }
@@ -1760,8 +1759,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(
-      invoke, GetVIXLAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1777,8 +1775,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(
-      invoke, GetVIXLAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1798,7 +1795,8 @@
 
   Register byte_array = WRegisterFrom(locations->InAt(0));
   __ Cmp(byte_array, 0);
-  SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+  SlowPathCodeARM64* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ B(eq, slow_path->GetEntryLabel());
 
@@ -1842,7 +1840,8 @@
 
   Register string_to_copy = WRegisterFrom(locations->InAt(0));
   __ Cmp(string_to_copy, 0);
-  SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+  SlowPathCodeARM64* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ B(eq, slow_path->GetEntryLabel());
 
@@ -2285,7 +2284,8 @@
   Location dst_pos = locations->InAt(3);
   Location length = locations->InAt(4);
 
-  SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+  SlowPathCodeARM64* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
   codegen_->AddSlowPath(slow_path);
 
   // If source and destination are the same, take the slow path. Overlapping copy regions must be
@@ -2462,7 +2462,8 @@
   Register temp2 = WRegisterFrom(locations->GetTemp(1));
   Location temp2_loc = LocationFrom(temp2);
 
-  SlowPathCodeARM64* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+  SlowPathCodeARM64* intrinsic_slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
   codegen_->AddSlowPath(intrinsic_slow_path);
 
   vixl::aarch64::Label conditions_on_positions_validated;
@@ -2839,7 +2840,8 @@
 
         // Slow path used to copy array when `src` is gray.
         SlowPathCodeARM64* read_barrier_slow_path =
-            new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp));
+            new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(
+                invoke, LocationFrom(tmp));
         codegen_->AddSlowPath(read_barrier_slow_path);
 
         // Given the numeric representation, it's enough to check the low bit of the rb_state.
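
Note: in the intrinsics files, helpers such as GenerateVisitStringIndexOf() no longer thread an ArenaAllocator* through their signatures; the codegen they already receive hands out the scoped allocator. A toy before/after sketch of that simplification, with hypothetical names:

#include <cassert>

struct ToyAllocator {};

class ToyCodegen {
 public:
  ToyAllocator* GetScopedAllocator() { return &allocator_; }
 private:
  ToyAllocator allocator_;
};

// Before: callers had to pass the allocator alongside the codegen.
// void GenerateToyIndexOf(ToyCodegen* codegen, ToyAllocator* allocator, bool start_at_zero);

// After: one parameter fewer; the allocator is reachable through the codegen.
void GenerateToyIndexOf(ToyCodegen* codegen, bool start_at_zero) {
  ToyAllocator* allocator = codegen->GetScopedAllocator();
  assert(allocator != nullptr);  // Slow-path creation would draw from it here.
  (void)allocator;
  (void)start_at_zero;
}

int main() {
  ToyCodegen codegen;
  GenerateToyIndexOf(&codegen, /* start_at_zero= */ true);
  return 0;
}
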
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 332306b..e0874d9 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1490,7 +1490,7 @@
   SlowPathCodeARMVIXL* slow_path = nullptr;
   const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
   if (can_slow_path) {
-    slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+    slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
     codegen_->AddSlowPath(slow_path);
     __ CompareAndBranchIfZero(arg, slow_path->GetEntryLabel());
   }
@@ -1916,7 +1916,6 @@
 static void GenerateVisitStringIndexOf(HInvoke* invoke,
                                        ArmVIXLAssembler* assembler,
                                        CodeGeneratorARMVIXL* codegen,
-                                       ArenaAllocator* allocator,
                                        bool start_at_zero) {
   LocationSummary* locations = invoke->GetLocations();
 
@@ -1932,7 +1931,7 @@
         std::numeric_limits<uint16_t>::max()) {
       // Always needs the slow-path. We could directly dispatch to it, but this case should be
       // rare, so for simplicity just put the full slow-path down and branch unconditionally.
-      slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
       codegen->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -1942,7 +1941,7 @@
     vixl32::Register char_reg = InputRegisterAt(invoke, 1);
    // 0xffff is not a modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`.
     __ Cmp(char_reg, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()) + 1);
-    slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
     codegen->AddSlowPath(slow_path);
     __ B(hs, slow_path->GetEntryLabel());
   }
@@ -1977,8 +1976,7 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(
-      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1994,8 +1992,7 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(
-      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2013,7 +2010,8 @@
   ArmVIXLAssembler* assembler = GetAssembler();
   vixl32::Register byte_array = InputRegisterAt(invoke, 0);
   __ Cmp(byte_array, 0);
-  SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+  SlowPathCodeARMVIXL* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
   codegen_->AddSlowPath(slow_path);
   __ B(eq, slow_path->GetEntryLabel());
 
@@ -2055,7 +2053,8 @@
   ArmVIXLAssembler* assembler = GetAssembler();
   vixl32::Register string_to_copy = InputRegisterAt(invoke, 0);
   __ Cmp(string_to_copy, 0);
-  SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+  SlowPathCodeARMVIXL* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
   codegen_->AddSlowPath(slow_path);
   __ B(eq, slow_path->GetEntryLabel());
 
@@ -2190,7 +2189,8 @@
   Location temp3_loc = locations->GetTemp(2);
   vixl32::Register temp3 = RegisterFrom(temp3_loc);
 
-  SlowPathCodeARMVIXL* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+  SlowPathCodeARMVIXL* intrinsic_slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
   codegen_->AddSlowPath(intrinsic_slow_path);
 
   vixl32::Label conditions_on_positions_validated;
@@ -2496,7 +2496,7 @@
       // Note that the base destination address is computed in `temp2`
       // by the slow path code.
       SlowPathCodeARMVIXL* read_barrier_slow_path =
-          new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
+          new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
       codegen_->AddSlowPath(read_barrier_slow_path);
 
       // Given the numeric representation, it's enough to check the low bit of the
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 5f2f71b..4a8fbf2 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2053,7 +2053,7 @@
   DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
 
   Register argument = locations->InAt(1).AsRegister<Register>();
-  SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
   codegen_->AddSlowPath(slow_path);
   __ Beqz(argument, slow_path->GetEntryLabel());
   codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
@@ -2185,8 +2185,7 @@
 static void GenerateStringIndexOf(HInvoke* invoke,
                                   bool start_at_zero,
                                   MipsAssembler* assembler,
-                                  CodeGeneratorMIPS* codegen,
-                                  ArenaAllocator* allocator) {
+                                  CodeGeneratorMIPS* codegen) {
   LocationSummary* locations = invoke->GetLocations();
   Register tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<Register>() : TMP;
 
@@ -2202,7 +2201,7 @@
       // Always needs the slow-path. We could directly dispatch to it,
       // but this case should be rare, so for simplicity just put the
       // full slow-path down and branch unconditionally.
-      slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
       codegen->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -2219,7 +2218,7 @@
     // two halfwords so we fall back to using the generic implementation
     // of indexOf().
     __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
-    slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
     codegen->AddSlowPath(slow_path);
     __ Bltu(tmp_reg, char_reg, slow_path->GetEntryLabel());
   }
@@ -2253,11 +2252,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke,
-                        /* start_at_zero */ true,
-                        GetAssembler(),
-                        codegen_,
-                        GetAllocator());
+  GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
 }
 
 // int java.lang.String.indexOf(int ch, int fromIndex)
@@ -2278,11 +2273,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke,
-                        /* start_at_zero */ false,
-                        GetAssembler(),
-                        codegen_,
-                        GetAllocator());
+  GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
 }
 
 // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2303,7 +2294,7 @@
   LocationSummary* locations = invoke->GetLocations();
 
   Register byte_array = locations->InAt(0).AsRegister<Register>();
-  SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
   codegen_->AddSlowPath(slow_path);
   __ Beqz(byte_array, slow_path->GetEntryLabel());
   codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
@@ -2347,7 +2338,7 @@
   LocationSummary* locations = invoke->GetLocations();
 
   Register string_to_copy = locations->InAt(0).AsRegister<Register>();
-  SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
   codegen_->AddSlowPath(slow_path);
   __ Beqz(string_to_copy, slow_path->GetEntryLabel());
   codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc());
@@ -3059,7 +3050,7 @@
   Register src_base = locations->GetTemp(1).AsRegister<Register>();
   Register count = locations->GetTemp(2).AsRegister<Register>();
 
-  SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
   codegen_->AddSlowPath(slow_path);
 
   // Bail out if the source and destination are the same (to handle overlap).
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 8d5be80..512fb68 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1626,7 +1626,8 @@
   DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
 
   GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
-  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+  SlowPathCodeMIPS64* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ Beqzc(argument, slow_path->GetEntryLabel());
 
@@ -1754,7 +1755,6 @@
 static void GenerateStringIndexOf(HInvoke* invoke,
                                   Mips64Assembler* assembler,
                                   CodeGeneratorMIPS64* codegen,
-                                  ArenaAllocator* allocator,
                                   bool start_at_zero) {
   LocationSummary* locations = invoke->GetLocations();
   GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
@@ -1771,7 +1771,7 @@
       // Always needs the slow-path. We could directly dispatch to it,
       // but this case should be rare, so for simplicity just put the
       // full slow-path down and branch unconditionally.
-      slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
       codegen->AddSlowPath(slow_path);
       __ Bc(slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -1780,7 +1780,7 @@
   } else if (code_point->GetType() != DataType::Type::kUint16) {
     GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
     __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
-    slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
     codegen->AddSlowPath(slow_path);
     __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel());    // UTF-16 required
   }
@@ -1816,7 +1816,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
 }
 
 // int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1834,8 +1834,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(
-      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
 }
 
 // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1856,7 +1855,8 @@
   LocationSummary* locations = invoke->GetLocations();
 
   GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
-  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+  SlowPathCodeMIPS64* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ Beqzc(byte_array, slow_path->GetEntryLabel());
 
@@ -1903,7 +1903,8 @@
   LocationSummary* locations = invoke->GetLocations();
 
   GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
-  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+  SlowPathCodeMIPS64* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ Beqzc(string_to_copy, slow_path->GetEntryLabel());
 
@@ -2160,7 +2161,8 @@
   GpuRegister src_base = locations->GetTemp(1).AsRegister<GpuRegister>();
   GpuRegister count = locations->GetTemp(2).AsRegister<GpuRegister>();
 
-  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+  SlowPathCodeMIPS64* slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
   codegen_->AddSlowPath(slow_path);
 
   // Bail out if the source and destination are the same (to handle overlap).
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8b389ba..8a0b6ae 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1253,7 +1253,7 @@
   Register count = locations->GetTemp(2).AsRegister<Register>();
   DCHECK_EQ(count, ECX);
 
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
   codegen_->AddSlowPath(slow_path);
 
   // Bail out if the source and destination are the same (to handle overlap).
@@ -1336,7 +1336,7 @@
 
   Register argument = locations->InAt(1).AsRegister<Register>();
   __ testl(argument, argument);
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
   codegen_->AddSlowPath(slow_path);
   __ j(kEqual, slow_path->GetEntryLabel());
 
@@ -1485,7 +1485,6 @@
 static void GenerateStringIndexOf(HInvoke* invoke,
                                   X86Assembler* assembler,
                                   CodeGeneratorX86* codegen,
-                                  ArenaAllocator* allocator,
                                   bool start_at_zero) {
   LocationSummary* locations = invoke->GetLocations();
 
@@ -1515,7 +1514,7 @@
     std::numeric_limits<uint16_t>::max()) {
       // Always needs the slow-path. We could directly dispatch to it, but this case should be
       // rare, so for simplicity just put the full slow-path down and branch unconditionally.
-      slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
       codegen->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -1523,7 +1522,7 @@
     }
   } else if (code_point->GetType() != DataType::Type::kUint16) {
     __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
-    slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
     codegen->AddSlowPath(slow_path);
     __ j(kAbove, slow_path->GetEntryLabel());
   }
@@ -1640,7 +1639,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1648,8 +1647,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(
-      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1669,7 +1667,7 @@
 
   Register byte_array = locations->InAt(0).AsRegister<Register>();
   __ testl(byte_array, byte_array);
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
   codegen_->AddSlowPath(slow_path);
   __ j(kEqual, slow_path->GetEntryLabel());
 
@@ -1713,7 +1711,7 @@
 
   Register string_to_copy = locations->InAt(0).AsRegister<Register>();
   __ testl(string_to_copy, string_to_copy);
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
   codegen_->AddSlowPath(slow_path);
   __ j(kEqual, slow_path->GetEntryLabel());
 
@@ -2901,7 +2899,8 @@
   Location temp2_loc = locations->GetTemp(1);
   Register temp2 = temp2_loc.AsRegister<Register>();
 
-  SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+  SlowPathCode* intrinsic_slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
   codegen_->AddSlowPath(intrinsic_slow_path);
 
   NearLabel conditions_on_positions_validated;
@@ -3215,7 +3214,7 @@
 
     // Slow path used to copy array when `src` is gray.
     SlowPathCode* read_barrier_slow_path =
-        new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86(invoke);
+        new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathX86(invoke);
     codegen_->AddSlowPath(read_barrier_slow_path);
 
     // We have done the "if" of the gray bit check above; now branch based on the flags.
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 6337900..92ffda4 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1033,7 +1033,7 @@
   CpuRegister count = locations->GetTemp(2).AsRegister<CpuRegister>();
   DCHECK_EQ(count.AsRegister(), RCX);
 
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(slow_path);
 
   // Bail out if the source and destination are the same.
@@ -1175,7 +1175,8 @@
   CpuRegister temp3 = temp3_loc.AsRegister<CpuRegister>();
   Location TMP_loc = Location::RegisterLocation(TMP);
 
-  SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  SlowPathCode* intrinsic_slow_path =
+      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(intrinsic_slow_path);
 
   NearLabel conditions_on_positions_validated;
@@ -1449,7 +1450,7 @@
 
     // Slow path used to copy array when `src` is gray.
     SlowPathCode* read_barrier_slow_path =
-        new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86_64(invoke);
+        new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathX86_64(invoke);
     codegen_->AddSlowPath(read_barrier_slow_path);
 
     // We have done the "if" of the gray bit check above; now branch based on the flags.
@@ -1510,7 +1511,7 @@
 
   CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
   __ testl(argument, argument);
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ j(kEqual, slow_path->GetEntryLabel());
 
@@ -1655,7 +1656,6 @@
 static void GenerateStringIndexOf(HInvoke* invoke,
                                   X86_64Assembler* assembler,
                                   CodeGeneratorX86_64* codegen,
-                                  ArenaAllocator* allocator,
                                   bool start_at_zero) {
   LocationSummary* locations = invoke->GetLocations();
 
@@ -1683,7 +1683,7 @@
     std::numeric_limits<uint16_t>::max()) {
       // Always needs the slow-path. We could directly dispatch to it, but this case should be
       // rare, so for simplicity just put the full slow-path down and branch unconditionally.
-      slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
       codegen->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -1691,7 +1691,7 @@
     }
   } else if (code_point->GetType() != DataType::Type::kUint16) {
     __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
-    slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
     codegen->AddSlowPath(slow_path);
     __ j(kAbove, slow_path->GetEntryLabel());
   }
@@ -1800,7 +1800,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1808,8 +1808,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(
-      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1829,7 +1828,7 @@
 
   CpuRegister byte_array = locations->InAt(0).AsRegister<CpuRegister>();
   __ testl(byte_array, byte_array);
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ j(kEqual, slow_path->GetEntryLabel());
 
@@ -1873,7 +1872,7 @@
 
   CpuRegister string_to_copy = locations->InAt(0).AsRegister<CpuRegister>();
   __ testl(string_to_copy, string_to_copy);
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(slow_path);
   __ j(kEqual, slow_path->GetEntryLabel());
 
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index bd65cbf..b7380b0 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -63,6 +63,7 @@
     // Generate simple frame with some spills.
     code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
     code_gen_->GetAssembler()->cfi().SetEnabled(true);
+    code_gen_->InitializeCodeGenerationData();
     const int frame_size = 64;
     int core_reg = 0;
     int fp_reg = 0;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 42f32b7..29319f8 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1142,6 +1142,7 @@
       method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item);
 
       if (kArenaAllocatorCountAllocations) {
+        codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
         size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
         if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
           MemStats mem_stats(allocator.GetMemStats());
@@ -1251,18 +1252,6 @@
     if (codegen.get() == nullptr) {
       return false;
     }
-
-    if (kArenaAllocatorCountAllocations) {
-      size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
-      if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
-        MemStats mem_stats(allocator.GetMemStats());
-        MemStats peak_stats(arena_stack.GetPeakStats());
-        LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
-                  << dex_file->PrettyMethod(method_idx)
-                  << "\n" << Dumpable<MemStats>(mem_stats)
-                  << "\n" << Dumpable<MemStats>(peak_stats);
-      }
-    }
   }
 
   size_t stack_map_size = 0;
@@ -1357,6 +1346,19 @@
     jit_logger->WriteLog(code, code_allocator.GetSize(), method);
   }
 
+  if (kArenaAllocatorCountAllocations) {
+    codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
+    size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+    if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+      MemStats mem_stats(allocator.GetMemStats());
+      MemStats peak_stats(arena_stack.GetPeakStats());
+      LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+                << dex_file->PrettyMethod(method_idx)
+                << "\n" << Dumpable<MemStats>(mem_stats)
+                << "\n" << Dumpable<MemStats>(peak_stats);
+    }
+  }
+
   return true;
 }
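
The memory-accounting block moves below Emit() and gains a codegen.reset() because, on the assumption reflected in the added comment, the ArenaStack folds a ScopedArenaAllocator's usage into its peak statistics only when that allocator is destroyed; reading the peak while the codegen data is still live would undercount. A toy model of that ordering, with all names illustrative rather than the ART API:

  #include <algorithm>
  #include <cstddef>
  #include <iostream>
  #include <memory>

  class ToyArenaStack {
   public:
    void Push(std::size_t n) { live_ += n; }
    void Pop(std::size_t n) { peak_ = std::max(peak_, live_); live_ -= n; }  // peak folded in on pop
    std::size_t PeakBytesAllocated() const { return peak_; }
   private:
    std::size_t live_ = 0u;
    std::size_t peak_ = 0u;
  };

  class ToyScopedAllocator {  // stand-in for the codegen's ScopedArenaAllocator
   public:
    explicit ToyScopedAllocator(ToyArenaStack* stack) : stack_(stack) { stack_->Push(kFrameBytes); }
    ~ToyScopedAllocator() { stack_->Pop(kFrameBytes); }
   private:
    static constexpr std::size_t kFrameBytes = 4096u;
    ToyArenaStack* const stack_;
  };

  int main() {
    ToyArenaStack stack;
    auto codegen_data = std::make_unique<ToyScopedAllocator>(&stack);
    std::cout << stack.PeakBytesAllocated() << "\n";  // 0: frame still live, not yet in peak
    codegen_data.reset();                             // like codegen.reset() above
    std::cout << stack.PeakBytesAllocated() << "\n";  // 4096: now visible to accounting
  }
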
 
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 5ed9e02..1d3fe03 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -16,6 +16,7 @@
 
 #include "register_allocation_resolver.h"
 
+#include "base/bit_vector-inl.h"
 #include "code_generator.h"
 #include "linear_order.h"
 #include "ssa_liveness_analysis.h"
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 7eb2188..9bc8045 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -43,9 +43,12 @@
   current_entry_.dex_method_index = dex::kDexNoIndex;
   current_entry_.dex_register_entry.num_dex_registers = num_dex_registers;
   current_entry_.dex_register_entry.locations_start_index = dex_register_locations_.size();
-  current_entry_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
-      ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
-      : nullptr;
+  current_entry_.dex_register_entry.live_dex_registers_mask = nullptr;
+  if (num_dex_registers != 0u) {
+    current_entry_.dex_register_entry.live_dex_registers_mask =
+        ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
+    current_entry_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+  }
   if (sp_mask != nullptr) {
     stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
   }
@@ -121,9 +124,12 @@
   current_inline_info_.dex_pc = dex_pc;
   current_inline_info_.dex_register_entry.num_dex_registers = num_dex_registers;
   current_inline_info_.dex_register_entry.locations_start_index = dex_register_locations_.size();
-  current_inline_info_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
-      ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
-      : nullptr;
+  current_inline_info_.dex_register_entry.live_dex_registers_mask = nullptr;
+  if (num_dex_registers != 0u) {
+    current_inline_info_.dex_register_entry.live_dex_registers_mask =
+        ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
+    current_inline_info_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+  }
   current_dex_register_ = 0;
 }
 
@@ -468,7 +474,7 @@
   if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
     // We don't have a perfect hash function, so we need a list to collect all stack maps
     // which might have the same dex register map.
-    ArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
+    ScopedArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
     stack_map_indices.push_back(current_entry_index);
     dex_map_hash_to_stack_map_indices_.Put(entry.hash, std::move(stack_map_indices));
   } else {
@@ -546,7 +552,7 @@
 
 size_t StackMapStream::PrepareRegisterMasks() {
   register_masks_.resize(stack_maps_.size(), 0u);
-  ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+  ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
   for (StackMapEntry& stack_map : stack_maps_) {
     const size_t index = dedupe.size();
     stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second;
@@ -558,7 +564,7 @@
 void StackMapStream::PrepareMethodIndices() {
   CHECK(method_indices_.empty());
   method_indices_.resize(stack_maps_.size() + inline_infos_.size());
-  ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+  ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
   for (StackMapEntry& stack_map : stack_maps_) {
     const size_t index = dedupe.size();
     const uint32_t method_index = stack_map.dex_method_index;
@@ -584,11 +590,11 @@
   stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u);
   // For deduplication we store the stack masks byte-packed for simplicity. We can bit-pack later
   // when copying out from stack_masks_.
-  ArenaUnorderedMap<MemoryRegion,
-                    size_t,
-                    FNVHash<MemoryRegion>,
-                    MemoryRegion::ContentEquals> dedup(
-                        stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
+  ScopedArenaUnorderedMap<MemoryRegion,
+                          size_t,
+                          FNVHash<MemoryRegion>,
+                          MemoryRegion::ContentEquals> dedup(
+                              stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
   for (StackMapEntry& stack_map : stack_maps_) {
     size_t index = dedup.size();
     MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);
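
Both BeginStackMapEntry() and BeginInlineInfoEntry() now repeat the same create-then-clear sequence; the explicit ClearAllBits() is presumably required because the scoped arena, unlike ArenaAllocator, may hand back reused, non-zeroed memory. A hypothetical helper capturing the pattern (the function name is invented; the body matches the hunks above):

  static ArenaBitVector* CreateLiveRegistersMask(ScopedArenaAllocator* allocator,
                                                 size_t num_dex_registers) {
    if (num_dex_registers == 0u) {
      return nullptr;
    }
    ArenaBitVector* mask =
        ArenaBitVector::Create(allocator, num_dex_registers, true, kArenaAllocStackMapStream);
    mask->ClearAllBits();  // reused arena memory may contain stale bits
    return mask;
  }
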
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 62ed7ee..e126609 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -17,9 +17,9 @@
 #ifndef ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
 #define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
 
-#include "base/arena_containers.h"
 #include "base/bit_vector-inl.h"
 #include "base/hash_map.h"
+#include "base/scoped_arena_containers.h"
 #include "base/value_object.h"
 #include "memory_region.h"
 #include "method_info.h"
@@ -60,8 +60,7 @@
  */
 class StackMapStream : public ValueObject {
  public:
-  explicit StackMapStream(ArenaAllocator* allocator,
-                          InstructionSet instruction_set)
+  explicit StackMapStream(ScopedArenaAllocator* allocator, InstructionSet instruction_set)
       : allocator_(allocator),
         instruction_set_(instruction_set),
         stack_maps_(allocator->Adapter(kArenaAllocStackMapStream)),
@@ -223,37 +222,37 @@
                            size_t dex_register_locations_index) const;
   void CheckCodeInfo(MemoryRegion region) const;
 
-  ArenaAllocator* const allocator_;
+  ScopedArenaAllocator* const allocator_;
   const InstructionSet instruction_set_;
-  ArenaVector<StackMapEntry> stack_maps_;
+  ScopedArenaVector<StackMapEntry> stack_maps_;
 
   // A catalog of unique [location_kind, register_value] pairs (per method).
-  ArenaVector<DexRegisterLocation> location_catalog_entries_;
+  ScopedArenaVector<DexRegisterLocation> location_catalog_entries_;
   // Map from Dex register location catalog entries to their indices in the
   // location catalog.
-  using LocationCatalogEntriesIndices = ArenaHashMap<DexRegisterLocation,
-                                                     size_t,
-                                                     LocationCatalogEntriesIndicesEmptyFn,
-                                                     DexRegisterLocationHashFn>;
+  using LocationCatalogEntriesIndices = ScopedArenaHashMap<DexRegisterLocation,
+                                                           size_t,
+                                                           LocationCatalogEntriesIndicesEmptyFn,
+                                                           DexRegisterLocationHashFn>;
   LocationCatalogEntriesIndices location_catalog_entries_indices_;
 
   // A set of concatenated maps of Dex register location indices to `location_catalog_entries_`.
-  ArenaVector<size_t> dex_register_locations_;
-  ArenaVector<InlineInfoEntry> inline_infos_;
-  ArenaVector<uint8_t> stack_masks_;
-  ArenaVector<uint32_t> register_masks_;
-  ArenaVector<uint32_t> method_indices_;
-  ArenaVector<DexRegisterMapEntry> dex_register_entries_;
+  ScopedArenaVector<size_t> dex_register_locations_;
+  ScopedArenaVector<InlineInfoEntry> inline_infos_;
+  ScopedArenaVector<uint8_t> stack_masks_;
+  ScopedArenaVector<uint32_t> register_masks_;
+  ScopedArenaVector<uint32_t> method_indices_;
+  ScopedArenaVector<DexRegisterMapEntry> dex_register_entries_;
   int stack_mask_max_;
   uint32_t dex_pc_max_;
   uint32_t register_mask_max_;
   size_t number_of_stack_maps_with_inline_info_;
 
-  ArenaSafeMap<uint32_t, ArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
+  ScopedArenaSafeMap<uint32_t, ScopedArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
 
   StackMapEntry current_entry_;
   InlineInfoEntry current_inline_info_;
-  ArenaVector<uint8_t> code_info_encoding_;
+  ScopedArenaVector<uint8_t> code_info_encoding_;
   size_t needed_size_;
   uint32_t current_dex_register_;
   bool in_inline_frame_;
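
The container migration above is mechanical: both allocator families expose the same Adapter() hook, so only the template names change while the member initializers stay as-is. A trimmed sketch of the shape; ExampleStream and the field subset are illustrative, not a replacement for the class above:

  class ExampleStream : public ValueObject {
   public:
    explicit ExampleStream(ScopedArenaAllocator* allocator)
        : allocator_(allocator),
          stack_maps_(allocator->Adapter(kArenaAllocStackMapStream)) {}  // initializer unchanged

   private:
    ScopedArenaAllocator* const allocator_;        // was ArenaAllocator* const
    ScopedArenaVector<StackMapEntry> stack_maps_;  // was ArenaVector<StackMapEntry>
  };
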
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 96ac368..91f86d5 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,7 +47,8 @@
 
 TEST(StackMapTest, Test1) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
@@ -128,7 +129,8 @@
 
 TEST(StackMapTest, Test2) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
 
@@ -412,7 +414,8 @@
 
 TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
 
@@ -506,7 +509,8 @@
 
 TEST(StackMapTest, TestNonLiveDexRegisters) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
@@ -585,7 +589,8 @@
 // not treat it as kNoDexRegisterMap.
 TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
@@ -648,7 +653,8 @@
 
 TEST(StackMapTest, TestShareDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
@@ -706,7 +712,8 @@
 
 TEST(StackMapTest, TestNoDexRegisterMap) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
@@ -755,7 +762,8 @@
 
 TEST(StackMapTest, InlineTest) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
   ArtMethod art_method;
 
@@ -936,7 +944,8 @@
 
 TEST(StackMapTest, TestDeduplicateStackMask) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
 
   ArenaBitVector sp_mask(&allocator, 0, true);
@@ -964,7 +973,8 @@
 
 TEST(StackMapTest, TestInvokeInfo) {
   ArenaPool pool;
-  ArenaAllocator allocator(&pool);
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
 
   ArenaBitVector sp_mask(&allocator, 0, true);
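
Each test above swaps the same two setup lines. The resulting allocator chain and its lifetime, assuming the usual scoping of these classes; the braces and comments are illustrative:

  ArenaPool pool;                 // long-lived backing memory
  ArenaStack arena_stack(&pool);  // per-compilation stack of arenas
  {
    ScopedArenaAllocator allocator(&arena_stack);  // one stack frame of memory
    StackMapStream stream(&allocator, kRuntimeISA);
    ArenaBitVector sp_mask(&allocator, 0, false);
    // ... BeginStackMapEntry() / EndStackMapEntry() calls ...
  }  // frame unwinds: the stream's containers and sp_mask are released together
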