Remove CodeAllocator and the extra copy of generated code.

The code used to copy the final generated code twice: first from the
assembler to the CodeAllocator, and then from the CodeAllocator to the
SwapAllocator/JitMemory.

The assemblers never depended on the exact final location of the
generated code, so drop the CodeAllocator indirection and read the code
directly out of the assembler's buffer.

Test: test.py
Change-Id: I8dc82e4926097092b9aac336a5a5d40f79dc62ca
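
For orientation, the data flow this change removes and what replaces it,
condensed from the hunks below (a sketch of the calls touched here, not a
complete compile sequence):

    // Before: Compile(allocator) copied the finalized code out of the assembler
    // into the caller-provided CodeAllocator, and that buffer was copied once
    // more into the SwapAllocator (AOT) or JitMemory (JIT).
    //
    // After: the code stays in the assembler buffer and callers take a view of it.
    codegen->Compile();                                 // no CodeAllocator argument
    ArrayRef<const uint8_t> code = codegen->GetCode();  // non-owning view of the assembler buffer
    // The view is only valid while `codegen` (and thus its assembler) is alive;
    // the single remaining copy is made when the code is committed to its final
    // home (SwapAllocator for AOT, JitMemory for JIT).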
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 78aa328..d6d9009 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -263,7 +263,7 @@
   code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
 }
 
-void CodeGenerator::Compile(CodeAllocator* allocator) {
+void CodeGenerator::Compile() {
   InitializeCodeGenerationData();
 
   // The register allocator already called `InitializeCodeGeneration`,
@@ -328,17 +328,13 @@
   }
 
   // Finalize instructions in assember;
-  Finalize(allocator);
+  Finalize();
 
   GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
 }
 
-void CodeGenerator::Finalize(CodeAllocator* allocator) {
-  size_t code_size = GetAssembler()->CodeSize();
-  uint8_t* buffer = allocator->Allocate(code_size);
-
-  MemoryRegion code(buffer, code_size);
-  GetAssembler()->FinalizeInstructions(code);
+void CodeGenerator::Finalize() {
+  GetAssembler()->FinalizeCode();
 }
 
 void CodeGenerator::EmitLinkerPatches(
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5f4f377..bcbffe4 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -87,18 +87,6 @@
 class LinkerPatch;
 }  // namespace linker
 
-class CodeAllocator {
- public:
-  CodeAllocator() {}
-  virtual ~CodeAllocator() {}
-
-  virtual uint8_t* Allocate(size_t size) = 0;
-  virtual ArrayRef<const uint8_t> GetMemory() const = 0;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
-};
-
 class SlowPathCode : public DeletableArenaObject<kArenaAllocSlowPaths> {
  public:
   explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
@@ -205,7 +193,7 @@
 class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
  public:
   // Compiles the graph to executable instructions.
-  void Compile(CodeAllocator* allocator);
+  void Compile();
   static std::unique_ptr<CodeGenerator> Create(HGraph* graph,
                                                const CompilerOptions& compiler_options,
                                                OptimizingCompilerStats* stats = nullptr);
@@ -226,7 +214,7 @@
   }
 
   virtual void Initialize() = 0;
-  virtual void Finalize(CodeAllocator* allocator);
+  virtual void Finalize();
   virtual void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches);
   virtual bool NeedsThunkCode(const linker::LinkerPatch& patch) const;
   virtual void EmitThunkCode(const linker::LinkerPatch& patch,
@@ -736,6 +724,11 @@
   static QuickEntrypointEnum GetArrayAllocationEntrypoint(HNewArray* new_array);
   static ScaleFactor ScaleFactorForType(DataType::Type type);
 
+  ArrayRef<const uint8_t> GetCode() const {
+    return ArrayRef<const uint8_t>(GetAssembler().CodeBufferBaseAddress(),
+                                   GetAssembler().CodeSize());
+  }
+
  protected:
   // Patch info used for recording locations of required linker patches and their targets,
   // i.e. target method, string, type or code identified by their dex file and index,
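
The new GetCode() accessor assumes the assembler keeps its buffer alive and can
report its base address and size. A self-contained toy of that non-owning-view
pattern (illustrative only, these are not the ART classes):

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // The assembler owns the code buffer; the generator only hands out a view.
    class ToyAssembler {
     public:
      void Emit(uint8_t byte) { buffer_.push_back(byte); }
      const uint8_t* CodeBufferBaseAddress() const { return buffer_.data(); }
      size_t CodeSize() const { return buffer_.size(); }

     private:
      std::vector<uint8_t> buffer_;
    };

    class ToyCodeGenerator {
     public:
      ToyAssembler& GetAssembler() { return assembler_; }
      // Analogous to CodeGenerator::GetCode(): a (pointer, size) view that is
      // only valid while the generator, and therefore its assembler, is alive.
      std::pair<const uint8_t*, size_t> GetCode() const {
        return {assembler_.CodeBufferBaseAddress(), assembler_.CodeSize()};
      }

     private:
      ToyAssembler assembler_;
    };

    int main() {
      ToyCodeGenerator codegen;
      codegen.GetAssembler().Emit(0xC3);  // pretend a single `ret` was emitted
      auto [code, size] = codegen.GetCode();
      std::vector<uint8_t> final_home(code, code + size);  // the one remaining copy
      return final_home.size() == 1 ? 0 : 1;
    }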
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index fe81b31..130d9bb 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1040,7 +1040,7 @@
   }
 }
 
-void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
+void CodeGeneratorARM64::Finalize() {
   EmitJumpTables();
 
   // Emit JIT baker read barrier slow paths.
@@ -1055,11 +1055,11 @@
   // Ensure we emit the literal pool.
   __ FinalizeCode();
 
-  CodeGenerator::Finalize(allocator);
+  CodeGenerator::Finalize();
 
   // Verify Baker read barrier linker patches.
   if (kIsDebugBuild) {
-    ArrayRef<const uint8_t> code = allocator->GetMemory();
+    ArrayRef<const uint8_t> code(GetCode());
     for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
       DCHECK(info.label.IsBound());
       uint32_t literal_offset = info.label.GetLocation();
@@ -5371,7 +5371,7 @@
   assembler.FinalizeCode();
   code->resize(assembler.CodeSize());
   MemoryRegion code_region(code->data(), code->size());
-  assembler.FinalizeInstructions(code_region);
+  assembler.CopyInstructions(code_region);
 }
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index b256307..52c5377 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -698,7 +698,7 @@
     return jump_tables_.back().get();
   }
 
-  void Finalize(CodeAllocator* allocator) override;
+  void Finalize() override;
 
   // Code generation helpers.
   void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b61f6b5..36bf421 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2025,7 +2025,7 @@
 
 #define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()->  // NOLINT
 
-void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
+void CodeGeneratorARMVIXL::Finalize() {
   FixJumpTables();
 
   // Emit JIT baker read barrier slow paths.
@@ -2038,11 +2038,11 @@
   }
 
   GetAssembler()->FinalizeCode();
-  CodeGenerator::Finalize(allocator);
+  CodeGenerator::Finalize();
 
   // Verify Baker read barrier linker patches.
   if (kIsDebugBuild) {
-    ArrayRef<const uint8_t> code = allocator->GetMemory();
+    ArrayRef<const uint8_t> code(GetCode());
     for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
       DCHECK(info.label.IsBound());
       uint32_t literal_offset = info.label.GetLocation();
@@ -9874,7 +9874,7 @@
   assembler.FinalizeCode();
   code->resize(assembler.CodeSize());
   MemoryRegion code_region(code->data(), code->size());
-  assembler.FinalizeInstructions(code_region);
+  assembler.CopyInstructions(code_region);
 }
 
 VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index f5abe69..0175448 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -620,7 +620,7 @@
     block_labels_.resize(GetGraph()->GetBlocks().size());
   }
 
-  void Finalize(CodeAllocator* allocator) override;
+  void Finalize() override;
 
   bool NeedsTwoRegisters(DataType::Type type) const override {
     return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3ae6515..da943bf 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -8930,7 +8930,7 @@
   const HX86PackedSwitch* switch_instr_;
 };
 
-void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+void CodeGeneratorX86::Finalize() {
   // Generate the constant area if needed.
   X86Assembler* assembler = GetAssembler();
 
@@ -8950,7 +8950,7 @@
   }
 
   // And finish up.
-  CodeGenerator::Finalize(allocator);
+  CodeGenerator::Finalize();
 }
 
 Address CodeGeneratorX86::LiteralDoubleAddress(double v,
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 0905f32..aa25528 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -635,7 +635,7 @@
 
   Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
 
-  void Finalize(CodeAllocator* allocator) override;
+  void Finalize() override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 7c61519..93b9660 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -8170,7 +8170,7 @@
   const HPackedSwitch* switch_instr_;
 };
 
-void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
+void CodeGeneratorX86_64::Finalize() {
   // Generate the constant area if needed.
   X86_64Assembler* assembler = GetAssembler();
   if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
@@ -8188,7 +8188,7 @@
   }
 
   // And finish up.
-  CodeGenerator::Finalize(allocator);
+  CodeGenerator::Finalize();
 }
 
 Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index e1ce3a9..5a940c1 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -468,7 +468,7 @@
   void SetupBlockedRegisters() const override;
   void DumpCoreRegister(std::ostream& stream, int reg) const override;
   void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
-  void Finalize(CodeAllocator* allocator) override;
+  void Finalize() override;
 
   InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86_64;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 2d9acc4..f890ba9 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -733,8 +733,7 @@
   move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), DataType::Type::kInt32, nullptr);
   codegen.GetMoveResolver()->EmitNativeCode(move);
 
-  InternalCodeAllocator code_allocator;
-  codegen.Finalize(&code_allocator);
+  codegen.Finalize();
 }
 #endif
 
@@ -785,8 +784,7 @@
                 nullptr);
   codegen.GetMoveResolver()->EmitNativeCode(move);
 
-  InternalCodeAllocator code_allocator;
-  codegen.Finalize(&code_allocator);
+  codegen.Finalize();
 }
 
 // Check that ParallelMoveResolver works fine for ARM64 for both cases when SIMD is on and off.
@@ -821,8 +819,7 @@
     graph->SetHasSIMD(false);
   }
 
-  InternalCodeAllocator code_allocator;
-  codegen.Finalize(&code_allocator);
+  codegen.Finalize();
 }
 
 // Check that ART ISA Features are propagated to VIXL for arm64 (using cortex-a75 as example).
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 53163da..a8425c9 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -167,28 +167,6 @@
 };
 #endif
 
-class InternalCodeAllocator : public CodeAllocator {
- public:
-  InternalCodeAllocator() : size_(0) { }
-
-  uint8_t* Allocate(size_t size) override {
-    size_ = size;
-    memory_.reset(new uint8_t[size]);
-    return memory_.get();
-  }
-
-  size_t GetSize() const { return size_; }
-  ArrayRef<const uint8_t> GetMemory() const override {
-    return ArrayRef<const uint8_t>(memory_.get(), size_);
-  }
-
- private:
-  size_t size_;
-  std::unique_ptr<uint8_t[]> memory_;
-
-  DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
-};
-
 static bool CanExecuteOnHardware(InstructionSet target_isa) {
   return (target_isa == kRuntimeISA)
       // Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2).
@@ -247,8 +225,7 @@
 }
 
 template <typename Expected>
-static void Run(const InternalCodeAllocator& allocator,
-                const CodeGenerator& codegen,
+static void Run(const CodeGenerator& codegen,
                 bool has_result,
                 Expected expected) {
   InstructionSet target_isa = codegen.GetInstructionSet();
@@ -260,7 +237,7 @@
   };
   CodeHolder code_holder;
   const void* method_code =
-      code_holder.MakeExecutable(allocator.GetMemory(), ArrayRef<const uint8_t>(), target_isa);
+      code_holder.MakeExecutable(codegen.GetCode(), ArrayRef<const uint8_t>(), target_isa);
 
   using fptr = Expected (*)();
   fptr f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(method_code));
@@ -294,9 +271,8 @@
     register_allocator->AllocateRegisters();
   }
   hook_before_codegen(graph);
-  InternalCodeAllocator allocator;
-  codegen->Compile(&allocator);
-  Run(allocator, *codegen, has_result, expected);
+  codegen->Compile();
+  Run(*codegen, has_result, expected);
 }
 
 template <typename Expected>
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index f12e748..632c32a 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -89,7 +89,7 @@
 
   void Finish() {
     code_gen_->GenerateFrameExit();
-    code_gen_->Finalize(&code_allocator_);
+    code_gen_->Finalize();
   }
 
   void Check(InstructionSet isa,
@@ -97,7 +97,7 @@
              const std::vector<uint8_t>& expected_asm,
              const std::vector<uint8_t>& expected_cfi) {
     // Get the outputs.
-    ArrayRef<const uint8_t> actual_asm = code_allocator_.GetMemory();
+    ArrayRef<const uint8_t> actual_asm = code_gen_->GetCode();
     Assembler* opt_asm = code_gen_->GetAssembler();
     ArrayRef<const uint8_t> actual_cfi(*(opt_asm->cfi().data()));
 
@@ -123,27 +123,9 @@
   }
 
  private:
-  class InternalCodeAllocator : public CodeAllocator {
-   public:
-    InternalCodeAllocator() {}
-
-    uint8_t* Allocate(size_t size) override {
-      memory_.resize(size);
-      return memory_.data();
-    }
-
-    ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
-
-   private:
-    std::vector<uint8_t> memory_;
-
-    DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
-  };
-
   HGraph* graph_;
   std::unique_ptr<CodeGenerator> code_gen_;
   ArenaVector<HBasicBlock*> blocks_;
-  InternalCodeAllocator code_allocator_;
 };
 
 #define TEST_ISA(isa)                                                 \
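
The JIT hunks in optimizing_compiler.cc below are the delicate part: JIT roots
are now patched directly in the assembler buffer (hence the const_cast of
CodeBufferBaseAddress()), and only afterwards is the code copied once into the
code cache. A self-contained toy of that order of operations, patch in place
first, copy once second (illustrative only, not ART code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Pretend the 0x00 placeholder bytes in the finalized code are root slots,
    // the way EmitJitRoots patches root references into the real code buffer.
    void PatchRootsInPlace(uint8_t* code, size_t size, uint8_t root_value) {
      for (size_t i = 0; i < size; ++i) {
        if (code[i] == 0x00) {
          code[i] = root_value;
        }
      }
    }

    int main() {
      // The assembler-owned buffer with finalized, but not yet patched, code.
      std::vector<uint8_t> assembler_buffer = {0xB8, 0x00, 0x00, 0x00, 0x00, 0xC3};

      // 1. Patch roots in place in the assembler buffer.
      PatchRootsInPlace(assembler_buffer.data(), assembler_buffer.size(), 0x42);

      // 2. Make the single remaining copy into the "code cache".
      std::vector<uint8_t> code_cache_copy = assembler_buffer;

      return code_cache_copy[1] == 0x42 ? 0 : 1;
    }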
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8207501..e4c9b27 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -69,28 +69,6 @@
 static constexpr const char* kPassNameSeparator = "$";
 
 /**
- * Used by the code generator, to allocate the code in a vector.
- */
-class CodeVectorAllocator final : public CodeAllocator {
- public:
-  explicit CodeVectorAllocator(ArenaAllocator* allocator)
-      : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
-
-  uint8_t* Allocate(size_t size) override {
-    memory_.resize(size);
-    return &memory_[0];
-  }
-
-  ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
-  uint8_t* GetData() { return memory_.data(); }
-
- private:
-  ArenaVector<uint8_t> memory_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
-};
-
-/**
  * Filter to apply to the visualizer. Methods whose name contain that filter will
  * be dumped.
  */
@@ -361,7 +339,6 @@
 
   // Create a 'CompiledMethod' for an optimized graph.
   CompiledMethod* Emit(ArenaAllocator* allocator,
-                       CodeVectorAllocator* code_allocator,
                        CodeGenerator* codegen,
                        bool is_intrinsic,
                        const dex::CodeItem* item) const;
@@ -372,10 +349,8 @@
   // 1) Builds the graph. Returns null if it failed to build it.
   // 2) Transforms the graph to SSA. Returns null if it failed.
   // 3) Runs optimizations on the graph, including register allocator.
-  // 4) Generates code with the `code_allocator` provided.
   CodeGenerator* TryCompile(ArenaAllocator* allocator,
                             ArenaStack* arena_stack,
-                            CodeVectorAllocator* code_allocator,
                             const DexCompilationUnit& dex_compilation_unit,
                             ArtMethod* method,
                             CompilationKind compilation_kind,
@@ -383,7 +358,6 @@
 
   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
                                      ArenaStack* arena_stack,
-                                     CodeVectorAllocator* code_allocator,
                                      const DexCompilationUnit& dex_compilation_unit,
                                      ArtMethod* method,
                                      VariableSizedHandleScope* handles) const;
@@ -719,7 +693,6 @@
 }
 
 CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
-                                         CodeVectorAllocator* code_allocator,
                                          CodeGenerator* codegen,
                                          bool is_intrinsic,
                                          const dex::CodeItem* code_item_for_osr_check) const {
@@ -729,7 +702,7 @@
   CompiledCodeStorage* storage = GetCompiledCodeStorage();
   CompiledMethod* compiled_method = storage->CreateCompiledMethod(
       codegen->GetInstructionSet(),
-      code_allocator->GetMemory(),
+      codegen->GetCode(),
       ArrayRef<const uint8_t>(stack_map),
       ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
       ArrayRef<const linker::LinkerPatch>(linker_patches),
@@ -749,7 +722,6 @@
 
 CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                               ArenaStack* arena_stack,
-                                              CodeVectorAllocator* code_allocator,
                                               const DexCompilationUnit& dex_compilation_unit,
                                               ArtMethod* method,
                                               CompilationKind compilation_kind,
@@ -914,7 +886,7 @@
                     regalloc_strategy,
                     compilation_stats_.get());
 
-  codegen->Compile(code_allocator);
+  codegen->Compile();
   pass_observer.DumpDisassembly();
 
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
@@ -924,7 +896,6 @@
 CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
     ArenaAllocator* allocator,
     ArenaStack* arena_stack,
-    CodeVectorAllocator* code_allocator,
     const DexCompilationUnit& dex_compilation_unit,
     ArtMethod* method,
     VariableSizedHandleScope* handles) const {
@@ -1013,7 +984,7 @@
     return nullptr;
   }
 
-  codegen->Compile(code_allocator);
+  codegen->Compile();
   pass_observer.DumpDisassembly();
 
   VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
@@ -1037,7 +1008,6 @@
   DCHECK(runtime->IsAotCompiler());
   ArenaAllocator allocator(runtime->GetArenaPool());
   ArenaStack arena_stack(runtime->GetArenaPool());
-  CodeVectorAllocator code_allocator(&allocator);
   std::unique_ptr<CodeGenerator> codegen;
   bool compiled_intrinsic = false;
   {
@@ -1071,7 +1041,6 @@
       codegen.reset(
           TryCompileIntrinsic(&allocator,
                               &arena_stack,
-                              &code_allocator,
                               dex_compilation_unit,
                               method,
                               &handles));
@@ -1083,7 +1052,6 @@
       codegen.reset(
           TryCompile(&allocator,
                      &arena_stack,
-                     &code_allocator,
                      dex_compilation_unit,
                      method,
                      compiler_options.IsBaseline()
@@ -1094,7 +1062,6 @@
   }
   if (codegen.get() != nullptr) {
     compiled_method = Emit(&allocator,
-                           &code_allocator,
                            codegen.get(),
                            compiled_intrinsic,
                            compiled_intrinsic ? nullptr : code_item);
@@ -1177,19 +1144,16 @@
           /*verified_method=*/ nullptr,
           dex_cache,
           compiling_class);
-      CodeVectorAllocator code_allocator(&allocator);
       // Go to native so that we don't block GC during compilation.
       ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
       std::unique_ptr<CodeGenerator> codegen(
           TryCompileIntrinsic(&allocator,
                               &arena_stack,
-                              &code_allocator,
                               dex_compilation_unit,
                               method,
                               &handles));
       if (codegen != nullptr) {
         return Emit(&allocator,
-                    &code_allocator,
                     codegen.get(),
                     /*is_intrinsic=*/ true,
                     /*item=*/ nullptr);
@@ -1342,7 +1306,6 @@
   }
 
   ArenaStack arena_stack(runtime->GetJitArenaPool());
-  CodeVectorAllocator code_allocator(&allocator);
   VariableSizedHandleScope handles(self);
 
   std::unique_ptr<CodeGenerator> codegen;
@@ -1365,7 +1328,6 @@
     codegen.reset(
         TryCompile(&allocator,
                    &arena_stack,
-                   &code_allocator,
                    dex_compilation_unit,
                    method,
                    compilation_kind,
@@ -1381,7 +1343,7 @@
   ArrayRef<const uint8_t> reserved_data;
   if (!code_cache->Reserve(self,
                            region,
-                           code_allocator.GetMemory().size(),
+                           codegen->GetAssembler()->CodeSize(),
                            stack_map.size(),
                            /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
                            method,
@@ -1394,7 +1356,9 @@
   const uint8_t* roots_data = reserved_data.data();
 
   std::vector<Handle<mirror::Object>> roots;
-  codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
+  codegen->EmitJitRoots(const_cast<uint8_t*>(codegen->GetAssembler()->CodeBufferBaseAddress()),
+                        roots_data,
+                        &roots);
   // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
   DCHECK(std::all_of(roots.begin(),
                      roots.end(),
@@ -1418,7 +1382,7 @@
     info.is_optimized = true;
     info.is_code_address_text_relative = false;
     info.code_address = reinterpret_cast<uintptr_t>(code);
-    info.code_size = code_allocator.GetMemory().size();
+    info.code_size = codegen->GetAssembler()->CodeSize();
     info.frame_size_in_bytes = codegen->GetFrameSize();
     info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
@@ -1429,7 +1393,7 @@
                           region,
                           method,
                           reserved_code,
-                          code_allocator.GetMemory(),
+                          codegen->GetCode(),
                           reserved_data,
                           roots,
                           ArrayRef<const uint8_t>(stack_map),
@@ -1444,7 +1408,7 @@
 
   Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
   if (jit_logger != nullptr) {
-    jit_logger->WriteLog(code, code_allocator.GetMemory().size(), method);
+    jit_logger->WriteLog(code, codegen->GetAssembler()->CodeSize(), method);
   }
 
   if (kArenaAllocatorCountAllocations) {