Add code size to CodeInfo.

This is in preparation for removing it from OatQuickMethodHeader.
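
For reference, a minimal sketch of how the size now flows end to end
(identifiers as in this change; method_header stands for some
OatQuickMethodHeader*):

  // Compiler side: pass the final code size when closing the method;
  // Encode() then stores it as the second interleaved header varint.
  stack_map_stream->EndMethod(GetAssembler()->CodeSize());

  // Runtime side: the size can be decoded from the CodeInfo itself,
  // without consulting OatQuickMethodHeader::code_size_.
  uint32_t code_size = CodeInfo::DecodeCodeSize(method_header);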

Bug: 123510633
Test: m test-art-host-gtest
Test: ./art/test.py -b -r --host
Change-Id: I5c5adb4c040e329b81c1393aa1b80ee017729c8a
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 9212ea6..6a01452 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -81,7 +81,7 @@
     stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u);
     stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
     stack_maps.EndStackMapEntry();
-    stack_maps.EndMethod();
+    stack_maps.EndMethod(code_size);
     ScopedArenaVector<uint8_t> stack_map = stack_maps.Encode();
 
     const size_t stack_maps_size = stack_map.size();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8333b32..886cabb 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -435,7 +435,7 @@
   // Finalize instructions in the assembler.
   Finalize(allocator);
 
-  GetStackMapStream()->EndMethod();
+  GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
 }
 
 void CodeGenerator::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 172a083..bf99a0e 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1131,7 +1131,8 @@
 }
 
 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
-                                                    const JniCompiledMethod& jni_compiled_method) {
+                                                    const JniCompiledMethod& jni_compiled_method,
+                                                    size_t code_size) {
   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
   // to stay clear of the frame size limit.
   std::unique_ptr<StackMapStream> stack_map_stream(
@@ -1142,7 +1143,7 @@
       jni_compiled_method.GetFpSpillMask(),
       /* num_dex_registers= */ 0,
       /* baseline= */ false);
-  stack_map_stream->EndMethod();
+  stack_map_stream->EndMethod(code_size);
   return stack_map_stream->Encode();
 }
 
@@ -1203,8 +1204,8 @@
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
   ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
-                                                           jni_compiled_method);
+  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+      &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
   return CompiledMethod::SwapAllocCompiledMethod(
       GetCompiledMethodStorage(),
       jni_compiled_method.GetInstructionSet(),
@@ -1262,8 +1263,8 @@
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and does not fit in this frame, so we need a helper method.
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
-                                                             jni_compiled_method);
+    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
 
     ArrayRef<const uint8_t> reserved_code;
     ArrayRef<const uint8_t> reserved_data;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index dd6d1a2..e52a3ce 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -65,9 +65,10 @@
   }
 }
 
-void StackMapStream::EndMethod() {
+void StackMapStream::EndMethod(size_t code_size) {
   DCHECK(in_method_) << "Mismatched Begin/End calls";
   in_method_ = false;
+  code_size_ = code_size;
 
   // Read the stack masks now. The compiler might have updated them.
   for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
@@ -77,6 +78,19 @@
           stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
     }
   }
+
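+  // In debug builds, check that no stack map points past the end of the code.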
+  if (kIsDebugBuild) {
+    uint32_t packed_code_size = StackMap::PackNativePc(code_size, instruction_set_);
+    for (size_t i = 0; i < stack_maps_.size(); i++) {
+      DCHECK_LE(stack_maps_[i][StackMap::kPackedNativePc], packed_code_size);
+    }
+  }
+
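+  // Record a consistency check to be run later against the encoded CodeInfo.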
+  if (kVerifyStackMaps) {
+    dchecks_.emplace_back([=](const CodeInfo& code_info) {
+        CHECK_EQ(code_info.code_size_, code_size);
+    });
+  }
 }
 
 void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -302,6 +316,7 @@
 
   uint32_t flags = (inline_infos_.size() > 0) ? CodeInfo::kHasInlineInfo : 0;
   flags |= baseline_ ? CodeInfo::kIsBaseline : 0;
+  DCHECK_LE(flags, kVarintMax);  // Ensure the flags can be read directly as a byte.
   uint32_t bit_table_flags = 0;
   ForEachBitTable([&bit_table_flags](size_t i, auto bit_table) {
     if (bit_table->size() != 0) {  // Record which bit-tables are stored.
@@ -313,6 +328,7 @@
   BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
   out.WriteInterleavedVarints(std::array<uint32_t, CodeInfo::kNumHeaders>{
     flags,
+    code_size_,
     packed_frame_size_,
     core_spill_mask_,
     fp_spill_mask_,
@@ -330,6 +346,8 @@
   CodeInfo code_info(buffer.data(), &number_of_read_bits);
   CHECK_EQ(number_of_read_bits, out.NumberOfWrittenBits());
   CHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
+  CHECK_EQ(CodeInfo::HasInlineInfo(buffer.data()), inline_infos_.size() > 0);
+  CHECK_EQ(CodeInfo::IsBaseline(buffer.data()), baseline_);
 
   // Verify all written data (usually only in debug builds).
   if (kVerifyStackMaps) {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 67f716c..3ded07d 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -63,7 +63,7 @@
                    size_t fp_spill_mask,
                    uint32_t num_dex_registers,
                    bool baseline = false);
-  void EndMethod();
+  void EndMethod(size_t code_size);
 
   void BeginStackMapEntry(uint32_t dex_pc,
                           uint32_t native_pc_offset,
@@ -116,6 +116,7 @@
 
   ScopedArenaAllocator* allocator_;
   const InstructionSet instruction_set_;
+  uint32_t code_size_ = 0;
   uint32_t packed_frame_size_ = 0;
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 0dd5773..0158ab5 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -61,7 +61,7 @@
   stream.AddDexRegisterEntry(Kind::kConstant, -2);       // Short location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -147,7 +147,7 @@
   stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1);  // Short location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(256 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -317,7 +317,7 @@
   stream.EndInlineInfoEntry();
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -372,7 +372,7 @@
   stream.AddDexRegisterEntry(Kind::kConstant, -2);       // Large location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -431,7 +431,7 @@
   stream.AddDexRegisterEntry(Kind::kConstant, -2);   // Large location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(66 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo ci(memory.data());
@@ -479,7 +479,7 @@
   stream.AddDexRegisterEntry(Kind::kNone, 0);
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(68 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -578,7 +578,7 @@
 
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(78 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo ci(memory.data());
@@ -714,7 +714,7 @@
   stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask);
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(8 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -738,7 +738,7 @@
   stream.AddDexRegisterEntry(Kind::kConstant, -2);
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   std::vector<uint8_t> out;
diff --git a/runtime/oat.h b/runtime/oat.h
index e7e9a58..2c07145 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
-  // Last oat version changed reason: Nterp trampoline.
-  static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '9', '0', '\0' } };
+  // Last oat version changed reason: Add code size to CodeInfo.
+  static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '9', '1', '\0' } };
 
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
   static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 9a1133e..7e1fc9b 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -67,7 +67,7 @@
   }
 
   bool IsOptimized() const {
-    return GetCodeSize() != 0 && vmap_table_offset_ != 0;
+    return (code_size_ & kCodeSizeMask) != 0 && vmap_table_offset_ != 0;
   }
 
   const uint8_t* GetOptimizedCodeInfoPtr() const {
@@ -89,6 +89,10 @@
     // accidentally use a function pointer to one of the stubs/trampolines.
     // We prefix those with 0xFF in the assembly so that we can do DCHECKs.
     CHECK_NE(code_size_, 0xFFFFFFFF) << code_size_;
+    if (IsOptimized()) {
+      // Temporary check: the code size in the CodeInfo must match the one stored here.
+      CHECK_EQ(code_size_ & kCodeSizeMask, CodeInfo::DecodeCodeSize(this));
+    }
     return code_size_ & kCodeSizeMask;
   }
 
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 5cda030..5086f92 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -87,6 +87,10 @@
   return copy;
 }
 
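+// Returns the code size recorded in the CodeInfo of the given method header.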
+uint32_t CodeInfo::DecodeCodeSize(const OatQuickMethodHeader* header) {
+  return CodeInfo(header->GetOptimizedCodeInfoPtr()).code_size_;
+}
+
 size_t CodeInfo::Deduper::Dedupe(const uint8_t* code_info_data) {
   writer_.ByteAlign();
   size_t deduped_offset = writer_.NumberOfWrittenBits() / kBitsPerByte;
@@ -263,6 +267,7 @@
                     bool verbose,
                     InstructionSet instruction_set) const {
   vios->Stream() << "CodeInfo "
+    << " CodeSize:" << code_size_
     << " FrameSize:" << packed_frame_size_ * kStackAlignment
     << " CoreSpillMask:" << std::hex << core_spill_mask_
     << " FpSpillMask:" << std::hex << fp_spill_mask_
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 0d289b8..e8e57aa 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -299,6 +299,7 @@
   static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data);
   static CodeInfo DecodeGcMasksOnly(const OatQuickMethodHeader* header);
   static CodeInfo DecodeInlineInfoOnly(const OatQuickMethodHeader* header);
+  static uint32_t DecodeCodeSize(const OatQuickMethodHeader* header);
 
   ALWAYS_INLINE const BitTable<StackMap>& GetStackMaps() const {
     return stack_maps_;
@@ -447,6 +448,7 @@
   ALWAYS_INLINE static void ForEachHeaderField(Callback callback) {
     size_t index = 0;
     callback(index++, &CodeInfo::flags_);
+    callback(index++, &CodeInfo::code_size_);
     callback(index++, &CodeInfo::packed_frame_size_);
     callback(index++, &CodeInfo::core_spill_mask_);
     callback(index++, &CodeInfo::fp_spill_mask_);
@@ -480,8 +482,9 @@
   };
 
   // The CodeInfo starts with a sequence of variable-length bit-encoded integers.
-  static constexpr size_t kNumHeaders = 6;
-  uint32_t flags_ = 0;
+  static constexpr size_t kNumHeaders = 7;
+  uint32_t flags_ = 0;      // Note that the space is limited to three bits.
+  uint32_t code_size_ = 0;  // The size of the native PC range in bytes.
   uint32_t packed_frame_size_ = 0;  // Frame size in kStackAlignment units.
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;