Revert^2 "Add code size to CodeInfo"
This temporarily adds 0.25% to oat file size.
The space will be reclaimed in a follow-up CL.
This reverts commit 8f20a23a35fa6fbe4dcb4ff70268a24dc7fb2a24.
Reason for revert: Reland as-is after CL/903819
Bug: 123510633
Test: DCHECK that the two stored code sizes match.
Change-Id: Ia3ab31c208948f4996188764fcdcba13d9977d19
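For context on the encoding change below: the method's code size is now stored as an extra leading varint in the CodeInfo header, which is why DecodeFrameInfo has to skip one varint before reading the frame fields. A minimal, self-contained sketch of that read/write symmetry, using a simple LEB128-style varint rather than ART's actual BitMemoryWriter/BitMemoryReader bit-packing:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Illustrative varint only; ART's BitMemory encoding differs.
    static void WriteVarint(std::vector<uint8_t>& buf, uint32_t value) {
      while (value >= 0x80) { buf.push_back((value & 0x7f) | 0x80); value >>= 7; }
      buf.push_back(static_cast<uint8_t>(value));
    }
    static uint32_t ReadVarint(const uint8_t*& p) {
      uint32_t value = 0;
      int shift = 0;
      while (*p & 0x80) { value |= uint32_t(*p++ & 0x7f) << shift; shift += 7; }
      return value | (uint32_t(*p++) << shift);
    }

    int main() {
      std::vector<uint8_t> header;
      WriteVarint(header, /*packed_code_size=*/ 64);   // New leading field.
      WriteVarint(header, /*packed_frame_size=*/ 16);
      const uint8_t* p = header.data();
      assert(ReadVarint(p) == 64);  // DecodeCodeSize reads the first varint.
      assert(ReadVarint(p) == 16);  // DecodeFrameInfo must skip it first.
    }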
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index d5ceafe..f978cc6 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -80,7 +80,7 @@
stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u);
stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
stack_maps.EndStackMapEntry();
- stack_maps.EndMethod();
+ stack_maps.EndMethod(code_size);
ScopedArenaVector<uint8_t> stack_map = stack_maps.Encode();
const size_t stack_maps_size = stack_map.size();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9e2f5cd..122f27b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -440,7 +440,7 @@
// Finalize instructions in the assembler.
Finalize(allocator);
- GetStackMapStream()->EndMethod();
+ GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
}
void CodeGenerator::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index e8f8d32..4f43b71 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1171,7 +1171,8 @@
}
static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
- const JniCompiledMethod& jni_compiled_method) {
+ const JniCompiledMethod& jni_compiled_method,
+ size_t code_size) {
// StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
// to stay clear of the frame size limit.
std::unique_ptr<StackMapStream> stack_map_stream(
@@ -1181,7 +1182,7 @@
jni_compiled_method.GetCoreSpillMask(),
jni_compiled_method.GetFpSpillMask(),
/* num_dex_registers= */ 0);
- stack_map_stream->EndMethod();
+ stack_map_stream->EndMethod(code_size);
return stack_map_stream->Encode();
}
@@ -1239,8 +1240,8 @@
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
- ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
- jni_compiled_method);
+ ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+ &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
return CompiledMethod::SwapAllocCompiledMethod(
GetCompilerDriver()->GetCompiledMethodStorage(),
jni_compiled_method.GetInstructionSet(),
@@ -1290,8 +1291,8 @@
ArenaStack arena_stack(runtime->GetJitArenaPool());
// StackMapStream is large and it does not fit into this frame, so we need a helper method.
ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
- ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
- jni_compiled_method);
+ ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+ &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 60ca61c..e87f3c8 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -54,9 +54,10 @@
num_dex_registers_ = num_dex_registers;
}
-void StackMapStream::EndMethod() {
+void StackMapStream::EndMethod(size_t code_size) {
DCHECK(in_method_) << "Mismatched Begin/End calls";
in_method_ = false;
+ packed_code_size_ = StackMap::PackNativePc(code_size, instruction_set_);
// Read the stack masks now. The compiler might have updated them.
for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
@@ -66,6 +67,10 @@
stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
}
}
+
+ for (size_t i = 0; i < stack_maps_.size(); i++) {
+ DCHECK_LE(stack_maps_[i][StackMap::kPackedNativePc], packed_code_size_);
+ }
}
void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
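The packing used above scales native PCs down by the ISA's instruction alignment so they encode as small varints, which is also why the DCHECK_LE over packed values is meaningful. A hedged, self-contained sketch of the assumed pack/unpack semantics (the alignment values are illustrative, e.g. 2 for Thumb2 and 4 for ARM64; ART's real helpers live elsewhere):

    #include <cassert>
    #include <cstdint>

    enum class Isa { kThumb2, kArm64 };

    // Assumed semantics: divide by the ISA's instruction alignment.
    constexpr uint32_t InstructionAlignment(Isa isa) {
      return isa == Isa::kThumb2 ? 2u : 4u;
    }
    constexpr uint32_t PackNativePc(uint32_t pc, Isa isa) {
      return pc / InstructionAlignment(isa);  // pc assumed aligned.
    }
    constexpr uint32_t UnpackNativePc(uint32_t packed, Isa isa) {
      return packed * InstructionAlignment(isa);
    }

    int main() {
      // Round-trip holds for aligned PCs, so comparing packed values
      // (as the DCHECK_LE above does) compares the PCs themselves.
      assert(UnpackNativePc(PackNativePc(64, Isa::kArm64), Isa::kArm64) == 64);
    }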
@@ -296,6 +301,7 @@
ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
+ out.WriteVarint(packed_code_size_);
out.WriteVarint(packed_frame_size_);
out.WriteVarint(core_spill_mask_);
out.WriteVarint(fp_spill_mask_);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 01c6bf9..164e902 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -62,7 +62,7 @@
size_t core_spill_mask,
size_t fp_spill_mask,
uint32_t num_dex_registers);
- void EndMethod();
+ void EndMethod(size_t code_size);
void BeginStackMapEntry(uint32_t dex_pc,
uint32_t native_pc_offset,
@@ -99,6 +99,7 @@
ScopedArenaAllocator* allocator_;
const InstructionSet instruction_set_;
+ uint32_t packed_code_size_ = 0;
uint32_t packed_frame_size_ = 0;
uint32_t core_spill_mask_ = 0;
uint32_t fp_spill_mask_ = 0;
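With the signature change, the stream lifecycle a backend follows now threads the code size through the final call. An illustrative outline only (allocator construction and real argument values elided; `stream` stands for a fully constructed StackMapStream, as in exception_test.cc above):

    stream.BeginMethod(frame_size_in_bytes, core_spill_mask, fp_spill_mask,
                       num_dex_registers);
    stream.BeginStackMapEntry(dex_pc, native_pc_offset);
    stream.EndStackMapEntry();
    stream.EndMethod(code_size);  // New: code size supplied when the method ends.
    ScopedArenaVector<uint8_t> encoded = stream.Encode();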
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index d28f09f..cbd844f 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -61,7 +61,7 @@
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Short location.
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(64 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo code_info(memory.data());
@@ -147,7 +147,7 @@
stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1); // Short location.
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(256 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo code_info(memory.data());
@@ -317,7 +317,7 @@
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(64 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo code_info(memory.data());
@@ -372,7 +372,7 @@
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(64 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo code_info(memory.data());
@@ -431,7 +431,7 @@
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(66 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo ci(memory.data());
@@ -479,7 +479,7 @@
stream.AddDexRegisterEntry(Kind::kNone, 0);
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(68 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo code_info(memory.data());
@@ -578,7 +578,7 @@
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(78 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo ci(memory.data());
@@ -722,7 +722,7 @@
stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask);
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(8 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
CodeInfo code_info(memory.data());
@@ -746,7 +746,7 @@
stream.AddDexRegisterEntry(Kind::kConstant, -2);
stream.EndStackMapEntry();
- stream.EndMethod();
+ stream.EndMethod(64 * kPcAlign);
ScopedArenaVector<uint8_t> memory = stream.Encode();
std::vector<uint8_t> out;
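The sizes passed in these tests are not arbitrary: EndMethod now DCHECKs that every stack map's packed native PC is <= the packed code size, so each test must pass a code size at least as large as its highest native PC offset. A minimal sketch in the style of the fixtures above (the allocator and `stream` setup are assumed from the existing tests):

    stream.BeginStackMapEntry(/*dex_pc=*/ 0, /*native_pc_offset=*/ 64 * kPcAlign);
    stream.EndStackMapEntry();
    stream.EndMethod(/*code_size=*/ 64 * kPcAlign);  // Must cover the last PC.
    ScopedArenaVector<uint8_t> memory = stream.Encode();
    CodeInfo code_info(memory.data());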
diff --git a/runtime/oat.h b/runtime/oat.h
index 88238d9..b824729 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Partial boot image.
- static constexpr uint8_t kOatVersion[] = { '1', '6', '6', '\0' };
+ // Last oat version changed reason: Add code size to CodeInfo.
+ static constexpr uint8_t kOatVersion[] = { '1', '6', '7', '\0' };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 8798c69..6c123c4 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -35,6 +35,8 @@
uint32_t code_size)
: vmap_table_offset_(vmap_table_offset),
code_size_(code_size) {
+ DCHECK_NE(vmap_table_offset, 0u);
+ DCHECK_NE(code_size, 0u);
}
static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
@@ -58,7 +60,7 @@
}
bool IsOptimized() const {
- return GetCodeSize() != 0 && vmap_table_offset_ != 0;
+ return (code_size_ & kCodeSizeMask) != 0 && vmap_table_offset_ != 0;
}
const uint8_t* GetOptimizedCodeInfoPtr() const {
@@ -76,7 +78,11 @@
}
uint32_t GetCodeSize() const {
- return code_size_ & kCodeSizeMask;
+ DCHECK(IsOptimized());
+ size_t code_size1 = code_size_ & kCodeSizeMask;
+ size_t code_size2 = CodeInfo::DecodeCodeSize(GetOptimizedCodeInfoPtr());
+ DCHECK_EQ(code_size1, code_size2);
+ return code_size2;
}
const uint32_t* GetCodeSizeAddr() const {
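This accessor is the cross-check named in the commit's Test: line: the header's code_size_ field and the CodeInfo's new leading varint must agree on every lookup. Once the follow-up CL reclaims the duplicated space, a plausible shape for the accessor (an assumption about the follow-up, not its actual code) would read the size solely from CodeInfo:

    // Hypothetical post-cleanup accessor; the real follow-up CL may differ.
    uint32_t GetCodeSize() const {
      DCHECK(IsOptimized());
      return CodeInfo::DecodeCodeSize(GetOptimizedCodeInfoPtr());
    }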
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 62dec15..5d30b77 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -227,6 +227,7 @@
bool verbose,
InstructionSet instruction_set) const {
vios->Stream() << "CodeInfo BitSize=" << size_in_bits_
+ << " CodeSize:" << StackMap::UnpackNativePc(packed_code_size_, instruction_set)
<< " FrameSize:" << packed_frame_size_ * kStackAlignment
<< " CoreSpillMask:" << std::hex << core_spill_mask_
<< " FpSpillMask:" << std::hex << fp_spill_mask_
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 87133cf..59da923 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -438,8 +438,15 @@
// Accumulate code info size statistics into the given Stats tree.
static void CollectSizeStats(const uint8_t* code_info, /*out*/ Stats* parent);
+ ALWAYS_INLINE static size_t DecodeCodeSize(const uint8_t* data,
+ InstructionSet isa = kRuntimeISA) {
+ uint32_t packed_code_size = BitMemoryReader(data).ReadVarint();
+ return StackMap::UnpackNativePc(packed_code_size, isa);
+ }
+
ALWAYS_INLINE static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data) {
BitMemoryReader reader(data);
+ reader.ReadVarint(); // Skip code size.
return QuickMethodFrameInfo(
reader.ReadVarint() * kStackAlignment, // Decode packed_frame_size_ and unpack.
reader.ReadVarint(), // core_spill_mask_.
@@ -461,6 +468,7 @@
// Invokes the callback with member pointer of each header field.
template<typename Callback>
ALWAYS_INLINE static void ForEachHeaderField(Callback callback) {
+ callback(&CodeInfo::packed_code_size_);
callback(&CodeInfo::packed_frame_size_);
callback(&CodeInfo::core_spill_mask_);
callback(&CodeInfo::fp_spill_mask_);
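Adding the callback here is what actually serializes the code size first: ForEachHeaderField is the single field list that drives both Encode and the readers, so DecodeCodeSize and DecodeFrameInfo above can hard-code the order. A simplified, self-contained illustration of the member-pointer pattern (names are placeholders, not ART's types):

    #include <cstdint>
    #include <vector>

    struct Header {
      uint32_t packed_code_size_ = 0;   // New field, serialized first.
      uint32_t packed_frame_size_ = 0;

      // One field list keeps writer and reader order in sync.
      template <typename Callback>
      static void ForEachField(Callback callback) {
        callback(&Header::packed_code_size_);
        callback(&Header::packed_frame_size_);
      }
    };

    void Write(const Header& h, std::vector<uint32_t>& out) {
      Header::ForEachField([&](auto member) { out.push_back(h.*member); });
    }

    Header Read(const std::vector<uint32_t>& in) {
      Header h;
      size_t i = 0;
      Header::ForEachField([&](auto member) { h.*member = in[i++]; });
      return h;
    }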
@@ -486,6 +494,7 @@
callback(&CodeInfo::dex_register_catalog_);
}
+ uint32_t packed_code_size_ = 0; // The size of the native PC range.
uint32_t packed_frame_size_ = 0; // Frame size in kStackAlignment units.
uint32_t core_spill_mask_ = 0;
uint32_t fp_spill_mask_ = 0;