author    | 2019-02-13 17:27:17 +0000
committer | 2019-02-14 09:43:00 +0000
commit    | e1412dacbf1d2a809bd1fca658cc8cb8f61f8ee6 (patch)
tree      | bb3f56ecefe08b66f7a330a02caef0b089a0c2af /compiler/optimizing
parent    | 8f20a23a35fa6fbe4dcb4ff70268a24dc7fb2a24 (diff)
Revert^2 "Add code size to CodeInfo"
This temporarily adds 0.25% to oat file size.
The space will be reclaimed in a follow-up CL.
This reverts commit 8f20a23a35fa6fbe4dcb4ff70268a24dc7fb2a24.
Reason for revert: Reland as-is after CL/903819
Bug: 123510633
Test: DCHECK compare the two stored code sizes.
Change-Id: Ia3ab31c208948f4996188764fcdcba13d9977d19
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- | compiler/optimizing/code_generator.cc      |  2
-rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 13
-rw-r--r-- | compiler/optimizing/stack_map_stream.cc    |  8
-rw-r--r-- | compiler/optimizing/stack_map_stream.h     |  3
-rw-r--r-- | compiler/optimizing/stack_map_test.cc      | 18
5 files changed, 26 insertions(+), 18 deletions(-)
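Before the diff itself, here is the shape of the change: EndMethod() now receives the method's code size, packs it the same way native PCs are packed (StackMap::PackNativePc), and Encode() emits it as the leading varint of the CodeInfo header, presumably the source of the temporary ~0.25% oat-size cost mentioned above. The snippet below is a minimal standalone sketch of that idea only; kPcAlign is a stand-in alignment constant and the LEB128-style WriteVarint is an illustration, not ART's actual BitMemoryWriter encoding.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for the ISA instruction alignment (e.g. 2 for Thumb2, 4 for ARM64).
constexpr uint32_t kPcAlign = 4;

// Mirrors the spirit of StackMap::PackNativePc(): store PCs and sizes in alignment units.
uint32_t PackNativePc(uint32_t native_pc) {
  assert(native_pc % kPcAlign == 0);
  return native_pc / kPcAlign;
}

// Toy LEB128-style varint writer; ART's BitMemoryWriter::WriteVarint differs in detail.
void WriteVarint(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    out->push_back(byte | (value != 0 ? 0x80 : 0));
  } while (value != 0);
}

int main() {
  std::vector<uint8_t> header;
  uint32_t code_size = 256 * kPcAlign;            // e.g. EndMethod(256 * kPcAlign) in the tests below
  WriteVarint(&header, PackNativePc(code_size));  // new leading field: packed code size
  WriteVarint(&header, /* packed_frame_size_ */ 16);
  WriteVarint(&header, /* core_spill_mask_ */ 0x3);
  WriteVarint(&header, /* fp_spill_mask_ */ 0x0);
  std::printf("header is %zu bytes; the packed code size accounts for 2 of them\n", header.size());
  return 0;
}

The point of the sketch is only that one extra small varint per compiled method costs a byte or two, which is why the overhead is modest and can be reclaimed later.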
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9e2f5cd508..122f27b0e0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -440,7 +440,7 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
   // Finalize instructions in assember;
   Finalize(allocator);
 
-  GetStackMapStream()->EndMethod();
+  GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
 }
 
 void CodeGenerator::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index e8f8d32525..4f43b71fde 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1171,7 +1171,8 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
 }
 
 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
-                                                    const JniCompiledMethod& jni_compiled_method) {
+                                                    const JniCompiledMethod& jni_compiled_method,
+                                                    size_t code_size) {
   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
   // to stay clear of the frame size limit.
   std::unique_ptr<StackMapStream> stack_map_stream(
@@ -1181,7 +1182,7 @@ static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* alloca
       jni_compiled_method.GetCoreSpillMask(),
       jni_compiled_method.GetFpSpillMask(),
       /* num_dex_registers= */ 0);
-  stack_map_stream->EndMethod();
+  stack_map_stream->EndMethod(code_size);
   return stack_map_stream->Encode();
 }
 
@@ -1239,8 +1240,8 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
   ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
-                                                           jni_compiled_method);
+  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+      &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
   return CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver()->GetCompiledMethodStorage(),
       jni_compiled_method.GetInstructionSet(),
@@ -1290,8 +1291,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and it does not fit into this frame, so we need helper method.
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
-                                                             jni_compiled_method);
+    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
     uint8_t* stack_map_data = nullptr;
     uint8_t* roots_data = nullptr;
     uint32_t data_size = code_cache->ReserveData(self,
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 60ca61c133..e87f3c80ad 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -54,9 +54,10 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
   num_dex_registers_ = num_dex_registers;
 }
 
-void StackMapStream::EndMethod() {
+void StackMapStream::EndMethod(size_t code_size) {
   DCHECK(in_method_) << "Mismatched Begin/End calls";
   in_method_ = false;
+  packed_code_size_ = StackMap::PackNativePc(code_size, instruction_set_);
 
   // Read the stack masks now. The compiler might have updated them.
   for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
@@ -66,6 +67,10 @@ void StackMapStream::EndMethod() {
           stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
     }
   }
+
+  for (size_t i = 0; i < stack_maps_.size(); i++) {
+    DCHECK_LE(stack_maps_[i][StackMap::kPackedNativePc], packed_code_size_);
+  }
 }
 
 void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -296,6 +301,7 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() {
 
   ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
   BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
+  out.WriteVarint(packed_code_size_);
   out.WriteVarint(packed_frame_size_);
   out.WriteVarint(core_spill_mask_);
   out.WriteVarint(fp_spill_mask_);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 01c6bf9e0e..164e9021b2 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -62,7 +62,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
                    size_t core_spill_mask,
                    size_t fp_spill_mask,
                    uint32_t num_dex_registers);
-  void EndMethod();
+  void EndMethod(size_t code_size);
 
   void BeginStackMapEntry(uint32_t dex_pc,
                           uint32_t native_pc_offset,
@@ -99,6 +99,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
 
   ScopedArenaAllocator* allocator_;
   const InstructionSet instruction_set_;
+  uint32_t packed_code_size_ = 0;
   uint32_t packed_frame_size_ = 0;
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index d28f09fbba..cbd844f2cf 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -61,7 +61,7 @@ TEST(StackMapTest, Test1) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);   // Short location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -147,7 +147,7 @@ TEST(StackMapTest, Test2) {
   stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1);  // Short location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(256 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -317,7 +317,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   stream.EndInlineInfoEntry();
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -372,7 +372,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);  // Large location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -431,7 +431,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);  // Large location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(66 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo ci(memory.data());
@@ -479,7 +479,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
   stream.AddDexRegisterEntry(Kind::kNone, 0);
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(68 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -578,7 +578,7 @@ TEST(StackMapTest, InlineTest) {
 
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(78 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo ci(memory.data());
@@ -722,7 +722,7 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
   stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask);
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(8 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -746,7 +746,7 @@ TEST(StackMapTest, TestDedupeBitTables) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);
   stream.EndStackMapEntry();
 
-  stream.EndMethod();
+  stream.EndMethod(64 * kPcAlign);
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   std::vector<uint8_t> out;
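The new loop at the end of EndMethod() uses the recorded code size as a sanity bound: every stack map's packed native PC must not exceed the packed code size. Below is a minimal standalone sketch of that invariant with hypothetical values; it does not use ART's StackMapStream and the helper names are stand-ins.

#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kPcAlign = 4;  // stand-in for the ISA instruction alignment

uint32_t PackNativePc(uint32_t native_pc) { return native_pc / kPcAlign; }

int main() {
  // Hypothetical method: 64 instruction slots of code, stack maps at PCs 0 and 8 * kPcAlign.
  uint32_t packed_code_size = PackNativePc(64 * kPcAlign);
  std::vector<uint32_t> packed_native_pcs = {PackNativePc(0 * kPcAlign), PackNativePc(8 * kPcAlign)};

  // Mirrors DCHECK_LE(stack_maps_[i][StackMap::kPackedNativePc], packed_code_size_).
  for (uint32_t pc : packed_native_pcs) {
    assert(pc <= packed_code_size);
  }
  return 0;
}

On the producer side, as the test diff shows, callers simply pass the size when closing the method, e.g. stream.EndMethod(64 * kPcAlign) instead of stream.EndMethod().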