From e42a4b95eed312e6f7019645f4c66b2d77254433 Mon Sep 17 00:00:00 2001 From: David Srbecky Date: Sun, 26 May 2019 00:10:25 +0100 Subject: Optimize stack maps: add fast path for no inline info. Consumers of CodeInfo can skip significant chunks of work if they can quickly determine that method has no inlining. Store this fact as a flag bit at the start of code info. This changes binary format and adds <0.1% to oat size. I added the extra flag field as the simplest solution for now, although I would like to use it for more things in the future. (e.g. store the special cases of empty/deduped tables in it) This improves app startup by 0.4% (maps,speed). PMD on golem seems to get around 15% faster. Bug: 133257467 Test: ./art/test.py -b --host --64 Change-Id: Ia498a31bafc74b51cc95b8c70cf1da4b0e3d894e --- compiler/optimizing/stack_map_stream.cc | 2 ++ 1 file changed, 2 insertions(+) (limited to 'compiler/optimizing/stack_map_stream.cc') diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc index 8c3664312d..e21e21cdf3 100644 --- a/compiler/optimizing/stack_map_stream.cc +++ b/compiler/optimizing/stack_map_stream.cc @@ -184,6 +184,7 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method, in_inline_info_ = true; DCHECK_EQ(expected_num_dex_registers_, current_dex_registers_.size()); + flags_ |= CodeInfo::kHasInlineInfo; expected_num_dex_registers_ += num_dex_registers; BitTableBuilder<InlineInfo>::Entry entry; @@ -305,6 +306,7 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() { ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream)); BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer); + out.WriteVarint(flags_); out.WriteVarint(packed_frame_size_); out.WriteVarint(core_spill_mask_); out.WriteVarint(fp_spill_mask_); -- cgit v1.2.3-59-g8ed1b