Deduplicate stack maps at the BitTable level.

Make it possible to share BitTables between CodeInfos: each encoded
table is now prefixed with a flag bit which records whether the table
data is stored inline or has been deduplicated against an identical,
earlier table.

This saves 1% of .oat file size.
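
The encoder below always writes the flag as false; CodeInfo::Dedupe
can then rewrite it for tables that repeat. As a rough sketch (not
part of this change), a decoder honouring the flag could look as
follows. DecodeTable is hypothetical, and the reader calls (ReadBit,
ReadVarint, NumberOfReadBits) are assumptions rather than the exact
BitMemoryReader API:

  // Hypothetical counterpart of EncodeTable(); sketch only.
  template<typename Reader, typename Table>
  static void DecodeTable(Table& table, Reader& in, const uint8_t* data) {
    bool is_deduped = in.ReadBit();
    if (is_deduped) {
      // Assumed layout: a deduped table stores only a backward bit
      // distance to an identical table that was encoded earlier.
      size_t backward_distance = in.ReadVarint();
      size_t bit_offset = in.NumberOfReadBits() - backward_distance;
      Reader old_reader(data, bit_offset);
      table.Decode(old_reader);
    } else {
      table.Decode(in);  // The common case: table data follows inline.
    }
  }
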
Test: test-art-host-gtest
Change-Id: I14172cba6b65e734b94f8c232f24eeee1fc67113
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index da6c711..e1b6575 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -292,6 +292,14 @@
   }
 }
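+// Encode the given BitTable, prefixed with a single flag bit so that
+// CodeInfo::Dedupe can later mark it as a duplicate of an earlier table.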
+template<typename Writer, typename Builder>
+ALWAYS_INLINE static void EncodeTable(Writer& out, const Builder& bit_table) {
+  out.WriteBit(false);  // Is not deduped.
+  bit_table.Encode(out);
+}
+
 size_t StackMapStream::PrepareForFillIn() {
   DCHECK_EQ(out_.size(), 0u);
@@ -309,13 +317,13 @@
   EncodeUnsignedLeb128(&out_, fp_spill_mask_);
   EncodeUnsignedLeb128(&out_, num_dex_registers_);
   BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&out_, out_.size() * kBitsPerByte);
-  stack_maps_.Encode(out);
-  inline_infos_.Encode(out);
-  register_masks_.Encode(out);
-  stack_masks_.Encode(out);
-  dex_register_masks_.Encode(out);
-  dex_register_maps_.Encode(out);
-  dex_register_catalog_.Encode(out);
+  EncodeTable(out, stack_maps_);
+  EncodeTable(out, inline_infos_);
+  EncodeTable(out, register_masks_);
+  EncodeTable(out, stack_masks_);
+  EncodeTable(out, dex_register_masks_);
+  EncodeTable(out, dex_register_maps_);
+  EncodeTable(out, dex_register_catalog_);
   return out_.size();
 }
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 42f9789..16a9216 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -758,4 +758,52 @@
             stack_map2.GetStackMaskIndex());
 }
 
+TEST(StackMapTest, TestDedupeBitTables) {
+  MallocArenaPool pool;
+  ArenaStack arena_stack(&pool);
+  ScopedArenaAllocator allocator(&arena_stack);
+  StackMapStream stream(&allocator, kRuntimeISA);
+  stream.BeginMethod(32, 0, 0, 2);
+
+  stream.BeginStackMapEntry(0, 64 * kPcAlign);
+  stream.AddDexRegisterEntry(Kind::kInStack, 0);
+  stream.AddDexRegisterEntry(Kind::kConstant, -2);
+  stream.EndStackMapEntry();
+
+  stream.EndMethod();
+  std::vector<uint8_t> memory(stream.PrepareForFillIn());
+  MemoryRegion region(memory.data(), memory.size());
+  stream.FillInCodeInfo(region);
+
+  std::vector<uint8_t> out;
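+  // Dedupe the same CodeInfo into 'out' twice; the shared map lets the
+  // second copy reuse the BitTables already written for the first.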
+  CodeInfo::DedupeMap dedupe_map;
+  size_t deduped1 = CodeInfo::Dedupe(&out, memory.data(), &dedupe_map);
+  size_t deduped2 = CodeInfo::Dedupe(&out, memory.data(), &dedupe_map);
+
+  for (size_t deduped : { deduped1, deduped2 }) {
+    CodeInfo code_info(out.data() + deduped);
+    ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+
+    StackMap stack_map = code_info.GetStackMapAt(0);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64 * kPcAlign)));
+    ASSERT_EQ(0u, stack_map.GetDexPc());
+    ASSERT_EQ(64u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA));
+
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
+    DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+
+    ASSERT_EQ(Kind::kInStack, dex_register_map[0].GetKind());
+    ASSERT_EQ(Kind::kConstant, dex_register_map[1].GetKind());
+    ASSERT_EQ(0, dex_register_map[0].GetStackOffsetInBytes());
+    ASSERT_EQ(-2, dex_register_map[1].GetConstant());
+  }
+
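+  // Both copies must decode to identical content, yet together they take
+  // less than twice the size of a single encoding.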
+  ASSERT_GT(memory.size() * 2, out.size());
+}
+
 }  // namespace art