author David Srbecky <dsrbecky@google.com> 2018-05-24 13:55:52 +0100
committer David Srbecky <dsrbecky@google.com> 2018-05-25 16:17:09 +0100
commit	dd966bc5b30aac068ee25d8f9bdb18a53904e312 (patch)
tree	d8cc9a393e2e6a86fa5b92b0497706c2d5639dfc /compiler/optimizing/stack_map_stream.cc
parent	64e8175b611df79b617892435a26fb2db67bfa85 (diff)
Change the BitTableBuilder API to be POD based.
The compiler currently keeps two copies of all stack map intermediate data in memory at the same time. Change the BitTableBuilder so that it can store the intermediate data directly (e.g. StackMapEntry), which saves that space and avoids the copying code complexity. It will also make it possible to deduplicate data as we go, saving further memory and code complexity.

Test: test-art-host-gtest-stack_map_test
Change-Id: I660fddf0629422ae0d2588333854d8fdf1e1bd0f
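To make the intent of the message concrete, here is a small self-contained sketch of the idea. PodTableBuilder below is a hypothetical stand-in invented for illustration, not ART's BitTableBuilder (which takes an arena allocator and bit-packs its rows when Encode() is called); the point it mirrors is that rows are plain PODs such as std::array<uint32_t, N>, so the builder can hold the intermediate entries itself rather than a second converted copy, and equal rows could later be deduplicated by value.

  #include <array>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Hypothetical stand-in for illustration only, not ART's BitTableBuilder:
  // it shows the POD-row idea, not the bit-packed encoding.
  template <typename Row>
  class PodTableBuilder {
   public:
    // Stores the whole row by value and returns its index, so callers no
    // longer need to keep a second, separately-converted copy of the entry.
    uint32_t Add(const Row& row) {
      rows_.push_back(row);
      return static_cast<uint32_t>(rows_.size() - 1);
    }
    size_t size() const { return rows_.size(); }

   private:
    std::vector<Row> rows_;
  };

  int main() {
    // A row with three uint32_t columns, loosely mirroring the
    // std::array<uint32_t, InvokeInfo::kCount> rows used in the patch.
    PodTableBuilder<std::array<uint32_t, 3>> builder;
    uint32_t index = builder.Add({1u, 2u, 3u});
    return (index == 0 && builder.size() == 1) ? 0 : 1;
  }

Parameterizing the builder on the row type rather than on a column count is the same direction the diff below takes, with Add(row) replacing the variadic AddRow(...) calls.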
Diffstat (limited to 'compiler/optimizing/stack_map_stream.cc')
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 39
1 file changed, 19 insertions, 20 deletions
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index aa28c8b500..c6e375a1b2 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -199,9 +199,6 @@ static MemoryRegion EncodeMemoryRegion(Vector* out, size_t* bit_offset, uint32_t
return region;
}
-template<uint32_t NumColumns>
-using ScopedBitTableBuilder = BitTableBuilder<NumColumns, ScopedArenaAllocatorAdapter<uint32_t>>;
-
size_t StackMapStream::PrepareForFillIn() {
size_t bit_offset = 0;
out_.clear();
@@ -258,20 +255,21 @@ size_t StackMapStream::PrepareForFillIn() {
   DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
 
   // Write stack maps.
-  ScopedArenaAllocatorAdapter<void> adapter = allocator_->Adapter(kArenaAllocStackMapStream);
-  ScopedBitTableBuilder<StackMap::Field::kCount> stack_map_builder((adapter));
-  ScopedBitTableBuilder<InvokeInfo::Field::kCount> invoke_info_builder((adapter));
-  ScopedBitTableBuilder<InlineInfo::Field::kCount> inline_info_builder((adapter));
+  BitTableBuilder<std::array<uint32_t, StackMap::kCount>> stack_map_builder(allocator_);
+  BitTableBuilder<std::array<uint32_t, InvokeInfo::kCount>> invoke_info_builder(allocator_);
+  BitTableBuilder<std::array<uint32_t, InlineInfo::kCount>> inline_info_builder(allocator_);
   for (const StackMapEntry& entry : stack_maps_) {
     if (entry.dex_method_index != dex::kDexNoIndex) {
-      invoke_info_builder.AddRow(
+      std::array<uint32_t, InvokeInfo::kCount> invoke_info_entry {
           entry.native_pc_code_offset.CompressedValue(),
           entry.invoke_type,
-          entry.dex_method_index_idx);
+          entry.dex_method_index_idx
+      };
+      invoke_info_builder.Add(invoke_info_entry);
     }
 
     // Set the inlining info.
-    uint32_t inline_info_index = StackMap::kNoValue;
+    uint32_t inline_info_index = inline_info_builder.size();
     DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
     for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
       InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
@@ -281,32 +279,33 @@ size_t StackMapStream::PrepareForFillIn() {
         method_index_idx = High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method));
         extra_data = Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method));
       }
-      uint32_t index = inline_info_builder.AddRow(
+      std::array<uint32_t, InlineInfo::kCount> inline_info_entry {
           (depth == entry.inlining_depth - 1) ? InlineInfo::kLast : InlineInfo::kMore,
           method_index_idx,
           inline_entry.dex_pc,
           extra_data,
-          dex_register_entries_[inline_entry.dex_register_map_index].offset);
-      if (depth == 0) {
-        inline_info_index = index;
-      }
+          dex_register_entries_[inline_entry.dex_register_map_index].offset,
+      };
+      inline_info_builder.Add(inline_info_entry);
     }
-    stack_map_builder.AddRow(
+    std::array<uint32_t, StackMap::kCount> stack_map_entry {
         entry.native_pc_code_offset.CompressedValue(),
         entry.dex_pc,
         dex_register_entries_[entry.dex_register_map_index].offset,
-        inline_info_index,
+        entry.inlining_depth != 0 ? inline_info_index : InlineInfo::kNoValue,
         entry.register_mask_index,
-        entry.stack_mask_index);
+        entry.stack_mask_index,
+    };
+    stack_map_builder.Add(stack_map_entry);
   }
   stack_map_builder.Encode(&out_, &bit_offset);
   invoke_info_builder.Encode(&out_, &bit_offset);
   inline_info_builder.Encode(&out_, &bit_offset);
 
   // Write register masks table.
-  ScopedBitTableBuilder<1> register_mask_builder((adapter));
+  BitTableBuilder<uint32_t> register_mask_builder(allocator_);
   for (size_t i = 0; i < num_register_masks; ++i) {
-    register_mask_builder.AddRow(register_masks_[i]);
+    register_mask_builder.Add(register_masks_[i]);
   }
   register_mask_builder.Encode(&out_, &bit_offset);