ART: Compress LengthPrefixedArray on 32-bit targets.
Previously, on 32-bit targets the LengthPrefixedArray<ArtMethod>
contained a 64-bit length field followed by ArtMethod elements
whose size is a multiple of 4 but not necessarily 8.
Consequently, an odd-length array broke the alignment of the
following array, whose 64-bit length field would be placed at
an unaligned address.
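
For illustration, a minimal standalone sketch of the old
layout; the 20-byte element size is hypothetical (the real
ArtMethod size depends on the build), but any size that is
a multiple of 4 and not of 8 shows the same problem:

    #include <cstddef>
    #include <cstdint>

    struct OldHeader { uint64_t length_; };  // 8 bytes, wants 8-byte alignment.

    constexpr size_t kElementSize = 20u;  // Multiple of 4, not of 8 (assumed).
    // One header plus an odd number of elements: 8 + 1 * 20 = 28 bytes.
    constexpr size_t kOldArraySize = sizeof(OldHeader) + 1u * kElementSize;
    // A following array's 64-bit length field would start at offset 28,
    // which is not 8-byte aligned.
    static_assert(kOldArraySize % 8u != 0u, "Odd count breaks 8-byte alignment");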
To fix that, we make the length field 32-bit and explicitly
pass the alignment information to the LengthPrefixedArray.
This also makes the 32-bit boot image a bit smaller.
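
Roughly, the compressed layout computes sizes as in the
sketch below; this is a simplified stand-in for the actual
LengthPrefixedArray, with RoundUpTo standing in for the
runtime's RoundUp helper:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t RoundUpTo(size_t x, size_t n) {  // n: power of 2.
      return (x + n - 1u) & ~(n - 1u);
    }

    // Element 0 starts at the 32-bit length rounded up to the element
    // alignment, so 4-byte-aligned elements pay no header padding.
    constexpr size_t OffsetOfElement0(size_t alignment) {
      return RoundUpTo(sizeof(uint32_t), alignment);
    }

    constexpr size_t ComputeSize(size_t num, size_t element_size,
                                 size_t alignment) {
      return OffsetOfElement0(alignment) + num * element_size;
    }

    // On a 32-bit target (alignment 4) the header shrinks from 8 to 4
    // bytes; with alignment 8 (64-bit targets) it stays 8 bytes, which
    // is why the method section only shrinks on 32-bit targets.
    static_assert(ComputeSize(0u, 20u, 4u) == 4u, "4-byte header");
    static_assert(ComputeSize(0u, 24u, 8u) == 8u, "8-byte header on 64-bit");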
On Nexus 5, AOSP, ToT, the field section is 11528B smaller
and the method section is 21036B smaller. 64-bit targets
should see the same savings for the field section but no
difference for the method section.
Change-Id: I3e03e7b94129025c8a1c117c27645a34dec516d2
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a03ff75..c4fe2cb 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -244,8 +244,8 @@
DCHECK(object != nullptr);
DCHECK_NE(image_objects_offset_begin_, 0u);
- size_t previous_bin_sizes = bin_slot_previous_sizes_[bin_slot.GetBin()];
- size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
+ size_t bin_slot_offset = bin_slot_offsets_[bin_slot.GetBin()];
+ size_t new_offset = bin_slot_offset + bin_slot.GetIndex();
DCHECK_ALIGNED(new_offset, kObjectAlignment);
SetImageOffset(object, new_offset);
@@ -866,8 +866,10 @@
}
bool any_dirty = false;
size_t count = 0;
+ const size_t method_alignment = ArtMethod::ObjectAlignment(target_ptr_size_);
const size_t method_size = ArtMethod::ObjectSize(target_ptr_size_);
- auto iteration_range = MakeIterationRangeFromLengthPrefixedArray(array, method_size);
+ auto iteration_range =
+ MakeIterationRangeFromLengthPrefixedArray(array, method_size, method_alignment);
for (auto& m : iteration_range) {
any_dirty = any_dirty || WillMethodBeDirty(&m);
++count;
@@ -876,7 +878,9 @@
kNativeObjectRelocationTypeArtMethodClean;
Bin bin_type = BinTypeForNativeRelocationType(type);
// Forward the entire array at once, but header first.
- const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0, method_size);
+ const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
+ method_size,
+ method_alignment);
auto it = native_object_relocations_.find(array);
CHECK(it == native_object_relocations_.end()) << "Method array " << array
<< " already forwarded";
@@ -972,9 +976,10 @@
size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
native_object_relocations_.emplace(&image_method_array_,
NativeObjectRelocation { offset, image_method_type });
+ size_t method_alignment = ArtMethod::ObjectAlignment(target_ptr_size_);
const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
- 0, ArtMethod::ObjectSize(target_ptr_size_));
- CHECK_ALIGNED(array_size, 8u);
+ 0, ArtMethod::ObjectSize(target_ptr_size_), method_alignment);
+ CHECK_ALIGNED_PARAM(array_size, method_alignment);
offset += array_size;
for (auto* m : image_methods_) {
CHECK(m != nullptr);
@@ -982,13 +987,21 @@
AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean);
}
- // Calculate cumulative bin slot sizes.
- size_t previous_sizes = 0u;
+ // Calculate bin slot offsets.
+ size_t bin_offset = image_objects_offset_begin_;
for (size_t i = 0; i != kBinSize; ++i) {
- bin_slot_previous_sizes_[i] = previous_sizes;
- previous_sizes += bin_slot_sizes_[i];
+ bin_slot_offsets_[i] = bin_offset;
+ bin_offset += bin_slot_sizes_[i];
+ if (i == kBinArtField) {
+ static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
+ static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
+ DCHECK_ALIGNED(bin_offset, 4u);
+ DCHECK(method_alignment == 4u || method_alignment == 8u);
+ bin_offset = RoundUp(bin_offset, method_alignment);
+ }
}
- DCHECK_EQ(previous_sizes, GetBinSizeSum());
+ // NOTE: There may be additional padding between the bin slots and the intern table.
+
DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
// Transform each object's bin slot into an offset which will be used to do the final copy.
@@ -1002,7 +1015,7 @@
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
Bin bin_type = BinTypeForNativeRelocationType(relocation.type);
- relocation.offset += image_objects_offset_begin_ + bin_slot_previous_sizes_[bin_type];
+ relocation.offset += bin_slot_offsets_[bin_type];
}
// Calculate how big the intern table will be after being serialized.
@@ -1029,15 +1042,15 @@
// Add field section.
auto* field_section = &sections[ImageHeader::kSectionArtFields];
*field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
- CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtField],
- field_section->Offset());
+ CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
cur_pos = field_section->End();
+ // Round up to the alignment required by the method section.
+ cur_pos = RoundUp(cur_pos, ArtMethod::ObjectAlignment(target_ptr_size_));
// Add method section.
auto* methods_section = &sections[ImageHeader::kSectionArtMethods];
*methods_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtMethodClean] +
bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean],
- methods_section->Offset());
+ CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
cur_pos = methods_section->End();
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
@@ -1135,7 +1148,10 @@
}
case kNativeObjectRelocationTypeArtMethodArrayClean:
case kNativeObjectRelocationTypeArtMethodArrayDirty: {
- memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0));
+ memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(
+ 0,
+ ArtMethod::ObjectSize(target_ptr_size_),
+ ArtMethod::ObjectAlignment(target_ptr_size_)));
break;
}
}