Diffstat (limited to 'compiler/image_writer.cc')
 compiler/image_writer.cc | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 12f477adb7..fa9f64c9a6 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -689,7 +689,7 @@ bool ImageWriter::AllocMemory() {
   for (ImageInfo& image_info : image_infos_) {
     ImageSection unused_sections[ImageHeader::kSectionCount];
     const size_t length = RoundUp(
-        image_info.CreateImageSections(unused_sections), kPageSize);
+        image_info.CreateImageSections(unused_sections, compile_app_image_), kPageSize);
     std::string error_msg;
     image_info.image_.reset(MemMap::MapAnonymous("image writer image",
@@ -1834,7 +1834,8 @@ void ImageWriter::CalculateNewObjectOffsets() {
     image_info.image_begin_ = global_image_begin_ + image_offset;
     image_info.image_offset_ = image_offset;
     ImageSection unused_sections[ImageHeader::kSectionCount];
-    image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
+    image_info.image_size_ =
+        RoundUp(image_info.CreateImageSections(unused_sections, compile_app_image_), kPageSize);
     // There should be no gaps until the next image.
     image_offset += image_info.image_size_;
   }
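
Both hunks above round the end of the section layout up to kPageSize, so every image occupies a whole number of pages and the next image can begin immediately after it ("no gaps"). For readers unfamiliar with the helper, here is a minimal standalone sketch of the power-of-two RoundUp used here, with a hypothetical 4 KiB page size standing in for ART's kPageSize:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins; ART defines kPageSize and RoundUp() in its own headers.
constexpr size_t kPageSize = 4096;

constexpr size_t RoundUp(size_t value, size_t alignment) {
  // Correct for power-of-two alignments, which is all this code uses.
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(RoundUp(5000, kPageSize) == 8192);  // Padded out to the next page boundary.
  assert(RoundUp(8192, kPageSize) == 8192);  // Already page-aligned; unchanged.
  return 0;
}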
@@ -1865,7 +1866,8 @@ void ImageWriter::CalculateNewObjectOffsets() {
   }
 }
 
-size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
+size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections,
+                                                   bool app_image) const {
   DCHECK(out_sections != nullptr);
 
   // Do not round up any sections here that are represented by the bins since it will break
@@ -1904,8 +1906,13 @@ size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) c
   ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
   *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray],
                                            bin_slot_sizes_[kBinDexCacheArray]);
-  // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
-  size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
+  // For boot image, round up to the page boundary to separate the interned strings and
+  // class table from the modifiable data. We shall mprotect() these pages read-only when
+  // we load the boot image. This is more than sufficient for the string table alignment,
+  // namely sizeof(uint64_t). See HashSet::WriteToMemory.
+  static_assert(IsAligned<sizeof(uint64_t)>(kPageSize), "String table alignment check.");
+  size_t cur_pos =
+      RoundUp(dex_cache_arrays_section->End(), app_image ? sizeof(uint64_t) : kPageSize);
   // Calculate the size of the interned strings.
   ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
   *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
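
This hunk is the core of the change: for the boot image, the interned strings and class table now start on a fresh page so the loader can mprotect() those pages read-only, while app images keep the cheaper sizeof(uint64_t) alignment that HashSet::WriteToMemory expects. The static_assert holds because any page size is a multiple of eight bytes, so page rounding also satisfies the table alignment. Below is a hedged sketch of both halves; the names TablesStart and ProtectTablesReadOnly are illustrative, not part of ART:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

constexpr size_t kPageSize = 4096;  // Hypothetical stand-in for ART's kPageSize.

constexpr size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Mirrors the new logic: app images align to uint64_t, boot images to a page.
size_t TablesStart(size_t dex_cache_arrays_end, bool app_image) {
  static_assert(kPageSize % sizeof(uint64_t) == 0,
                "Page rounding must also satisfy the string table alignment.");
  return RoundUp(dex_cache_arrays_end, app_image ? sizeof(uint64_t) : kPageSize);
}

// Illustrative load-time counterpart: once a boot image is mapped, the
// page-aligned table region can be write-protected in one call.
bool ProtectTablesReadOnly(void* tables_begin, size_t tables_size) {
  return mprotect(tables_begin, tables_size, PROT_READ) == 0;
}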
@@ -1928,7 +1935,7 @@ void ImageWriter::CreateHeader(size_t oat_index) {
   // Create the image sections.
   ImageSection sections[ImageHeader::kSectionCount];
-  const size_t image_end = image_info.CreateImageSections(sections);
+  const size_t image_end = image_info.CreateImageSections(sections, compile_app_image_);
 
   // Finally bitmap section.
   const size_t bitmap_bytes = image_info.image_bitmap_->Size();