-rw-r--r--  dex2oat/linker/image_writer.cc                21
-rw-r--r--  dex2oat/linker/multi_oat_relative_patcher.cc   2
-rw-r--r--  dex2oat/linker/oat_writer.cc                  16
-rw-r--r--  libartbase/base/globals.h                      6
-rw-r--r--  libelffile/elf/elf_builder.h                  31
-rw-r--r--  oatdump/oatdump.cc                             2
-rw-r--r--  runtime/elf_file.cc                            6
-rw-r--r--  runtime/gc/collector/immune_spaces.cc          3
-rw-r--r--  runtime/gc/heap.cc                             2
-rw-r--r--  runtime/gc/space/image_space.cc               31
-rw-r--r--  runtime/image.cc                              36
-rw-r--r--  runtime/oat.cc                                10
-rw-r--r--  runtime/oat_file.cc                            9
-rw-r--r--  runtime/runtime_image.cc                      15
14 files changed, 117 insertions, 73 deletions
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 8991ee1ce7..090103d38b 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -871,7 +871,7 @@ void ImageWriter::UpdateImageBinSlotOffset(mirror::Object* object,
bool ImageWriter::AllocMemory() {
for (ImageInfo& image_info : image_infos_) {
- const size_t length = RoundUp(image_info.CreateImageSections().first, kPageSize);
+ const size_t length = RoundUp(image_info.CreateImageSections().first, kElfSegmentAlignment);
std::string error_msg;
image_info.image_ = MemMap::MapAnonymous("image writer image",
@@ -2577,7 +2577,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
for (ImageInfo& image_info : image_infos_) {
image_info.image_begin_ = global_image_begin_ + image_offset;
image_info.image_offset_ = image_offset;
- image_info.image_size_ = RoundUp(image_info.CreateImageSections().first, kPageSize);
+ image_info.image_size_ = RoundUp(image_info.CreateImageSections().first, kElfSegmentAlignment);
// There should be no gaps until the next image.
image_offset += image_info.image_size_;
}
@@ -2713,7 +2713,7 @@ void ImageWriter::CreateHeader(size_t oat_index, size_t component_count) {
const uint8_t* oat_data_end = image_info.oat_data_begin_ + image_info.oat_size_;
uint32_t image_reservation_size = image_info.image_size_;
- DCHECK_ALIGNED(image_reservation_size, kPageSize);
+ DCHECK_ALIGNED(image_reservation_size, kElfSegmentAlignment);
uint32_t current_component_count = 1u;
if (compiler_options_.IsAppImage()) {
DCHECK_EQ(oat_index, 0u);
@@ -2724,9 +2724,9 @@ void ImageWriter::CreateHeader(size_t oat_index, size_t component_count) {
if (oat_index == 0u) {
const ImageInfo& last_info = image_infos_.back();
const uint8_t* end = last_info.oat_file_begin_ + last_info.oat_loaded_size_;
- DCHECK_ALIGNED(image_info.image_begin_, kPageSize);
- image_reservation_size =
- dchecked_integral_cast<uint32_t>(RoundUp(end - image_info.image_begin_, kPageSize));
+ DCHECK_ALIGNED(image_info.image_begin_, kElfSegmentAlignment);
+ image_reservation_size = dchecked_integral_cast<uint32_t>(
+ RoundUp(end - image_info.image_begin_, kElfSegmentAlignment));
current_component_count = component_count;
} else {
image_reservation_size = 0u;
@@ -2758,10 +2758,11 @@ void ImageWriter::CreateHeader(size_t oat_index, size_t component_count) {
// Finally bitmap section.
const size_t bitmap_bytes = image_info.image_bitmap_.Size();
auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
- // Bitmap section size doesn't have to be rounded up as it is located at the end of the file.
- // When mapped to memory, if the last page of the mapping is only partially filled with data,
- // the rest will be zero-filled.
- *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), bitmap_bytes);
+  // The offset of the bitmap section should be aligned to kElfSegmentAlignment so the section
+  // can be mapped from the file to memory. However, the section size doesn't have to be rounded
+  // up, as it is located at the end of the file: when the file is mapped to memory, a partially
+  // filled final page is zero-filled past the data.
+ *bitmap_section = ImageSection(RoundUp(image_end, kElfSegmentAlignment), bitmap_bytes);
if (VLOG_IS_ON(compiler)) {
LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
size_t idx = 0;
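With concrete numbers, the bitmap placement above works out as follows; a minimal standalone sketch (the values and the freestanding RoundUp are illustrative assumptions, not ART code):

```cpp
#include <cstddef>
#include <cstdio>

constexpr size_t kElfSegmentAlignment = 16384;  // assumed 16kB maximum page size
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

int main() {
  const size_t image_end = 0x123456;   // hypothetical end of the image data
  const size_t bitmap_bytes = 0x2345;  // hypothetical bitmap payload size
  // The offset is segment-aligned so the section can be mmap'ed straight from
  // the file on any supported page size; the size stays exact because the
  // section ends the file and the kernel zero-fills the tail of the last page.
  const size_t bitmap_offset = RoundUp(image_end, kElfSegmentAlignment);  // 0x124000
  printf("bitmap section: offset=%#zx size=%#zx\n", bitmap_offset, bitmap_bytes);
  return 0;
}
```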
diff --git a/dex2oat/linker/multi_oat_relative_patcher.cc b/dex2oat/linker/multi_oat_relative_patcher.cc
index a6797ffb8a..15f495e05e 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher.cc
@@ -50,7 +50,7 @@ MultiOatRelativePatcher::MultiOatRelativePatcher(InstructionSet instruction_set,
}
void MultiOatRelativePatcher::StartOatFile(uint32_t adjustment) {
- DCHECK_ALIGNED(adjustment, kPageSize);
+ DCHECK_ALIGNED(adjustment, kElfSegmentAlignment);
adjustment_ = adjustment;
start_size_code_alignment_ = relative_patcher_->CodeAlignmentSize();
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index e5a2ebaa74..6125ccbc22 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -621,7 +621,7 @@ void OatWriter::PrepareLayout(MultiOatRelativePatcher* relative_patcher) {
offset = InitDataBimgRelRoLayout(offset);
}
oat_size_ = offset; // .bss does not count towards oat_size_.
- bss_start_ = (bss_size_ != 0u) ? RoundUp(oat_size_, kPageSize) : 0u;
+ bss_start_ = (bss_size_ != 0u) ? RoundUp(oat_size_, kElfSegmentAlignment) : 0u;
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
@@ -2201,7 +2201,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
// calculate the offsets within OatHeader to executable code
size_t old_offset = offset;
// required to be on a new page boundary
- offset = RoundUp(offset, kPageSize);
+ offset = RoundUp(offset, kElfSegmentAlignment);
oat_header_->SetExecutableOffset(offset);
size_executable_offset_alignment_ = offset - old_offset;
InstructionSet instruction_set = compiler_options_.GetInstructionSet();
@@ -2315,7 +2315,7 @@ size_t OatWriter::InitDataBimgRelRoLayout(size_t offset) {
return offset;
}
- data_bimg_rel_ro_start_ = RoundUp(offset, kPageSize);
+ data_bimg_rel_ro_start_ = RoundUp(offset, kElfSegmentAlignment);
for (auto& entry : data_bimg_rel_ro_entries_) {
size_t& entry_offset = entry.second;
@@ -2522,7 +2522,7 @@ bool OatWriter::WriteDataBimgRelRo(OutputStream* out) {
// Record the padding before the .data.bimg.rel.ro section.
// Do not write anything, this zero-filled part was skipped (Seek()) when starting the section.
size_t code_end = GetOatHeader().GetExecutableOffset() + code_size_;
- DCHECK_EQ(RoundUp(code_end, kPageSize), relative_offset);
+ DCHECK_EQ(RoundUp(code_end, kElfSegmentAlignment), relative_offset);
size_t padding_size = relative_offset - code_end;
DCHECK_EQ(size_data_bimg_rel_ro_alignment_, 0u);
size_data_bimg_rel_ro_alignment_ = padding_size;
@@ -3228,7 +3228,7 @@ bool OatWriter::WriteDexFiles(File* file,
// Extend the file and include the full page at the end as we need to write
// additional data there and do not want to mmap that page twice.
- size_t page_aligned_size = RoundUp(vdex_size_with_dex_files, kPageSize);
+ size_t page_aligned_size = RoundUp(vdex_size_with_dex_files, kElfSegmentAlignment);
if (!use_existing_vdex) {
if (file->SetLength(page_aligned_size) != 0) {
PLOG(ERROR) << "Failed to resize vdex file " << file->GetPath();
@@ -3607,7 +3607,7 @@ bool OatWriter::FinishVdexFile(File* vdex_file, verifier::VerifierDeps* verifier
if (extract_dex_files_into_vdex_) {
DCHECK(vdex_begin != nullptr);
// Write data to the last already mmapped page of the vdex file.
- size_t mmapped_vdex_size = RoundUp(old_vdex_size, kPageSize);
+ size_t mmapped_vdex_size = RoundUp(old_vdex_size, kElfSegmentAlignment);
size_t first_chunk_size = std::min(buffer.size(), mmapped_vdex_size - old_vdex_size);
memcpy(vdex_begin + old_vdex_size, buffer.data(), first_chunk_size);
@@ -3696,7 +3696,7 @@ bool OatWriter::FinishVdexFile(File* vdex_file, verifier::VerifierDeps* verifier
if (extract_dex_files_into_vdex_) {
// Note: We passed the ownership of the vdex dex file MemMap to the caller,
// so we need to use msync() for the range explicitly.
- if (msync(vdex_begin, RoundUp(old_vdex_size, kPageSize), MS_SYNC) != 0) {
+ if (msync(vdex_begin, RoundUp(old_vdex_size, kElfSegmentAlignment), MS_SYNC) != 0) {
PLOG(ERROR) << "Failed to sync vdex file contents" << vdex_file->GetPath();
return false;
}
@@ -3714,7 +3714,7 @@ bool OatWriter::FinishVdexFile(File* vdex_file, verifier::VerifierDeps* verifier
// Note: If `extract_dex_files_into_vdex_`, we passed the ownership of the vdex dex file
// MemMap to the caller, so we need to use msync() for the range explicitly.
- if (msync(vdex_begin, kPageSize, MS_SYNC) != 0) {
+ if (msync(vdex_begin, kElfSegmentAlignment, MS_SYNC) != 0) {
PLOG(ERROR) << "Failed to sync vdex file header " << vdex_file->GetPath();
return false;
}
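The msync() changes above follow one rule: the flushed length mirrors the segment-granular length used when the region was mapped, so the sync covers every page the writer may have touched regardless of the runtime page size. A hedged sketch (hypothetical helper; POSIX msync):

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

constexpr size_t kElfSegmentAlignment = 16384;  // assumed 16kB maximum page size
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

// The vdex mapping was created with a segment-aligned length, so flush the
// same rounded length rather than a single compile-time page.
bool SyncVdexRange(uint8_t* vdex_begin, size_t vdex_size) {
  return msync(vdex_begin, RoundUp(vdex_size, kElfSegmentAlignment), MS_SYNC) == 0;
}
```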
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 148ee34ffb..06b1f7e0aa 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -49,6 +49,12 @@ static constexpr size_t kMaxPageSize = 16384;
static constexpr size_t kMaxPageSize = kMinPageSize;
#endif
+// Targets can have different page sizes (e.g. 4kB or 16kB). Because ART can cross-compile, it
+// needs to be able to generate OAT (ELF) and other image files with an alignment other than the
+// host page size. kElfSegmentAlignment needs to be equal to the largest supported page size;
+// effectively, it is the value used in image files to align contents to page boundaries.
+static constexpr size_t kElfSegmentAlignment = kMaxPageSize;
+
// TODO: Kernels for arm and x86 in both, 32-bit and 64-bit modes use 512 entries per page-table
// page. Find a way to confirm that in userspace.
// Address range covered by 1 Page Middle Directory (PMD) entry in the page table
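Why the largest supported page size matters: an offset that is only 4kB-aligned can land mid-page on a 16kB-page kernel, so the file contents could not be mapped directly there. A minimal standalone sketch of the arithmetic (constants and values assumed for illustration, not taken from the real headers):

```cpp
#include <cstddef>
#include <cstdio>

static constexpr size_t kMinPageSize = 4096;
static constexpr size_t kMaxPageSize = 16384;  // e.g. arm64 devices with 16kB pages
static constexpr size_t kElfSegmentAlignment = kMaxPageSize;

// Power-of-two round-up, mirroring ART's RoundUp helper (assumes n == 2^k).
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

int main() {
  constexpr size_t section_end = 0x5321;  // arbitrary unaligned section end
  // Aligning to the host's 4kB page yields 0x6000, where a 16kB-page kernel
  // cannot map a segment; aligning to kElfSegmentAlignment yields 0x8000,
  // which works for every page size up to 16kB.
  printf("4kB-aligned:  %#zx\n", RoundUp(section_end, kMinPageSize));
  printf("16kB-aligned: %#zx\n", RoundUp(section_end, kElfSegmentAlignment));
  return 0;
}
```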
diff --git a/libelffile/elf/elf_builder.h b/libelffile/elf/elf_builder.h
index 6720569aad..309417e4d8 100644
--- a/libelffile/elf/elf_builder.h
+++ b/libelffile/elf/elf_builder.h
@@ -202,7 +202,7 @@ class ElfBuilder final {
std::vector<Section*>& sections = owner_->sections_;
Elf_Word last = sections.empty() ? PF_R : sections.back()->phdr_flags_;
if (phdr_flags_ != last) {
- header_.sh_addralign = kPageSize; // Page-align if R/W/X flags changed.
+      header_.sh_addralign = kElfSegmentAlignment;  // Segment-align if R/W/X flags changed.
}
sections.push_back(this);
section_index_ = sections.size(); // First ELF section has index 1.
@@ -460,16 +460,18 @@ class ElfBuilder final {
ElfBuilder(InstructionSet isa, OutputStream* output)
: isa_(isa),
stream_(output),
- rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
- text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),
- data_bimg_rel_ro_(
- this, ".data.bimg.rel.ro", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
- bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
- dex_(this, ".dex", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
- dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),
+ rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kElfSegmentAlignment, 0),
+ text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0,
+ kElfSegmentAlignment, 0),
+ data_bimg_rel_ro_(this, ".data.bimg.rel.ro", SHT_PROGBITS, SHF_ALLOC, nullptr, 0,
+ kElfSegmentAlignment, 0),
+ bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kElfSegmentAlignment, 0),
+ dex_(this, ".dex", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kElfSegmentAlignment, 0),
+ dynstr_(this, ".dynstr", SHF_ALLOC, kElfSegmentAlignment),
dynsym_(this, ".dynsym", SHT_DYNSYM, SHF_ALLOC, &dynstr_),
hash_(this, ".hash", SHT_HASH, SHF_ALLOC, &dynsym_, 0, sizeof(Elf_Word), sizeof(Elf_Word)),
- dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kPageSize, sizeof(Elf_Dyn)),
+ dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kElfSegmentAlignment,
+ sizeof(Elf_Dyn)),
strtab_(this, ".strtab", 0, 1),
symtab_(this, ".symtab", SHT_SYMTAB, 0, &strtab_),
debug_frame_(this, ".debug_frame", SHT_PROGBITS, 0, nullptr, 0, sizeof(Elf_Addr), 0),
@@ -540,7 +542,7 @@ class ElfBuilder final {
// Note: loaded_size_ == 0 for tests that don't write .rodata, .text, .bss,
// .dynstr, dynsym, .hash and .dynamic. These tests should not read loaded_size_.
- CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kPageSize))
+ CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kElfSegmentAlignment))
<< loaded_size_ << " " << virtual_address_;
// Write section names and finish the section headers.
@@ -615,7 +617,8 @@ class ElfBuilder final {
section->section_index_ = 0;
} else {
if (section->header_.sh_type != SHT_NOBITS) {
- DCHECK_LE(section->header_.sh_offset, end + kPageSize) << "Large gap between sections";
+ DCHECK_LE(section->header_.sh_offset, end + kElfSegmentAlignment)
+ << "Large gap between sections";
end = std::max<off_t>(end, section->header_.sh_offset + section->header_.sh_size);
}
non_debug_sections.push_back(section);
@@ -767,7 +770,7 @@ class ElfBuilder final {
dynamic_.Add(&dyns, sizeof(dyns));
dynamic_.AllocateVirtualMemory(dynamic_.GetCacheSize());
- loaded_size_ = RoundUp(virtual_address_, kPageSize);
+ loaded_size_ = RoundUp(virtual_address_, kElfSegmentAlignment);
}
void WriteDynamicSection() {
@@ -906,7 +909,7 @@ class ElfBuilder final {
load.p_flags = PF_R;
load.p_offset = load.p_vaddr = load.p_paddr = 0;
load.p_filesz = load.p_memsz = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * kMaxProgramHeaders;
- load.p_align = kPageSize;
+ load.p_align = kElfSegmentAlignment;
phdrs.push_back(load);
}
// Create program headers for sections.
@@ -936,7 +939,7 @@ class ElfBuilder final {
prev.p_memsz = size;
} else {
// If we are adding new load, it must be aligned.
- CHECK_EQ(shdr.sh_addralign, (Elf_Word)kPageSize);
+ CHECK_EQ(shdr.sh_addralign, (Elf_Word)kElfSegmentAlignment);
phdrs.push_back(load);
}
}
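These p_align values feed the standard loader invariant: a PT_LOAD segment can be mapped directly only if its file offset and virtual address are congruent modulo the runtime page size, which p_align = kElfSegmentAlignment guarantees for every page size up to 16kB. A sketch of that check (Linux <elf.h> types; not part of ElfBuilder):

```cpp
#include <elf.h>
#include <cstddef>

// True if a PT_LOAD segment can be mmap'ed directly on a kernel with the given
// page size: the segment alignment must be a multiple of the page size, and
// the file offset and virtual address must agree modulo the page size.
bool IsDirectlyMappable(const Elf64_Phdr& phdr, size_t runtime_page_size) {
  return phdr.p_align % runtime_page_size == 0 &&
         phdr.p_vaddr % runtime_page_size == phdr.p_offset % runtime_page_size;
}
```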
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 2f23cab13f..aedeeb2fe2 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1950,7 +1950,7 @@ class ImageDumper {
CHECK_ALIGNED(image_header_.GetFieldsSection().Offset(), 4);
CHECK_ALIGNED_PARAM(image_header_.GetMethodsSection().Offset(), pointer_size);
CHECK_ALIGNED(image_header_.GetInternedStringsSection().Offset(), 8);
- CHECK_ALIGNED(image_header_.GetImageBitmapSection().Offset(), kPageSize);
+ CHECK_ALIGNED(image_header_.GetImageBitmapSection().Offset(), kElfSegmentAlignment);
for (size_t i = 0; i < ImageHeader::ImageSections::kSectionCount; i++) {
ImageHeader::ImageSections index = ImageHeader::ImageSections(i);
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 2ceda1245f..dd1f46c3ca 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1056,8 +1056,8 @@ bool ElfFileImpl<ElfTypes>::GetLoadedAddressRange(/*out*/uint8_t** vaddr_begin,
max_vaddr = end_vaddr;
}
}
- min_vaddr = RoundDown(min_vaddr, kPageSize);
- max_vaddr = RoundUp(max_vaddr, kPageSize);
+ min_vaddr = RoundDown(min_vaddr, kElfSegmentAlignment);
+ max_vaddr = RoundUp(max_vaddr, kElfSegmentAlignment);
CHECK_LT(min_vaddr, max_vaddr) << file_path_;
// Check that the range fits into the runtime address space.
if (UNLIKELY(max_vaddr - 1u > std::numeric_limits<size_t>::max())) {
@@ -1205,7 +1205,7 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
return false;
}
if (program_header->p_filesz < program_header->p_memsz &&
- !IsAligned<kPageSize>(program_header->p_filesz)) {
+ !IsAligned<kElfSegmentAlignment>(program_header->p_filesz)) {
*error_msg = StringPrintf("Unsupported unaligned p_filesz < p_memsz (%" PRIu64
" < %" PRIu64 "): %s",
static_cast<uint64_t>(program_header->p_filesz),
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 84fcc3f99c..683ca8de0b 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -50,7 +50,8 @@ void ImmuneSpaces::CreateLargestImmuneRegion() {
// be if the app image was mapped at a random address.
space::ImageSpace* image_space = space->AsImageSpace();
// Update the end to include the other non-heap sections.
- space_end = RoundUp(reinterpret_cast<uintptr_t>(image_space->GetImageEnd()), kPageSize);
+ space_end = RoundUp(reinterpret_cast<uintptr_t>(image_space->GetImageEnd()),
+ kElfSegmentAlignment);
// For the app image case, GetOatFileBegin is where the oat file was mapped during image
// creation, the actual oat file could be somewhere else.
const OatFile* const image_oat_file = image_space->GetOatFile();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 85647fe921..ee77283fb2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -247,7 +247,7 @@ static void VerifyBootImagesContiguity(const std::vector<gc::space::ImageSpace*>
const ImageHeader& current_header = image_spaces[i + j]->GetImageHeader();
CHECK_EQ(current_heap, image_spaces[i + j]->Begin());
CHECK_EQ(current_oat, current_header.GetOatFileBegin());
- current_heap += RoundUp(current_header.GetImageSize(), kPageSize);
+ current_heap += RoundUp(current_header.GetImageSize(), kElfSegmentAlignment);
CHECK_GT(current_header.GetOatFileEnd(), current_header.GetOatFileBegin());
current_oat = current_header.GetOatFileEnd();
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 5fec7b7bd6..e55b5b4af9 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -107,19 +107,19 @@ ImageSpace::ImageSpace(const std::string& image_filename,
}
static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
- CHECK_ALIGNED(min_delta, kPageSize);
- CHECK_ALIGNED(max_delta, kPageSize);
+ CHECK_ALIGNED(min_delta, kElfSegmentAlignment);
+ CHECK_ALIGNED(max_delta, kElfSegmentAlignment);
CHECK_LT(min_delta, max_delta);
int32_t r = GetRandomNumber<int32_t>(min_delta, max_delta);
if (r % 2 == 0) {
- r = RoundUp(r, kPageSize);
+ r = RoundUp(r, kElfSegmentAlignment);
} else {
- r = RoundDown(r, kPageSize);
+ r = RoundDown(r, kElfSegmentAlignment);
}
CHECK_LE(min_delta, r);
CHECK_GE(max_delta, r);
- CHECK_ALIGNED(r, kPageSize);
+ CHECK_ALIGNED(r, kElfSegmentAlignment);
return r;
}
@@ -597,7 +597,8 @@ class ImageSpace::Loader {
return nullptr;
}
- uint32_t expected_reservation_size = RoundUp(image_header.GetImageSize(), kPageSize);
+ uint32_t expected_reservation_size = RoundUp(image_header.GetImageSize(),
+ kElfSegmentAlignment);
if (!CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
!CheckImageComponentCount(*space, /*expected_component_count=*/ 1u, error_msg)) {
return nullptr;
@@ -732,7 +733,7 @@ class ImageSpace::Loader {
// The location we want to map from is the first aligned page after the end of the stored
// (possibly compressed) data.
const size_t image_bitmap_offset =
- RoundUp(sizeof(ImageHeader) + image_header.GetDataSize(), kPageSize);
+ RoundUp(sizeof(ImageHeader) + image_header.GetDataSize(), kElfSegmentAlignment);
const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
if (end_of_bitmap != image_file_size) {
*error_msg = StringPrintf(
@@ -998,8 +999,10 @@ class ImageSpace::Loader {
const bool is_compressed = image_header.HasCompressedBlock();
if (!is_compressed && allow_direct_mapping) {
uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
+      // The reserved memory size is aligned up to kElfSegmentAlignment to
+      // ensure that the next reserved area will also be aligned to it.
return MemMap::MapFileAtAddress(address,
- image_header.GetImageSize(),
+ RoundUp(image_header.GetImageSize(), kElfSegmentAlignment),
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
@@ -1012,8 +1015,10 @@ class ImageSpace::Loader {
}
// Reserve output and copy/decompress into it.
+    // The reserved memory size is aligned up to kElfSegmentAlignment to
+    // ensure that the next reserved area will also be aligned to it.
MemMap map = MemMap::MapAnonymous(image_location,
- image_header.GetImageSize(),
+ RoundUp(image_header.GetImageSize(), kElfSegmentAlignment),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
image_reservation,
@@ -3164,8 +3169,8 @@ class ImageSpace::BootImageLoader {
MemMap ReserveBootImageMemory(uint8_t* addr,
uint32_t reservation_size,
/*out*/std::string* error_msg) {
- DCHECK_ALIGNED(reservation_size, kPageSize);
- DCHECK_ALIGNED(addr, kPageSize);
+ DCHECK_ALIGNED(reservation_size, kElfSegmentAlignment);
+ DCHECK_ALIGNED(addr, kElfSegmentAlignment);
return MemMap::MapAnonymous("Boot image reservation",
addr,
reservation_size,
@@ -3180,7 +3185,7 @@ class ImageSpace::BootImageLoader {
/*inout*/MemMap* image_reservation,
/*out*/MemMap* extra_reservation,
/*out*/std::string* error_msg) {
- DCHECK_ALIGNED(extra_reservation_size, kPageSize);
+ DCHECK_ALIGNED(extra_reservation_size, kElfSegmentAlignment);
DCHECK(!extra_reservation->IsValid());
size_t expected_size = image_reservation->IsValid() ? image_reservation->Size() : 0u;
if (extra_reservation_size != expected_size) {
@@ -3305,7 +3310,7 @@ bool ImageSpace::LoadBootImage(const std::vector<std::string>& boot_class_path,
DCHECK(boot_image_spaces != nullptr);
DCHECK(boot_image_spaces->empty());
- DCHECK_ALIGNED(extra_reservation_size, kPageSize);
+ DCHECK_ALIGNED(extra_reservation_size, kElfSegmentAlignment);
DCHECK(extra_reservation != nullptr);
DCHECK_NE(image_isa, InstructionSet::kNone);
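The rounded mapping lengths above keep a chain of carve-outs from one reservation aligned. A sketch of the idea (hypothetical cursor arithmetic, not the MemMap API):

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t kElfSegmentAlignment = 16384;  // assumed 16kB maximum page size
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

// Each image is carved out of the reservation right after the previous one,
// so every carved length must be a multiple of the segment alignment or the
// next image would start misaligned.
size_t NextImageStart(size_t cursor, size_t image_size) {
  assert(cursor % kElfSegmentAlignment == 0);
  return cursor + RoundUp(image_size, kElfSegmentAlignment);
}
```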
diff --git a/runtime/image.cc b/runtime/image.cc
index 5779ecd43d..10c053ffda 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -69,10 +69,10 @@ ImageHeader::ImageHeader(uint32_t image_reservation_size,
boot_image_checksum_(boot_image_checksum),
image_roots_(image_roots),
pointer_size_(pointer_size) {
- CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
+ CHECK_EQ(image_begin, RoundUp(image_begin, kElfSegmentAlignment));
if (oat_checksum != 0u) {
- CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
- CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
+ CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kElfSegmentAlignment));
+ CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kElfSegmentAlignment));
CHECK_LT(image_roots, oat_file_begin);
CHECK_LE(oat_file_begin, oat_data_begin);
CHECK_LT(oat_data_begin, oat_data_end);
@@ -85,6 +85,26 @@ ImageHeader::ImageHeader(uint32_t image_reservation_size,
}
void ImageHeader::RelocateImageReferences(int64_t delta) {
+  // App images can be relocated to a page-aligned address.
+  // Unlike the boot image, for which memory is reserved ahead of loading and
+  // is aligned to kElfSegmentAlignment, app images can be mapped without
+  // reserving memory, i.e. via direct file mapping, in which case the memory
+  // range is aligned by the kernel and the only guarantee is that it is
+  // aligned to the page size.
+  //
+  // NOTE: While this may be less than the alignment required by the ELF
+  // header, it should be sufficient in practice, as the only reason for the
+  // ELF segment alignment to exceed one page size is compatibility with
+  // system configurations that use a larger page size.
+  //
+  // Adding a preliminary memory reservation would avoid this at the cost of
+  // some overhead.
+  //
+  // However, technically the alignment requirement is not fulfilled, and
+  // that may be worth addressing even at that cost. It would have to be done
+  // in coordination with the dynamic linker's ELF loader, as otherwise
+  // inconsistency would still be possible, e.g. when using `dlopen`-like
+  // calls to load OAT files.
CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
oat_file_begin_ += delta;
oat_data_begin_ += delta;
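To make the comment above concrete (addresses assumed for illustration): a direct file mapping on a 4kB-page kernel yields a relocation delta that is page-aligned but not necessarily segment-aligned, which is why this CHECK stays at kPageSize while RelocateBootImageReferences below tightens to kElfSegmentAlignment.

```cpp
#include <cstdint>

int64_t RelocationDelta(uint64_t compiled_begin, uint64_t mapped_begin) {
  return static_cast<int64_t>(mapped_begin - compiled_begin);
}
// compiled_begin = 0x70000000    (16kB-aligned, chosen at compile time)
// mapped_begin   = 0x7f2a4b3000  (kernel-chosen, only 4kB-aligned)
// => delta % 4096 == 0, but delta % 16384 == 0x3000.
```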
@@ -95,7 +115,7 @@ void ImageHeader::RelocateImageReferences(int64_t delta) {
}
void ImageHeader::RelocateBootImageReferences(int64_t delta) {
- CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
+ CHECK_ALIGNED(delta, kElfSegmentAlignment) << "relocation delta must be Elf segment aligned";
DCHECK_EQ(boot_image_begin_ != 0u, boot_image_size_ != 0u);
if (boot_image_begin_ != 0u) {
boot_image_begin_ += delta;
@@ -108,8 +128,8 @@ void ImageHeader::RelocateBootImageReferences(int64_t delta) {
bool ImageHeader::IsAppImage() const {
// Unlike boot image and boot image extensions which include address space for
// oat files in their reservation size, app images are loaded separately from oat
- // files and their reservation size is the image size rounded up to full page.
- return image_reservation_size_ == RoundUp(image_size_, kPageSize);
+ // files and their reservation size is the image size rounded up to Elf alignment.
+ return image_reservation_size_ == RoundUp(image_size_, kElfSegmentAlignment);
}
uint32_t ImageHeader::GetImageSpaceCount() const {
@@ -127,7 +147,7 @@ bool ImageHeader::IsValid() const {
if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
return false;
}
- if (!IsAligned<kPageSize>(image_reservation_size_)) {
+ if (!IsAligned<kElfSegmentAlignment>(image_reservation_size_)) {
return false;
}
// Unsigned so wraparound is well defined.
@@ -402,7 +422,7 @@ bool ImageHeader::WriteData(const ImageFileGuard& image_file,
// possibly compressed image.
ImageSection& bitmap_section = GetImageSection(ImageHeader::kSectionImageBitmap);
// Align up since data size may be unaligned if the image is compressed.
- out_offset = RoundUp(out_offset, kPageSize);
+ out_offset = RoundUp(out_offset, kElfSegmentAlignment);
bitmap_section = ImageSection(out_offset, bitmap_section.Size());
if (!image_file->PwriteFully(bitmap_data,
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 2c7a73f964..e83d27736c 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -100,7 +100,7 @@ bool OatHeader::IsValid() const {
if (version_ != kOatVersion) {
return false;
}
- if (!IsAligned<kPageSize>(executable_offset_)) {
+ if (!IsAligned<kElfSegmentAlignment>(executable_offset_)) {
return false;
}
if (!IsValidInstructionSet(instruction_set_)) {
@@ -122,8 +122,8 @@ std::string OatHeader::GetValidationErrorMessage() const {
kOatVersion[0], kOatVersion[1], kOatVersion[2], kOatVersion[3],
version_[0], version_[1], version_[2], version_[3]);
}
- if (!IsAligned<kPageSize>(executable_offset_)) {
- return "Executable offset not page-aligned.";
+ if (!IsAligned<kElfSegmentAlignment>(executable_offset_)) {
+ return "Executable offset not properly aligned.";
}
if (!IsValidInstructionSet(instruction_set_)) {
return StringPrintf("Invalid instruction set, %d.", static_cast<int>(instruction_set_));
@@ -199,13 +199,13 @@ void OatHeader::SetBcpBssInfoOffset(uint32_t bcp_info_offset) {
uint32_t OatHeader::GetExecutableOffset() const {
DCHECK(IsValid());
- DCHECK_ALIGNED(executable_offset_, kPageSize);
+ DCHECK_ALIGNED(executable_offset_, kElfSegmentAlignment);
CHECK_GT(executable_offset_, sizeof(OatHeader));
return executable_offset_;
}
void OatHeader::SetExecutableOffset(uint32_t executable_offset) {
- DCHECK_ALIGNED(executable_offset, kPageSize);
+ DCHECK_ALIGNED(executable_offset, kElfSegmentAlignment);
CHECK_GT(executable_offset, sizeof(OatHeader));
DCHECK(IsValid());
DCHECK_EQ(executable_offset_, 0U);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 97164f725e..ae861d202a 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -622,6 +622,10 @@ bool OatFileBase::Setup(int zip_fd,
}
DCHECK_GE(static_cast<size_t>(pointer_size), alignof(GcRoot<mirror::Object>));
+  // In certain cases, an ELF file can be mapped at an address that is
+  // page-aligned but not aligned to kElfSegmentAlignment. While this
+  // technically violates the alignment requirement in the ELF header, it has
+  // to be supported for now. See also the comment at ImageHeader::RelocateImageReferences.
if (!IsAligned<kPageSize>(bss_begin_) ||
!IsAlignedParam(bss_methods_, static_cast<size_t>(pointer_size)) ||
!IsAlignedParam(bss_roots_, static_cast<size_t>(pointer_size)) ||
@@ -1417,7 +1421,10 @@ bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
// Take ownership of the memory used by the shared object. dlopen() does not assume
// full ownership of this memory and dlclose() shall just remap it as zero pages with
// PROT_NONE. We need to unmap the memory when destroying this oat file.
- dlopen_mmaps_.push_back(reservation->TakeReservedMemory(context.max_size));
+    // The reserved memory size is aligned up to kElfSegmentAlignment to
+    // ensure that the next reserved area will also be aligned to it.
+ dlopen_mmaps_.push_back(reservation->TakeReservedMemory(RoundUp(context.max_size,
+ kElfSegmentAlignment)));
}
#else
static_assert(!kIsTargetBuild || kIsTargetLinux || kIsTargetFuchsia,
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index 02bbf3f455..73bb9560fc 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -112,8 +112,8 @@ class RuntimeImageHelper {
// size, relocate native pointers inside classes and ImTables.
RelocateNativePointers();
- // Generate the bitmap section, stored page aligned after the sections data and of size
- // `object_section_size_` rounded up to kCardSize to match the bitmap size expected by
+ // Generate the bitmap section, stored kElfSegmentAlignment-aligned after the sections data and
+ // of size `object_section_size_` rounded up to kCardSize to match the bitmap size expected by
// Loader::Init at art::gc::space::ImageSpace.
size_t sections_end = sections_[ImageHeader::kSectionMetadata].End();
image_bitmap_ = gc::accounting::ContinuousSpaceBitmap::Create(
@@ -127,10 +127,11 @@ class RuntimeImageHelper {
}
const size_t bitmap_bytes = image_bitmap_.Size();
auto* bitmap_section = &sections_[ImageHeader::kSectionImageBitmap];
- // Bitmap section size doesn't have to be rounded up as it is located at the end of the file.
- // When mapped to memory, if the last page of the mapping is only partially filled with data,
- // the rest will be zero-filled.
- *bitmap_section = ImageSection(RoundUp(sections_end, kPageSize), bitmap_bytes);
+    // The offset of the bitmap section should be aligned to kElfSegmentAlignment so the section
+    // can be mapped from the file to memory. However, the section size doesn't have to be rounded
+    // up, as it is located at the end of the file: when the file is mapped to memory, a partially
+    // filled final page is zero-filled past the data.
+ *bitmap_section = ImageSection(RoundUp(sections_end, kElfSegmentAlignment), bitmap_bytes);
// Compute boot image checksum and boot image components, to be stored in
// the header.
@@ -147,7 +148,7 @@ class RuntimeImageHelper {
}
header_ = ImageHeader(
- /* image_reservation_size= */ RoundUp(sections_end, kPageSize),
+ /* image_reservation_size= */ RoundUp(sections_end, kElfSegmentAlignment),
/* component_count= */ 1,
image_begin_,
sections_end,