author | 2018-07-23 08:46:01 +0000
---|---
committer | 2018-07-23 08:46:01 +0000
commit | fa5fc80f933a63eaf8bbbed5c1ffee24e0f3fa15 (patch)
tree | 431236bcbc19894ac904f994ab37e1bf3cfb60fb
parent | fb9c672577ae9772557f72f9cecb77d4d24af585 (diff)
parent | 6121aa69098e3496cf1a81bf3e5e7ae70f66eacb (diff)
Merge "Move .art.rel data to a section in .art, part 2."
mode | file | lines changed
---|---|---
-rw-r--r-- | dex2oat/linker/image_writer.cc | 190
-rw-r--r-- | dex2oat/linker/image_writer.h | 6
-rw-r--r-- | oatdump/oatdump.cc | 11
-rw-r--r-- | patchoat/patchoat.cc | 142
-rw-r--r-- | runtime/gc/space/image_space.cc | 17
-rw-r--r-- | runtime/image.cc | 2
-rw-r--r-- | runtime/image.h | 5
7 files changed, 253 insertions, 120 deletions
```diff
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index bb8bb92d76..e10f9b3feb 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -27,6 +27,7 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/bit_memory_region.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
 #include "base/globals.h"
@@ -86,6 +87,68 @@ using ::art::mirror::String;
 namespace art {
 namespace linker {
 
+static inline size_t RelocationIndex(size_t relocation_offset, PointerSize target_ptr_size) {
+  static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
+                "Expecting heap GC roots and references to have the same size.");
+  DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(target_ptr_size));
+  DCHECK_ALIGNED(relocation_offset, sizeof(GcRoot<mirror::Object>));
+  return relocation_offset / sizeof(GcRoot<mirror::Object>);
+}
+
+static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
+                                                 ImageHeader::StorageMode image_storage_mode,
+                                                 /*out*/ std::vector<uint8_t>* storage) {
+  const uint64_t compress_start_time = NanoTime();
+
+  switch (image_storage_mode) {
+    case ImageHeader::kStorageModeLZ4: {
+      storage->resize(LZ4_compressBound(source.size()));
+      size_t data_size = LZ4_compress_default(
+          reinterpret_cast<char*>(const_cast<uint8_t*>(source.data())),
+          reinterpret_cast<char*>(storage->data()),
+          source.size(),
+          storage->size());
+      storage->resize(data_size);
+      break;
+    }
+    case ImageHeader::kStorageModeLZ4HC: {
+      // Bound is same as non HC.
+      storage->resize(LZ4_compressBound(source.size()));
+      size_t data_size = LZ4_compress_HC(
+          reinterpret_cast<const char*>(const_cast<uint8_t*>(source.data())),
+          reinterpret_cast<char*>(storage->data()),
+          source.size(),
+          storage->size(),
+          LZ4HC_CLEVEL_MAX);
+      storage->resize(data_size);
+      break;
+    }
+    case ImageHeader::kStorageModeUncompressed: {
+      return source;
+    }
+    default: {
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
+    }
+  }
+
+  DCHECK(image_storage_mode == ImageHeader::kStorageModeLZ4 ||
+         image_storage_mode == ImageHeader::kStorageModeLZ4HC);
+  VLOG(compiler) << "Compressed from " << source.size() << " to " << storage->size() << " in "
+                 << PrettyDuration(NanoTime() - compress_start_time);
+  if (kIsDebugBuild) {
+    std::vector<uint8_t> decompressed(source.size());
+    const size_t decompressed_size = LZ4_decompress_safe(
+        reinterpret_cast<char*>(storage->data()),
+        reinterpret_cast<char*>(decompressed.data()),
+        storage->size(),
+        decompressed.size());
+    CHECK_EQ(decompressed_size, decompressed.size());
+    CHECK_EQ(memcmp(source.data(), decompressed.data(), source.size()), 0) << image_storage_mode;
+  }
+  return ArrayRef<const uint8_t>(*storage);
+}
+
 // Separate objects into multiple bins to optimize dirty memory use.
 static constexpr bool kBinObjects = true;
 
@@ -239,69 +302,18 @@ bool ImageWriter::Write(int image_fd,
     return EXIT_FAILURE;
   }
 
-  std::unique_ptr<char[]> compressed_data;
   // Image data size excludes the bitmap and the header.
   ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
-  const size_t image_data_size = image_header->GetImageSize() - sizeof(ImageHeader);
-  char* image_data = reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader);
-  size_t data_size;
-  const char* image_data_to_write;
-  const uint64_t compress_start_time = NanoTime();
+  ArrayRef<const uint8_t> raw_image_data(image_info.image_->Begin() + sizeof(ImageHeader),
+                                         image_header->GetImageSize() - sizeof(ImageHeader));
   CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
-  switch (image_storage_mode_) {
-    case ImageHeader::kStorageModeLZ4: {
-      const size_t compressed_max_size = LZ4_compressBound(image_data_size);
-      compressed_data.reset(new char[compressed_max_size]);
-      data_size = LZ4_compress_default(
-          reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
-          &compressed_data[0],
-          image_data_size,
-          compressed_max_size);
-      break;
-    }
-    case ImageHeader::kStorageModeLZ4HC: {
-      // Bound is same as non HC.
-      const size_t compressed_max_size = LZ4_compressBound(image_data_size);
-      compressed_data.reset(new char[compressed_max_size]);
-      data_size = LZ4_compress_HC(
-          reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
-          &compressed_data[0],
-          image_data_size,
-          compressed_max_size,
-          LZ4HC_CLEVEL_MAX);
-      break;
-    }
-    case ImageHeader::kStorageModeUncompressed: {
-      data_size = image_data_size;
-      image_data_to_write = image_data;
-      break;
-    }
-    default: {
-      LOG(FATAL) << "Unsupported";
-      UNREACHABLE();
-    }
-  }
-
-  if (compressed_data != nullptr) {
-    image_data_to_write = &compressed_data[0];
-    VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size << " in "
-                   << PrettyDuration(NanoTime() - compress_start_time);
-    if (kIsDebugBuild) {
-      std::unique_ptr<uint8_t[]> temp(new uint8_t[image_data_size]);
-      const size_t decompressed_size = LZ4_decompress_safe(
-          reinterpret_cast<char*>(&compressed_data[0]),
-          reinterpret_cast<char*>(&temp[0]),
-          data_size,
-          image_data_size);
-      CHECK_EQ(decompressed_size, image_data_size);
-      CHECK_EQ(memcmp(image_data, &temp[0], image_data_size), 0) << image_storage_mode_;
-    }
-  }
+  std::vector<uint8_t> compressed_data;
+  ArrayRef<const uint8_t> image_data =
+      MaybeCompressData(raw_image_data, image_storage_mode_, &compressed_data);
 
   // Write out the image + fields + methods.
-  const bool is_compressed = compressed_data != nullptr;
-  if (!image_file->PwriteFully(image_data_to_write, data_size, sizeof(ImageHeader))) {
+  if (!image_file->PwriteFully(image_data.data(), image_data.size(), sizeof(ImageHeader))) {
     PLOG(ERROR) << "Failed to write image file data " << image_filename;
     image_file->Erase();
     return false;
@@ -311,14 +323,30 @@ bool ImageWriter::Write(int image_fd,
   // convenience.
   const ImageSection& bitmap_section = image_header->GetImageBitmapSection();
   // Align up since data size may be unaligned if the image is compressed.
-  size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + data_size, kPageSize);
-  if (!is_compressed) {
+  size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + image_data.size(), kPageSize);
+  if (image_storage_mode_ == ImageHeader::kDefaultStorageMode) {
     CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset());
   }
-  if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()),
+  if (!image_file->PwriteFully(image_info.image_bitmap_->Begin(),
                                bitmap_section.Size(),
                                bitmap_position_in_file)) {
-    PLOG(ERROR) << "Failed to write image file " << image_filename;
+    PLOG(ERROR) << "Failed to write image file bitmap " << image_filename;
+    image_file->Erase();
+    return false;
+  }
+
+  // Write out relocations.
+  size_t relocations_position_in_file = bitmap_position_in_file + bitmap_section.Size();
+  ArrayRef<const uint8_t> relocations = MaybeCompressData(
+      ArrayRef<const uint8_t>(image_info.relocation_bitmap_),
+      image_storage_mode_,
+      &compressed_data);
+  image_header->sections_[ImageHeader::kSectionImageRelocations] =
+      ImageSection(bitmap_section.Offset() + bitmap_section.Size(), relocations.size());
+  if (!image_file->PwriteFully(relocations.data(),
+                               relocations.size(),
+                               relocations_position_in_file)) {
+    PLOG(ERROR) << "Failed to write image file relocations " << image_filename;
     image_file->Erase();
     return false;
   }
@@ -333,7 +361,7 @@ bool ImageWriter::Write(int image_fd,
   // Write header last in case the compiler gets killed in the middle of image writing.
   // We do not want to have a corrupted image with a valid header.
   // The header is uncompressed since it contains whether the image is compressed or not.
-  image_header->data_size_ = data_size;
+  image_header->data_size_ = image_data.size();
   if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()),
                                sizeof(ImageHeader),
                                0)) {
@@ -342,7 +370,7 @@ bool ImageWriter::Write(int image_fd,
     return false;
   }
 
-  CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
+  CHECK_EQ(relocations_position_in_file + relocations.size(),
            static_cast<size_t>(image_file->GetLength()));
   if (image_file->FlushCloseOrErase() != 0) {
     PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
@@ -1969,6 +1997,8 @@ void ImageWriter::CreateHeader(size_t oat_index) {
   const size_t bitmap_bytes = image_info.image_bitmap_->Size();
   auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
   *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
+  // The relocations section shall be finished later as we do not know its actual size yet.
+
   if (VLOG_IS_ON(compiler)) {
     LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
     size_t idx = 0;
@@ -2014,6 +2044,13 @@ void ImageWriter::CreateHeader(size_t oat_index) {
                                 /*is_pic*/compile_app_image_,
                                 image_storage_mode_,
                                 /*data_size*/0u);
+
+  // Resize relocation bitmap for recording reference/pointer relocations.
+  size_t number_of_relocation_locations = RelocationIndex(image_end, target_ptr_size_);
+  DCHECK(image_info.relocation_bitmap_.empty());
+  image_info.relocation_bitmap_.resize(
+      BitsToBytesRoundUp(number_of_relocation_locations * (compile_app_image_ ? 2u : 1u)));
+  // Record header relocations.
   RecordImageRelocation(&header->image_begin_, oat_index);
   RecordImageRelocation(&header->oat_file_begin_, oat_index);
   RecordImageRelocation(&header->oat_data_begin_, oat_index);
@@ -2966,13 +3003,34 @@ ImageWriter::ImageInfo::ImageInfo()
 
 template <bool kCheckNotNull /* = true */>
 void ImageWriter::RecordImageRelocation(const void* dest,
-                                        size_t oat_index ATTRIBUTE_UNUSED,
-                                        bool app_to_boot_image ATTRIBUTE_UNUSED /* = false */) {
+                                        size_t oat_index,
+                                        bool app_to_boot_image /* = false */) {
   // Check that we're not recording a relocation for null.
   if (kCheckNotNull) {
     DCHECK(reinterpret_cast<const uint32_t*>(dest)[0] != 0u);
   }
-  // TODO: Record the relocation.
+  // Calculate the offset within the image.
+  ImageInfo* image_info = &image_infos_[oat_index];
+  DCHECK(image_info->image_->HasAddress(dest))
+      << "MemMap range " << static_cast<const void*>(image_info->image_->Begin())
+      << "-" << static_cast<const void*>(image_info->image_->End())
+      << " does not contain " << dest;
+  size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_->Begin();
+  ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_->Begin());
+  size_t image_end = image_header->GetClassTableSection().End();
+  DCHECK_LT(offset, image_end);
+  // Calculate the location index.
+  size_t size = RelocationIndex(image_end, target_ptr_size_);
+  size_t index = RelocationIndex(offset, target_ptr_size_);
+  if (app_to_boot_image) {
+    index += size;
+  }
+  // Mark the location in the bitmap.
+  DCHECK(compile_app_image_ || !app_to_boot_image);
+  MemoryRegion region(image_info->relocation_bitmap_.data(), image_info->relocation_bitmap_.size());
+  BitMemoryRegion bit_region(region, /* bit_offset */ 0u, compile_app_image_ ? 2u * size : size);
+  DCHECK(!bit_region.LoadBit(index));
+  bit_region.StoreBit(index, /* value*/ true);
 }
 
 template <typename DestType>
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 9333d67015..9ab9c3eb6f 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -369,6 +369,12 @@ class ImageWriter FINAL {
 
     // Class table associated with this image for serialization.
     std::unique_ptr<ClassTable> class_table_;
+
+    // Relocations of references/pointers. For boot image, it contains one bit
+    // for each location that can be relocated. For app image, it contains twice
+    // that many bits, first half contains relocations within this image and the
+    // second half contains relocations for references to the boot image.
+    std::vector<uint8_t> relocation_bitmap_;
   };
 
   // We use the lock word to store the offset of the object in the image.
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 21ce8c84c4..235cd6e4ea 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1928,6 +1928,7 @@ class ImageDumper {
     const auto& intern_section = image_header_.GetInternedStringsSection();
     const auto& class_table_section = image_header_.GetClassTableSection();
     const auto& bitmap_section = image_header_.GetImageBitmapSection();
+    const auto& relocations_section = image_header_.GetImageRelocationsSection();
 
     stats_.header_bytes = header_bytes;
 
@@ -1967,7 +1968,11 @@ class ImageDumper {
     CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
     stats_.alignment_bytes += RoundUp(bitmap_offset, kPageSize) - bitmap_offset;
 
+    // There should be no space between the bitmap and relocations.
+    CHECK_EQ(bitmap_section.Offset() + bitmap_section.Size(), relocations_section.Offset());
+
     stats_.bitmap_bytes += bitmap_section.Size();
+    stats_.relocations_bytes += relocations_section.Size();
     stats_.art_field_bytes += field_section.Size();
     stats_.art_method_bytes += method_section.Size();
     stats_.dex_cache_arrays_bytes += dex_cache_arrays_section.Size();
@@ -2400,6 +2405,7 @@ class ImageDumper {
     size_t interned_strings_bytes;
     size_t class_table_bytes;
     size_t bitmap_bytes;
+    size_t relocations_bytes;
     size_t alignment_bytes;
 
     size_t managed_code_bytes;
@@ -2429,6 +2435,7 @@ class ImageDumper {
           interned_strings_bytes(0),
           class_table_bytes(0),
           bitmap_bytes(0),
+          relocations_bytes(0),
           alignment_bytes(0),
           managed_code_bytes(0),
           managed_code_bytes_ignoring_deduplication(0),
@@ -2592,6 +2599,7 @@ class ImageDumper {
            "interned_string_bytes = %8zd (%2.0f%% of art file bytes)\n"
            "class_table_bytes = %8zd (%2.0f%% of art file bytes)\n"
            "bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
+           "relocations_bytes = %8zd (%2.0f%% of art file bytes)\n"
            "alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
            header_bytes, PercentOfFileBytes(header_bytes),
            object_bytes, PercentOfFileBytes(object_bytes),
@@ -2603,12 +2611,13 @@ class ImageDumper {
            PercentOfFileBytes(interned_strings_bytes),
            class_table_bytes, PercentOfFileBytes(class_table_bytes),
            bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
+           relocations_bytes, PercentOfFileBytes(relocations_bytes),
            alignment_bytes, PercentOfFileBytes(alignment_bytes))
        << std::flush;
 
     CHECK_EQ(file_bytes, header_bytes + object_bytes + art_field_bytes + art_method_bytes +
              dex_cache_arrays_bytes + interned_strings_bytes + class_table_bytes +
-             bitmap_bytes + alignment_bytes);
+             bitmap_bytes + relocations_bytes + alignment_bytes);
   }
 
   os << "object_bytes breakdown:\n";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index fb42fb4d7c..a15f7b88d8 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -31,11 +31,13 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/bit_memory_region.h"
 #include "base/dumpable.h"
 #include "base/file_utils.h"
 #include "base/leb128.h"
 #include "base/logging.h"  // For InitLogging.
#include "base/mutex.h" +#include "base/memory_region.h" #include "base/memory_tool.h" #include "base/os.h" #include "base/scoped_flock.h" @@ -187,10 +189,6 @@ bool PatchOat::GeneratePatch( "Original and relocated image sizes differ: %zu vs %zu", original_size, relocated_size); return false; } - if ((original_size % 4) != 0) { - *error_msg = StringPrintf("Image size not multiple of 4: %zu", original_size); - return false; - } if (original_size > UINT32_MAX) { *error_msg = StringPrintf("Image too large: %zu" , original_size); return false; @@ -206,20 +204,58 @@ bool PatchOat::GeneratePatch( return false; } + const ImageHeader* image_header = reinterpret_cast<const ImageHeader*>(original.Begin()); + if (image_header->GetStorageMode() != ImageHeader::kStorageModeUncompressed) { + *error_msg = "Unexpected compressed image."; + return false; + } + if (image_header->IsAppImage()) { + *error_msg = "Unexpected app image."; + return false; + } + if (image_header->GetPointerSize() != PointerSize::k32 && + image_header->GetPointerSize() != PointerSize::k64) { + *error_msg = "Unexpected pointer size."; + return false; + } + static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>), + "Expecting heap GC roots and references to have the same size."); + DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(image_header->GetPointerSize())); + + const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(), + kPageSize); + const size_t end_of_bitmap = image_bitmap_offset + image_header->GetImageBitmapSection().Size(); + const ImageSection& relocation_section = image_header->GetImageRelocationsSection(); + MemoryRegion relocations_data(original.Begin() + end_of_bitmap, relocation_section.Size()); + size_t image_end = image_header->GetClassTableSection().End(); + if (!IsAligned<sizeof(GcRoot<mirror::Object>)>(image_end)) { + *error_msg = StringPrintf("Unaligned image end: %zu", image_end); + return false; + } + size_t num_indexes = image_end / sizeof(GcRoot<mirror::Object>); + if (relocation_section.Size() != BitsToBytesRoundUp(num_indexes)) { + *error_msg = StringPrintf("Unexpected size of relocation section: %zu expected: %zu", + static_cast<size_t>(relocation_section.Size()), + BitsToBytesRoundUp(num_indexes)); + return false; + } + BitMemoryRegion relocation_bitmap(relocations_data, /* bit_offset */ 0u, num_indexes); + // Output the SHA-256 digest of the original output->resize(SHA256_DIGEST_LENGTH); const uint8_t* original_bytes = original.Begin(); SHA256(original_bytes, original_size, output->data()); - // Output the list of offsets at which the original and patched images differ - size_t last_diff_offset = 0; + // Check the list of offsets at which the original and patched images differ. 
   size_t diff_offset_count = 0;
   const uint8_t* relocated_bytes = relocated.Begin();
-  for (size_t offset = 0; offset < original_size; offset += 4) {
+  for (size_t index = 0; index != num_indexes; ++index) {
+    size_t offset = index * sizeof(GcRoot<mirror::Object>);
     uint32_t original_value = *reinterpret_cast<const uint32_t*>(original_bytes + offset);
     uint32_t relocated_value = *reinterpret_cast<const uint32_t*>(relocated_bytes + offset);
     off_t diff = relocated_value - original_value;
     if (diff == 0) {
+      CHECK(!relocation_bitmap.LoadBit(index));
       continue;
     } else if (diff != expected_diff) {
       *error_msg =
@@ -230,13 +266,11 @@ bool PatchOat::GeneratePatch(
               (intmax_t) diff);
       return false;
     }
-
-    uint32_t offset_diff = offset - last_diff_offset;
-    last_diff_offset = offset;
+    CHECK(relocation_bitmap.LoadBit(index));
     diff_offset_count++;
-
-    EncodeUnsignedLeb128(output, offset_diff);
   }
+  size_t tail_bytes = original_size - image_end;
+  CHECK_EQ(memcmp(original_bytes + image_end, relocated_bytes + image_end, tail_bytes), 0);
 
   if (diff_offset_count == 0) {
     *error_msg = "Original and patched images are identical";
@@ -290,6 +324,14 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
                               rel_filename.c_str());
     return false;
   }
+  if (rel_size != SHA256_DIGEST_LENGTH) {
+    *error_msg = StringPrintf("Unexpected size of image relocation file %s: %" PRId64
+                                  ", expected %zu",
+                              rel_filename.c_str(),
+                              rel_size,
+                              static_cast<size_t>(SHA256_DIGEST_LENGTH));
+    return false;
+  }
   std::unique_ptr<uint8_t[]> rel(new uint8_t[rel_size]);
   if (!rel_file->ReadFully(rel.get(), rel_size)) {
     *error_msg = StringPrintf("Failed to read image relocation file %s", rel_filename.c_str());
@@ -309,10 +351,10 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
                               relocated_filename.c_str());
     return false;
   }
-  if ((image_size % 4) != 0) {
+  if (static_cast<uint64_t>(image_size) < sizeof(ImageHeader)) {
     *error_msg = StringPrintf(
-        "Relocated image file %s size not multiple of 4: %" PRId64,
+        "Relocated image file %s too small: %" PRId64,
         relocated_filename.c_str(),
         image_size);
     return false;
   }
@@ -329,16 +371,39 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
     return false;
   }
 
-  const uint8_t* original_image_digest = rel.get();
-  if (rel_size < SHA256_DIGEST_LENGTH) {
-    *error_msg = StringPrintf("Malformed image relocation file %s: too short",
-                              rel_filename.c_str());
+  const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get());
+  if (image_header.GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
+    *error_msg = StringPrintf("Unsuported compressed image file %s",
+                              relocated_filename.c_str());
+    return false;
+  }
+  size_t image_end = image_header.GetClassTableSection().End();
+  if (image_end > static_cast<uint64_t>(image_size) || !IsAligned<4u>(image_end)) {
+    *error_msg = StringPrintf("Heap size too big or unaligned in image file %s: %zu",
+                              relocated_filename.c_str(),
+                              image_end);
+    return false;
+  }
+  size_t number_of_relocation_locations = image_end / 4u;
+  const ImageSection& relocation_section = image_header.GetImageRelocationsSection();
+  if (relocation_section.Size() != BitsToBytesRoundUp(number_of_relocation_locations)) {
+    *error_msg = StringPrintf("Unexpected size of relocation section in image file %s: %zu"
+                              " expected: %zu",
+                              relocated_filename.c_str(),
+                              static_cast<size_t>(relocation_section.Size()),
+                              BitsToBytesRoundUp(number_of_relocation_locations));
+    return false;
+  }
+  if (relocation_section.End() != image_size) {
+    *error_msg =
StringPrintf("Relocation section does not end at file end in image file %s: %zu" + " expected: %" PRId64, + relocated_filename.c_str(), + static_cast<size_t>(relocation_section.End()), + image_size); return false; } - const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get()); off_t expected_diff = image_header.GetPatchDelta(); - if (expected_diff == 0) { *error_msg = StringPrintf("Unsuported patch delta of zero in %s", relocated_filename.c_str()); @@ -347,35 +412,14 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation( // Relocated image is expected to differ from the original due to relocation. // Unrelocate the image in memory to compensate. - uint8_t* image_start = image.get(); - const uint8_t* rel_end = &rel[rel_size]; - const uint8_t* rel_ptr = &rel[SHA256_DIGEST_LENGTH]; - // The remaining .rel file consists of offsets at which relocation should've occurred. - // For each offset, we "unrelocate" the image by subtracting the expected relocation - // diff value (as specified in the image header). - // - // Each offset is encoded as a delta/diff relative to the previous offset. With the - // very first offset being encoded relative to offset 0. - // Deltas are encoded using little-endian 7 bits per byte encoding, with all bytes except - // the last one having the highest bit set. - uint32_t offset = 0; - while (rel_ptr != rel_end) { - uint32_t offset_delta = 0; - if (DecodeUnsignedLeb128Checked(&rel_ptr, rel_end, &offset_delta)) { - offset += offset_delta; - if (static_cast<int64_t>(offset) + static_cast<int64_t>(sizeof(uint32_t)) > image_size) { - *error_msg = StringPrintf("Relocation out of bounds in %s", relocated_filename.c_str()); - return false; - } - uint32_t *image_value = reinterpret_cast<uint32_t*>(image_start + offset); + MemoryRegion relocations(image.get() + relocation_section.Offset(), relocation_section.Size()); + BitMemoryRegion relocation_bitmask(relocations, + /* bit_offset */ 0u, + number_of_relocation_locations); + for (size_t index = 0; index != number_of_relocation_locations; ++index) { + if (relocation_bitmask.LoadBit(index)) { + uint32_t* image_value = reinterpret_cast<uint32_t*>(image.get() + index * 4u); *image_value -= expected_diff; - } else { - *error_msg = - StringPrintf( - "Malformed image relocation file %s: " - "last byte has it's most significant bit set", - rel_filename.c_str()); - return false; } } @@ -384,7 +428,7 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation( // digest from relocation file. 
   uint8_t image_digest[SHA256_DIGEST_LENGTH];
   SHA256(image.get(), image_size, image_digest);
-  if (memcmp(image_digest, original_image_digest, SHA256_DIGEST_LENGTH) != 0) {
+  if (memcmp(image_digest, rel.get(), SHA256_DIGEST_LENGTH) != 0) {
     *error_msg =
         StringPrintf(
             "Relocated image %s does not match the original %s after unrelocation",
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 0936a53f99..826f382f72 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -618,10 +618,21 @@ class ImageSpaceLoader {
     const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
                                                kPageSize);
     const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
-    if (end_of_bitmap != image_file_size) {
+    const ImageSection& relocations_section = image_header->GetImageRelocationsSection();
+    if (relocations_section.Offset() != bitmap_section.Offset() + bitmap_section.Size()) {
       *error_msg = StringPrintf(
-          "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.", image_file_size,
-          end_of_bitmap);
+          "Relocations do not start immediately after bitmap: %u vs. %u + %u.",
+          relocations_section.Offset(),
+          bitmap_section.Offset(),
+          bitmap_section.Size());
+      return nullptr;
+    }
+    const size_t end_of_relocations = end_of_bitmap + relocations_section.Size();
+    if (end_of_relocations != image_file_size) {
+      *error_msg = StringPrintf(
+          "Image file size does not equal end of relocations: size=%" PRIu64 " vs. %zu.",
+          image_file_size,
+          end_of_relocations);
       return nullptr;
     }
diff --git a/runtime/image.cc b/runtime/image.cc
index 7083ee1382..028c515c91 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@ namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '2', '\0' };  // Boot image live objects.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '3', '\0' };  // Image relocations.
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 2c6fb54269..af092ad3fe 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -230,6 +230,7 @@ class PACKED(4) ImageHeader {
     kSectionInternedStrings,
     kSectionClassTable,
     kSectionImageBitmap,
+    kSectionImageRelocations,
     kSectionCount,  // Number of elements in enum.
   };
 
@@ -286,6 +287,10 @@ class PACKED(4) ImageHeader {
     return GetImageSection(kSectionImageBitmap);
   }
 
+  const ImageSection& GetImageRelocationsSection() const {
+    return GetImageSection(kSectionImageRelocations);
+  }
+
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ObjPtr<mirror::Object> GetImageRoot(ImageRoot image_root) const
       REQUIRES_SHARED(Locks::mutator_lock_);
```
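Taken together, the change replaces the LEB128-encoded offset list from the old .art.rel file with a bitmap stored in the new kSectionImageRelocations section: one bit per 4-byte (GcRoot-sized) slot of the image heap, set by dex2oat when it records a relocation and read back by patchoat when it "unrelocates" an image for verification. Below is a minimal, self-contained sketch of that encoding for a boot image. The helper names (MarkRelocation, Unrelocate) and the LSB-first bit order are illustrative assumptions, not the actual ART declarations; the real code builds the bitmap with BitMemoryRegion and, for app images, doubles the bit count to separate intra-image references from boot-image references.

```c++
// Illustration of a bit-per-slot relocation bitmap: mark the 4-byte slots
// that hold addresses, then subtract the relocation delta from exactly those
// slots. Helper names here are hypothetical, not ART's.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

namespace {

constexpr size_t kSlotSize = sizeof(uint32_t);  // sizeof(GcRoot<mirror::Object>) in ART.

// Offset within the image -> bit index, as in the diff's RelocationIndex().
size_t RelocationIndex(size_t offset) {
  assert(offset % kSlotSize == 0);
  return offset / kSlotSize;
}

// Writer side: record that the 4-byte value at `offset` needs patching.
void MarkRelocation(std::vector<uint8_t>* bitmap, size_t offset) {
  size_t index = RelocationIndex(offset);
  (*bitmap)[index / 8] |= static_cast<uint8_t>(1u << (index % 8));
}

// patchoat side: subtract `delta` from every marked slot ("unrelocate").
void Unrelocate(uint8_t* image, size_t image_end,
                const std::vector<uint8_t>& bitmap, uint32_t delta) {
  for (size_t index = 0; index != image_end / kSlotSize; ++index) {
    if ((bitmap[index / 8] >> (index % 8)) & 1u) {
      uint32_t value;
      std::memcpy(&value, image + index * kSlotSize, kSlotSize);
      value -= delta;
      std::memcpy(image + index * kSlotSize, &value, kSlotSize);
    }
  }
}

}  // namespace

int main() {
  // Toy "image": four 4-byte slots; slots 1 and 3 hold addresses that were
  // relocated by +0x1000, the other slots hold plain data.
  uint32_t image[4] = {42u, 0x71000u, 7u, 0x72000u};
  size_t image_end = sizeof(image);

  // One bit per slot, rounded up to whole bytes (BitsToBytesRoundUp in ART).
  std::vector<uint8_t> bitmap((image_end / kSlotSize + 7) / 8, 0u);
  MarkRelocation(&bitmap, 1 * kSlotSize);
  MarkRelocation(&bitmap, 3 * kSlotSize);

  Unrelocate(reinterpret_cast<uint8_t*>(image), image_end, bitmap, 0x1000u);
  assert(image[1] == 0x70000u && image[3] == 0x71000u);
  assert(image[0] == 42u && image[2] == 7u);  // Unmarked slots stay untouched.
  return 0;
}
```

Compared with the old delta-encoded offset list, the bitmap has a fixed, easily validated size, BitsToBytesRoundUp(image_end / 4), which is exactly what the new size checks in patchoat.cc and image_space.cc rely on.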