Revert "Revert "Load app images""
This reverts commit 1bc977cf2f8199311a97f2ba9431a184540e3e9c.
Bug: 22858531
Change-Id: Ide00bf3a73a02cba3bb364177204ad1b13f70295
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 88a6c6c..b6af908 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -115,6 +115,8 @@
// Resets all of the bytes in the card table to clean.
void ClearCardTable();
+
+ // Clear a range of cards covering [start, end); start and end must be aligned to kCardSize.
void ClearCardRange(uint8_t* start, uint8_t* end);
// Resets all of the bytes in the card table which do not map to the image space.
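
A minimal usage sketch for the new contract, mirroring the heap.cc hunk further down (AlignUp is ART's existing rounding helper; only an image space's End() needs rounding, since space begins are card aligned by construction):

    uint8_t* begin = space->Begin();
    uint8_t* end = space->End();
    if (space->IsImageSpace()) {
      // Image space End() is the end of the mirror objects and may not be card aligned.
      end = AlignUp(end, accounting::CardTable::kCardSize);
    }
    card_table->ClearCardRange(begin, end);
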
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 61c67f8..4cf5b4f 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -167,8 +167,12 @@
uintptr_t* address = &bitmap_begin_[index];
uintptr_t old_word = *address;
if (kSetBit) {
+ // Check the bit before setting the word in case we are trying to mark a read-only bitmap
+ // such as an image space bitmap. That bitmap is mapped read-only and will fault if we
+ // attempt to change any words. Since all of its objects are already marked, the write
+ // never happens if we check the bit first. Checking first also avoids dirtying pages
+ // that would result if the bitmap were read-write and we stored unconditionally.
if ((old_word & mask) == 0) {
- // Avoid dirtying the page if possible.
*address = old_word | mask;
}
} else {
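
The same idiom in isolation (a sketch, not the actual SpaceBitmap code). Reads are always safe on a PROT_READ mapping, so guarding the store makes marking a no-op on a pre-marked read-only image bitmap and avoids dirtying copy-on-write pages everywhere else:

    uintptr_t old_word = *address;   // Reading is safe even on a read-only mapping.
    if ((old_word & mask) == 0) {    // Bit not yet set, so the store is actually needed.
      *address = old_word | mask;    // Only now touch (and potentially dirty) the page.
    }
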
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 4884e66..ea290dd 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -112,8 +112,13 @@
/*oat_data_begin*/PointerToLowMemUInt32(map->End()),
/*oat_data_end*/PointerToLowMemUInt32(map->End() + oat_size),
/*oat_file_end*/PointerToLowMemUInt32(map->End() + oat_size),
+ /*boot_image_begin*/0u,
+ /*boot_image_size*/0u,
+ /*boot_oat_begin*/0u,
+ /*boot_oat_size*/0u,
/*pointer_size*/sizeof(void*),
/*compile_pic*/false,
+ /*is_pic*/false,
ImageHeader::kStorageModeUncompressed,
/*storage_size*/0u);
return new DummyImageSpace(map.release(), live_bitmap.release());
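
The four new header fields record where the boot image and boot oat were when the app image was compiled. RelocateInPlace in image_space.cc below validates them roughly as follows (a condensed sketch of that function's checks):

    const uint32_t boot_image_size = boot_image_end - boot_image_begin;  // Runtime's actual range.
    if (boot_image_size != image_header.GetBootImageSize()) {
      return false;  // Compiled against a differently sized boot image; cannot relocate.
    }
    // Sizes match, so only the base may differ; the difference is the relocation delta.
    RelocationRange boot_image(image_header.GetBootImageBegin(), boot_image_begin, boot_image_size);
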
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2fb5e34..8cd8d73 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -273,10 +273,11 @@
std::string& image_name = image_file_names[index];
ATRACE_BEGIN("ImageSpace::Create");
std::string error_msg;
- space::ImageSpace* boot_image_space = space::ImageSpace::Create(image_name.c_str(),
- image_instruction_set,
- index > 0,
- &error_msg);
+ space::ImageSpace* boot_image_space = space::ImageSpace::CreateBootImage(
+ image_name.c_str(),
+ image_instruction_set,
+ index > 0,
+ &error_msg);
ATRACE_END();
if (boot_image_space != nullptr) {
AddSpace(boot_image_space);
@@ -491,7 +492,15 @@
ATRACE_END();
// Allocate the card table.
ATRACE_BEGIN("Create card table");
- card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
+ // We currently don't support dynamically resizing the card table.
+ // Since we don't know where in the low_4gb the app image will be located, make the card table
+ // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
+ UNUSED(heap_capacity);
+ // Start at 4 KB; we can be sure there are no spaces mapped this low, since that address range
+ // is reserved by the kernel.
+ static constexpr size_t kMinHeapAddress = 4 * KB;
+ card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
+ 4 * GB - kMinHeapAddress));
CHECK(card_table_.get() != nullptr) << "Failed to create card table";
ATRACE_END();
if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
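
Back-of-the-envelope cost of covering the whole low 4GB, assuming the 128-byte card size from card_table.h (kCardShift == 7); the constants here are for illustration only:

    constexpr uint64_t kCardSize = 128;                      // 1 << kCardShift.
    constexpr uint64_t kCovered = (4ull << 30) - (4 << 10);  // low_4gb minus the first 4 KB.
    constexpr uint64_t kTableBytes = kCovered / kCardSize;   // One byte per card: ~32 MB.
    // The table is reserved address space; pages are committed lazily by the kernel, so the
    // resident cost stays proportional to the heap range actually dirtied.
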
@@ -1252,10 +1261,6 @@
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
-std::vector<space::ImageSpace*> Heap::GetBootImageSpaces() const {
- return boot_image_spaces_;
-}
-
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
std::ostringstream oss;
size_t total_bytes_free = GetFreeMemory();
@@ -3194,7 +3199,13 @@
} else if (process_alloc_space_cards) {
TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
if (clear_alloc_space_cards) {
- card_table_->ClearCardRange(space->Begin(), space->End());
+ uint8_t* end = space->End();
+ if (space->IsImageSpace()) {
+ // The image space end is the end of the mirror objects and is not necessarily page or card
+ // aligned. Align up so that the alignment check in ClearCardRange does not fail.
+ end = AlignUp(end, accounting::CardTable::kCardSize);
+ }
+ card_table_->ClearCardRange(space->Begin(), end);
} else {
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these
// cards were dirty before the GC started.
@@ -3989,5 +4000,43 @@
gc_disabled_for_shutdown_ = true;
}
+bool Heap::ObjectIsInBootImageSpace(mirror::Object* obj) const {
+ for (gc::space::ImageSpace* space : boot_image_spaces_) {
+ if (space->HasAddress(obj)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
+ uint32_t* boot_image_end,
+ uint32_t* boot_oat_begin,
+ uint32_t* boot_oat_end) {
+ DCHECK(boot_image_begin != nullptr);
+ DCHECK(boot_image_end != nullptr);
+ DCHECK(boot_oat_begin != nullptr);
+ DCHECK(boot_oat_end != nullptr);
+ *boot_image_begin = 0u;
+ *boot_image_end = 0u;
+ *boot_oat_begin = 0u;
+ *boot_oat_end = 0u;
+ for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
+ const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
+ const uint32_t image_size = space_->GetImageHeader().GetImageSize();
+ if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
+ *boot_image_begin = image_begin;
+ }
+ *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
+ const OatFile* boot_oat_file = space_->GetOatFile();
+ const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
+ const uint32_t oat_size = boot_oat_file->Size();
+ if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
+ *boot_oat_begin = oat_begin;
+ }
+ *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
+ }
+}
+
} // namespace gc
} // namespace art
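
A usage sketch for the new accessors; RelocateInPlace in image_space.cc below is the real consumer:

    uint32_t image_begin, image_end, oat_begin, oat_end;
    Runtime::Current()->GetHeap()->GetBootImagesSize(&image_begin, &image_end,
                                                     &oat_begin, &oat_end);
    // [image_begin, image_end) and [oat_begin, oat_end) bound the union of all boot image
    // spaces and their oat files. An app image stores the same bounds from compile time,
    // so the two layouts can be compared at load time.
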
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7b531ba..1b7e2c9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -580,7 +580,17 @@
void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
// Returns the boot image spaces. There may be multiple boot image spaces.
- std::vector<space::ImageSpace*> GetBootImageSpaces() const;
+ const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
+ return boot_image_spaces_;
+ }
+
+ bool ObjectIsInBootImageSpace(mirror::Object* obj) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void GetBootImagesSize(uint32_t* boot_image_begin,
+ uint32_t* boot_image_end,
+ uint32_t* boot_oat_begin,
+ uint32_t* boot_oat_end);
// Permanently disable moving garbage collection.
void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 5f6bb8e..9ff3d8d 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -475,10 +475,10 @@
return true;
}
-ImageSpace* ImageSpace::Create(const char* image_location,
- const InstructionSet image_isa,
- bool secondary_image,
- std::string* error_msg) {
+ImageSpace* ImageSpace::CreateBootImage(const char* image_location,
+ const InstructionSet image_isa,
+ bool secondary_image,
+ std::string* error_msg) {
std::string system_filename;
bool has_system = false;
std::string cache_filename;
@@ -584,8 +584,13 @@
// assume this if we are using a relocated image (i.e. image checksum
// matches) since this is only different by the offset. We need this to
// make sure that host tests continue to work.
- space = ImageSpace::Init(image_filename->c_str(), image_location,
- !(is_system || relocated_version_used), error_msg);
+ // Since this is the boot image, pass a null oat file; the oat file is loaded from the boot
+ // image oat file name.
+ space = ImageSpace::Init(image_filename->c_str(),
+ image_location,
+ !(is_system || relocated_version_used),
+ /* oat_file */nullptr,
+ error_msg);
}
if (space != nullptr) {
return space;
@@ -646,7 +651,7 @@
// we leave Create.
ScopedFlock image_lock;
image_lock.Init(cache_filename.c_str(), error_msg);
- space = ImageSpace::Init(cache_filename.c_str(), image_location, true, error_msg);
+ space = ImageSpace::Init(cache_filename.c_str(), image_location, true, nullptr, error_msg);
if (space == nullptr) {
*error_msg = StringPrintf("Failed to load generated image '%s': %s",
cache_filename.c_str(), error_msg->c_str());
@@ -669,34 +674,494 @@
}
}
-ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_location,
- bool validate_oat_file, std::string* error_msg) {
+// Helper class for relocating from one range of memory to another.
+class RelocationRange {
+ public:
+ RelocationRange() = default;
+ RelocationRange(const RelocationRange&) = default;
+ RelocationRange(uintptr_t source, uintptr_t dest, uintptr_t length)
+ : source_(source),
+ dest_(dest),
+ length_(length) {}
+
+ bool ContainsSource(uintptr_t address) const {
+ return address - source_ < length_;
+ }
+
+ // Translate a source address to the destination space.
+ uintptr_t ToDest(uintptr_t address) const {
+ DCHECK(ContainsSource(address));
+ return address + Delta();
+ }
+
+ // Returns the delta from the source to the dest.
+ off_t Delta() const {
+ return dest_ - source_;
+ }
+
+ uintptr_t Source() const {
+ return source_;
+ }
+
+ uintptr_t Dest() const {
+ return dest_;
+ }
+
+ uintptr_t Length() const {
+ return length_;
+ }
+
+ private:
+ const uintptr_t source_;
+ const uintptr_t dest_;
+ const uintptr_t length_;
+};
+
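
ContainsSource folds the usual two-sided bounds check into a single unsigned compare: if address is below source_, the subtraction wraps around to a huge value that cannot be less than length_. A worked example of the translation, with hypothetical addresses:

    // Source range [0x70000000, 0x70800000) mapped at destination 0x12c00000.
    RelocationRange app_image(/*source*/0x70000000u, /*dest*/0x12c00000u, /*length*/8 * MB);
    app_image.ContainsSource(0x70000040u);  // true
    app_image.ContainsSource(0x6fffffffu);  // false: wraps to a huge unsigned value.
    app_image.ToDest(0x70000040u);          // 0x12c00040 == address + (dest - source).
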
+class FixupVisitor : public ValueObject {
+ public:
+ FixupVisitor(const RelocationRange& boot_image,
+ const RelocationRange& boot_oat,
+ const RelocationRange& app_image,
+ const RelocationRange& app_oat)
+ : boot_image_(boot_image),
+ boot_oat_(boot_oat),
+ app_image_(app_image),
+ app_oat_(app_oat) {}
+
+ // Return the relocated address of a heap object.
+ template <typename T>
+ ALWAYS_INLINE T* ForwardObject(T* src) const {
+ const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
+ if (boot_image_.ContainsSource(uint_src)) {
+ return reinterpret_cast<T*>(boot_image_.ToDest(uint_src));
+ }
+ if (app_image_.ContainsSource(uint_src)) {
+ return reinterpret_cast<T*>(app_image_.ToDest(uint_src));
+ }
+ return src;
+ }
+
+ // Return the relocated address of a code pointer (contained by an oat file).
+ ALWAYS_INLINE const void* ForwardCode(const void* src) const {
+ const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
+ if (boot_oat_.ContainsSource(uint_src)) {
+ return reinterpret_cast<const void*>(boot_oat_.ToDest(uint_src));
+ }
+ if (app_oat_.ContainsSource(uint_src)) {
+ return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
+ }
+ return src;
+ }
+
+ protected:
+ // Source section.
+ const RelocationRange boot_image_;
+ const RelocationRange boot_oat_;
+ const RelocationRange app_image_;
+ const RelocationRange app_oat_;
+};
+
+std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
+ return os << "(" << reinterpret_cast<const void*>(reloc.Source()) << "-"
+ << reinterpret_cast<const void*>(reloc.Source() + reloc.Length()) << ")->("
+ << reinterpret_cast<const void*>(reloc.Dest()) << "-"
+ << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
+}
+
+// Adapter for mirror::Class::FixupNativePointers.
+class FixupObjectAdapter : public FixupVisitor {
+ public:
+ template<typename... Args>
+ explicit FixupObjectAdapter(Args... args) : FixupVisitor(args...) {}
+
+ template <typename T>
+ T* operator()(T* obj) const {
+ return ForwardObject(obj);
+ }
+};
+
+class FixupClassVisitor : public FixupVisitor {
+ public:
+ template<typename... Args>
+ explicit FixupClassVisitor(Args... args) : FixupVisitor(args...) {}
+
+ // The image space is contained, so the GC doesn't need to know about it. Avoid requiring the
+ // mutator lock to prevent possible pauses.
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ DCHECK(klass != nullptr) << "Null class in image";
+ // No AsClass since our fields aren't quite fixed up yet.
+ mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass));
+ // Keep clean if possible.
+ if (klass != new_klass) {
+ obj->SetClass<kVerifyNone>(new_klass);
+ }
+ }
+};
+
+class FixupRootVisitor : public FixupVisitor {
+ public:
+ template<typename... Args>
+ explicit FixupRootVisitor(Args... args) : FixupVisitor(args...) {}
+
+ ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* ref = root->AsMirrorPtr();
+ mirror::Object* new_ref = ForwardObject(ref);
+ if (ref != new_ref) {
+ root->Assign(new_ref);
+ }
+ }
+};
+
+class FixupObjectVisitor : public FixupVisitor {
+ public:
+ template<typename... Args>
+ explicit FixupObjectVisitor(Args... args) : FixupVisitor(args...) {}
+
+ // Fix up separately since we also need to fix up method entrypoints.
+ ALWAYS_INLINE void VisitRootIfNonNull(
+ mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+ ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ // There could be overlap between ranges, so we must avoid visiting the same reference twice.
+ // Avoid the class field since we already fixed it up in FixupClassVisitor.
+ if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
+ // Space is not yet added to the heap; don't do a read barrier.
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+ offset);
+ // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
+ // image.
+ obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
+ }
+ }
+
+ // java.lang.ref.Reference visitor.
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
+ ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+ mirror::Reference::ReferentOffset(),
+ ForwardObject(obj));
+ }
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
+ *this,
+ *this);
+ // We want to use our own class loader and not the one in the image.
+ if (obj->IsClass<kVerifyNone, kWithoutReadBarrier>()) {
+ mirror::Class* klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
+ FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
+ klass->FixupNativePointers(klass, sizeof(void*), visitor);
+ // Deal with the arrays.
+ mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+ if (vtable != nullptr) {
+ vtable->Fixup(vtable, sizeof(void*), visitor);
+ }
+ mirror::IfTable* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+ if (iftable != nullptr) {
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ if (iftable->GetMethodArrayCount(i) > 0) {
+ mirror::PointerArray* methods =
+ iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
+ DCHECK(methods != nullptr);
+ methods->Fixup(methods, sizeof(void*), visitor);
+ }
+ }
+ }
+ }
+ }
+};
+
+class ForwardObjectAdapter {
+ public:
+ ALWAYS_INLINE ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {}
+
+ template <typename T>
+ ALWAYS_INLINE T* operator()(T* src) const {
+ return visitor_->ForwardObject(src);
+ }
+
+ private:
+ const FixupVisitor* const visitor_;
+};
+
+class ForwardCodeAdapter {
+ public:
+ ALWAYS_INLINE ForwardCodeAdapter(const FixupVisitor* visitor) : visitor_(visitor) {}
+
+ template <typename T>
+ ALWAYS_INLINE T* operator()(T* src) const {
+ return visitor_->ForwardCode(src);
+ }
+
+ private:
+ const FixupVisitor* const visitor_;
+};
+
+class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor {
+ public:
+ template<typename... Args>
+ explicit FixupArtMethodVisitor(bool fixup_heap_objects, Args... args)
+ : FixupVisitor(args...),
+ fixup_heap_objects_(fixup_heap_objects) {}
+
+ virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
+ if (fixup_heap_objects_) {
+ method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this));
+ }
+ method->UpdateEntrypoints(ForwardCodeAdapter(this));
+ }
+
+ private:
+ const bool fixup_heap_objects_;
+};
+
+class FixupArtFieldVisitor : public FixupVisitor, public ArtFieldVisitor {
+ public:
+ template<typename... Args>
+ explicit FixupArtFieldVisitor(Args... args) : FixupVisitor(args...) {}
+
+ virtual void Visit(ArtField* field) NO_THREAD_SAFETY_ANALYSIS {
+ field->UpdateObjects(ForwardObjectAdapter(this));
+ }
+};
+
+// Relocate an image space mapped at target_base which may previously have been at a different
+// base address. "In place" means the single ImageSpace is patched where it is mapped, rather
+// than being copied from a source ImageSpace to a destination one.
+static bool RelocateInPlace(ImageHeader& image_header,
+ uint8_t* target_base,
+ accounting::ContinuousSpaceBitmap* bitmap,
+ const OatFile* app_oat_file,
+ std::string* error_msg) {
+ DCHECK(error_msg != nullptr);
+ if (!image_header.IsPic()) {
+ if (image_header.GetImageBegin() == target_base) {
+ return true;
+ }
+ *error_msg = StringPrintf("Cannot relocate non-pic image for oat file %s",
+ (app_oat_file != nullptr) ? app_oat_file->GetLocation().c_str() : "");
+ return false;
+ }
+ // Set up sections.
+ uint32_t boot_image_begin = 0;
+ uint32_t boot_image_end = 0;
+ uint32_t boot_oat_begin = 0;
+ uint32_t boot_oat_end = 0;
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
+ CHECK_NE(boot_image_begin, boot_image_end)
+ << "Can not relocate app image without boot image space";
+ CHECK_NE(boot_oat_begin, boot_oat_end) << "Can not relocate app image without boot oat file";
+ const uint32_t boot_image_size = boot_image_end - boot_image_begin;
+ const uint32_t boot_oat_size = boot_oat_end - boot_oat_begin;
+ const uint32_t image_header_boot_image_size = image_header.GetBootImageSize();
+ const uint32_t image_header_boot_oat_size = image_header.GetBootOatSize();
+ if (boot_image_size != image_header_boot_image_size) {
+ *error_msg = StringPrintf("Boot image size %" PRIu64 " does not match expected size %"
+ PRIu64,
+ static_cast<uint64_t>(boot_image_size),
+ static_cast<uint64_t>(image_header_boot_image_size));
+ return false;
+ }
+ if (boot_oat_size != image_header_boot_oat_size) {
+ *error_msg = StringPrintf("Boot oat size %" PRIu64 " does not match expected size %"
+ PRIu64,
+ static_cast<uint64_t>(boot_oat_size),
+ static_cast<uint64_t>(image_header_boot_oat_size));
+ return false;
+ }
+ TimingLogger logger(__FUNCTION__, true, false);
+ RelocationRange boot_image(image_header.GetBootImageBegin(),
+ boot_image_begin,
+ boot_image_size);
+ RelocationRange boot_oat(image_header.GetBootOatBegin(),
+ boot_oat_begin,
+ boot_oat_size);
+ RelocationRange app_image(reinterpret_cast<uintptr_t>(image_header.GetImageBegin()),
+ reinterpret_cast<uintptr_t>(target_base),
+ image_header.GetImageSize());
+ // Use the oat data section since this is where OatFile::Begin points.
+ RelocationRange app_oat(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
+ // Not necessarily in low 4GB.
+ reinterpret_cast<uintptr_t>(app_oat_file->Begin()),
+ image_header.GetOatDataEnd() - image_header.GetOatDataBegin());
+ VLOG(image) << "App image " << app_image;
+ VLOG(image) << "App oat " << app_oat;
+ VLOG(image) << "Boot image " << boot_image;
+ VLOG(image) << "Boot oat " << boot_oat;
+ // True if we need to fix up any heap pointers; otherwise only code pointers need fixing.
+ const bool fixup_image = boot_image.Delta() != 0 || app_image.Delta() != 0;
+ const bool fixup_code = boot_oat.Delta() != 0 || app_oat.Delta() != 0;
+ if (!fixup_image && !fixup_code) {
+ // Nothing to fix up.
+ return true;
+ }
+ // Need to update the image to be at the target base.
+ const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects);
+ uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+ uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
+ // Two-pass approach: fix up all classes first, then fix up non-class objects.
+ FixupObjectVisitor fixup_object_visitor(boot_image, boot_oat, app_image, app_oat);
+ if (fixup_image) {
+ TimingLogger::ScopedTiming timing("Fixup classes", &logger);
+ // Fixing up classes only touches app image classes; the mutator lock is not needed since the
+ // space is not yet visible to the GC.
+ FixupClassVisitor fixup_class_visitor(boot_image, boot_oat, app_image, app_oat);
+ bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_class_visitor);
+ // Fixing up objects may read fields in the boot image; take the mutator lock here for sanity,
+ // though it is probably not required.
+ ScopedObjectAccess soa(Thread::Current());
+ timing.NewTiming("Fixup objects");
+ bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
+ FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
+ // Fixup image roots.
+ CHECK(app_image.ContainsSource(reinterpret_cast<uintptr_t>(image_header.GetImageRoots())));
+ image_header.RelocateImageObjects(app_image.Delta());
+ CHECK_EQ(image_header.GetImageBegin(), target_base);
+ // Fix up dex cache DexFile pointers.
+ auto* dex_caches = image_header.GetImageRoot(ImageHeader::kDexCaches)->
+ AsObjectArray<mirror::DexCache>();
+ for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ // Fix up dex cache pointers.
+ GcRoot<mirror::String>* strings = dex_cache->GetStrings();
+ if (strings != nullptr) {
+ GcRoot<mirror::String>* new_strings = fixup_adapter.ForwardObject(strings);
+ if (strings != new_strings) {
+ dex_cache->SetFieldPtr64<false>(mirror::DexCache::StringsOffset(), new_strings);
+ }
+ dex_cache->FixupStrings(new_strings, fixup_adapter);
+ }
+ GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
+ if (types != nullptr) {
+ GcRoot<mirror::Class>* new_types = fixup_adapter.ForwardObject(types);
+ if (types != new_types) {
+ dex_cache->SetFieldPtr64<false>(mirror::DexCache::ResolvedTypesOffset(), new_types);
+ }
+ dex_cache->FixupResolvedTypes(new_types, fixup_adapter);
+ }
+ ArtMethod** methods = dex_cache->GetResolvedMethods();
+ if (methods != nullptr) {
+ ArtMethod** new_methods = fixup_adapter.ForwardObject(methods);
+ if (methods != new_methods) {
+ dex_cache->SetFieldPtr64<false>(mirror::DexCache::ResolvedMethodsOffset(), new_methods);
+ }
+ for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
+ ArtMethod* orig = mirror::DexCache::GetElementPtrSize(new_methods, j, sizeof(void*));
+ ArtMethod* copy = fixup_adapter.ForwardObject(orig);
+ if (orig != copy) {
+ mirror::DexCache::SetElementPtrSize(new_methods, j, copy, sizeof(void*));
+ }
+ }
+ }
+ ArtField** fields = dex_cache->GetResolvedFields();
+ if (fields != nullptr) {
+ ArtField** new_fields = fixup_adapter.ForwardObject(fields);
+ if (fields != new_fields) {
+ dex_cache->SetFieldPtr64<false>(mirror::DexCache::ResolvedFieldsOffset(), new_fields);
+ }
+ for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
+ ArtField* orig = mirror::DexCache::GetElementPtrSize(new_fields, j, sizeof(void*));
+ ArtField* copy = fixup_adapter.ForwardObject(orig);
+ if (orig != copy) {
+ mirror::DexCache::SetElementPtrSize(new_fields, j, copy, sizeof(void*));
+ }
+ }
+ }
+ }
+ }
+ {
+ // Only touches objects in the app image, no need for mutator lock.
+ TimingLogger::ScopedTiming timing("Fixup methods", &logger);
+ FixupArtMethodVisitor method_visitor(fixup_image, boot_image, boot_oat, app_image, app_oat);
+ image_header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
+ &method_visitor,
+ target_base,
+ sizeof(void*));
+ }
+ if (fixup_image) {
+ {
+ // Only touches objects in the app image, no need for mutator lock.
+ TimingLogger::ScopedTiming timing("Fixup fields", &logger);
+ FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat);
+ image_header.GetImageSection(ImageHeader::kSectionArtFields).VisitPackedArtFields(
+ &field_visitor,
+ target_base);
+ }
+ // In the app image case, the image methods are actually in the boot image.
+ image_header.RelocateImageMethods(boot_image.Delta());
+ const auto& class_table_section = image_header.GetImageSection(ImageHeader::kSectionClassTable);
+ if (class_table_section.Size() > 0u) {
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements.
+ // This also relies on VisitRoots not doing any verification that could fail after we update
+ // the roots to the image addresses.
+ ScopedObjectAccess soa(Thread::Current());
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ClassTable temp_table;
+ temp_table.ReadFromMemory(target_base + class_table_section.Offset());
+ FixupRootVisitor root_visitor(boot_image, boot_oat, app_image, app_oat);
+ temp_table.VisitRoots(root_visitor);
+ }
+ }
+ if (VLOG_IS_ON(image)) {
+ logger.Dump(LOG(INFO));
+ }
+ return true;
+}
+
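
The two bitmap passes above exist because VisitReferences needs each object's class in order to locate its reference fields, so every klass_ word must be forwarded before the general object pass runs; condensed:

    // Pass 1: forward only the class pointer of every object (FixupClassVisitor).
    bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_class_visitor);
    // Pass 2: classes are now valid, so field offsets can be read. Forward all other
    // references, plus native pointers for classes (vtables, iftables, dex caches).
    bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
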
+ImageSpace* ImageSpace::Init(const char* image_filename,
+ const char* image_location,
+ bool validate_oat_file,
+ const OatFile* oat_file,
+ std::string* error_msg) {
CHECK(image_filename != nullptr);
CHECK(image_location != nullptr);
- uint64_t start_time = 0;
- if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- start_time = NanoTime();
- LOG(INFO) << "ImageSpace::Init entering image_filename=" << image_filename;
- }
+ TimingLogger logger(__FUNCTION__, true, false);
+ VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;
- std::unique_ptr<File> file(OS::OpenFileForReading(image_filename));
- if (file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open '%s'", image_filename);
- return nullptr;
+ std::unique_ptr<File> file;
+ {
+ TimingLogger::ScopedTiming timing("OpenImageFile", &logger);
+ file.reset(OS::OpenFileForReading(image_filename));
+ if (file == nullptr) {
+ *error_msg = StringPrintf("Failed to open '%s'", image_filename);
+ return nullptr;
+ }
}
- ImageHeader image_header;
- bool success = file->ReadFully(&image_header, sizeof(image_header));
- if (!success || !image_header.IsValid()) {
- *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
- return nullptr;
+ ImageHeader temp_image_header;
+ ImageHeader* image_header = &temp_image_header;
+ {
+ TimingLogger::ScopedTiming timing("ReadImageHeader", &logger);
+ bool success = file->ReadFully(image_header, sizeof(*image_header));
+ if (!success || !image_header->IsValid()) {
+ *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
+ return nullptr;
+ }
}
// Check that the file is larger or equal to the header size + data size.
const uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
- if (image_file_size < sizeof(ImageHeader) + image_header.GetDataSize()) {
+ if (image_file_size < sizeof(ImageHeader) + image_header->GetDataSize()) {
*error_msg = StringPrintf("Image file truncated: %" PRIu64 " vs. %" PRIu64 ".",
image_file_size,
- image_header.GetDataSize());
+ sizeof(ImageHeader) + image_header->GetDataSize());
return nullptr;
}
@@ -704,17 +1169,17 @@
LOG(INFO) << "Dumping image sections";
for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
- auto& section = image_header.GetImageSection(section_idx);
+ auto& section = image_header->GetImageSection(section_idx);
LOG(INFO) << section_idx << " start="
- << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset()) << " "
- << section;
+ << reinterpret_cast<void*>(image_header->GetImageBegin() + section.Offset()) << " "
+ << section;
}
}
- const auto& bitmap_section = image_header.GetImageSection(ImageHeader::kSectionImageBitmap);
+ const auto& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
// The location we want to map from is the first aligned page after the end of the stored
// (possibly compressed) data.
- const size_t image_bitmap_offset = RoundUp(sizeof(image_header) + image_header.GetDataSize(),
+ const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
kPageSize);
const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
if (end_of_bitmap != image_file_size) {
@@ -724,67 +1189,84 @@
return nullptr;
}
+ // The preferred address at which to map the image; null means any address. If we manage to
+ // map the image at its preferred begin address, the amount of fixup work required is minimized.
+ std::vector<uint8_t*> addresses(1, image_header->GetImageBegin());
+ if (image_header->IsPic()) {
+ // Can also map at a random low_4gb address since we can relocate in-place.
+ addresses.push_back(nullptr);
+ }
+
// Note: The image header is part of the image due to mmap page alignment required of offset.
std::unique_ptr<MemMap> map;
- if (image_header.GetStorageMode() == ImageHeader::kStorageModeUncompressed) {
- map.reset(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
- image_header.GetImageSize(),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- /*low_4gb*/false,
- /*reuse*/false,
- image_filename,
- error_msg));
- } else {
- // Reserve output and decompress into it.
- map.reset(MemMap::MapAnonymous(image_location,
- image_header.GetImageBegin(),
- image_header.GetImageSize(),
- PROT_READ | PROT_WRITE,
- /*low_4gb*/false,
- /*reuse*/false,
- error_msg));
+ std::string temp_error_msg;
+ for (uint8_t* address : addresses) {
+ TimingLogger::ScopedTiming timing("MapImageFile", &logger);
+ // We only care about the error message for the last address in addresses, to avoid the
+ // overhead of printing the process maps if we can relocate instead.
+ std::string* out_error_msg = (address == addresses.back()) ? &temp_error_msg : nullptr;
+ if (image_header->GetStorageMode() == ImageHeader::kStorageModeUncompressed) {
+ map.reset(MemMap::MapFileAtAddress(address,
+ image_header->GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ file->Fd(),
+ 0,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ image_filename,
+ /*out*/out_error_msg));
+ } else {
+ // Reserve output and decompress into it.
+ map.reset(MemMap::MapAnonymous(image_location,
+ address,
+ image_header->GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ out_error_msg));
+ if (map != nullptr) {
+ const size_t stored_size = image_header->GetDataSize();
+ const size_t write_offset = sizeof(ImageHeader); // Skip the header.
+ std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ file->Fd(),
+ /*offset*/0,
+ /*low_4gb*/false,
+ image_filename,
+ out_error_msg));
+ if (temp_map == nullptr) {
+ DCHECK(!out_error_msg->empty());
+ return nullptr;
+ }
+ memcpy(map->Begin(), image_header, sizeof(ImageHeader));
+ const uint64_t start = NanoTime();
+ const size_t decompressed_size = LZ4_decompress_safe(
+ reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
+ reinterpret_cast<char*>(map->Begin()) + write_offset,
+ stored_size,
+ map->Size());
+ VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
+ if (decompressed_size + sizeof(ImageHeader) != image_header->GetImageSize()) {
+ *error_msg = StringPrintf("Decompressed size does not match expected image size %zu vs %zu",
+ decompressed_size + sizeof(ImageHeader),
+ image_header->GetImageSize());
+ return nullptr;
+ }
+ }
+ }
if (map != nullptr) {
- const size_t stored_size = image_header.GetDataSize();
- const size_t write_offset = sizeof(image_header); // Skip the header.
- std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
- PROT_READ,
- MAP_PRIVATE,
- file->Fd(),
- /*offset*/0,
- /*low_4gb*/false,
- image_filename,
- error_msg));
- if (temp_map == nullptr) {
- DCHECK(!error_msg->empty());
- return nullptr;
- }
- memcpy(map->Begin(), &image_header, sizeof(image_header));
- const uint64_t start = NanoTime();
- const size_t decompressed_size = LZ4_decompress_safe(
- reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
- reinterpret_cast<char*>(map->Begin()) + write_offset,
- stored_size,
- map->Size());
- // TODO: VLOG(image)
- VLOG(class_linker) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
- if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
- *error_msg = StringPrintf("Decompressed size does not match expected image size %zu vs %zu",
- decompressed_size + sizeof(ImageHeader),
- image_header.GetImageSize());
- return nullptr;
- }
+ break;
}
}
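
The shape of the loop above, in brief: try the compiled-in base first, since mapping there minimizes fixup work, and only PIC images get the second, kernel-chosen low-4GB attempt. TryMapAt is a hypothetical stand-in for the MapFileAtAddress/MapAnonymous calls:

    std::vector<uint8_t*> addresses(1, image_header->GetImageBegin());  // Preferred base.
    if (image_header->IsPic()) {
      addresses.push_back(nullptr);  // nullptr lets mmap pick any low-4GB address.
    }
    for (uint8_t* address : addresses) {
      map.reset(TryMapAt(address));  // Hypothetical helper wrapping the calls above.
      if (map != nullptr) {
        break;  // First successful mapping wins.
      }
    }
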
if (map == nullptr) {
- DCHECK(!error_msg->empty());
+ DCHECK(!temp_error_msg.empty());
+ *error_msg = temp_error_msg;
return nullptr;
}
- CHECK_EQ(image_header.GetImageBegin(), map->Begin());
- DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
+ DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader)));
std::unique_ptr<MemMap> image_bitmap_map(MemMap::MapFileAtAddress(nullptr,
bitmap_section.Size(),
@@ -799,25 +1281,42 @@
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
}
- uint32_t bitmap_index = bitmap_index_.FetchAndAddSequentiallyConsistent(1);
- std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
+ // The map is loaded; from now on, use the image header from the mapped image, in case
+ // RelocateInPlace patches it.
+ image_header = reinterpret_cast<ImageHeader*>(map->Begin());
+ const uint32_t bitmap_index = bitmap_index_.FetchAndAddSequentiallyConsistent(1);
+ std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
+ image_filename,
bitmap_index));
// Bitmap only needs to cover until the end of the mirror objects section.
- const ImageSection& image_objects = image_header.GetImageSection(ImageHeader::kSectionObjects);
- std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
+ const ImageSection& image_objects = image_header->GetImageSection(ImageHeader::kSectionObjects);
+ // We only want the mirror objects, not the ArtFields and ArtMethods.
+ uint8_t* const image_end = map->Begin() + image_objects.End();
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
+ {
+ TimingLogger::ScopedTiming timing("CreateImageBitmap", &logger);
+ bitmap.reset(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(
bitmap_name,
image_bitmap_map.release(),
reinterpret_cast<uint8_t*>(map->Begin()),
image_objects.End()));
- if (bitmap == nullptr) {
- *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
- return nullptr;
+ if (bitmap == nullptr) {
+ *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
+ return nullptr;
+ }
}
-
+ {
+ TimingLogger::ScopedTiming timing("RelocateImage", &logger);
+ if (!RelocateInPlace(*image_header,
+ map->Begin(),
+ bitmap.get(),
+ oat_file,
+ error_msg)) {
+ return nullptr;
+ }
+ }
// We only want the mirror objects, not the ArtFields and ArtMethods.
- uint8_t* const image_end =
- map->Begin() + image_header.GetImageSection(ImageHeader::kSectionObjects).End();
std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
image_location,
map.release(),
@@ -829,38 +1328,61 @@
// and ArtField::java_lang_reflect_ArtField_, which are used from
// Object::SizeOf() which VerifyImageAllocations() calls, are not
// set yet at this point.
-
- space->oat_file_.reset(space->OpenOatFile(image_filename, error_msg));
- if (space->oat_file_.get() == nullptr) {
- DCHECK(!error_msg->empty());
- return nullptr;
+ if (oat_file == nullptr) {
+ TimingLogger::ScopedTiming timing("OpenOatFile", &logger);
+ space->oat_file_.reset(space->OpenOatFile(image_filename, error_msg));
+ if (space->oat_file_ == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
+ }
+ space->oat_file_non_owned_ = space->oat_file_.get();
+ } else {
+ space->oat_file_non_owned_ = oat_file;
}
- space->oat_file_non_owned_ = space->oat_file_.get();
- if (validate_oat_file && !space->ValidateOatFile(error_msg)) {
- DCHECK(!error_msg->empty());
- return nullptr;
+ if (validate_oat_file) {
+ TimingLogger::ScopedTiming timing("ValidateOatFile", &logger);
+ if (!space->ValidateOatFile(error_msg)) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
+ }
}
Runtime* runtime = Runtime::Current();
- runtime->SetInstructionSet(space->oat_file_->GetOatHeader().GetInstructionSet());
- if (!runtime->HasResolutionMethod()) {
- runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
- runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
+ // If oat_file is null, then this is a boot image space. Use the space's oat_file_non_owned_
+ // to set the runtime methods.
+ CHECK_EQ(oat_file != nullptr, image_header->IsAppImage());
+ if (image_header->IsAppImage()) {
+ CHECK_EQ(runtime->GetResolutionMethod(),
+ image_header->GetImageMethod(ImageHeader::kResolutionMethod));
+ CHECK_EQ(runtime->GetImtConflictMethod(),
+ image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
+ CHECK_EQ(runtime->GetImtUnimplementedMethod(),
+ image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveAll),
+ image_header->GetImageMethod(ImageHeader::kCalleeSaveMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kRefsOnly),
+ image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs),
+ image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod));
+ } else if (!runtime->HasResolutionMethod()) {
+ runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
+ runtime->SetResolutionMethod(image_header->GetImageMethod(ImageHeader::kResolutionMethod));
+ runtime->SetImtConflictMethod(image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
runtime->SetImtUnimplementedMethod(
- image_header.GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+ image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
runtime->SetCalleeSaveMethod(
- image_header.GetImageMethod(ImageHeader::kCalleeSaveMethod), Runtime::kSaveAll);
+ image_header->GetImageMethod(ImageHeader::kCalleeSaveMethod), Runtime::kSaveAll);
runtime->SetCalleeSaveMethod(
- image_header.GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
+ image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
runtime->SetCalleeSaveMethod(
- image_header.GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
+ image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
}
- if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- LOG(INFO) << "ImageSpace::Init exiting (" << PrettyDuration(NanoTime() - start_time)
- << ") " << *space.get();
+ VLOG(image) << "ImageSpace::Init exiting " << *space.get();
+ if (VLOG_IS_ON(image)) {
+ logger.Dump(LOG(INFO));
}
return space.release();
}
@@ -1002,6 +1524,16 @@
}
}
+ImageSpace* ImageSpace::CreateFromAppImage(const char* image,
+ const OatFile* oat_file,
+ std::string* error_msg) {
+ return gc::space::ImageSpace::Init(image,
+ image,
+ /*validate_oat_file*/false,
+ oat_file,
+ /*out*/error_msg);
+}
+
} // namespace space
} // namespace gc
} // namespace art
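
A usage sketch for the new entry point, for a caller that has already opened the app's OatFile (the caller's shape is assumed here, not part of this change):

    std::string error_msg;
    std::unique_ptr<gc::space::ImageSpace> space(
        gc::space::ImageSpace::CreateFromAppImage(image_location.c_str(),
                                                  oat_file,
                                                  &error_msg));
    if (space == nullptr) {
      LOG(WARNING) << "Could not load app image: " << error_msg;
      // Fall back to opening the dex files without an image.
    }
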
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 9c8e8b2..f2f4163 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -35,7 +35,7 @@
return kSpaceTypeImageSpace;
}
- // Create a Space from an image file for a specified instruction
+ // Create a boot image space from an image file for a specified instruction
// set. Cannot be used for future allocation or collected.
//
// Create also opens the OatFile associated with the image file so
@@ -43,10 +43,16 @@
// creation of the alloc space. The ReleaseOatFile will later be
// used to transfer ownership of the OatFile to the ClassLinker when
// it is initialized.
- static ImageSpace* Create(const char* image,
- InstructionSet image_isa,
- bool secondary_image,
- std::string* error_msg)
+ static ImageSpace* CreateBootImage(const char* image,
+ InstructionSet image_isa,
+ bool secondary_image,
+ std::string* error_msg)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Try to open an existing app image space.
+ static ImageSpace* CreateFromAppImage(const char* image,
+ const OatFile* oat_file,
+ std::string* error_msg)
SHARED_REQUIRES(Locks::mutator_lock_);
// Reads the image header from the specified image location for the
@@ -144,15 +150,17 @@
}
protected:
- // Tries to initialize an ImageSpace from the given image path,
- // returning null on error.
+ // Tries to initialize an ImageSpace from the given image path, returning null on error.
//
- // If validate_oat_file is false (for /system), do not verify that
- // image's OatFile is up-to-date relative to its DexFile
- // inputs. Otherwise (for /data), validate the inputs and generate
- // the OatFile in /data/dalvik-cache if necessary.
- static ImageSpace* Init(const char* image_filename, const char* image_location,
- bool validate_oat_file, std::string* error_msg)
+ // If validate_oat_file is false (for /system), do not verify that the image's OatFile is
+ // up-to-date relative to its DexFile inputs. Otherwise (for /data), validate the inputs and
+ // generate the OatFile in /data/dalvik-cache if necessary. If oat_file is null, the oat file
+ // is opened from the image.
+ static ImageSpace* Init(const char* image_filename,
+ const char* image_location,
+ bool validate_oat_file,
+ const OatFile* oat_file,
+ std::string* error_msg)
SHARED_REQUIRES(Locks::mutator_lock_);
OatFile* OpenOatFile(const char* image, std::string* error_msg) const