author     2023-01-03 14:01:00 +0000
committer  2023-01-28 16:42:34 +0000
commit     df68c0a6f0d36728fa728049e5bcec20de2d0d5e (patch)
tree       f65b2ffdf3f998dc7980853c71d169cb489289a8 /runtime
parent     09c218c61ab525b05d7181be92e78d69d035e122 (diff)
Write classes in runtime-generated app image.
Test: 845-data-image
Bug: 260557058
Change-Id: I640b78942984ac3d3f8d24abda619d78154acd86
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.bp           1
-rw-r--r--  runtime/class_linker.cc      6
-rw-r--r--  runtime/class_table-inl.h    10
-rw-r--r--  runtime/class_table.h        18
-rw-r--r--  runtime/handle_scope-inl.h   27
-rw-r--r--  runtime/handle_scope.h       9
-rw-r--r--  runtime/mirror/array-inl.h   16
-rw-r--r--  runtime/mirror/array.h       2
-rw-r--r--  runtime/mirror/class.h       2
-rw-r--r--  runtime/oat.cc               23
-rw-r--r--  runtime/oat.h                14
-rw-r--r--  runtime/runtime_image.cc     770
12 files changed, 819 insertions, 79 deletions
diff --git a/runtime/Android.bp b/runtime/Android.bp
index bfe04f3aff..a1a0b4873e 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -592,6 +592,7 @@ gensrcs {
         "jni_id_type.h",
         "linear_alloc.h",
         "lock_word.h",
+        "oat.h",
         "oat_file.h",
         "process_state.h",
         "reflective_value_visitor.h",
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index a2f451347a..b829f5916c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2180,9 +2180,11 @@ bool ClassLinker::AddImageSpace(
       ObjPtr<mirror::Class> klass(root.Read());
       // Do not update class loader for boot image classes where the app image
       // class loader is only the initiating loader but not the defining loader.
-      // Avoid read barrier since we are comparing against null.
-      if (klass->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+      if (space->HasAddress(klass.Ptr())) {
         klass->SetClassLoader(loader);
+      } else {
+        DCHECK(klass->IsBootStrapClassLoaded());
+        DCHECK(Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Ptr()));
       }
     }
   }
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 67eeb553a4..ecc8a0a620 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -213,6 +213,11 @@ inline ClassTable::TableSlot::TableSlot(ObjPtr<mirror::Class> klass, uint32_t de
   DCHECK_EQ(descriptor_hash, klass->DescriptorHash());
 }
 
+inline ClassTable::TableSlot::TableSlot(uint32_t ptr, uint32_t descriptor_hash)
+    : data_(ptr | MaskHash(descriptor_hash)) {
+  DCHECK_ALIGNED(ptr, kObjectAlignment);
+}
+
 template <typename Filter>
 inline void ClassTable::RemoveStrongRoots(const Filter& filter) {
   WriterMutexLock mu(Thread::Current(), lock_);
@@ -227,6 +232,11 @@ inline ObjPtr<mirror::Class> ClassTable::LookupByDescriptor(ObjPtr<mirror::Class
   return Lookup(descriptor, hash);
 }
 
+inline size_t ClassTable::Size() const {
+  ReaderMutexLock mu(Thread::Current(), lock_);
+  return classes_.size();
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_CLASS_TABLE_INL_H_
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 123c069f0e..7e263737c3 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -58,18 +58,31 @@ class ClassTable {
     explicit TableSlot(ObjPtr<mirror::Class> klass);
 
     TableSlot(ObjPtr<mirror::Class> klass, uint32_t descriptor_hash);
 
+    TableSlot(uint32_t ptr, uint32_t descriptor_hash);
+
     TableSlot& operator=(const TableSlot& copy) {
       data_.store(copy.data_.load(std::memory_order_relaxed), std::memory_order_relaxed);
       return *this;
     }
 
+    uint32_t Data() const {
+      return data_.load(std::memory_order_relaxed);
+    }
+
     bool IsNull() const REQUIRES_SHARED(Locks::mutator_lock_);
 
     uint32_t Hash() const {
       return MaskHash(data_.load(std::memory_order_relaxed));
     }
 
+    uint32_t NonHashData() const {
+      return RemoveHash(Data());
+    }
+
+    static uint32_t RemoveHash(uint32_t hash) {
+      return hash & ~kHashMask;
+    }
+
     static uint32_t MaskHash(uint32_t hash) {
       return hash & kHashMask;
     }
@@ -168,6 +181,11 @@ class ClassTable {
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Returns the number of classes in the class table.
+  size_t Size() const
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Update a class in the table with the new class. Returns the existing class which was replaced.
   ObjPtr<mirror::Class> UpdateClass(const char* descriptor,
                                     ObjPtr<mirror::Class> new_klass,
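The new TableSlot constructor above relies on pointer tagging: because classes are kObjectAlignment-aligned, the low bits of the 32-bit class reference are always zero and can carry a truncated descriptor hash, which Hash() can inspect without dereferencing anything. A stand-alone sketch of the scheme (hypothetical helper names; assumes 8-byte object alignment, as in ART):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kObjectAlignment = 8;              // assumed alignment
    constexpr uint32_t kHashMask = kObjectAlignment - 1;  // low 3 bits hold the hash

    // Pack an aligned pointer and a truncated hash into one 32-bit word.
    uint32_t Pack(uint32_t ptr, uint32_t descriptor_hash) {
      assert((ptr & kHashMask) == 0);  // alignment keeps the low bits free
      return ptr | (descriptor_hash & kHashMask);
    }
    uint32_t NonHashData(uint32_t data) { return data & ~kHashMask; }  // the pointer
    uint32_t MaskHash(uint32_t data) { return data & kHashMask; }      // the mini-hash

A lookup first compares the stored mini-hash and only follows the pointer on a match, which is why the ClassDescriptorHash helper later in this change must be able to recompute the full hash for both boot-image classes and not-yet-mapped image classes.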
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 3aa9e5221d..60a82a29ae 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -121,6 +121,15 @@ inline void HandleScope::VisitRoots(Visitor& visitor) {
   }
 }
 
+template <typename Visitor>
+inline void HandleScope::VisitHandles(Visitor& visitor) {
+  for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
+    if (GetHandle(i) != nullptr) {
+      visitor.Visit(GetHandle(i));
+    }
+  }
+}
+
 template<size_t kNumReferences> template<class T>
 inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
   return NewHandle(ObjPtr<T>(object));
@@ -179,6 +188,15 @@ inline void BaseHandleScope::VisitRoots(Visitor& visitor) {
   }
 }
 
+template <typename Visitor>
+inline void BaseHandleScope::VisitHandles(Visitor& visitor) {
+  if (LIKELY(!IsVariableSized())) {
+    AsHandleScope()->VisitHandles(visitor);
+  } else {
+    AsVariableSized()->VisitHandles(visitor);
+  }
+}
+
 inline VariableSizedHandleScope* BaseHandleScope::AsVariableSized() {
   DCHECK(IsVariableSized());
   return down_cast<VariableSizedHandleScope*>(this);
@@ -269,6 +287,15 @@ inline void VariableSizedHandleScope::VisitRoots(Visitor& visitor) {
   }
 }
 
+template <typename Visitor>
+inline void VariableSizedHandleScope::VisitHandles(Visitor& visitor) {
+  LocalScopeType* cur = current_scope_;
+  while (cur != nullptr) {
+    cur->VisitHandles(visitor);
+    cur = reinterpret_cast<LocalScopeType*>(cur->GetLink());
+  }
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_HANDLE_SCOPE_INL_H_
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 89127e4cf3..a43e889111 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -56,6 +56,9 @@ class PACKED(4) BaseHandleScope {
   template <typename Visitor>
   ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitHandles(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Link to previous BaseHandleScope or null.
   BaseHandleScope* GetLink() const {
     return link_;
@@ -148,6 +151,9 @@ class PACKED(4) HandleScope : public BaseHandleScope {
   template <typename Visitor>
   ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitHandles(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
  protected:
   // Return backing storage used for references.
   ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
@@ -261,6 +267,9 @@ class VariableSizedHandleScope : public BaseHandleScope {
   template <typename Visitor>
   void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitHandles(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   static constexpr size_t kLocalScopeSize = 64u;
   static constexpr size_t kSizeOfReferencesPerScope =
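VisitHandles above mirrors VisitRoots: a fixed-size scope iterates its own slots, and a variable-sized scope walks its chain of local scopes through GetLink(). A simplified model of that traversal (hypothetical types, not the ART classes):

    #include <vector>

    struct Scope {
      Scope* link = nullptr;          // previous scope in the chain
      std::vector<void*> handles;     // reference slots held by this scope

      template <typename Visitor>
      void VisitHandles(Visitor& visitor) {
        for (void* h : handles) {
          if (h != nullptr) visitor.Visit(h);  // skip empty slots
        }
      }
    };

    // Walk the whole chain, newest scope first.
    template <typename Visitor>
    void VisitAll(Scope* current, Visitor& visitor) {
      for (Scope* cur = current; cur != nullptr; cur = cur->link) {
        cur->VisitHandles(visitor);
      }
    }

runtime_image.cc below uses exactly this entry point: the collected class handles are fed one by one into PruneVisitor::Visit via handles.VisitHandles(prune_visitor).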
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index a7faa376cd..2bdf8277cd 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -240,13 +240,16 @@ inline T PointerArray::GetElementPtrSizeUnchecked(uint32_t idx) {
   // C style casts here since we sometimes have T be a pointer, or sometimes an integer
   // (for stack traces).
   using ConversionType = typename std::conditional_t<std::is_pointer_v<T>, uintptr_t, T>;
+  // Note: we cast the array directly when unchecked as this code gets called by
+  // runtime_image, which can pass a 64bit pointer and therefore cannot be held
+  // by an ObjPtr.
   if (kPointerSize == PointerSize::k64) {
     uint64_t value =
-        static_cast<uint64_t>(AsLongArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+        static_cast<uint64_t>(reinterpret_cast<LongArray*>(this)->GetWithoutChecks(idx));
     return (T) dchecked_integral_cast<ConversionType>(value);
   } else {
     uint32_t value =
-        static_cast<uint32_t>(AsIntArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+        static_cast<uint32_t>(reinterpret_cast<IntArray*>(this)->GetWithoutChecks(idx));
     return (T) dchecked_integral_cast<ConversionType>(value);
   }
 }
@@ -261,12 +264,15 @@ inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
 
 template<bool kTransactionActive, bool kCheckTransaction, bool kUnchecked>
 inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size) {
+  // Note: we cast the array directly when unchecked as this code gets called by
+  // runtime_image, which can pass a 64bit pointer and therefore cannot be held
+  // by an ObjPtr.
   if (ptr_size == PointerSize::k64) {
-    (kUnchecked ? ObjPtr<LongArray>::DownCast(ObjPtr<Object>(this)) : AsLongArray())->
+    (kUnchecked ? reinterpret_cast<LongArray*>(this) : AsLongArray().Ptr())->
         SetWithoutChecks<kTransactionActive, kCheckTransaction>(idx, element);
   } else {
     uint32_t element32 = dchecked_integral_cast<uint32_t>(element);
-    (kUnchecked ? ObjPtr<IntArray>::DownCast(ObjPtr<Object>(this)) : AsIntArray())
+    (kUnchecked ? reinterpret_cast<IntArray*>(this) : AsIntArray().Ptr())
         ->SetWithoutChecks<kTransactionActive, kCheckTransaction>(idx, element32);
   }
 }
@@ -278,7 +284,7 @@ inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, PointerSiz
 }
 
 template <VerifyObjectFlags kVerifyFlags, typename Visitor>
-inline void PointerArray::Fixup(ObjPtr<mirror::PointerArray> dest,
+inline void PointerArray::Fixup(mirror::PointerArray* dest,
                                 PointerSize pointer_size,
                                 const Visitor& visitor) {
   for (size_t i = 0, count = GetLength(); i < count; ++i) {
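GetElementPtrSizeUnchecked and SetElementPtrSize above treat the same array as either a LongArray or an IntArray depending on the runtime pointer size; the unchecked reinterpret_cast path exists because the caller may hold a raw 64-bit pointer that an ObjPtr cannot represent. A reduced sketch of the dual-width storage idea (hypothetical type, not mirror::PointerArray):

    #include <cstdint>
    #include <vector>

    enum class PointerSize { k32 = 4, k64 = 8 };

    struct PointerArraySketch {
      PointerSize ptr_size;
      std::vector<uint32_t> data32;  // backing store when ptr_size == k32
      std::vector<uint64_t> data64;  // backing store when ptr_size == k64

      uint64_t Get(size_t idx) const {
        return ptr_size == PointerSize::k64 ? data64[idx]
                                            : static_cast<uint64_t>(data32[idx]);
      }
      void Set(size_t idx, uint64_t element) {
        if (ptr_size == PointerSize::k64) {
          data64[idx] = element;
        } else {
          data32[idx] = static_cast<uint32_t>(element);  // must fit in 32 bits
        }
      }
    };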
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index dfe7d475c1..0116fdee6f 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -264,7 +264,7 @@ class PointerArray : public Array {
   // Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
   // to dest if visitor(source_ptr) != source_ptr.
   template <VerifyObjectFlags kVerifyFlags = kVerifyNone, typename Visitor>
-  void Fixup(ObjPtr<mirror::PointerArray> dest, PointerSize pointer_size, const Visitor& visitor)
+  void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Works like memcpy(), except we guarantee not to allow tearing of array values (ie using smaller
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index bd37534b6b..b9eb9d05b4 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -65,6 +65,7 @@ template<typename T> class StrideIterator;
 template<size_t kNumReferences> class PACKED(4) StackHandleScope;
 class Thread;
 class DexCacheVisitor;
+class RuntimeImageHelper;
 
 namespace mirror {
 
@@ -1579,6 +1580,7 @@ class MANAGED Class final : public Object {
   friend struct art::ClassOffsets;  // for verifying offset information
   friend class Object;  // For VisitReferences
   friend class linker::ImageWriter;  // For SetStatusInternal
+  friend class art::RuntimeImageHelper;  // For SetStatusInternal
   DISALLOW_IMPLICIT_CONSTRUCTORS(Class);
 };
 
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 42bb7d4894..2c7a73f964 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -467,4 +467,27 @@ void OatHeader::Flatten(const SafeMap<std::string, std::string>* key_value_store
   key_value_store_size_ = data_ptr - reinterpret_cast<char*>(&key_value_store_);
 }
 
+const uint8_t* OatHeader::GetOatAddress(StubType type) const {
+  DCHECK_LE(type, StubType::kLast);
+  switch (type) {
+    // TODO: We could maybe clean this up if we stored them in an array in the oat header.
+    case StubType::kQuickGenericJNITrampoline:
+      return static_cast<const uint8_t*>(GetQuickGenericJniTrampoline());
+    case StubType::kJNIDlsymLookupTrampoline:
+      return static_cast<const uint8_t*>(GetJniDlsymLookupTrampoline());
+    case StubType::kJNIDlsymLookupCriticalTrampoline:
+      return static_cast<const uint8_t*>(GetJniDlsymLookupCriticalTrampoline());
+    case StubType::kQuickIMTConflictTrampoline:
+      return static_cast<const uint8_t*>(GetQuickImtConflictTrampoline());
+    case StubType::kQuickResolutionTrampoline:
+      return static_cast<const uint8_t*>(GetQuickResolutionTrampoline());
+    case StubType::kQuickToInterpreterBridge:
+      return static_cast<const uint8_t*>(GetQuickToInterpreterBridge());
+    case StubType::kNterpTrampoline:
+      return static_cast<const uint8_t*>(GetNterpTrampoline());
+    default:
+      UNREACHABLE();
+  }
+}
+
 }  // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index 3b32e11daa..e062baaee9 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -29,6 +29,18 @@ namespace art {
 enum class InstructionSet;
 class InstructionSetFeatures;
 
+enum class StubType {
+  kJNIDlsymLookupTrampoline,
+  kJNIDlsymLookupCriticalTrampoline,
+  kQuickGenericJNITrampoline,
+  kQuickIMTConflictTrampoline,
+  kQuickResolutionTrampoline,
+  kQuickToInterpreterBridge,
+  kNterpTrampoline,
+  kLast = kNterpTrampoline,
+};
+std::ostream& operator<<(std::ostream& stream, StubType stub_type);
+
 class PACKED(4) OatHeader {
  public:
   static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
@@ -111,6 +123,8 @@
   bool IsConcurrentCopying() const;
   bool RequiresImage() const;
 
+  const uint8_t* GetOatAddress(StubType type) const;
+
  private:
   bool KeyHasValue(const char* key, const char* value, size_t value_size) const;
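The TODO inside GetOatAddress suggests storing the trampolines in an array instead of a switch. A sketch of what that cleanup could look like (purely hypothetical, not the committed code):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    enum class StubType { kA, kB, kC, kLast = kC };  // stand-in for the real enum

    class OatHeaderSketch {
     public:
      const uint8_t* GetOatAddress(StubType type) const {
        // A single bounds-checked lookup replaces the per-stub switch.
        return stubs_.at(static_cast<size_t>(type));
      }

     private:
      std::array<const uint8_t*, static_cast<size_t>(StubType::kLast) + 1> stubs_{};
    };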
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index 7137991dcc..9d59c4d363 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -22,9 +22,12 @@
 
 #include "android-base/stringprintf.h"
 
+#include "base/arena_allocator.h"
+#include "base/arena_containers.h"
 #include "base/bit_utils.h"
 #include "base/file_utils.h"
 #include "base/length_prefixed_array.h"
+#include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
 #include "class_loader_utils.h"
@@ -37,23 +40,36 @@
 #include "mirror/object_array-inl.h"
 #include "mirror/object_array.h"
 #include "mirror/string-inl.h"
+#include "oat.h"
 #include "scoped_thread_state_change-inl.h"
 #include "vdex_file.h"
 
 namespace art {
 
 /**
+ * The native data structures that we store in the image.
+ */
+enum class NativeRelocationKind {
+  kArtFieldArray,
+  kArtMethodArray,
+  kArtMethod,
+  kImTable,
+};
+
+/**
  * Helper class to generate an app image at runtime.
  */
 class RuntimeImageHelper {
  public:
   explicit RuntimeImageHelper(gc::Heap* heap) :
+      sections_(ImageHeader::kSectionCount),
       boot_image_begin_(heap->GetBootImagesStartAddress()),
       boot_image_size_(heap->GetBootImagesSize()),
       image_begin_(boot_image_begin_ + boot_image_size_),
       // Note: image relocation considers the image header in the bitmap.
       object_section_size_(sizeof(ImageHeader)),
-      intern_table_(InternStringHash(this), InternStringEquals(this)) {}
+      intern_table_(InternStringHash(this), InternStringEquals(this)),
+      class_table_(ClassDescriptorHash(this), ClassDescriptorEquals()) {}
 
   bool Generate(std::string* error_msg) {
@@ -62,12 +78,15 @@
     }
 
     // Generate the sections information stored in the header.
-    dchecked_vector<ImageSection> sections(ImageHeader::kSectionCount);
-    CreateImageSections(sections);
+    CreateImageSections();
+
+    // Now that all sections have been created and we know their offset and
+    // size, relocate native pointers inside classes and ImTables.
+    RelocateNativePointers();
 
     // Generate the bitmap section, stored page aligned after the sections data
     // and of size `object_section_size_` page aligned.
-    size_t sections_end = sections[ImageHeader::kSectionMetadata].End();
+    size_t sections_end = sections_[ImageHeader::kSectionMetadata].End();
     image_bitmap_ = gc::accounting::ContinuousSpaceBitmap::Create(
         "image bitmap",
         reinterpret_cast<uint8_t*>(image_begin_),
@@ -78,7 +97,7 @@
           reinterpret_cast<mirror::Object*>(image_begin_ + sizeof(ImageHeader) + offset));
     }
     const size_t bitmap_bytes = image_bitmap_.Size();
-    auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
+    auto* bitmap_section = &sections_[ImageHeader::kSectionImageBitmap];
     *bitmap_section = ImageSection(RoundUp(sections_end, kPageSize),
                                    RoundUp(bitmap_bytes, kPageSize));
@@ -101,7 +120,7 @@
         /* component_count= */ 1,
         image_begin_,
         sections_end,
-        sections.data(),
+        sections_.data(),
         /* image_roots= */ image_begin_ + sizeof(ImageHeader),
         /* oat_checksum= */ 0,
         /* oat_file_begin= */ 0,
@@ -123,8 +142,20 @@
     return true;
   }
 
-  const std::vector<uint8_t>& GetData() const {
-    return image_data_;
+  const std::vector<uint8_t>& GetObjects() const {
+    return objects_;
+  }
+
+  const std::vector<uint8_t>& GetArtMethods() const {
+    return art_methods_;
+  }
+
+  const std::vector<uint8_t>& GetArtFields() const {
+    return art_fields_;
+  }
+
+  const std::vector<uint8_t>& GetImTables() const {
+    return im_tables_;
   }
 
   const ImageHeader& GetHeader() const {
@@ -143,12 +174,16 @@
     intern_table_.WriteToMemory(data.data());
   }
 
+  void GenerateClassTableData(std::vector<uint8_t>& data) const {
+    class_table_.WriteToMemory(data.data());
+  }
+
  private:
   bool IsInBootImage(const void* obj) const {
     return reinterpret_cast<uintptr_t>(obj) - boot_image_begin_ < boot_image_size_;
   }
 
-  // Returns a pointer that can be stored in `image_data_`:
+  // Returns a pointer that can be stored in `objects_`:
   // - The pointer itself for boot image objects,
   // - The offset in the image for all other objects.
   mirror::Object* GetOrComputeImageAddress(ObjPtr<mirror::Object> object)
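GetOrComputeImageAddress above is the heart of reference encoding: null and boot-image pointers are stored as-is (the boot image is mapped at a known place), while every other object is stored as the absolute address it will have once this image is mapped, image_begin_ + sizeof(ImageHeader) + offset. A sketch of the two-way mapping with made-up constants:

    #include <cstdint>

    // Hypothetical layout constants for illustration only.
    constexpr uint32_t kBootImageBegin = 0x70000000;
    constexpr uint32_t kBootImageSize  = 0x01000000;
    constexpr uint32_t kImageBegin     = kBootImageBegin + kBootImageSize;
    constexpr uint32_t kHeaderSize     = 0x200;  // stand-in for sizeof(ImageHeader)

    bool IsInBootImage(uint32_t addr) {
      // Same unsigned-subtraction range check as IsInBootImage() above.
      return addr - kBootImageBegin < kBootImageSize;
    }

    uint32_t EncodeReference(uint32_t addr, uint32_t offset_in_buffer) {
      if (addr == 0 || IsInBootImage(addr)) {
        return addr;  // null and boot-image pointers are kept verbatim
      }
      return kImageBegin + kHeaderSize + offset_in_buffer;  // future mapped address
    }

    // Inverse used during generation: FromImageOffsetToVectorOffset().
    uint32_t DecodeToBufferOffset(uint32_t encoded) {
      return encoded - kHeaderSize - kImageBegin;
    }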
@@ -156,57 +191,90 @@
     if (object == nullptr || IsInBootImage(object.Ptr())) {
       DCHECK(object == nullptr ||
              Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(object));
       return object.Ptr();
-    } else if (object->IsClassLoader()) {
+    }
+
+    if (object->IsClassLoader()) {
       // DexCache and Class point to class loaders. For runtime-generated app
       // images, we don't encode the class loader. It will be set when the
       // runtime is loading the image.
       return nullptr;
+    }
+
+    if (object->GetClass() == GetClassRoot<mirror::ClassExt>()) {
+      // No need to encode `ClassExt`. If needed, it will be reconstructed at
+      // runtime.
+      return nullptr;
+    }
+
+    uint32_t offset = 0u;
+    if (object->IsClass()) {
+      offset = CopyClass(object->AsClass());
+    } else if (object->IsDexCache()) {
+      offset = CopyDexCache(object->AsDexCache());
     } else {
-      uint32_t offset = CopyObject(object);
-      return reinterpret_cast<mirror::Object*>(image_begin_ + sizeof(ImageHeader) + offset);
+      offset = CopyObject(object);
     }
+    return reinterpret_cast<mirror::Object*>(image_begin_ + sizeof(ImageHeader) + offset);
   }
 
-  void CreateImageSections(dchecked_vector<ImageSection>& sections) const {
-    sections[ImageHeader::kSectionObjects] =
-        ImageSection(0u, object_section_size_);
-    sections[ImageHeader::kSectionArtFields] =
-        ImageSection(sections[ImageHeader::kSectionObjects].End(), 0u);
-    sections[ImageHeader::kSectionArtMethods] =
-        ImageSection(sections[ImageHeader::kSectionArtFields].End(), 0u);
-    sections[ImageHeader::kSectionImTables] =
-        ImageSection(sections[ImageHeader::kSectionArtMethods].End(), 0u);
-    sections[ImageHeader::kSectionIMTConflictTables] =
-        ImageSection(sections[ImageHeader::kSectionImTables].End(), 0u);
-    sections[ImageHeader::kSectionRuntimeMethods] =
-        ImageSection(sections[ImageHeader::kSectionIMTConflictTables].End(), 0u);
+  void CreateImageSections() {
+    sections_[ImageHeader::kSectionObjects] = ImageSection(0u, object_section_size_);
+    sections_[ImageHeader::kSectionArtFields] =
+        ImageSection(sections_[ImageHeader::kSectionObjects].End(), art_fields_.size());
+
+    // Round up to the alignment for ArtMethod.
+    static_assert(IsAligned<sizeof(void*)>(ArtMethod::Size(kRuntimePointerSize)));
+    size_t cur_pos = RoundUp(sections_[ImageHeader::kSectionArtFields].End(), sizeof(void*));
+    sections_[ImageHeader::kSectionArtMethods] = ImageSection(cur_pos, art_methods_.size());
+
+    // Round up to the alignment for ImTables.
+    cur_pos = RoundUp(sections_[ImageHeader::kSectionArtMethods].End(), sizeof(void*));
+    sections_[ImageHeader::kSectionImTables] = ImageSection(cur_pos, im_tables_.size());
+
+    // Round up to the alignment for conflict tables.
+    cur_pos = RoundUp(sections_[ImageHeader::kSectionImTables].End(), sizeof(void*));
+    sections_[ImageHeader::kSectionIMTConflictTables] = ImageSection(cur_pos, 0u);
+
+    sections_[ImageHeader::kSectionRuntimeMethods] =
+        ImageSection(sections_[ImageHeader::kSectionIMTConflictTables].End(), 0u);
 
     // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
-    size_t cur_pos = RoundUp(sections[ImageHeader::kSectionRuntimeMethods].End(), sizeof(uint64_t));
+    cur_pos = RoundUp(sections_[ImageHeader::kSectionRuntimeMethods].End(), sizeof(uint64_t));
 
     size_t intern_table_bytes = intern_table_.WriteToMemory(nullptr);
-    sections[ImageHeader::kSectionInternedStrings] = ImageSection(cur_pos, intern_table_bytes);
+    sections_[ImageHeader::kSectionInternedStrings] = ImageSection(cur_pos, intern_table_bytes);
 
     // Obtain the new position and round it up to the appropriate alignment.
-    cur_pos = RoundUp(sections[ImageHeader::kSectionInternedStrings].End(), sizeof(uint64_t));
-    sections[ImageHeader::kSectionClassTable] = ImageSection(cur_pos, 0u);
+    cur_pos = RoundUp(sections_[ImageHeader::kSectionInternedStrings].End(), sizeof(uint64_t));
+
+    size_t class_table_bytes = class_table_.WriteToMemory(nullptr);
+    sections_[ImageHeader::kSectionClassTable] = ImageSection(cur_pos, class_table_bytes);
 
     // Round up to the alignment of the offsets we are going to store.
-    cur_pos = RoundUp(sections[ImageHeader::kSectionClassTable].End(), sizeof(uint32_t));
-    sections[ImageHeader::kSectionStringReferenceOffsets] = ImageSection(cur_pos, 0u);
+    cur_pos = RoundUp(sections_[ImageHeader::kSectionClassTable].End(), sizeof(uint32_t));
+    sections_[ImageHeader::kSectionStringReferenceOffsets] = ImageSection(cur_pos, 0u);
 
     // Round up to the alignment of the offsets we are going to store.
     cur_pos =
-        RoundUp(sections[ImageHeader::kSectionStringReferenceOffsets].End(), sizeof(uint32_t));
+        RoundUp(sections_[ImageHeader::kSectionStringReferenceOffsets].End(), sizeof(uint32_t));
 
-    sections[ImageHeader::kSectionMetadata] = ImageSection(cur_pos, 0u);
+    sections_[ImageHeader::kSectionMetadata] = ImageSection(cur_pos, 0u);
   }
 
-  // Returns the copied mirror Object. This is really its content, it should not
+  // Returns the copied mirror Object if in the image, or the object directly if
+  // in the boot image. For the copy, this is really its content, it should not
   // be returned as an `ObjPtr` (as it's not a GC object), nor stored anywhere.
   template<typename T> T* FromImageOffsetToRuntimeContent(uint32_t offset) {
-    uint32_t vector_data_offset = offset - sizeof(ImageHeader) - image_begin_;
-    return reinterpret_cast<T*>(image_data_.data() + vector_data_offset);
+    if (offset == 0u || IsInBootImage(reinterpret_cast<const void*>(offset))) {
+      return reinterpret_cast<T*>(offset);
+    }
+    uint32_t vector_data_offset = FromImageOffsetToVectorOffset(offset);
+    return reinterpret_cast<T*>(objects_.data() + vector_data_offset);
+  }
+
+  uint32_t FromImageOffsetToVectorOffset(uint32_t offset) const {
+    DCHECK(!IsInBootImage(reinterpret_cast<const void*>(offset)));
+    return offset - sizeof(ImageHeader) - image_begin_;
   }
 
   class InternStringHash {
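CreateImageSections above lays the sections out back to back, rounding each start offset up to the alignment its contents require: pointer size for ArtMethod and ImTable data, 8 bytes for the hash-table sections, 4 bytes for the offset tables. The pattern, reduced to its core (hypothetical section list):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Section {
      size_t offset;
      size_t size;
      size_t End() const { return offset + size; }
    };

    constexpr size_t RoundUp(size_t x, size_t alignment) {
      // Alignment must be a power of two.
      return (x + alignment - 1) & ~(alignment - 1);
    }

    std::vector<Section> Layout(const std::vector<std::pair<size_t, size_t>>& sizes_and_alignments) {
      std::vector<Section> sections;
      size_t cur_pos = 0;
      for (const auto& [size, alignment] : sizes_and_alignments) {
        cur_pos = RoundUp(cur_pos, alignment);   // align this section's start
        sections.push_back({cur_pos, size});
        cur_pos = sections.back().End();         // next section starts after it
      }
      return sections;
    }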
@@ -252,6 +320,39 @@
   using InternTableSet =
       HashSet<uint32_t, DefaultEmptyFn<uint32_t>, InternStringHash, InternStringEquals>;
 
+  class ClassDescriptorHash {
+   public:
+    explicit ClassDescriptorHash(RuntimeImageHelper* helper) : helper_(helper) {}
+
+    uint32_t operator()(const ClassTable::TableSlot& slot) const NO_THREAD_SAFETY_ANALYSIS {
+      uint32_t ptr = slot.NonHashData();
+      if (helper_->IsInBootImage(reinterpret_cast32<const void*>(ptr))) {
+        return reinterpret_cast32<mirror::Class*>(ptr)->DescriptorHash();
+      }
+      return helper_->class_hashes_[helper_->FromImageOffsetToVectorOffset(ptr)];
+    }
+
+   private:
+    RuntimeImageHelper* helper_;
+  };
+
+  class ClassDescriptorEquals {
+   public:
+    ClassDescriptorEquals() {}
+
+    bool operator()(const ClassTable::TableSlot& a, const ClassTable::TableSlot& b)
+        const NO_THREAD_SAFETY_ANALYSIS {
+      // No need to fetch the descriptor: we know the classes we are inserting
+      // in the ClassTable are unique.
+      return a.Data() == b.Data();
+    }
+  };
+
+  using ClassTableSet = HashSet<ClassTable::TableSlot,
+                                ClassTable::TableSlotEmptyFn,
+                                ClassDescriptorHash,
+                                ClassDescriptorEquals>;
+
   void VisitDexCache(ObjPtr<mirror::DexCache> dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
     const DexFile& dex_file = *dex_cache->GetDexFile();
     // Currently only copy string objects into the image. Populate the intern
@@ -270,11 +371,410 @@
     }
   }
 
-  void VisitDexCaches(Handle<mirror::ObjectArray<mirror::Object>> dex_cache_array)
+  // Helper class to collect classes that we will generate in the image.
+  class ClassTableVisitor {
+   public:
+    ClassTableVisitor(Handle<mirror::ClassLoader> loader, VariableSizedHandleScope& handles)
+        : loader_(loader), handles_(handles) {}
+
+    bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+      // Record app classes and boot classpath classes: app classes will be
+      // generated in the image and put in the class table, boot classpath
+      // classes will be put in the class table.
+      ObjPtr<mirror::ClassLoader> class_loader = klass->GetClassLoader();
+      if (class_loader == loader_.Get() || class_loader == nullptr) {
+        handles_.NewHandle(klass);
+      }
+      return true;
+    }
+
+   private:
+    Handle<mirror::ClassLoader> loader_;
+    VariableSizedHandleScope& handles_;
+  };
+
+  // Helper class visitor to filter out classes we cannot emit.
+  class PruneVisitor {
+   public:
+    PruneVisitor(Thread* self,
+                 RuntimeImageHelper* helper,
+                 const ArenaSet<const DexFile*>& dex_files,
+                 ArenaVector<Handle<mirror::Class>>& classes,
+                 ArenaAllocator& allocator)
+        : self_(self),
+          helper_(helper),
+          dex_files_(dex_files),
+          visited_(allocator.Adapter()),
+          classes_to_write_(classes) {}
+
+    bool CanEmitHelper(Handle<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+      // Only emit classes that are resolved and not erroneous.
+      if (!cls->IsResolved() || cls->IsErroneous()) {
+        return false;
+      }
+
+      // Classes in the boot image can be trivially encoded directly.
+      if (helper_->IsInBootImage(cls.Get())) {
+        return true;
+      }
+
+      // If the class comes from a dex file which is not part of the primary
+      // APK, don't encode it.
+      if (!ContainsElement(dex_files_, &cls->GetDexFile())) {
+        return false;
+      }
+
+      // Ensure pointers to classes in `cls` can also be emitted.
+      StackHandleScope<1> hs(self_);
+      MutableHandle<mirror::Class> other_class = hs.NewHandle(cls->GetSuperClass());
+      if (!CanEmit(other_class)) {
+        return false;
+      }
+
+      other_class.Assign(cls->GetComponentType());
+      if (!CanEmit(other_class)) {
+        return false;
+      }
+
+      for (size_t i = 0, num_interfaces = cls->NumDirectInterfaces(); i < num_interfaces; ++i) {
+        other_class.Assign(cls->GetDirectInterface(i));
+        if (!CanEmit(other_class)) {
+          return false;
+        }
+      }
+      return true;
+    }
+
+    bool CanEmit(Handle<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+      if (cls == nullptr) {
+        return true;
+      }
+      const dex::ClassDef* class_def = cls->GetClassDef();
+      if (class_def == nullptr) {
+        // Covers array classes and proxy classes.
+        // TODO: Handle these differently.
+        return false;
+      }
+      auto existing = visited_.find(class_def);
+      if (existing != visited_.end()) {
+        // Already processed;
+        return existing->second == VisitState::kCanEmit;
+      }
+
+      visited_.Put(class_def, VisitState::kVisiting);
+      if (CanEmitHelper(cls)) {
+        visited_.Overwrite(class_def, VisitState::kCanEmit);
+        return true;
+      } else {
+        visited_.Overwrite(class_def, VisitState::kCannotEmit);
+        return false;
+      }
+    }
+
+    void Visit(Handle<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      MutableHandle<mirror::Class> cls(obj.GetReference());
+      if (CanEmit(cls)) {
+        if (cls->IsBootStrapClassLoaded()) {
+          DCHECK(helper_->IsInBootImage(cls.Get()));
+          // Insert the bootclasspath class in the class table.
+          uint32_t hash = cls->DescriptorHash();
+          helper_->class_table_.InsertWithHash(ClassTable::TableSlot(cls.Get(), hash), hash);
+        } else {
+          classes_to_write_.push_back(cls);
+        }
+      }
+    }
+
+   private:
+    enum class VisitState {
+      kVisiting,
+      kCanEmit,
+      kCannotEmit,
+    };
+
+    Thread* const self_;
+    RuntimeImageHelper* const helper_;
+    const ArenaSet<const DexFile*>& dex_files_;
+    ArenaSafeMap<const dex::ClassDef*, VisitState> visited_;
+    ArenaVector<Handle<mirror::Class>>& classes_to_write_;
+  };
+
+  void EmitStringsAndClasses(Thread* self,
+                             Handle<mirror::ObjectArray<mirror::Object>> dex_cache_array)
       REQUIRES_SHARED(Locks::mutator_lock_) {
+    ArenaAllocator allocator(Runtime::Current()->GetArenaPool());
+    ArenaSet<const DexFile*> dex_files(allocator.Adapter());
     for (int32_t i = 0; i < dex_cache_array->GetLength(); ++i) {
+      dex_files.insert(dex_cache_array->Get(i)->AsDexCache()->GetDexFile());
       VisitDexCache(ObjPtr<mirror::DexCache>::DownCast((dex_cache_array->Get(i))));
     }
+
+    StackHandleScope<1> hs(self);
+    Handle<mirror::ClassLoader> loader = hs.NewHandle(
+        dex_cache_array->Get(0)->AsDexCache()->GetClassLoader());
+    ClassTable* const class_table = loader->GetClassTable();
+    if (class_table == nullptr) {
+      return;
+    }
+
+    VariableSizedHandleScope handles(self);
+    {
+      ClassTableVisitor class_table_visitor(loader, handles);
+      class_table->Visit(class_table_visitor);
+    }
+
+    ArenaVector<Handle<mirror::Class>> classes_to_write(allocator.Adapter());
+    classes_to_write.reserve(class_table->Size());
+    {
+      PruneVisitor prune_visitor(self, this, dex_files, classes_to_write, allocator);
+      handles.VisitHandles(prune_visitor);
+    }
+
+    for (Handle<mirror::Class> cls : classes_to_write) {
+      ScopedAssertNoThreadSuspension sants("Writing class");
+      CopyClass(cls.Get());
+    }
+  }
+
+  // Helper visitor returning the location of a native pointer in the image.
+  class NativePointerVisitor {
+   public:
+    explicit NativePointerVisitor(RuntimeImageHelper* helper) : helper_(helper) {}
+
+    template <typename T>
+    T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
+      return helper_->NativeLocationInImage(ptr);
+    }
+
+    template <typename T> T* operator()(T* ptr) const {
+      return helper_->NativeLocationInImage(ptr);
+    }
+
+   private:
+    RuntimeImageHelper* helper_;
+  };
+
+  template <typename T> T* NativeLocationInImage(T* ptr) const {
+    if (ptr == nullptr || IsInBootImage(ptr)) {
+      return ptr;
+    }
+
+    auto it = native_relocations_.find(ptr);
+    DCHECK(it != native_relocations_.end());
+    switch (it->second.first) {
+      case NativeRelocationKind::kArtMethod:
+      case NativeRelocationKind::kArtMethodArray: {
+        uint32_t offset = sections_[ImageHeader::kSectionArtMethods].Offset();
+        return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+      }
+      case NativeRelocationKind::kArtFieldArray: {
+        uint32_t offset = sections_[ImageHeader::kSectionArtFields].Offset();
+        return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+      }
+      case NativeRelocationKind::kImTable: {
+        uint32_t offset = sections_[ImageHeader::kSectionImTables].Offset();
+        return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+      }
+    }
+  }
+
+  template <typename Visitor>
+  void RelocateMethodPointerArrays(mirror::Class* klass, const Visitor& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // A bit of magic here: we cast contents from our buffer to mirror::Class,
+    // and do pointer comparison between 1) these classes, and 2) boot image objects.
+    // Both kinds do not move.
+
+    // See if we need to fixup the vtable field.
+    mirror::Class* super = FromImageOffsetToRuntimeContent<mirror::Class>(
+        reinterpret_cast32<uint32_t>(
+            klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+    DCHECK(super != nullptr) << "j.l.Object should never be in an app runtime image";
+    mirror::PointerArray* vtable = FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+        reinterpret_cast32<uint32_t>(klass->GetVTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+    mirror::PointerArray* super_vtable = FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+        reinterpret_cast32<uint32_t>(super->GetVTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+    if (vtable != nullptr && vtable != super_vtable) {
+      DCHECK(!IsInBootImage(vtable));
+      vtable->Fixup(vtable, kRuntimePointerSize, visitor);
+    }
+
+    // See if we need to fixup entries in the IfTable.
+    mirror::IfTable* iftable = FromImageOffsetToRuntimeContent<mirror::IfTable>(
+        reinterpret_cast32<uint32_t>(
+            klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+    mirror::IfTable* super_iftable = FromImageOffsetToRuntimeContent<mirror::IfTable>(
+        reinterpret_cast32<uint32_t>(
+            super->GetIfTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+    int32_t iftable_count = iftable->Count();
+    int32_t super_iftable_count = super_iftable->Count();
+    for (int32_t i = 0; i < iftable_count; ++i) {
+      mirror::PointerArray* methods = FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+          reinterpret_cast32<uint32_t>(
+              iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i).Ptr()));
+      mirror::PointerArray* super_methods = (i < super_iftable_count)
+          ? FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+                reinterpret_cast32<uint32_t>(
+                    super_iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i).Ptr()))
+          : nullptr;
+      if (methods != super_methods) {
+        DCHECK(!IsInBootImage(methods));
+        methods->Fixup(methods, kRuntimePointerSize, visitor);
+      }
+    }
+  }
+
+  void RelocateNativePointers() {
+    ScopedObjectAccess soa(Thread::Current());
+    NativePointerVisitor visitor(this);
+    for (auto it : classes_) {
+      mirror::Class* cls = reinterpret_cast<mirror::Class*>(&objects_[it.second]);
+      cls->FixupNativePointers(cls, kRuntimePointerSize, visitor);
+      RelocateMethodPointerArrays(cls, visitor);
+    }
+    for (auto it : native_relocations_) {
+      if (it.second.first == NativeRelocationKind::kImTable) {
+        ImTable* im_table = reinterpret_cast<ImTable*>(im_tables_.data() + it.second.second);
+        RelocateImTable(im_table, visitor);
+      }
+    }
+  }
+
+  void RelocateImTable(ImTable* im_table, const NativePointerVisitor& visitor) {
+    for (size_t i = 0; i < ImTable::kSize; ++i) {
+      ArtMethod* method = im_table->Get(i, kRuntimePointerSize);
+      ArtMethod* new_method = nullptr;
+      if (method->IsRuntimeMethod() && !IsInBootImage(method)) {
+        // New IMT conflict method: just use the boot image version.
+        // TODO: Consider copying the new IMT conflict method.
+        new_method = Runtime::Current()->GetImtConflictMethod();
+        DCHECK(IsInBootImage(new_method));
+      } else {
+        new_method = visitor(method);
+      }
+      if (method != new_method) {
+        im_table->Set(i, new_method, kRuntimePointerSize);
+      }
+    }
+  }
+
+  void CopyFieldArrays(ObjPtr<mirror::Class> cls, uint32_t class_image_address)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    LengthPrefixedArray<ArtField>* fields[] = {
+        cls->GetSFieldsPtr(), cls->GetIFieldsPtr(),
+    };
+    for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
+      if (cur_fields != nullptr) {
+        // Copy the array.
+        size_t number_of_fields = cur_fields->size();
+        size_t size = LengthPrefixedArray<ArtField>::ComputeSize(number_of_fields);
+        size_t offset = art_fields_.size();
+        art_fields_.resize(offset + size);
+        auto* dest_array =
+            reinterpret_cast<LengthPrefixedArray<ArtField>*>(art_fields_.data() + offset);
+        memcpy(dest_array, cur_fields, size);
+        native_relocations_[cur_fields] =
+            std::make_pair(NativeRelocationKind::kArtFieldArray, offset);
+
+        // Update the class pointer of individual fields.
+        for (size_t i = 0; i != number_of_fields; ++i) {
+          dest_array->At(i).GetDeclaringClassAddressWithoutBarrier()->Assign(
+              reinterpret_cast<mirror::Class*>(class_image_address));
+        }
+      }
+    }
+  }
+
+  void CopyMethodArrays(ObjPtr<mirror::Class> cls, uint32_t class_image_address)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    size_t number_of_methods = cls->NumMethods();
+    if (number_of_methods == 0) {
+      return;
+    }
+
+    size_t size = LengthPrefixedArray<ArtMethod>::ComputeSize(number_of_methods);
+    size_t offset = art_methods_.size();
+    art_methods_.resize(offset + size);
+    auto* dest_array =
+        reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(art_methods_.data() + offset);
+    memcpy(dest_array, cls->GetMethodsPtr(), size);
+    native_relocations_[cls->GetMethodsPtr()] =
+        std::make_pair(NativeRelocationKind::kArtMethodArray, offset);
+
+    for (size_t i = 0; i != number_of_methods; ++i) {
+      ArtMethod* method = &cls->GetMethodsPtr()->At(i);
+      ArtMethod* copy = &dest_array->At(i);
+
+      // Update the class pointer.
+      ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
+      if (declaring_class == cls) {
+        copy->GetDeclaringClassAddressWithoutBarrier()->Assign(
+            reinterpret_cast<mirror::Class*>(class_image_address));
+      } else {
+        DCHECK(method->IsCopied());
+        if (!IsInBootImage(declaring_class.Ptr())) {
+          DCHECK(classes_.find(declaring_class->GetClassDef()) != classes_.end());
+          copy->GetDeclaringClassAddressWithoutBarrier()->Assign(
+              reinterpret_cast<mirror::Class*>(
+                  image_begin_ + sizeof(ImageHeader) + classes_[declaring_class->GetClassDef()]));
+        }
+      }
+
+      // Record the native relocation of the method.
+      uintptr_t copy_offset =
+          reinterpret_cast<uintptr_t>(copy) - reinterpret_cast<uintptr_t>(art_methods_.data());
+      native_relocations_[method] = std::make_pair(NativeRelocationKind::kArtMethod, copy_offset);
+
+      // Ignore the single-implementation info for abstract method.
+      if (method->IsAbstract()) {
+        copy->SetHasSingleImplementation(false);
+        copy->SetSingleImplementation(nullptr, kRuntimePointerSize);
+      }
+
+      // Set the entrypoint and data pointer of the method.
+      const std::vector<gc::space::ImageSpace*>& image_spaces =
+          Runtime::Current()->GetHeap()->GetBootImageSpaces();
+      DCHECK(!image_spaces.empty());
+      const OatFile* oat_file = image_spaces[0]->GetOatFile();
+      DCHECK(oat_file != nullptr);
+      const OatHeader& header = oat_file->GetOatHeader();
+      const uint8_t* address = header.GetOatAddress(method->IsNative()
+          ? StubType::kQuickGenericJNITrampoline
+          : StubType::kQuickToInterpreterBridge);
+      copy->SetEntryPointFromQuickCompiledCode(address);
+
+      if (method->IsNative()) {
+        StubType stub_type = method->IsCriticalNative()
+            ? StubType::kJNIDlsymLookupCriticalTrampoline
+            : StubType::kJNIDlsymLookupTrampoline;
+        copy->SetEntryPointFromJni(header.GetOatAddress(stub_type));
+      } else if (method->IsInvokable()) {
+        DCHECK(method->HasCodeItem()) << method->PrettyMethod();
+        ptrdiff_t code_item_offset = reinterpret_cast<const uint8_t*>(method->GetCodeItem()) -
+                method->GetDexFile()->DataBegin();
+        copy->SetDataPtrSize(
+            reinterpret_cast<const void*>(code_item_offset), kRuntimePointerSize);
+      }
+    }
+  }
+
+  void CopyImTable(ObjPtr<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+    ImTable* table = cls->GetImt(kRuntimePointerSize);
+
+    // If the table is null or shared and/or already emitted, we can skip.
+    if (table == nullptr || IsInBootImage(table) || HasNativeRelocation(table)) {
+      return;
+    }
+
+    const size_t size = ImTable::SizeInBytes(kRuntimePointerSize);
+    size_t offset = im_tables_.size();
+    im_tables_.resize(offset + size);
+    uint8_t* dest = im_tables_.data() + offset;
+    memcpy(dest, table, size);
+    native_relocations_[table] = std::make_pair(NativeRelocationKind::kImTable, offset);
+  }
+
+  bool HasNativeRelocation(void* ptr) const {
+    return native_relocations_.find(ptr) != native_relocations_.end();
   }
 
   bool WriteObjects(std::string* error_msg) {
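PruneVisitor::CanEmit above is a memoized dependency walk: a class can be emitted only if its super class, component type, and direct interfaces can be emitted, and the tri-state visited_ map both caches results and breaks cycles (a class seen while still in state kVisiting is treated as not-yet-emittable rather than recursed into forever). A simplified model:

    #include <unordered_map>
    #include <vector>

    enum class VisitState { kVisiting, kCanEmit, kCannotEmit };

    struct ClassNode {
      bool resolved = true;
      std::vector<ClassNode*> dependencies;  // super, component type, interfaces
    };

    bool CanEmit(ClassNode* cls, std::unordered_map<ClassNode*, VisitState>& visited) {
      if (cls == nullptr) return true;
      auto existing = visited.find(cls);
      if (existing != visited.end()) {
        // Cached result; kVisiting (a cycle) counts as "cannot emit yet".
        return existing->second == VisitState::kCanEmit;
      }
      visited[cls] = VisitState::kVisiting;
      bool ok = cls->resolved;
      for (ClassNode* dep : cls->dependencies) {
        ok = ok && CanEmit(dep, visited);
      }
      visited[cls] = ok ? VisitState::kCanEmit : VisitState::kCannotEmit;
      return ok;
    }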
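The native_relocations_ map threads through CopyFieldArrays, CopyMethodArrays, and CopyImTable above: each copied native structure records which section buffer it landed in and at what offset, and NativeLocationInImage later turns that record into the structure's final mapped address. A reduced model of that bookkeeping (hypothetical names):

    #include <cstdint>
    #include <map>
    #include <utility>

    enum class Kind { kArtFieldArray, kArtMethodArray, kArtMethod, kImTable };

    struct ImageLayoutSketch {
      uint32_t image_begin = 0;
      std::map<Kind, uint32_t> section_offset;  // filled once sections are laid out
      std::map<const void*, std::pair<Kind, uint32_t>> relocations;

      void Record(const void* runtime_ptr, Kind kind, uint32_t offset_in_buffer) {
        relocations[runtime_ptr] = {kind, offset_in_buffer};
      }

      // Equivalent of NativeLocationInImage(): assumes the pointer was recorded
      // earlier (the real code DCHECKs this).
      uint32_t FutureAddress(const void* runtime_ptr) const {
        const auto& [kind, offset] = relocations.at(runtime_ptr);
        return image_begin + section_offset.at(kind) + offset;
      }
    };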
@@ -373,8 +873,9 @@
       CopyObject(image_roots.Get());
     }
 
-    // Copy objects stored in the dex caches.
-    VisitDexCaches(dex_cache_array);
+    // Emit string referenced in dex caches, and classes defined in the app class loader.
+    EmitStringsAndClasses(soa.Self(), dex_cache_array);
+
     return true;
   }
 
@@ -394,12 +895,15 @@
 
     void operator()(ObjPtr<mirror::Object> obj,
                     MemberOffset offset,
-                    bool is_static ATTRIBUTE_UNUSED) const
+                    bool is_static) const
         REQUIRES_SHARED(Locks::mutator_lock_) {
-      ObjPtr<mirror::Object> ref = obj->GetFieldObject<mirror::Object>(offset);
+      // We don't copy static fields, instead classes will be marked as resolved
+      // and initialized at runtime.
+      ObjPtr<mirror::Object> ref =
+          is_static ? nullptr : obj->GetFieldObject<mirror::Object>(offset);
       mirror::Object* address = image_->GetOrComputeImageAddress(ref.Ptr());
       mirror::Object* copy =
-          reinterpret_cast<mirror::Object*>(image_->image_data_.data() + copy_offset_);
+          reinterpret_cast<mirror::Object*>(image_->objects_.data() + copy_offset_);
       copy->GetFieldObjectReferenceAddr<kVerifyNone>(offset)->Assign(address);
     }
 
@@ -415,31 +919,85 @@
     size_t copy_offset_;
   };
 
-  // Copy `obj` in `image_data_` and relocate references. Returns the offset
+  uint32_t CopyDexCache(ObjPtr<mirror::DexCache> cache) REQUIRES_SHARED(Locks::mutator_lock_) {
+    auto it = dex_caches_.find(cache->GetDexFile());
+    if (it != dex_caches_.end()) {
+      return it->second;
+    }
+    uint32_t offset = CopyObject(cache);
+    dex_caches_[cache->GetDexFile()] = offset;
+    // For dex caches, clear pointers to data that will be set at runtime.
+    mirror::Object* copy = reinterpret_cast<mirror::Object*>(objects_.data() + offset);
+    reinterpret_cast<mirror::DexCache*>(copy)->ResetNativeArrays();
+    reinterpret_cast<mirror::DexCache*>(copy)->SetDexFile(nullptr);
+    return offset;
+  }
+
+  uint32_t CopyClass(ObjPtr<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+    const dex::ClassDef* class_def = cls->GetClassDef();
+    auto it = classes_.find(class_def);
+    if (it != classes_.end()) {
+      return it->second;
+    }
+    uint32_t offset = CopyObject(cls);
+    classes_[class_def] = offset;
+
+    uint32_t hash = cls->DescriptorHash();
+    // Save the hash, the `HashSet` implementation requires to find it.
+    class_hashes_[offset] = hash;
+    uint32_t class_image_address = image_begin_ + sizeof(ImageHeader) + offset;
+    bool inserted =
+        class_table_.InsertWithHash(ClassTable::TableSlot(class_image_address, hash), hash).second;
+    DCHECK(inserted) << "Class " << cls->PrettyDescriptor()
+                     << " (" << cls.Ptr() << ") already inserted";
+
+    // Clear internal state.
+    mirror::Class* copy = reinterpret_cast<mirror::Class*>(objects_.data() + offset);
+    copy->SetClinitThreadId(static_cast<pid_t>(0u));
+    copy->SetStatusInternal(cls->IsVerified() ? ClassStatus::kVerified : ClassStatus::kResolved);
+    copy->SetObjectSizeAllocFastPath(std::numeric_limits<uint32_t>::max());
+    copy->SetAccessFlags(copy->GetAccessFlags() & ~kAccRecursivelyInitialized);
+
+    // Clear static field values.
+    MemberOffset static_offset = cls->GetFirstReferenceStaticFieldOffset(kRuntimePointerSize);
+    memset(objects_.data() + offset + static_offset.Uint32Value(),
+           0,
+           cls->GetClassSize() - static_offset.Uint32Value());
+
+    CopyFieldArrays(cls, class_image_address);
+    CopyMethodArrays(cls, class_image_address);
+    if (cls->ShouldHaveImt()) {
+      CopyImTable(cls);
+    }
+
+    return offset;
+  }
+
+  // Copy `obj` in `objects_` and relocate references. Returns the offset
   // within our buffer.
   uint32_t CopyObject(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Copy the object in `image_data_`.
+    // Copy the object in `objects_`.
     size_t object_size = obj->SizeOf();
-    size_t offset = image_data_.size();
+    size_t offset = objects_.size();
     DCHECK(IsAligned<kObjectAlignment>(offset));
     object_offsets_.push_back(offset);
-    image_data_.resize(RoundUp(image_data_.size() + object_size, kObjectAlignment));
-    memcpy(image_data_.data() + offset, obj.Ptr(), object_size);
+    objects_.resize(RoundUp(offset + object_size, kObjectAlignment));
+    memcpy(objects_.data() + offset, obj.Ptr(), object_size);
     object_section_size_ += RoundUp(object_size, kObjectAlignment);
 
     // Fixup reference pointers.
     FixupVisitor visitor(this, offset);
     obj->VisitReferences</*kVisitNativeRoots=*/ false>(visitor, visitor);
 
-    mirror::Object* copy = reinterpret_cast<mirror::Object*>(image_data_.data() + offset);
+    mirror::Object* copy = reinterpret_cast<mirror::Object*>(objects_.data() + offset);
     // Clear any lockword data.
     copy->SetLockWord(LockWord::Default(), /* as_volatile= */ false);
 
-    // For dex caches, clear pointers to data that will be set at runtime.
-    if (obj->IsDexCache()) {
-      reinterpret_cast<mirror::DexCache*>(copy)->ResetNativeArrays();
-      reinterpret_cast<mirror::DexCache*>(copy)->SetDexFile(nullptr);
+    if (obj->IsString()) {
+      // Ensure a string always has a hashcode stored. This is checked at
+      // runtime because boot images don't want strings dirtied due to hashcode.
+      reinterpret_cast<mirror::String*>(copy)->GetHashCode();
     }
     return offset;
   }
@@ -538,16 +1096,28 @@
   // sections.
   ImageHeader header_;
 
-  // Contents of the image sections.
-  std::vector<uint8_t> image_data_;
+  // Contents of the various sections.
+  std::vector<uint8_t> objects_;
+  std::vector<uint8_t> art_fields_;
+  std::vector<uint8_t> art_methods_;
+  std::vector<uint8_t> im_tables_;
 
-  // Bitmap of live objects in `image_data_`. Populated from `object_offsets_`
+  // Bitmap of live objects in `objects_`. Populated from `object_offsets_`
   // once we know `object_section_size`.
   gc::accounting::ContinuousSpaceBitmap image_bitmap_;
 
-  // A list of offsets in `image_data_` where objects begin.
+  // Sections stored in the header.
+  dchecked_vector<ImageSection> sections_;
+
+  // A list of offsets in `objects_` where objects begin.
   std::vector<uint32_t> object_offsets_;
 
+  std::map<const dex::ClassDef*, uint32_t> classes_;
+  std::map<const DexFile*, uint32_t> dex_caches_;
+  std::map<uint32_t, uint32_t> class_hashes_;
+
+  std::map<void*, std::pair<NativeRelocationKind, uint32_t>> native_relocations_;
+
   // Cached values of boot image information.
   const uint32_t boot_image_begin_;
   const uint32_t boot_image_size_;
@@ -563,6 +1133,13 @@
 
   // The intern table for strings that we will write to disk.
   InternTableSet intern_table_;
+
+  // The class table holding classes that we will write to disk.
+  ClassTableSet class_table_;
+
+  friend class ClassDescriptorHash;
+  friend class PruneVisitor;
+  friend class NativePointerVisitor;
 };
 
 std::string RuntimeImage::GetRuntimeImagePath(const std::string& dex_location) {
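CopyObject above appends each object to one growing byte buffer at kObjectAlignment and hands back the offset, which doubles as the object's identity for all later fixups. Its essence, as a stand-alone sketch:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr size_t kObjectAlignment = 8;  // assumed, as in the earlier sketches

    size_t RoundUp(size_t x, size_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    uint32_t CopyObject(std::vector<uint8_t>& buffer, const void* obj, size_t object_size) {
      size_t offset = buffer.size();  // stays aligned thanks to the rounded resize
      buffer.resize(RoundUp(offset + object_size, kObjectAlignment));
      std::memcpy(buffer.data() + offset, obj, object_size);
      return static_cast<uint32_t>(offset);
    }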
@@ -601,43 +1178,94 @@ bool RuntimeImage::WriteImageToDisk(std::string* error_msg) {
     return false;
   }
 
-  // Write section infos. The header is written at the end in case we get killed.
-  if (!out->Write(reinterpret_cast<const char*>(image.GetData().data()),
-                  image.GetData().size(),
-                  sizeof(ImageHeader))) {
+  // Write objects. The header is written at the end in case we get killed.
+  if (out->Write(reinterpret_cast<const char*>(image.GetObjects().data()),
+                 image.GetObjects().size(),
+                 sizeof(ImageHeader)) != static_cast<int64_t>(image.GetObjects().size())) {
     *error_msg = "Could not write image data to " + temp_path;
-    out->Unlink();
+    out->Erase(/*unlink=*/true);
     return false;
   }
 
   {
+    // Write fields.
+    auto fields_section = image.GetHeader().GetImageSection(ImageHeader::kSectionArtFields);
+    if (out->Write(reinterpret_cast<const char*>(image.GetArtFields().data()),
+                   fields_section.Size(),
+                   fields_section.Offset()) != fields_section.Size()) {
+      *error_msg = "Could not write fields section " + temp_path;
+      out->Erase(/*unlink=*/true);
+      return false;
+    }
+  }
+
+  {
+    // Write methods.
+    auto methods_section = image.GetHeader().GetImageSection(ImageHeader::kSectionArtMethods);
+    if (out->Write(reinterpret_cast<const char*>(image.GetArtMethods().data()),
+                   methods_section.Size(),
+                   methods_section.Offset()) != methods_section.Size()) {
+      *error_msg = "Could not write methods section " + temp_path;
+      out->Erase(/*unlink=*/true);
+      return false;
+    }
+  }
+
+  {
+    // Write im tables.
+    auto im_tables_section = image.GetHeader().GetImageSection(ImageHeader::kSectionImTables);
+    if (out->Write(reinterpret_cast<const char*>(image.GetImTables().data()),
+                   im_tables_section.Size(),
+                   im_tables_section.Offset()) != im_tables_section.Size()) {
+      *error_msg = "Could not write ImTable section " + temp_path;
+      out->Erase(/*unlink=*/true);
+      return false;
+    }
+  }
+
+  {
     // Write intern string set.
     auto intern_section = image.GetHeader().GetImageSection(ImageHeader::kSectionInternedStrings);
     std::vector<uint8_t> intern_data(intern_section.Size());
     image.GenerateInternData(intern_data);
-    if (!out->Write(reinterpret_cast<const char*>(intern_data.data()),
-                    intern_section.Size(),
-                    intern_section.Offset())) {
+    if (out->Write(reinterpret_cast<const char*>(intern_data.data()),
+                   intern_section.Size(),
+                   intern_section.Offset()) != intern_section.Size()) {
      *error_msg = "Could not write intern section " + temp_path;
-      out->Unlink();
+      out->Erase(/*unlink=*/true);
+      return false;
+    }
+  }
+
+  {
+    // Write class table.
+    auto class_table_section = image.GetHeader().GetImageSection(ImageHeader::kSectionClassTable);
+    std::vector<uint8_t> class_table_data(class_table_section.Size());
+    image.GenerateClassTableData(class_table_data);
+    if (out->Write(reinterpret_cast<const char*>(class_table_data.data()),
+                   class_table_section.Size(),
+                   class_table_section.Offset()) != class_table_section.Size()) {
+      *error_msg = "Could not write class table section " + temp_path;
+      out->Erase(/*unlink=*/true);
      return false;
    }
  }
 
   // Write bitmap.
   auto bitmap_section = image.GetHeader().GetImageSection(ImageHeader::kSectionImageBitmap);
-  if (!out->Write(reinterpret_cast<const char*>(image.GetImageBitmap().Begin()),
-                  bitmap_section.Size(),
-                  bitmap_section.Offset())) {
+  if (out->Write(reinterpret_cast<const char*>(image.GetImageBitmap().Begin()),
+                 bitmap_section.Size(),
+                 bitmap_section.Offset()) != bitmap_section.Size()) {
     *error_msg = "Could not write image bitmap " + temp_path;
-    out->Unlink();
+    out->Erase(/*unlink=*/true);
     return false;
   }
 
   // Now write header.
-  if (!out->Write(reinterpret_cast<const char*>(&image.GetHeader()), sizeof(ImageHeader), 0u)) {
+  if (out->Write(reinterpret_cast<const char*>(&image.GetHeader()), sizeof(ImageHeader), 0u) !=
+          sizeof(ImageHeader)) {
     *error_msg = "Could not write image header to " + temp_path;
-    out->Unlink();
+    out->Erase(/*unlink=*/true);
     return false;
   }
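Note how every write check above was tightened from !out->Write(...) to comparing the return value against the expected byte count: a short write returns a positive number and would previously have passed as success. A sketch of the pattern (hypothetical File type; the diff relies on Write returning the number of bytes written, negative on error):

    #include <cstdint>
    #include <string>

    struct File {
      // Hypothetical signature: returns bytes written, or negative on error.
      int64_t Write(const char* data, int64_t byte_count, int64_t offset);
    };

    bool WriteFully(File& out, const char* data, int64_t size, int64_t offset,
                    std::string* error_msg) {
      if (out.Write(data, size, offset) != size) {  // catches errors and short writes
        *error_msg = "Could not write section";
        return false;
      }
      return true;
    }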