Write classes in runtime-generated app image. am: df68c0a6f0 am: 9ff3b10a03 am: e9e9ebcb73
Original change: https://android-review.googlesource.com/c/platform/art/+/2373830
Change-Id: I77abe7a6c3689e7e2a3a7e594a84650b488db223
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 2c3c220..db843e4 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -3294,25 +3294,7 @@
const OatFile* oat_file = image_spaces[0]->GetOatFile();
CHECK(oat_file != nullptr);
const OatHeader& header = oat_file->GetOatHeader();
- switch (type) {
- // TODO: We could maybe clean this up if we stored them in an array in the oat header.
- case StubType::kQuickGenericJNITrampoline:
- return static_cast<const uint8_t*>(header.GetQuickGenericJniTrampoline());
- case StubType::kJNIDlsymLookupTrampoline:
- return static_cast<const uint8_t*>(header.GetJniDlsymLookupTrampoline());
- case StubType::kJNIDlsymLookupCriticalTrampoline:
- return static_cast<const uint8_t*>(header.GetJniDlsymLookupCriticalTrampoline());
- case StubType::kQuickIMTConflictTrampoline:
- return static_cast<const uint8_t*>(header.GetQuickImtConflictTrampoline());
- case StubType::kQuickResolutionTrampoline:
- return static_cast<const uint8_t*>(header.GetQuickResolutionTrampoline());
- case StubType::kQuickToInterpreterBridge:
- return static_cast<const uint8_t*>(header.GetQuickToInterpreterBridge());
- case StubType::kNterpTrampoline:
- return static_cast<const uint8_t*>(header.GetNterpTrampoline());
- default:
- UNREACHABLE();
- }
+ return header.GetOatAddress(type);
}
const ImageInfo& primary_image_info = GetImageInfo(0);
return GetOatAddressForOffset(primary_image_info.GetStubOffset(type), primary_image_info);
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index e5eeacc..f81df4e 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -45,6 +45,7 @@
#include "intern_table.h"
#include "lock_word.h"
#include "mirror/dex_cache.h"
+#include "oat.h"
#include "oat_file.h"
#include "obj_ptr.h"
@@ -229,18 +230,6 @@
};
friend std::ostream& operator<<(std::ostream& stream, NativeObjectRelocationType type);
- enum class StubType {
- kJNIDlsymLookupTrampoline,
- kJNIDlsymLookupCriticalTrampoline,
- kQuickGenericJNITrampoline,
- kQuickIMTConflictTrampoline,
- kQuickResolutionTrampoline,
- kQuickToInterpreterBridge,
- kNterpTrampoline,
- kLast = kNterpTrampoline,
- };
- friend std::ostream& operator<<(std::ostream& stream, StubType stub_type);
-
static constexpr size_t kBinBits =
MinimumBitsToStore<uint32_t>(static_cast<size_t>(Bin::kMirrorCount) - 1);
// uint32 = typeof(lockword_)
@@ -712,7 +701,6 @@
std::ostream& operator<<(std::ostream& stream, ImageWriter::Bin bin);
std::ostream& operator<<(std::ostream& stream, ImageWriter::NativeObjectRelocationType type);
-std::ostream& operator<<(std::ostream& stream, ImageWriter::StubType stub_type);
} // namespace linker
} // namespace art
diff --git a/libartbase/base/safe_map.h b/libartbase/base/safe_map.h
index c6d4353..fa13fe0 100644
--- a/libartbase/base/safe_map.h
+++ b/libartbase/base/safe_map.h
@@ -49,8 +49,9 @@
SafeMap() = default;
SafeMap(const SafeMap&) = default;
SafeMap(SafeMap&&) noexcept = default;
+ explicit SafeMap(const allocator_type& allocator) : map_(allocator) {}
explicit SafeMap(const key_compare& cmp, const allocator_type& allocator = allocator_type())
- : map_(cmp, allocator) {
+ : map_(cmp, allocator) {
}
Self& operator=(const Self& rhs) {
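The new allocator-only constructor lets a SafeMap be built with a default-constructed comparator, which is what the ArenaSafeMap member `visited_(allocator.Adapter())` added later in runtime_image.cc relies on. For illustration only (not part of the change), a minimal standalone sketch of the same forwarding pattern on std::map, the container SafeMap wraps:

#include <functional>
#include <map>
#include <memory>

int main() {
  using Alloc = std::allocator<std::pair<const int, int>>;
  Alloc alloc;
  // std::map, like SafeMap's underlying map_, offers an allocator-only
  // constructor that default-constructs the comparator.
  std::map<int, int, std::less<int>, Alloc> allocator_only(alloc);
  // The pre-existing SafeMap constructor corresponds to this overload,
  // which takes the comparator explicitly.
  std::map<int, int, std::less<int>, Alloc> with_comparator(std::less<int>(), alloc);
  allocator_only[1] = 2;
  return allocator_only.size() == 1 ? 0 : 1;
}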
diff --git a/runtime/Android.bp b/runtime/Android.bp
index bfe04f3..a1a0b48 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -592,6 +592,7 @@
"jni_id_type.h",
"linear_alloc.h",
"lock_word.h",
+ "oat.h",
"oat_file.h",
"process_state.h",
"reflective_value_visitor.h",
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index a2f4513..b829f59 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2180,9 +2180,11 @@
ObjPtr<mirror::Class> klass(root.Read());
// Do not update class loader for boot image classes where the app image
// class loader is only the initiating loader but not the defining loader.
- // Avoid read barrier since we are comparing against null.
- if (klass->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ if (space->HasAddress(klass.Ptr())) {
klass->SetClassLoader(loader);
+ } else {
+ DCHECK(klass->IsBootStrapClassLoaded());
+ DCHECK(Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Ptr()));
}
}
}
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 67eeb55..ecc8a0a 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -213,6 +213,11 @@
DCHECK_EQ(descriptor_hash, klass->DescriptorHash());
}
+inline ClassTable::TableSlot::TableSlot(uint32_t ptr, uint32_t descriptor_hash)
+ : data_(ptr | MaskHash(descriptor_hash)) {
+ DCHECK_ALIGNED(ptr, kObjectAlignment);
+}
+
template <typename Filter>
inline void ClassTable::RemoveStrongRoots(const Filter& filter) {
WriterMutexLock mu(Thread::Current(), lock_);
@@ -227,6 +232,11 @@
return Lookup(descriptor, hash);
}
+inline size_t ClassTable::Size() const {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ return classes_.size();
+}
+
} // namespace art
#endif // ART_RUNTIME_CLASS_TABLE_INL_H_
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 123c069..7e26373 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -58,18 +58,31 @@
explicit TableSlot(ObjPtr<mirror::Class> klass);
TableSlot(ObjPtr<mirror::Class> klass, uint32_t descriptor_hash);
+ TableSlot(uint32_t ptr, uint32_t descriptor_hash);
TableSlot& operator=(const TableSlot& copy) {
data_.store(copy.data_.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}
+ uint32_t Data() const {
+ return data_.load(std::memory_order_relaxed);
+ }
+
bool IsNull() const REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t Hash() const {
return MaskHash(data_.load(std::memory_order_relaxed));
}
+ uint32_t NonHashData() const {
+ return RemoveHash(Data());
+ }
+
+ static uint32_t RemoveHash(uint32_t hash) {
+ return hash & ~kHashMask;
+ }
+
static uint32_t MaskHash(uint32_t hash) {
return hash & kHashMask;
}
@@ -168,6 +181,11 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns the number of classes in the class table.
+ size_t Size() const
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Update a class in the table with the new class. Returns the existing class which was replaced.
ObjPtr<mirror::Class> UpdateClass(const char* descriptor,
ObjPtr<mirror::Class> new_klass,
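The TableSlot helpers added above expose the packing that the runtime image writer relies on: class pointers (or image addresses) are kObjectAlignment-aligned, so the low bits of the slot can cache a truncated descriptor hash, and RemoveHash()/MaskHash() split the packed word apart again. A standalone sketch of that packing, for illustration only; the alignment and mask values here are assumptions of the sketch rather than ART's exact definitions:

#include <cassert>
#include <cstdint>

constexpr uint32_t kObjectAlignment = 8;               // assumed alignment
constexpr uint32_t kHashMask = kObjectAlignment - 1u;  // low bits hold the hash

uint32_t MaskHash(uint32_t hash) { return hash & kHashMask; }
uint32_t RemoveHash(uint32_t data) { return data & ~kHashMask; }

int main() {
  uint32_t class_address = 0x7000u;       // aligned address inside the image
  uint32_t descriptor_hash = 0xdeadbeefu;
  // TableSlot(uint32_t ptr, uint32_t descriptor_hash) stores ptr | MaskHash(hash).
  uint32_t slot_data = class_address | MaskHash(descriptor_hash);
  // NonHashData() recovers the pointer part, Hash() the truncated hash.
  assert(RemoveHash(slot_data) == class_address);
  assert(MaskHash(slot_data) == MaskHash(descriptor_hash));
  return 0;
}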
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 3aa9e52..60a82a2 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -121,6 +121,15 @@
}
}
+template <typename Visitor>
+inline void HandleScope::VisitHandles(Visitor& visitor) {
+ for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
+ if (GetHandle(i) != nullptr) {
+ visitor.Visit(GetHandle(i));
+ }
+ }
+}
+
template<size_t kNumReferences> template<class T>
inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
return NewHandle(ObjPtr<T>(object));
@@ -179,6 +188,15 @@
}
}
+template <typename Visitor>
+inline void BaseHandleScope::VisitHandles(Visitor& visitor) {
+ if (LIKELY(!IsVariableSized())) {
+ AsHandleScope()->VisitHandles(visitor);
+ } else {
+ AsVariableSized()->VisitHandles(visitor);
+ }
+}
+
inline VariableSizedHandleScope* BaseHandleScope::AsVariableSized() {
DCHECK(IsVariableSized());
return down_cast<VariableSizedHandleScope*>(this);
@@ -269,6 +287,15 @@
}
}
+template <typename Visitor>
+inline void VariableSizedHandleScope::VisitHandles(Visitor& visitor) {
+ LocalScopeType* cur = current_scope_;
+ while (cur != nullptr) {
+ cur->VisitHandles(visitor);
+ cur = reinterpret_cast<LocalScopeType*>(cur->GetLink());
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_HANDLE_SCOPE_INL_H_
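VisitHandles, added above, only requires its visitor to expose a Visit(handle) method and hands it every non-null handle in the scope; the PruneVisitor introduced later in runtime_image.cc is such a visitor. For illustration only, a standalone sketch of the same duck-typed contract, with plain pointers standing in for ART handles:

#include <cstdio>
#include <vector>

struct Scope {
  std::vector<int*> handles;  // stand-in for StackReference<mirror::Object> slots

  template <typename Visitor>
  void VisitHandles(Visitor& visitor) {
    for (int* handle : handles) {
      if (handle != nullptr) {
        visitor.Visit(handle);
      }
    }
  }
};

struct Printer {
  void Visit(int* handle) { std::printf("handle -> %d\n", *handle); }
};

int main() {
  int a = 1, b = 2;
  Scope scope;
  scope.handles = {&a, nullptr, &b};
  Printer printer;
  scope.VisitHandles(printer);  // visits a and b, skips the null slot
  return 0;
}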
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 89127e4..a43e889 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -56,6 +56,9 @@
template <typename Visitor>
ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitHandles(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Link to previous BaseHandleScope or null.
BaseHandleScope* GetLink() const {
return link_;
@@ -148,6 +151,9 @@
template <typename Visitor>
ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitHandles(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
protected:
// Return backing storage used for references.
ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
@@ -261,6 +267,9 @@
template <typename Visitor>
void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitHandles(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
static constexpr size_t kLocalScopeSize = 64u;
static constexpr size_t kSizeOfReferencesPerScope =
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index a7faa37..2bdf827 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -240,13 +240,16 @@
// C style casts here since we sometimes have T be a pointer, or sometimes an integer
// (for stack traces).
using ConversionType = typename std::conditional_t<std::is_pointer_v<T>, uintptr_t, T>;
+ // Note: we cast the array directly when unchecked, as this code gets called
+ // by runtime_image, which can pass a 64-bit pointer that cannot be held by
+ // an ObjPtr.
if (kPointerSize == PointerSize::k64) {
uint64_t value =
- static_cast<uint64_t>(AsLongArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+ static_cast<uint64_t>(reinterpret_cast<LongArray*>(this)->GetWithoutChecks(idx));
return (T) dchecked_integral_cast<ConversionType>(value);
} else {
uint32_t value =
- static_cast<uint32_t>(AsIntArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+ static_cast<uint32_t>(reinterpret_cast<IntArray*>(this)->GetWithoutChecks(idx));
return (T) dchecked_integral_cast<ConversionType>(value);
}
}
@@ -261,12 +264,15 @@
template<bool kTransactionActive, bool kCheckTransaction, bool kUnchecked>
inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size) {
+ // Note: we cast the array directly when unchecked, as this code gets called
+ // by runtime_image, which can pass a 64-bit pointer that cannot be held by
+ // an ObjPtr.
if (ptr_size == PointerSize::k64) {
- (kUnchecked ? ObjPtr<LongArray>::DownCast(ObjPtr<Object>(this)) : AsLongArray())->
+ (kUnchecked ? reinterpret_cast<LongArray*>(this) : AsLongArray().Ptr())->
SetWithoutChecks<kTransactionActive, kCheckTransaction>(idx, element);
} else {
uint32_t element32 = dchecked_integral_cast<uint32_t>(element);
- (kUnchecked ? ObjPtr<IntArray>::DownCast(ObjPtr<Object>(this)) : AsIntArray())
+ (kUnchecked ? reinterpret_cast<IntArray*>(this) : AsIntArray().Ptr())
->SetWithoutChecks<kTransactionActive, kCheckTransaction>(idx, element32);
}
}
@@ -278,7 +284,7 @@
}
template <VerifyObjectFlags kVerifyFlags, typename Visitor>
-inline void PointerArray::Fixup(ObjPtr<mirror::PointerArray> dest,
+inline void PointerArray::Fixup(mirror::PointerArray* dest,
PointerSize pointer_size,
const Visitor& visitor) {
for (size_t i = 0, count = GetLength(); i < count; ++i) {
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index dfe7d47..0116fde 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -264,7 +264,7 @@
// Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
// to dest if visitor(source_ptr) != source_ptr.
template <VerifyObjectFlags kVerifyFlags = kVerifyNone, typename Visitor>
- void Fixup(ObjPtr<mirror::PointerArray> dest, PointerSize pointer_size, const Visitor& visitor)
+ void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
// Works like memcpy(), except we guarantee not to allow tearing of array values (ie using smaller
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index bd37534..b9eb9d0 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -65,6 +65,7 @@
template<size_t kNumReferences> class PACKED(4) StackHandleScope;
class Thread;
class DexCacheVisitor;
+class RuntimeImageHelper;
namespace mirror {
@@ -1579,6 +1580,7 @@
friend struct art::ClassOffsets; // for verifying offset information
friend class Object; // For VisitReferences
friend class linker::ImageWriter; // For SetStatusInternal
+ friend class art::RuntimeImageHelper; // For SetStatusInternal
DISALLOW_IMPLICIT_CONSTRUCTORS(Class);
};
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 42bb7d4..2c7a73f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -467,4 +467,27 @@
key_value_store_size_ = data_ptr - reinterpret_cast<char*>(&key_value_store_);
}
+const uint8_t* OatHeader::GetOatAddress(StubType type) const {
+ DCHECK_LE(type, StubType::kLast);
+ switch (type) {
+ // TODO: We could maybe clean this up if we stored them in an array in the oat header.
+ case StubType::kQuickGenericJNITrampoline:
+ return static_cast<const uint8_t*>(GetQuickGenericJniTrampoline());
+ case StubType::kJNIDlsymLookupTrampoline:
+ return static_cast<const uint8_t*>(GetJniDlsymLookupTrampoline());
+ case StubType::kJNIDlsymLookupCriticalTrampoline:
+ return static_cast<const uint8_t*>(GetJniDlsymLookupCriticalTrampoline());
+ case StubType::kQuickIMTConflictTrampoline:
+ return static_cast<const uint8_t*>(GetQuickImtConflictTrampoline());
+ case StubType::kQuickResolutionTrampoline:
+ return static_cast<const uint8_t*>(GetQuickResolutionTrampoline());
+ case StubType::kQuickToInterpreterBridge:
+ return static_cast<const uint8_t*>(GetQuickToInterpreterBridge());
+ case StubType::kNterpTrampoline:
+ return static_cast<const uint8_t*>(GetNterpTrampoline());
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index 3b32e11..e062baa 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -29,6 +29,18 @@
enum class InstructionSet;
class InstructionSetFeatures;
+enum class StubType {
+ kJNIDlsymLookupTrampoline,
+ kJNIDlsymLookupCriticalTrampoline,
+ kQuickGenericJNITrampoline,
+ kQuickIMTConflictTrampoline,
+ kQuickResolutionTrampoline,
+ kQuickToInterpreterBridge,
+ kNterpTrampoline,
+ kLast = kNterpTrampoline,
+};
+std::ostream& operator<<(std::ostream& stream, StubType stub_type);
+
class PACKED(4) OatHeader {
public:
static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
@@ -111,6 +123,8 @@
bool IsConcurrentCopying() const;
bool RequiresImage() const;
+ const uint8_t* GetOatAddress(StubType type) const;
+
private:
bool KeyHasValue(const char* key, const char* value, size_t value_size) const;
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index 7137991..9d59c4d 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -22,9 +22,12 @@
#include "android-base/stringprintf.h"
+#include "base/arena_allocator.h"
+#include "base/arena_containers.h"
#include "base/bit_utils.h"
#include "base/file_utils.h"
#include "base/length_prefixed_array.h"
+#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "base/utils.h"
#include "class_loader_utils.h"
@@ -37,23 +40,36 @@
#include "mirror/object_array-inl.h"
#include "mirror/object_array.h"
#include "mirror/string-inl.h"
+#include "oat.h"
#include "scoped_thread_state_change-inl.h"
#include "vdex_file.h"
namespace art {
/**
+ * The native data structures that we store in the image.
+ */
+enum class NativeRelocationKind {
+ kArtFieldArray,
+ kArtMethodArray,
+ kArtMethod,
+ kImTable,
+};
+
+/**
* Helper class to generate an app image at runtime.
*/
class RuntimeImageHelper {
public:
explicit RuntimeImageHelper(gc::Heap* heap) :
+ sections_(ImageHeader::kSectionCount),
boot_image_begin_(heap->GetBootImagesStartAddress()),
boot_image_size_(heap->GetBootImagesSize()),
image_begin_(boot_image_begin_ + boot_image_size_),
// Note: image relocation considers the image header in the bitmap.
object_section_size_(sizeof(ImageHeader)),
- intern_table_(InternStringHash(this), InternStringEquals(this)) {}
+ intern_table_(InternStringHash(this), InternStringEquals(this)),
+ class_table_(ClassDescriptorHash(this), ClassDescriptorEquals()) {}
bool Generate(std::string* error_msg) {
@@ -62,12 +78,15 @@
}
// Generate the sections information stored in the header.
- dchecked_vector<ImageSection> sections(ImageHeader::kSectionCount);
- CreateImageSections(sections);
+ CreateImageSections();
+
+ // Now that all sections have been created and we know their offset and
+ // size, relocate native pointers inside classes and ImTables.
+ RelocateNativePointers();
// Generate the bitmap section, stored page aligned after the sections data
// and of size `object_section_size_` page aligned.
- size_t sections_end = sections[ImageHeader::kSectionMetadata].End();
+ size_t sections_end = sections_[ImageHeader::kSectionMetadata].End();
image_bitmap_ = gc::accounting::ContinuousSpaceBitmap::Create(
"image bitmap",
reinterpret_cast<uint8_t*>(image_begin_),
@@ -78,7 +97,7 @@
reinterpret_cast<mirror::Object*>(image_begin_ + sizeof(ImageHeader) + offset));
}
const size_t bitmap_bytes = image_bitmap_.Size();
- auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
+ auto* bitmap_section = &sections_[ImageHeader::kSectionImageBitmap];
*bitmap_section = ImageSection(RoundUp(sections_end, kPageSize),
RoundUp(bitmap_bytes, kPageSize));
@@ -101,7 +120,7 @@
/* component_count= */ 1,
image_begin_,
sections_end,
- sections.data(),
+ sections_.data(),
/* image_roots= */ image_begin_ + sizeof(ImageHeader),
/* oat_checksum= */ 0,
/* oat_file_begin= */ 0,
@@ -123,8 +142,20 @@
return true;
}
- const std::vector<uint8_t>& GetData() const {
- return image_data_;
+ const std::vector<uint8_t>& GetObjects() const {
+ return objects_;
+ }
+
+ const std::vector<uint8_t>& GetArtMethods() const {
+ return art_methods_;
+ }
+
+ const std::vector<uint8_t>& GetArtFields() const {
+ return art_fields_;
+ }
+
+ const std::vector<uint8_t>& GetImTables() const {
+ return im_tables_;
}
const ImageHeader& GetHeader() const {
@@ -143,12 +174,16 @@
intern_table_.WriteToMemory(data.data());
}
+ void GenerateClassTableData(std::vector<uint8_t>& data) const {
+ class_table_.WriteToMemory(data.data());
+ }
+
private:
bool IsInBootImage(const void* obj) const {
return reinterpret_cast<uintptr_t>(obj) - boot_image_begin_ < boot_image_size_;
}
- // Returns a pointer that can be stored in `image_data_`:
+ // Returns a pointer that can be stored in `objects_`:
// - The pointer itself for boot image objects,
// - The offset in the image for all other objects.
mirror::Object* GetOrComputeImageAddress(ObjPtr<mirror::Object> object)
@@ -156,57 +191,90 @@
if (object == nullptr || IsInBootImage(object.Ptr())) {
DCHECK(object == nullptr || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(object));
return object.Ptr();
- } else if (object->IsClassLoader()) {
+ }
+
+ if (object->IsClassLoader()) {
// DexCache and Class point to class loaders. For runtime-generated app
// images, we don't encode the class loader. It will be set when the
// runtime is loading the image.
return nullptr;
- } else {
- uint32_t offset = CopyObject(object);
- return reinterpret_cast<mirror::Object*>(image_begin_ + sizeof(ImageHeader) + offset);
}
+
+ if (object->GetClass() == GetClassRoot<mirror::ClassExt>()) {
+ // No need to encode `ClassExt`. If needed, it will be reconstructed at
+ // runtime.
+ return nullptr;
+ }
+
+ uint32_t offset = 0u;
+ if (object->IsClass()) {
+ offset = CopyClass(object->AsClass());
+ } else if (object->IsDexCache()) {
+ offset = CopyDexCache(object->AsDexCache());
+ } else {
+ offset = CopyObject(object);
+ }
+ return reinterpret_cast<mirror::Object*>(image_begin_ + sizeof(ImageHeader) + offset);
}
- void CreateImageSections(dchecked_vector<ImageSection>& sections) const {
- sections[ImageHeader::kSectionObjects] =
- ImageSection(0u, object_section_size_);
- sections[ImageHeader::kSectionArtFields] =
- ImageSection(sections[ImageHeader::kSectionObjects].End(), 0u);
- sections[ImageHeader::kSectionArtMethods] =
- ImageSection(sections[ImageHeader::kSectionArtFields].End(), 0u);
- sections[ImageHeader::kSectionImTables] =
- ImageSection(sections[ImageHeader::kSectionArtMethods].End(), 0u);
- sections[ImageHeader::kSectionIMTConflictTables] =
- ImageSection(sections[ImageHeader::kSectionImTables].End(), 0u);
- sections[ImageHeader::kSectionRuntimeMethods] =
- ImageSection(sections[ImageHeader::kSectionIMTConflictTables].End(), 0u);
+ void CreateImageSections() {
+ sections_[ImageHeader::kSectionObjects] = ImageSection(0u, object_section_size_);
+ sections_[ImageHeader::kSectionArtFields] =
+ ImageSection(sections_[ImageHeader::kSectionObjects].End(), art_fields_.size());
+
+ // Round up to the alignment for ArtMethod.
+ static_assert(IsAligned<sizeof(void*)>(ArtMethod::Size(kRuntimePointerSize)));
+ size_t cur_pos = RoundUp(sections_[ImageHeader::kSectionArtFields].End(), sizeof(void*));
+ sections_[ImageHeader::kSectionArtMethods] = ImageSection(cur_pos, art_methods_.size());
+
+ // Round up to the alignment for ImTables.
+ cur_pos = RoundUp(sections_[ImageHeader::kSectionArtMethods].End(), sizeof(void*));
+ sections_[ImageHeader::kSectionImTables] = ImageSection(cur_pos, im_tables_.size());
+
+ // Round up to the alignment for conflict tables.
+ cur_pos = RoundUp(sections_[ImageHeader::kSectionImTables].End(), sizeof(void*));
+ sections_[ImageHeader::kSectionIMTConflictTables] = ImageSection(cur_pos, 0u);
+
+ sections_[ImageHeader::kSectionRuntimeMethods] =
+ ImageSection(sections_[ImageHeader::kSectionIMTConflictTables].End(), 0u);
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
- size_t cur_pos = RoundUp(sections[ImageHeader::kSectionRuntimeMethods].End(), sizeof(uint64_t));
+ cur_pos = RoundUp(sections_[ImageHeader::kSectionRuntimeMethods].End(), sizeof(uint64_t));
size_t intern_table_bytes = intern_table_.WriteToMemory(nullptr);
- sections[ImageHeader::kSectionInternedStrings] = ImageSection(cur_pos, intern_table_bytes);
+ sections_[ImageHeader::kSectionInternedStrings] = ImageSection(cur_pos, intern_table_bytes);
// Obtain the new position and round it up to the appropriate alignment.
- cur_pos = RoundUp(sections[ImageHeader::kSectionInternedStrings].End(), sizeof(uint64_t));
- sections[ImageHeader::kSectionClassTable] = ImageSection(cur_pos, 0u);
+ cur_pos = RoundUp(sections_[ImageHeader::kSectionInternedStrings].End(), sizeof(uint64_t));
+
+ size_t class_table_bytes = class_table_.WriteToMemory(nullptr);
+ sections_[ImageHeader::kSectionClassTable] = ImageSection(cur_pos, class_table_bytes);
// Round up to the alignment of the offsets we are going to store.
- cur_pos = RoundUp(sections[ImageHeader::kSectionClassTable].End(), sizeof(uint32_t));
- sections[ImageHeader::kSectionStringReferenceOffsets] = ImageSection(cur_pos, 0u);
+ cur_pos = RoundUp(sections_[ImageHeader::kSectionClassTable].End(), sizeof(uint32_t));
+ sections_[ImageHeader::kSectionStringReferenceOffsets] = ImageSection(cur_pos, 0u);
// Round up to the alignment of the offsets we are going to store.
cur_pos =
- RoundUp(sections[ImageHeader::kSectionStringReferenceOffsets].End(), sizeof(uint32_t));
+ RoundUp(sections_[ImageHeader::kSectionStringReferenceOffsets].End(), sizeof(uint32_t));
- sections[ImageHeader::kSectionMetadata] = ImageSection(cur_pos, 0u);
+ sections_[ImageHeader::kSectionMetadata] = ImageSection(cur_pos, 0u);
}
- // Returns the copied mirror Object. This is really its content, it should not
+ // Returns the copied mirror Object if in the image, or the object directly if
+ // in the boot image. For the copy, this is really its content; it should not
// be returned as an `ObjPtr` (as it's not a GC object), nor stored anywhere.
template<typename T> T* FromImageOffsetToRuntimeContent(uint32_t offset) {
- uint32_t vector_data_offset = offset - sizeof(ImageHeader) - image_begin_;
- return reinterpret_cast<T*>(image_data_.data() + vector_data_offset);
+ if (offset == 0u || IsInBootImage(reinterpret_cast<const void*>(offset))) {
+ return reinterpret_cast<T*>(offset);
+ }
+ uint32_t vector_data_offset = FromImageOffsetToVectorOffset(offset);
+ return reinterpret_cast<T*>(objects_.data() + vector_data_offset);
+ }
+
+ uint32_t FromImageOffsetToVectorOffset(uint32_t offset) const {
+ DCHECK(!IsInBootImage(reinterpret_cast<const void*>(offset)));
+ return offset - sizeof(ImageHeader) - image_begin_;
}
class InternStringHash {
@@ -252,6 +320,39 @@
using InternTableSet =
HashSet<uint32_t, DefaultEmptyFn<uint32_t>, InternStringHash, InternStringEquals>;
+ class ClassDescriptorHash {
+ public:
+ explicit ClassDescriptorHash(RuntimeImageHelper* helper) : helper_(helper) {}
+
+ uint32_t operator()(const ClassTable::TableSlot& slot) const NO_THREAD_SAFETY_ANALYSIS {
+ uint32_t ptr = slot.NonHashData();
+ if (helper_->IsInBootImage(reinterpret_cast32<const void*>(ptr))) {
+ return reinterpret_cast32<mirror::Class*>(ptr)->DescriptorHash();
+ }
+ return helper_->class_hashes_[helper_->FromImageOffsetToVectorOffset(ptr)];
+ }
+
+ private:
+ RuntimeImageHelper* helper_;
+ };
+
+ class ClassDescriptorEquals {
+ public:
+ ClassDescriptorEquals() {}
+
+ bool operator()(const ClassTable::TableSlot& a, const ClassTable::TableSlot& b)
+ const NO_THREAD_SAFETY_ANALYSIS {
+ // No need to fetch the descriptor: we know the classes we are inserting
+ // in the ClassTable are unique.
+ return a.Data() == b.Data();
+ }
+ };
+
+ using ClassTableSet = HashSet<ClassTable::TableSlot,
+ ClassTable::TableSlotEmptyFn,
+ ClassDescriptorHash,
+ ClassDescriptorEquals>;
+
void VisitDexCache(ObjPtr<mirror::DexCache> dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = *dex_cache->GetDexFile();
// Currently only copy string objects into the image. Populate the intern
@@ -270,11 +371,410 @@
}
}
- void VisitDexCaches(Handle<mirror::ObjectArray<mirror::Object>> dex_cache_array)
+ // Helper class to collect classes that we will generate in the image.
+ class ClassTableVisitor {
+ public:
+ ClassTableVisitor(Handle<mirror::ClassLoader> loader, VariableSizedHandleScope& handles)
+ : loader_(loader), handles_(handles) {}
+
+ bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Record app classes and boot classpath classes: app classes will be
+ // generated in the image and put in the class table, while boot classpath
+ // classes will only be put in the class table.
+ ObjPtr<mirror::ClassLoader> class_loader = klass->GetClassLoader();
+ if (class_loader == loader_.Get() || class_loader == nullptr) {
+ handles_.NewHandle(klass);
+ }
+ return true;
+ }
+
+ private:
+ Handle<mirror::ClassLoader> loader_;
+ VariableSizedHandleScope& handles_;
+ };
+
+ // Helper class visitor to filter out classes we cannot emit.
+ class PruneVisitor {
+ public:
+ PruneVisitor(Thread* self,
+ RuntimeImageHelper* helper,
+ const ArenaSet<const DexFile*>& dex_files,
+ ArenaVector<Handle<mirror::Class>>& classes,
+ ArenaAllocator& allocator)
+ : self_(self),
+ helper_(helper),
+ dex_files_(dex_files),
+ visited_(allocator.Adapter()),
+ classes_to_write_(classes) {}
+
+ bool CanEmitHelper(Handle<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Only emit classes that are resolved and not erroneous.
+ if (!cls->IsResolved() || cls->IsErroneous()) {
+ return false;
+ }
+
+ // Classes in the boot image can be trivially encoded directly.
+ if (helper_->IsInBootImage(cls.Get())) {
+ return true;
+ }
+
+ // If the class comes from a dex file which is not part of the primary
+ // APK, don't encode it.
+ if (!ContainsElement(dex_files_, &cls->GetDexFile())) {
+ return false;
+ }
+
+ // Ensure pointers to classes in `cls` can also be emitted.
+ StackHandleScope<1> hs(self_);
+ MutableHandle<mirror::Class> other_class = hs.NewHandle(cls->GetSuperClass());
+ if (!CanEmit(other_class)) {
+ return false;
+ }
+
+ other_class.Assign(cls->GetComponentType());
+ if (!CanEmit(other_class)) {
+ return false;
+ }
+
+ for (size_t i = 0, num_interfaces = cls->NumDirectInterfaces(); i < num_interfaces; ++i) {
+ other_class.Assign(cls->GetDirectInterface(i));
+ if (!CanEmit(other_class)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool CanEmit(Handle<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (cls == nullptr) {
+ return true;
+ }
+ const dex::ClassDef* class_def = cls->GetClassDef();
+ if (class_def == nullptr) {
+ // Covers array classes and proxy classes.
+ // TODO: Handle these differently.
+ return false;
+ }
+ auto existing = visited_.find(class_def);
+ if (existing != visited_.end()) {
+ // Already processed.
+ return existing->second == VisitState::kCanEmit;
+ }
+
+ visited_.Put(class_def, VisitState::kVisiting);
+ if (CanEmitHelper(cls)) {
+ visited_.Overwrite(class_def, VisitState::kCanEmit);
+ return true;
+ } else {
+ visited_.Overwrite(class_def, VisitState::kCannotEmit);
+ return false;
+ }
+ }
+
+ void Visit(Handle<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ MutableHandle<mirror::Class> cls(obj.GetReference());
+ if (CanEmit(cls)) {
+ if (cls->IsBootStrapClassLoaded()) {
+ DCHECK(helper_->IsInBootImage(cls.Get()));
+ // Insert the bootclasspath class in the class table.
+ uint32_t hash = cls->DescriptorHash();
+ helper_->class_table_.InsertWithHash(ClassTable::TableSlot(cls.Get(), hash), hash);
+ } else {
+ classes_to_write_.push_back(cls);
+ }
+ }
+ }
+
+ private:
+ enum class VisitState {
+ kVisiting,
+ kCanEmit,
+ kCannotEmit,
+ };
+
+ Thread* const self_;
+ RuntimeImageHelper* const helper_;
+ const ArenaSet<const DexFile*>& dex_files_;
+ ArenaSafeMap<const dex::ClassDef*, VisitState> visited_;
+ ArenaVector<Handle<mirror::Class>>& classes_to_write_;
+ };
+
+ void EmitStringsAndClasses(Thread* self,
+ Handle<mirror::ObjectArray<mirror::Object>> dex_cache_array)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArenaAllocator allocator(Runtime::Current()->GetArenaPool());
+ ArenaSet<const DexFile*> dex_files(allocator.Adapter());
for (int32_t i = 0; i < dex_cache_array->GetLength(); ++i) {
+ dex_files.insert(dex_cache_array->Get(i)->AsDexCache()->GetDexFile());
VisitDexCache(ObjPtr<mirror::DexCache>::DownCast((dex_cache_array->Get(i))));
}
+
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader = hs.NewHandle(
+ dex_cache_array->Get(0)->AsDexCache()->GetClassLoader());
+ ClassTable* const class_table = loader->GetClassTable();
+ if (class_table == nullptr) {
+ return;
+ }
+
+ VariableSizedHandleScope handles(self);
+ {
+ ClassTableVisitor class_table_visitor(loader, handles);
+ class_table->Visit(class_table_visitor);
+ }
+
+ ArenaVector<Handle<mirror::Class>> classes_to_write(allocator.Adapter());
+ classes_to_write.reserve(class_table->Size());
+ {
+ PruneVisitor prune_visitor(self, this, dex_files, classes_to_write, allocator);
+ handles.VisitHandles(prune_visitor);
+ }
+
+ for (Handle<mirror::Class> cls : classes_to_write) {
+ ScopedAssertNoThreadSuspension sants("Writing class");
+ CopyClass(cls.Get());
+ }
+ }
+
+ // Helper visitor returning the location of a native pointer in the image.
+ class NativePointerVisitor {
+ public:
+ explicit NativePointerVisitor(RuntimeImageHelper* helper) : helper_(helper) {}
+
+ template <typename T>
+ T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
+ return helper_->NativeLocationInImage(ptr);
+ }
+
+ template <typename T> T* operator()(T* ptr) const {
+ return helper_->NativeLocationInImage(ptr);
+ }
+
+ private:
+ RuntimeImageHelper* helper_;
+ };
+
+ template <typename T> T* NativeLocationInImage(T* ptr) const {
+ if (ptr == nullptr || IsInBootImage(ptr)) {
+ return ptr;
+ }
+
+ auto it = native_relocations_.find(ptr);
+ DCHECK(it != native_relocations_.end());
+ switch (it->second.first) {
+ case NativeRelocationKind::kArtMethod:
+ case NativeRelocationKind::kArtMethodArray: {
+ uint32_t offset = sections_[ImageHeader::kSectionArtMethods].Offset();
+ return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+ }
+ case NativeRelocationKind::kArtFieldArray: {
+ uint32_t offset = sections_[ImageHeader::kSectionArtFields].Offset();
+ return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+ }
+ case NativeRelocationKind::kImTable: {
+ uint32_t offset = sections_[ImageHeader::kSectionImTables].Offset();
+ return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+ }
+ }
+ }
+
+ template <typename Visitor>
+ void RelocateMethodPointerArrays(mirror::Class* klass, const Visitor& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // A bit of magic here: we cast contents from our buffer to mirror::Class,
+ // and do pointer comparison between 1) these classes, and 2) boot image objects.
+ // Both kinds do not move.
+
+ // See if we need to fixup the vtable field.
+ mirror::Class* super = FromImageOffsetToRuntimeContent<mirror::Class>(
+ reinterpret_cast32<uint32_t>(
+ klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+ DCHECK(super != nullptr) << "j.l.Object should never be in an app runtime image";
+ mirror::PointerArray* vtable = FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+ reinterpret_cast32<uint32_t>(klass->GetVTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+ mirror::PointerArray* super_vtable = FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+ reinterpret_cast32<uint32_t>(super->GetVTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+ if (vtable != nullptr && vtable != super_vtable) {
+ DCHECK(!IsInBootImage(vtable));
+ vtable->Fixup(vtable, kRuntimePointerSize, visitor);
+ }
+
+ // See if we need to fixup entries in the IfTable.
+ mirror::IfTable* iftable = FromImageOffsetToRuntimeContent<mirror::IfTable>(
+ reinterpret_cast32<uint32_t>(
+ klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+ mirror::IfTable* super_iftable = FromImageOffsetToRuntimeContent<mirror::IfTable>(
+ reinterpret_cast32<uint32_t>(
+ super->GetIfTable<kVerifyNone, kWithoutReadBarrier>().Ptr()));
+ int32_t iftable_count = iftable->Count();
+ int32_t super_iftable_count = super_iftable->Count();
+ for (int32_t i = 0; i < iftable_count; ++i) {
+ mirror::PointerArray* methods = FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+ reinterpret_cast32<uint32_t>(
+ iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i).Ptr()));
+ mirror::PointerArray* super_methods = (i < super_iftable_count)
+ ? FromImageOffsetToRuntimeContent<mirror::PointerArray>(
+ reinterpret_cast32<uint32_t>(
+ super_iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i).Ptr()))
+ : nullptr;
+ if (methods != super_methods) {
+ DCHECK(!IsInBootImage(methods));
+ methods->Fixup(methods, kRuntimePointerSize, visitor);
+ }
+ }
+ }
+
+ void RelocateNativePointers() {
+ ScopedObjectAccess soa(Thread::Current());
+ NativePointerVisitor visitor(this);
+ for (auto it : classes_) {
+ mirror::Class* cls = reinterpret_cast<mirror::Class*>(&objects_[it.second]);
+ cls->FixupNativePointers(cls, kRuntimePointerSize, visitor);
+ RelocateMethodPointerArrays(cls, visitor);
+ }
+ for (auto it : native_relocations_) {
+ if (it.second.first == NativeRelocationKind::kImTable) {
+ ImTable* im_table = reinterpret_cast<ImTable*>(im_tables_.data() + it.second.second);
+ RelocateImTable(im_table, visitor);
+ }
+ }
+ }
+
+ void RelocateImTable(ImTable* im_table, const NativePointerVisitor& visitor) {
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ ArtMethod* method = im_table->Get(i, kRuntimePointerSize);
+ ArtMethod* new_method = nullptr;
+ if (method->IsRuntimeMethod() && !IsInBootImage(method)) {
+ // New IMT conflict method: just use the boot image version.
+ // TODO: Consider copying the new IMT conflict method.
+ new_method = Runtime::Current()->GetImtConflictMethod();
+ DCHECK(IsInBootImage(new_method));
+ } else {
+ new_method = visitor(method);
+ }
+ if (method != new_method) {
+ im_table->Set(i, new_method, kRuntimePointerSize);
+ }
+ }
+ }
+
+ void CopyFieldArrays(ObjPtr<mirror::Class> cls, uint32_t class_image_address)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ LengthPrefixedArray<ArtField>* fields[] = {
+ cls->GetSFieldsPtr(), cls->GetIFieldsPtr(),
+ };
+ for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
+ if (cur_fields != nullptr) {
+ // Copy the array.
+ size_t number_of_fields = cur_fields->size();
+ size_t size = LengthPrefixedArray<ArtField>::ComputeSize(number_of_fields);
+ size_t offset = art_fields_.size();
+ art_fields_.resize(offset + size);
+ auto* dest_array =
+ reinterpret_cast<LengthPrefixedArray<ArtField>*>(art_fields_.data() + offset);
+ memcpy(dest_array, cur_fields, size);
+ native_relocations_[cur_fields] =
+ std::make_pair(NativeRelocationKind::kArtFieldArray, offset);
+
+ // Update the class pointer of individual fields.
+ for (size_t i = 0; i != number_of_fields; ++i) {
+ dest_array->At(i).GetDeclaringClassAddressWithoutBarrier()->Assign(
+ reinterpret_cast<mirror::Class*>(class_image_address));
+ }
+ }
+ }
+ }
+
+ void CopyMethodArrays(ObjPtr<mirror::Class> cls, uint32_t class_image_address)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ size_t number_of_methods = cls->NumMethods();
+ if (number_of_methods == 0) {
+ return;
+ }
+
+ size_t size = LengthPrefixedArray<ArtMethod>::ComputeSize(number_of_methods);
+ size_t offset = art_methods_.size();
+ art_methods_.resize(offset + size);
+ auto* dest_array =
+ reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(art_methods_.data() + offset);
+ memcpy(dest_array, cls->GetMethodsPtr(), size);
+ native_relocations_[cls->GetMethodsPtr()] =
+ std::make_pair(NativeRelocationKind::kArtMethodArray, offset);
+
+ for (size_t i = 0; i != number_of_methods; ++i) {
+ ArtMethod* method = &cls->GetMethodsPtr()->At(i);
+ ArtMethod* copy = &dest_array->At(i);
+
+ // Update the class pointer.
+ ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
+ if (declaring_class == cls) {
+ copy->GetDeclaringClassAddressWithoutBarrier()->Assign(
+ reinterpret_cast<mirror::Class*>(class_image_address));
+ } else {
+ DCHECK(method->IsCopied());
+ if (!IsInBootImage(declaring_class.Ptr())) {
+ DCHECK(classes_.find(declaring_class->GetClassDef()) != classes_.end());
+ copy->GetDeclaringClassAddressWithoutBarrier()->Assign(
+ reinterpret_cast<mirror::Class*>(
+ image_begin_ + sizeof(ImageHeader) + classes_[declaring_class->GetClassDef()]));
+ }
+ }
+
+ // Record the native relocation of the method.
+ uintptr_t copy_offset =
+ reinterpret_cast<uintptr_t>(copy) - reinterpret_cast<uintptr_t>(art_methods_.data());
+ native_relocations_[method] = std::make_pair(NativeRelocationKind::kArtMethod, copy_offset);
+
+ // Ignore the single-implementation info for abstract methods.
+ if (method->IsAbstract()) {
+ copy->SetHasSingleImplementation(false);
+ copy->SetSingleImplementation(nullptr, kRuntimePointerSize);
+ }
+
+ // Set the entrypoint and data pointer of the method.
+ const std::vector<gc::space::ImageSpace*>& image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ DCHECK(!image_spaces.empty());
+ const OatFile* oat_file = image_spaces[0]->GetOatFile();
+ DCHECK(oat_file != nullptr);
+ const OatHeader& header = oat_file->GetOatHeader();
+ const uint8_t* address = header.GetOatAddress(method->IsNative()
+ ? StubType::kQuickGenericJNITrampoline
+ : StubType::kQuickToInterpreterBridge);
+ copy->SetEntryPointFromQuickCompiledCode(address);
+
+ if (method->IsNative()) {
+ StubType stub_type = method->IsCriticalNative()
+ ? StubType::kJNIDlsymLookupCriticalTrampoline
+ : StubType::kJNIDlsymLookupTrampoline;
+ copy->SetEntryPointFromJni(header.GetOatAddress(stub_type));
+ } else if (method->IsInvokable()) {
+ DCHECK(method->HasCodeItem()) << method->PrettyMethod();
+ ptrdiff_t code_item_offset = reinterpret_cast<const uint8_t*>(method->GetCodeItem()) -
+ method->GetDexFile()->DataBegin();
+ copy->SetDataPtrSize(
+ reinterpret_cast<const void*>(code_item_offset), kRuntimePointerSize);
+ }
+ }
+ }
+
+ void CopyImTable(ObjPtr<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ImTable* table = cls->GetImt(kRuntimePointerSize);
+
+ // If the table is null or shared and/or already emitted, we can skip.
+ if (table == nullptr || IsInBootImage(table) || HasNativeRelocation(table)) {
+ return;
+ }
+ const size_t size = ImTable::SizeInBytes(kRuntimePointerSize);
+ size_t offset = im_tables_.size();
+ im_tables_.resize(offset + size);
+ uint8_t* dest = im_tables_.data() + offset;
+ memcpy(dest, table, size);
+ native_relocations_[table] = std::make_pair(NativeRelocationKind::kImTable, offset);
+ }
+
+ bool HasNativeRelocation(void* ptr) const {
+ return native_relocations_.find(ptr) != native_relocations_.end();
}
bool WriteObjects(std::string* error_msg) {
@@ -373,8 +873,9 @@
CopyObject(image_roots.Get());
}
- // Copy objects stored in the dex caches.
- VisitDexCaches(dex_cache_array);
+ // Emit strings referenced in dex caches, and classes defined in the app class loader.
+ EmitStringsAndClasses(soa.Self(), dex_cache_array);
+
return true;
}
@@ -394,12 +895,15 @@
void operator()(ObjPtr<mirror::Object> obj,
MemberOffset offset,
- bool is_static ATTRIBUTE_UNUSED) const
+ bool is_static) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Object> ref = obj->GetFieldObject<mirror::Object>(offset);
+ // We don't copy static fields; instead, classes will be marked as resolved
+ // and initialized at runtime.
+ ObjPtr<mirror::Object> ref =
+ is_static ? nullptr : obj->GetFieldObject<mirror::Object>(offset);
mirror::Object* address = image_->GetOrComputeImageAddress(ref.Ptr());
mirror::Object* copy =
- reinterpret_cast<mirror::Object*>(image_->image_data_.data() + copy_offset_);
+ reinterpret_cast<mirror::Object*>(image_->objects_.data() + copy_offset_);
copy->GetFieldObjectReferenceAddr<kVerifyNone>(offset)->Assign(address);
}
@@ -415,31 +919,85 @@
size_t copy_offset_;
};
- // Copy `obj` in `image_data_` and relocate references. Returns the offset
+ uint32_t CopyDexCache(ObjPtr<mirror::DexCache> cache) REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto it = dex_caches_.find(cache->GetDexFile());
+ if (it != dex_caches_.end()) {
+ return it->second;
+ }
+ uint32_t offset = CopyObject(cache);
+ dex_caches_[cache->GetDexFile()] = offset;
+ // For dex caches, clear pointers to data that will be set at runtime.
+ mirror::Object* copy = reinterpret_cast<mirror::Object*>(objects_.data() + offset);
+ reinterpret_cast<mirror::DexCache*>(copy)->ResetNativeArrays();
+ reinterpret_cast<mirror::DexCache*>(copy)->SetDexFile(nullptr);
+ return offset;
+ }
+
+ uint32_t CopyClass(ObjPtr<mirror::Class> cls) REQUIRES_SHARED(Locks::mutator_lock_) {
+ const dex::ClassDef* class_def = cls->GetClassDef();
+ auto it = classes_.find(class_def);
+ if (it != classes_.end()) {
+ return it->second;
+ }
+ uint32_t offset = CopyObject(cls);
+ classes_[class_def] = offset;
+
+ uint32_t hash = cls->DescriptorHash();
+ // Save the hash; the `HashSet` implementation needs it to find the class.
+ class_hashes_[offset] = hash;
+ uint32_t class_image_address = image_begin_ + sizeof(ImageHeader) + offset;
+ bool inserted =
+ class_table_.InsertWithHash(ClassTable::TableSlot(class_image_address, hash), hash).second;
+ DCHECK(inserted) << "Class " << cls->PrettyDescriptor()
+ << " (" << cls.Ptr() << ") already inserted";
+
+ // Clear internal state.
+ mirror::Class* copy = reinterpret_cast<mirror::Class*>(objects_.data() + offset);
+ copy->SetClinitThreadId(static_cast<pid_t>(0u));
+ copy->SetStatusInternal(cls->IsVerified() ? ClassStatus::kVerified : ClassStatus::kResolved);
+ copy->SetObjectSizeAllocFastPath(std::numeric_limits<uint32_t>::max());
+ copy->SetAccessFlags(copy->GetAccessFlags() & ~kAccRecursivelyInitialized);
+
+ // Clear static field values.
+ MemberOffset static_offset = cls->GetFirstReferenceStaticFieldOffset(kRuntimePointerSize);
+ memset(objects_.data() + offset + static_offset.Uint32Value(),
+ 0,
+ cls->GetClassSize() - static_offset.Uint32Value());
+
+ CopyFieldArrays(cls, class_image_address);
+ CopyMethodArrays(cls, class_image_address);
+ if (cls->ShouldHaveImt()) {
+ CopyImTable(cls);
+ }
+
+ return offset;
+ }
+
+ // Copy `obj` in `objects_` and relocate references. Returns the offset
// within our buffer.
uint32_t CopyObject(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_) {
- // Copy the object in `image_data_`.
+ // Copy the object in `objects_`.
size_t object_size = obj->SizeOf();
- size_t offset = image_data_.size();
+ size_t offset = objects_.size();
DCHECK(IsAligned<kObjectAlignment>(offset));
object_offsets_.push_back(offset);
- image_data_.resize(RoundUp(image_data_.size() + object_size, kObjectAlignment));
- memcpy(image_data_.data() + offset, obj.Ptr(), object_size);
+ objects_.resize(RoundUp(offset + object_size, kObjectAlignment));
+ memcpy(objects_.data() + offset, obj.Ptr(), object_size);
object_section_size_ += RoundUp(object_size, kObjectAlignment);
// Fixup reference pointers.
FixupVisitor visitor(this, offset);
obj->VisitReferences</*kVisitNativeRoots=*/ false>(visitor, visitor);
- mirror::Object* copy = reinterpret_cast<mirror::Object*>(image_data_.data() + offset);
+ mirror::Object* copy = reinterpret_cast<mirror::Object*>(objects_.data() + offset);
// Clear any lockword data.
copy->SetLockWord(LockWord::Default(), /* as_volatile= */ false);
- // For dex caches, clear pointers to data that will be set at runtime.
- if (obj->IsDexCache()) {
- reinterpret_cast<mirror::DexCache*>(copy)->ResetNativeArrays();
- reinterpret_cast<mirror::DexCache*>(copy)->SetDexFile(nullptr);
+ if (obj->IsString()) {
+ // Ensure a string always has a hashcode stored. This is checked at
+ // runtime because boot images don't want strings dirtied due to hashcode.
+ reinterpret_cast<mirror::String*>(copy)->GetHashCode();
}
return offset;
}
@@ -538,16 +1096,28 @@
// sections.
ImageHeader header_;
- // Contents of the image sections.
- std::vector<uint8_t> image_data_;
+ // Contents of the various sections.
+ std::vector<uint8_t> objects_;
+ std::vector<uint8_t> art_fields_;
+ std::vector<uint8_t> art_methods_;
+ std::vector<uint8_t> im_tables_;
- // Bitmap of live objects in `image_data_`. Populated from `object_offsets_`
+ // Bitmap of live objects in `objects_`. Populated from `object_offsets_`
// once we know `object_section_size`.
gc::accounting::ContinuousSpaceBitmap image_bitmap_;
- // A list of offsets in `image_data_` where objects begin.
+ // Sections stored in the header.
+ dchecked_vector<ImageSection> sections_;
+
+ // A list of offsets in `objects_` where objects begin.
std::vector<uint32_t> object_offsets_;
+ std::map<const dex::ClassDef*, uint32_t> classes_;
+ std::map<const DexFile*, uint32_t> dex_caches_;
+ std::map<uint32_t, uint32_t> class_hashes_;
+
+ std::map<void*, std::pair<NativeRelocationKind, uint32_t>> native_relocations_;
+
// Cached values of boot image information.
const uint32_t boot_image_begin_;
const uint32_t boot_image_size_;
@@ -563,6 +1133,13 @@
// The intern table for strings that we will write to disk.
InternTableSet intern_table_;
+
+ // The class table holding classes that we will write to disk.
+ ClassTableSet class_table_;
+
+ friend class ClassDescriptorHash;
+ friend class PruneVisitor;
+ friend class NativePointerVisitor;
};
std::string RuntimeImage::GetRuntimeImagePath(const std::string& dex_location) {
@@ -601,43 +1178,94 @@
return false;
}
- // Write section infos. The header is written at the end in case we get killed.
- if (!out->Write(reinterpret_cast<const char*>(image.GetData().data()),
- image.GetData().size(),
- sizeof(ImageHeader))) {
+ // Write objects. The header is written at the end in case we get killed.
+ if (out->Write(reinterpret_cast<const char*>(image.GetObjects().data()),
+ image.GetObjects().size(),
+ sizeof(ImageHeader)) != static_cast<int64_t>(image.GetObjects().size())) {
*error_msg = "Could not write image data to " + temp_path;
- out->Unlink();
+ out->Erase(/*unlink=*/true);
return false;
}
{
+ // Write fields.
+ auto fields_section = image.GetHeader().GetImageSection(ImageHeader::kSectionArtFields);
+ if (out->Write(reinterpret_cast<const char*>(image.GetArtFields().data()),
+ fields_section.Size(),
+ fields_section.Offset()) != fields_section.Size()) {
+ *error_msg = "Could not write fields section " + temp_path;
+ out->Erase(/*unlink=*/true);
+ return false;
+ }
+ }
+
+ {
+ // Write methods.
+ auto methods_section = image.GetHeader().GetImageSection(ImageHeader::kSectionArtMethods);
+ if (out->Write(reinterpret_cast<const char*>(image.GetArtMethods().data()),
+ methods_section.Size(),
+ methods_section.Offset()) != methods_section.Size()) {
+ *error_msg = "Could not write methods section " + temp_path;
+ out->Erase(/*unlink=*/true);
+ return false;
+ }
+ }
+
+ {
+ // Write im tables.
+ auto im_tables_section = image.GetHeader().GetImageSection(ImageHeader::kSectionImTables);
+ if (out->Write(reinterpret_cast<const char*>(image.GetImTables().data()),
+ im_tables_section.Size(),
+ im_tables_section.Offset()) != im_tables_section.Size()) {
+ *error_msg = "Could not write ImTable section " + temp_path;
+ out->Erase(/*unlink=*/true);
+ return false;
+ }
+ }
+
+ {
// Write intern string set.
auto intern_section = image.GetHeader().GetImageSection(ImageHeader::kSectionInternedStrings);
std::vector<uint8_t> intern_data(intern_section.Size());
image.GenerateInternData(intern_data);
- if (!out->Write(reinterpret_cast<const char*>(intern_data.data()),
- intern_section.Size(),
- intern_section.Offset())) {
+ if (out->Write(reinterpret_cast<const char*>(intern_data.data()),
+ intern_section.Size(),
+ intern_section.Offset()) != intern_section.Size()) {
*error_msg = "Could not write intern section " + temp_path;
- out->Unlink();
+ out->Erase(/*unlink=*/true);
+ return false;
+ }
+ }
+
+ {
+ // Write class table.
+ auto class_table_section = image.GetHeader().GetImageSection(ImageHeader::kSectionClassTable);
+ std::vector<uint8_t> class_table_data(class_table_section.Size());
+ image.GenerateClassTableData(class_table_data);
+ if (out->Write(reinterpret_cast<const char*>(class_table_data.data()),
+ class_table_section.Size(),
+ class_table_section.Offset()) != class_table_section.Size()) {
+ *error_msg = "Could not write class table section " + temp_path;
+ out->Erase(/*unlink=*/true);
return false;
}
}
// Write bitmap.
auto bitmap_section = image.GetHeader().GetImageSection(ImageHeader::kSectionImageBitmap);
- if (!out->Write(reinterpret_cast<const char*>(image.GetImageBitmap().Begin()),
- bitmap_section.Size(),
- bitmap_section.Offset())) {
+ if (out->Write(reinterpret_cast<const char*>(image.GetImageBitmap().Begin()),
+ bitmap_section.Size(),
+ bitmap_section.Offset()) != bitmap_section.Size()) {
*error_msg = "Could not write image bitmap " + temp_path;
- out->Unlink();
+ out->Erase(/*unlink=*/true);
return false;
}
// Now write header.
- if (!out->Write(reinterpret_cast<const char*>(&image.GetHeader()), sizeof(ImageHeader), 0u)) {
+ if (out->Write(reinterpret_cast<const char*>(&image.GetHeader()), sizeof(ImageHeader), 0u) !=
+ sizeof(ImageHeader)) {
*error_msg = "Could not write image header to " + temp_path;
- out->Unlink();
+ out->Erase(/*unlink=*/true);
return false;
}
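The write paths above now compare File::Write's return value against the exact number of bytes requested instead of treating it as a boolean, so an error or a short write fails image generation rather than leaving a truncated file behind. A standalone sketch of that pattern using POSIX write(), for illustration only (ART's File API is assumed to report bytes written in the same spirit):

#include <unistd.h>

#include <cstddef>

// Returns true only if every requested byte was written; an error (-1) and a
// short write both count as failure, mirroring the `!= section.Size()` checks.
bool WriteFully(int fd, const char* data, size_t size) {
  ssize_t written = write(fd, data, size);
  return written == static_cast<ssize_t>(size);
}

int main() {
  const char msg[] = "image section bytes\n";
  return WriteFully(STDOUT_FILENO, msg, sizeof(msg) - 1) ? 0 : 1;
}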
diff --git a/test/845-data-image/src-art/Main.java b/test/845-data-image/src-art/Main.java
index c8d3e62..71dd770 100644
--- a/test/845-data-image/src-art/Main.java
+++ b/test/845-data-image/src-art/Main.java
@@ -18,10 +18,76 @@
import dalvik.system.VMRuntime;
import java.io.File;
import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
import java.util.concurrent.CyclicBarrier;
-public class Main {
+// Add an interface to test generating classes and interfaces.
+interface Itf {
+ public int someMethod();
+ public default int someDefaultMethod() { return 42; }
+}
+// Add a second interface with many methods to force a conflict in the IMT. We want a second
+// interface to make sure `Itf` gets entries with the imt_unimplemented_method runtime method.
+interface Itf2 {
+ default int defaultMethod1() { return 1; }
+ default int defaultMethod2() { return 2; }
+ default int defaultMethod3() { return 3; }
+ default int defaultMethod4() { return 4; }
+ default int defaultMethod5() { return 5; }
+ default int defaultMethod6() { return 6; }
+ default int defaultMethod7() { return 7; }
+ default int defaultMethod8() { return 8; }
+ default int defaultMethod9() { return 9; }
+ default int defaultMethod10() { return 10; }
+ default int defaultMethod11() { return 11; }
+ default int defaultMethod12() { return 12; }
+ default int defaultMethod13() { return 13; }
+ default int defaultMethod14() { return 14; }
+ default int defaultMethod15() { return 15; }
+ default int defaultMethod16() { return 16; }
+ default int defaultMethod17() { return 17; }
+ default int defaultMethod18() { return 18; }
+ default int defaultMethod19() { return 19; }
+ default int defaultMethod20() { return 20; }
+ default int defaultMethod21() { return 21; }
+ default int defaultMethod22() { return 22; }
+ default int defaultMethod23() { return 23; }
+ default int defaultMethod24() { return 24; }
+ default int defaultMethod25() { return 25; }
+ default int defaultMethod26() { return 26; }
+ default int defaultMethod27() { return 27; }
+ default int defaultMethod28() { return 28; }
+ default int defaultMethod29() { return 29; }
+ default int defaultMethod30() { return 30; }
+ default int defaultMethod31() { return 31; }
+ default int defaultMethod32() { return 32; }
+ default int defaultMethod33() { return 33; }
+ default int defaultMethod34() { return 34; }
+ default int defaultMethod35() { return 35; }
+ default int defaultMethod36() { return 36; }
+ default int defaultMethod37() { return 37; }
+ default int defaultMethod38() { return 38; }
+ default int defaultMethod39() { return 39; }
+ default int defaultMethod40() { return 40; }
+ default int defaultMethod41() { return 41; }
+ default int defaultMethod42() { return 42; }
+ default int defaultMethod43() { return 43; }
+ default int defaultMethod44() { return 44; }
+ default int defaultMethod45() { return 45; }
+ default int defaultMethod46() { return 46; }
+ default int defaultMethod47() { return 47; }
+ default int defaultMethod48() { return 48; }
+ default int defaultMethod49() { return 49; }
+ default int defaultMethod50() { return 50; }
+ default int defaultMethod51() { return 51; }
+}
+
+class Itf2Impl implements Itf2 {
+}
+
+public class Main implements Itf {
static String myString = "MyString";
static class MyThread extends Thread {
@@ -43,6 +109,7 @@
}
}
}
+
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
@@ -76,6 +143,8 @@
}
}
+ runClassTests();
+
// Test that we emit an empty lock word. If we are not, then this synchronized call here would
// block on a run with the runtime image.
synchronized (myString) {
@@ -106,6 +175,115 @@
}
}
+ static class MyProxy implements InvocationHandler {
+
+ private Object obj;
+
+ public static Object newInstance(Object obj) {
+ return java.lang.reflect.Proxy.newProxyInstance(
+ obj.getClass().getClassLoader(),
+ obj.getClass().getInterfaces(),
+ new MyProxy(obj));
+ }
+
+ private MyProxy(Object obj) {
+ this.obj = obj;
+ }
+
+ public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
+ return m.invoke(obj, args);
+ }
+ }
+
+ public static Itf itf = new Main();
+ public static Itf2 itf2 = new Itf2Impl();
+
+ public static void runClassTests() {
+ // Test Class.getName; app images expect all strings to have hash codes.
+ assertEquals("Main", Main.class.getName());
+
+ // Basic tests for invokes with a copied method.
+ assertEquals(3, new Main().someMethod());
+ assertEquals(42, new Main().someDefaultMethod());
+
+ assertEquals(3, itf.someMethod());
+ assertEquals(42, itf.someDefaultMethod());
+
+ // Test with a proxy class.
+ Itf foo = (Itf) MyProxy.newInstance(new Main());
+ assertEquals(3, foo.someMethod());
+ assertEquals(42, foo.someDefaultMethod());
+
+ // Call all interface methods to trigger the creation of an IMT conflict method.
+ itf2.defaultMethod1();
+ itf2.defaultMethod2();
+ itf2.defaultMethod3();
+ itf2.defaultMethod4();
+ itf2.defaultMethod5();
+ itf2.defaultMethod6();
+ itf2.defaultMethod7();
+ itf2.defaultMethod8();
+ itf2.defaultMethod9();
+ itf2.defaultMethod10();
+ itf2.defaultMethod11();
+ itf2.defaultMethod12();
+ itf2.defaultMethod13();
+ itf2.defaultMethod14();
+ itf2.defaultMethod15();
+ itf2.defaultMethod16();
+ itf2.defaultMethod17();
+ itf2.defaultMethod18();
+ itf2.defaultMethod19();
+ itf2.defaultMethod20();
+ itf2.defaultMethod21();
+ itf2.defaultMethod22();
+ itf2.defaultMethod23();
+ itf2.defaultMethod24();
+ itf2.defaultMethod25();
+ itf2.defaultMethod26();
+ itf2.defaultMethod27();
+ itf2.defaultMethod28();
+ itf2.defaultMethod29();
+ itf2.defaultMethod30();
+ itf2.defaultMethod31();
+ itf2.defaultMethod32();
+ itf2.defaultMethod33();
+ itf2.defaultMethod34();
+ itf2.defaultMethod35();
+ itf2.defaultMethod36();
+ itf2.defaultMethod37();
+ itf2.defaultMethod38();
+ itf2.defaultMethod39();
+ itf2.defaultMethod40();
+ itf2.defaultMethod41();
+ itf2.defaultMethod42();
+ itf2.defaultMethod43();
+ itf2.defaultMethod44();
+ itf2.defaultMethod45();
+ itf2.defaultMethod46();
+ itf2.defaultMethod47();
+ itf2.defaultMethod48();
+ itf2.defaultMethod49();
+ itf2.defaultMethod50();
+ itf2.defaultMethod51();
+ }
+
+ private static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
+ private static void assertEquals(String expected, String actual) {
+ if (!expected.equals(actual)) {
+ throw new Error("Expected \"" + expected + "\", got \"" + actual + "\"");
+ }
+ }
+
+ public int someMethod() {
+ return 3;
+ }
+
private static native boolean hasOatFile();
private static native boolean hasImage();
private static native String getCompilerFilter(Class<?> cls);