Include field and method dex cache arrays in the runtime app image.

This speeds up app startup. Array caches that would take too much space
are stored in the metadata section and released once the app has
finished startup.
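
To illustrate the encoding, here is a minimal standalone sketch of how
the copied arrays are stored (names are illustrative, not actual ART
types): the writer places the 32-bit entry count in the four bytes just
before the array, so that ImageSpace can later patch the entries
without access to the dex files.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Append a native pointer array to `data`, prefixed by its entry
    // count. Returns the offset of the array itself; the count lives
    // in the four bytes just before that offset.
    size_t AppendLengthPrefixedArray(std::vector<uint8_t>& data,
                                     void* const* entries,
                                     uint32_t num_entries) {
      size_t offset = data.size() + sizeof(uint32_t);
      data.resize(offset + num_entries * sizeof(void*));
      reinterpret_cast<uint32_t*>(data.data() + offset)[-1] = num_entries;
      memcpy(data.data() + offset, entries, num_entries * sizeof(void*));
      return offset;
    }

    // At load time, the count is read back from just before the array.
    uint32_t GetArrayLength(const void* array) {
      return reinterpret_cast<const uint32_t*>(array)[-1];
    }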

Bug: 260557058
Test: test.py
Change-Id: I189655c09bc14099e0c2ed43134888c043e63d86
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 4df26e4..53e880d 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -2766,6 +2766,15 @@
           ImageSection(cur_pos, sizeof(string_reference_offsets_[0]) * num_string_references_);
 
   /*
+   * DexCache arrays section
+   */
+
+  // Round up to the alignment dex cache arrays expect.
+  cur_pos = RoundUp(sections[ImageHeader::kSectionStringReferenceOffsets].End(), sizeof(uint32_t));
+  // We don't write dex cache arrays in images generated by dex2oat.
+  sections[ImageHeader::kSectionDexCacheArrays] = ImageSection(cur_pos, 0u);
+
+  /*
    * Metadata section.
    */
 
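
Note: the section layout in both image writers follows the same
pattern: each section starts at the end of the previous one, rounded up
to the alignment that section expects. A simplified, self-contained
sketch (ImageSection here is a stand-in for the real class):

    #include <cstdint>

    // Simplified stand-in for art::ImageSection.
    struct ImageSection {
      uint32_t offset;
      uint32_t size;
      uint32_t End() const { return offset + size; }
    };

    // Round `value` up to a power-of-two `alignment`.
    constexpr uint32_t RoundUp(uint32_t value, uint32_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // Place a section right after `prev`, aligned for uint32_t, the
    // alignment the dex cache arrays section expects.
    ImageSection NextSection(const ImageSection& prev, uint32_t size) {
      return ImageSection{RoundUp(prev.End(), sizeof(uint32_t)), size};
    }
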
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 2e4ddcd..4860509 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -380,6 +380,34 @@
       const {}
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
 
+  template <typename T> void VisitNativeDexCacheArray(mirror::NativeArray<T>* array)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (array == nullptr) {
+      return;
+    }
+    uint32_t size = reinterpret_cast<uint32_t*>(array)[-1];
+    for (uint32_t i = 0; i < size; ++i) {
+      PatchNativePointer(array->GetPtrEntryPtrSize(i, kPointerSize));
+    }
+  }
+
+  void VisitDexCacheArrays(ObjPtr<mirror::DexCache> dex_cache)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    mirror::NativeArray<ArtMethod>* old_resolved_methods = dex_cache->GetResolvedMethodsArray();
+    if (old_resolved_methods != nullptr) {
+      mirror::NativeArray<ArtMethod>* resolved_methods = native_visitor_(old_resolved_methods);
+      dex_cache->SetResolvedMethodsArray(resolved_methods);
+      VisitNativeDexCacheArray(resolved_methods);
+    }
+
+    mirror::NativeArray<ArtField>* old_resolved_fields = dex_cache->GetResolvedFieldsArray();
+    if (old_resolved_fields != nullptr) {
+      mirror::NativeArray<ArtField>* resolved_fields = native_visitor_(old_resolved_fields);
+      dex_cache->SetResolvedFieldsArray(resolved_fields);
+      VisitNativeDexCacheArray(resolved_fields);
+    }
+  }
+
   template <bool kMayBeNull = true, typename T>
   ALWAYS_INLINE void PatchGcRoot(/*inout*/GcRoot<T>* root) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1308,6 +1336,15 @@
       image_header->RelocateImageReferences(app_image_objects.Delta());
       image_header->RelocateBootImageReferences(boot_image.Delta());
       CHECK_EQ(image_header->GetImageBegin(), target_base);
+
+      // Fix up dex cache arrays.
+      ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
+          image_header->GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)
+              ->AsObjectArray<mirror::DexCache, kVerifyNone>();
+      for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
+        ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
+        patch_object_visitor.VisitDexCacheArrays(dex_cache);
+      }
     }
     {
       // Only touches objects in the app image, no need for mutator lock.
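
Note: to make the load-time patching concrete, the loop below is the
same operation in isolation. By this point the array pointer itself has
been forwarded to its in-image location; each entry is then a native
pointer that gets the relocation delta applied. The delta-based patcher
here is a simplified stand-in for the visitor machinery above.

    #include <cstdint>

    // Stand-in for the image's native pointer patching: shift a
    // non-null pointer by the relocation delta.
    void PatchPointer(void** entry, intptr_t delta) {
      if (*entry != nullptr) {
        *entry = reinterpret_cast<void*>(
            reinterpret_cast<intptr_t>(*entry) + delta);
      }
    }

    // Patch every entry of a length-prefixed native array; the entry
    // count sits in the four bytes before the array.
    void PatchNativeArray(void** array, intptr_t delta) {
      if (array == nullptr) {
        return;
      }
      uint32_t size = reinterpret_cast<uint32_t*>(array)[-1];
      for (uint32_t i = 0; i < size; ++i) {
        PatchPointer(&array[i], delta);
      }
    }
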
diff --git a/runtime/image.cc b/runtime/image.cc
index 133782d..feb2536 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -31,8 +31,8 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-// Last change: StringBuilderAppend for float/double.
-const uint8_t ImageHeader::kImageVersion[] = { '1', '0', '7', '\0' };
+// Last change: Add dex cache arrays section.
+const uint8_t ImageHeader::kImageVersion[] = { '1', '0', '8', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_reservation_size,
                          uint32_t component_count,
@@ -240,6 +240,7 @@
     case kSectionInternedStrings: return "InternedStrings";
     case kSectionClassTable: return "ClassTable";
     case kSectionStringReferenceOffsets: return "StringReferenceOffsets";
+    case kSectionDexCacheArrays: return "DexCacheArrays";
     case kSectionMetadata: return "Metadata";
     case kSectionImageBitmap: return "ImageBitmap";
     case kSectionCount: return nullptr;
diff --git a/runtime/image.h b/runtime/image.h
index 0425a24..0ec112f 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -264,6 +264,7 @@
     kSectionInternedStrings,
     kSectionClassTable,
     kSectionStringReferenceOffsets,
+    kSectionDexCacheArrays,
     kSectionMetadata,
     kSectionImageBitmap,
     kSectionCount,  // Number of elements in enum.
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 7cc11fc..d1ddb79 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -45,12 +45,6 @@
   DCHECK(GetResolvedMethodTypes() == nullptr);
   DCHECK(GetResolvedCallSites() == nullptr);
 
-  DCHECK(GetStringsArray() == nullptr);
-  DCHECK(GetResolvedTypesArray() == nullptr);
-  DCHECK(GetResolvedMethodsArray() == nullptr);
-  DCHECK(GetResolvedFieldsArray() == nullptr);
-  DCHECK(GetResolvedMethodTypesArray() == nullptr);
-
   ScopedAssertNoThreadSuspension sants(__FUNCTION__);
 
   SetDexFile(dex_file);
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index cc01b6b..856dd1f 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -229,6 +229,16 @@
     return entries_[index];
   }
 
+  T** GetPtrEntryPtrSize(uint32_t index, PointerSize ptr_size) {
+    if (ptr_size == PointerSize::k64) {
+      return reinterpret_cast<T**>(
+          reinterpret_cast64<uint64_t>(entries_) + index * sizeof(uint64_t));
+    } else {
+      return reinterpret_cast<T**>(
+          reinterpret_cast32<uint32_t>(entries_) + index * sizeof(uint32_t));
+    }
+  }
+
   void Set(uint32_t index, T* value) {
     entries_[index] = value;
   }
@@ -245,6 +255,8 @@
   // Size of java.lang.DexCache.class.
   static uint32_t ClassSize(PointerSize pointer_size);
 
+  // Note: update the image version in image.cc if changing any of these cache sizes.
+
   // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
   static constexpr size_t kDexCacheTypeCacheSize = 1024;
   static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
@@ -369,6 +381,12 @@
   // allocator.
   void UnlinkStartupCaches() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Returns whether we should allocate a full array given the number of elements.
+  // Note: update the image version in image.cc if changing this method.
+  static bool ShouldAllocateFullArray(size_t number_of_elements, size_t dex_cache_size) {
+    return number_of_elements <= dex_cache_size;
+  }
+
 // NOLINTBEGIN(bugprone-macro-parentheses)
 #define DEFINE_ARRAY(name, array_kind, getter_setter, type, ids, alloc_kind) \
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
@@ -549,12 +567,6 @@
   // the runtime and oat files.
   bool ShouldAllocateFullArrayAtStartup() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Returns whether we should allocate a full array given the number of
-  // elements.
-  static bool ShouldAllocateFullArray(size_t number_of_elements, size_t dex_cache_size) {
-    return number_of_elements <= dex_cache_size;
-  }
-
   HeapReference<ClassLoader> class_loader_;
   HeapReference<String> location_;
 
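
Note: the full-versus-startup split is driven entirely by this size
threshold: when every id of the dex file fits in the fixed cache, the
runtime keeps a full array alive; otherwise the array is startup-only
and released afterwards. A small illustration (1024 mirrors
kDexCacheTypeCacheSize above; the actual method and field cache sizes
are also defined in dex_cache.h):

    #include <cstddef>

    // Same predicate as DexCache::ShouldAllocateFullArray.
    constexpr bool ShouldAllocateFullArray(size_t number_of_elements,
                                           size_t dex_cache_size) {
      return number_of_elements <= dex_cache_size;
    }

    // A dex file with 600 ids gets a full array that survives startup;
    // one with 20000 ids gets a startup-only array.
    static_assert(ShouldAllocateFullArray(600, 1024), "full array");
    static_assert(!ShouldAllocateFullArray(20000, 1024), "startup-only");
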
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index b9ce880..e190084 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -60,6 +60,12 @@
   kArtMethodArray,
   kArtMethod,
   kImTable,
+  // For dex cache arrays that can stay in memory even after startup. Those are
+  // dex cache arrays whose size is below the threshold defined by
+  // DexCache::ShouldAllocateFullArray.
+  kFullNativeDexCacheArray,
+  // For dex cache arrays that we want to release after app startup.
+  kStartupNativeDexCacheArray,
 };
 
 /**
@@ -164,6 +170,14 @@
     return im_tables_;
   }
 
+  const std::vector<uint8_t>& GetMetadata() const {
+    return metadata_;
+  }
+
+  const std::vector<uint8_t>& GetDexCacheArrays() const {
+    return dex_cache_arrays_;
+  }
+
   const ImageHeader& GetHeader() const {
     return header_;
   }
@@ -260,11 +274,15 @@
     cur_pos = RoundUp(sections_[ImageHeader::kSectionClassTable].End(), sizeof(uint32_t));
     sections_[ImageHeader::kSectionStringReferenceOffsets] = ImageSection(cur_pos, 0u);
 
-    // Round up to the alignment of the offsets we are going to store.
+    // Round up to the alignment dex cache arrays expect.
     cur_pos =
         RoundUp(sections_[ImageHeader::kSectionStringReferenceOffsets].End(), sizeof(uint32_t));
+    sections_[ImageHeader::kSectionDexCacheArrays] =
+        ImageSection(cur_pos, dex_cache_arrays_.size());
 
-    sections_[ImageHeader::kSectionMetadata] = ImageSection(cur_pos, 0u);
+    // Round up to the alignment expected for the metadata.
+    cur_pos = RoundUp(sections_[ImageHeader::kSectionDexCacheArrays].End(), sizeof(uint32_t));
+    sections_[ImageHeader::kSectionMetadata] = ImageSection(cur_pos, metadata_.size());
   }
 
   // Returns the copied mirror Object if in the image, or the object directly if
@@ -549,24 +567,27 @@
 
     template <typename T>
     T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
-      return helper_->NativeLocationInImage(ptr);
+      return helper_->NativeLocationInImage(ptr, /* must_have_relocation= */ true);
     }
 
-    template <typename T> T* operator()(T* ptr) const {
-      return helper_->NativeLocationInImage(ptr);
+    template <typename T> T* operator()(T* ptr, bool must_have_relocation = true) const {
+      return helper_->NativeLocationInImage(ptr, must_have_relocation);
     }
 
    private:
     RuntimeImageHelper* helper_;
   };
 
-  template <typename T> T* NativeLocationInImage(T* ptr) const {
+  template <typename T> T* NativeLocationInImage(T* ptr, bool must_have_relocation) const {
     if (ptr == nullptr || IsInBootImage(ptr)) {
       return ptr;
     }
 
     auto it = native_relocations_.find(ptr);
-    DCHECK(it != native_relocations_.end());
+    if (it == native_relocations_.end()) {
+      DCHECK(!must_have_relocation);
+      return nullptr;
+    }
     switch (it->second.first) {
       case NativeRelocationKind::kArtMethod:
       case NativeRelocationKind::kArtMethodArray: {
@@ -581,6 +602,14 @@
         uint32_t offset = sections_[ImageHeader::kSectionImTables].Offset();
         return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
       }
+      case NativeRelocationKind::kStartupNativeDexCacheArray: {
+        uint32_t offset = sections_[ImageHeader::kSectionMetadata].Offset();
+        return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+      }
+      case NativeRelocationKind::kFullNativeDexCacheArray: {
+        uint32_t offset = sections_[ImageHeader::kSectionDexCacheArrays].Offset();
+        return reinterpret_cast<T*>(image_begin_ + offset + it->second.second);
+      }
     }
   }
 
@@ -630,6 +659,42 @@
     }
   }
 
+  template <typename Visitor, typename T>
+  void RelocateNativeDexCacheArray(mirror::NativeArray<T>* old_array,
+                                   uint32_t num_ids,
+                                   const Visitor& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (old_array == nullptr) {
+      return;
+    }
+
+    auto it = native_relocations_[old_array];
+    std::vector<uint8_t>& data = (it.first == NativeRelocationKind::kFullNativeDexCacheArray)
+        ? dex_cache_arrays_ : metadata_;
+
+    mirror::NativeArray<T>* content_array =
+        reinterpret_cast<mirror::NativeArray<T>*>(data.data() + it.second);
+    for (uint32_t i = 0; i < num_ids; ++i) {
+      // We may not have relocations for some entries, in which case we'll
+      // just store null.
+      content_array->Set(i, visitor(content_array->Get(i), /* must_have_relocation= */ false));
+    }
+  }
+
+  template <typename Visitor>
+  void RelocateDexCacheArrays(mirror::DexCache* cache,
+                              const DexFile& dex_file,
+                              const Visitor& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    mirror::NativeArray<ArtMethod>* old_method_array = cache->GetResolvedMethodsArray();
+    cache->SetResolvedMethodsArray(visitor(old_method_array));
+    RelocateNativeDexCacheArray(old_method_array, dex_file.NumMethodIds(), visitor);
+
+    mirror::NativeArray<ArtField>* old_field_array = cache->GetResolvedFieldsArray();
+    cache->SetResolvedFieldsArray(visitor(old_field_array));
+    RelocateNativeDexCacheArray(old_field_array, dex_file.NumFieldIds(), visitor);
+  }
+
   void RelocateNativePointers() {
     ScopedObjectAccess soa(Thread::Current());
     NativePointerVisitor visitor(this);
@@ -644,6 +709,10 @@
         RelocateImTable(im_table, visitor);
       }
     }
+    for (auto it : dex_caches_) {
+      mirror::DexCache* cache = reinterpret_cast<mirror::DexCache*>(&objects_[it.second]);
+      RelocateDexCacheArrays(cache, *it.first, visitor);
+    }
   }
 
   void RelocateImTable(ImTable* im_table, const NativePointerVisitor& visitor) {
@@ -960,6 +1029,29 @@
     size_t copy_offset_;
   };
 
+  template <typename T>
+  void CopyNativeDexCacheArray(uint32_t num_entries,
+                               uint32_t max_entries,
+                               mirror::NativeArray<T>* array) {
+    if (array == nullptr) {
+      return;
+    }
+    size_t size = num_entries * sizeof(void*);  // Entries are native pointers.
+
+    bool only_startup = !mirror::DexCache::ShouldAllocateFullArray(num_entries, max_entries);
+    std::vector<uint8_t>& data = only_startup ? metadata_ : dex_cache_arrays_;
+    NativeRelocationKind relocation_kind = only_startup
+        ? NativeRelocationKind::kStartupNativeDexCacheArray
+        : NativeRelocationKind::kFullNativeDexCacheArray;
+    size_t offset = data.size() + sizeof(uint32_t);
+    data.resize(offset + size);
+    // We need to store `num_entries` because ImageSpace doesn't have
+    // access to the dex files when relocating dex caches.
+    reinterpret_cast<uint32_t*>(data.data() + offset)[-1] = num_entries;
+    memcpy(data.data() + offset, array, size);
+    native_relocations_[array] = std::make_pair(relocation_kind, offset);
+  }
+
   uint32_t CopyDexCache(ObjPtr<mirror::DexCache> cache) REQUIRES_SHARED(Locks::mutator_lock_) {
     auto it = dex_caches_.find(cache->GetDexFile());
     if (it != dex_caches_.end()) {
@@ -971,6 +1063,21 @@
     mirror::Object* copy = reinterpret_cast<mirror::Object*>(objects_.data() + offset);
     reinterpret_cast<mirror::DexCache*>(copy)->ResetNativeArrays();
     reinterpret_cast<mirror::DexCache*>(copy)->SetDexFile(nullptr);
+
+    mirror::NativeArray<ArtMethod>* resolved_methods = cache->GetResolvedMethodsArray();
+    CopyNativeDexCacheArray(cache->GetDexFile()->NumMethodIds(),
+                            mirror::DexCache::kDexCacheMethodCacheSize,
+                            resolved_methods);
+    // Store the array pointer in the dex cache, which will be relocated at the end.
+    reinterpret_cast<mirror::DexCache*>(copy)->SetResolvedMethodsArray(resolved_methods);
+
+    mirror::NativeArray<ArtField>* resolved_fields = cache->GetResolvedFieldsArray();
+    CopyNativeDexCacheArray(cache->GetDexFile()->NumFieldIds(),
+                            mirror::DexCache::kDexCacheFieldCacheSize,
+                            resolved_fields);
+    // Store the array pointer in the dex cache, which will be relocated at the end.
+    reinterpret_cast<mirror::DexCache*>(copy)->SetResolvedFieldsArray(resolved_fields);
+
     return offset;
   }
 
@@ -1146,6 +1253,8 @@
   std::vector<uint8_t> art_fields_;
   std::vector<uint8_t> art_methods_;
   std::vector<uint8_t> im_tables_;
+  std::vector<uint8_t> metadata_;
+  std::vector<uint8_t> dex_cache_arrays_;
 
   // Bitmap of live objects in `objects_`. Populated from `object_offsets_`
   // once we know `object_section_size`.
@@ -1330,6 +1439,26 @@
     return false;
   }
 
+  // Write metadata section.
+  auto metadata_section = image.GetHeader().GetImageSection(ImageHeader::kSectionMetadata);
+  if (out->Write(reinterpret_cast<const char*>(image.GetMetadata().data()),
+                 metadata_section.Size(),
+                 metadata_section.Offset()) != metadata_section.Size()) {
+    *error_msg = "Could not write metadata " + temp_path;
+    out->Erase(/*unlink=*/true);
+    return false;
+  }
+
+  // Write dex cache array section.
+  auto dex_cache_section = image.GetHeader().GetImageSection(ImageHeader::kSectionDexCacheArrays);
+  if (out->Write(reinterpret_cast<const char*>(image.GetDexCacheArrays().data()),
+                 dex_cache_section.Size(),
+                 dex_cache_section.Offset()) != dex_cache_section.Size()) {
+    *error_msg = "Could not write dex cache arrays " + temp_path;
+    out->Erase(/*unlink=*/true);
+    return false;
+  }
+
   // Now write header.
   if (out->Write(reinterpret_cast<const char*>(&image.GetHeader()), sizeof(ImageHeader), 0u) !=
           sizeof(ImageHeader)) {
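
Note: the write order above matters: section payloads, including the
two new sections, are written first, and the header goes out last at
offset 0, presumably so a partially written file never starts with a
valid header. A sketch of the offset-based write contract, using POSIX
pwrite in place of File::Write:

    #include <unistd.h>

    #include <cstdint>

    // Same contract as the File::Write calls above: write `size` bytes
    // at `offset`, succeeding only if all of them were written.
    bool WriteAtOffset(int fd, const uint8_t* data, size_t size, off_t offset) {
      return pwrite(fd, data, size, offset) == static_cast<ssize_t>(size);
    }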