author David Srbecky <dsrbecky@google.com> 2021-09-30 14:36:32 +0000
committer David Srbecky <dsrbecky@google.com> 2021-10-07 12:10:32 +0000
commit 33df0e3e47adc053c34a0ad3f4bb78ee3dd40e7f (patch)
tree 401957f10b116a082e7c4ed58bb86bff3c4e2615 /runtime/mirror/dex_cache-inl.h
parent ce8198e1e826142a5dc032f22a60e2c41eaeff96 (diff)
Revert^4 "Lazily allocate DexCache arrays."
We rarely need the DexCache for compiled code. Delay the allocation
in the hope that we never need it. This reduces DexCache memory usage
by ~25% at startup.

Bug: b/181097963
Test: test.py -b --host
Change-Id: I1f654aeb538dfed013705a61b1955af1f6b94fe7
Diffstat (limited to 'runtime/mirror/dex_cache-inl.h')
-rw-r--r-- runtime/mirror/dex_cache-inl.h 130
1 file changed, 114 insertions(+), 16 deletions(-)
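
The commit message describes a lazy-allocation scheme: every DexCache array starts out null, readers treat a missing array as an ordinary cache miss, and the first write allocates it. A minimal standalone sketch of that pattern (hypothetical LazyCache class, not the ART API; the actual code publishes the pointer through SetField64Volatile rather than std::atomic):

#include <atomic>
#include <cstddef>
#include <mutex>

template <typename T>
class LazyCache {
 public:
  explicit LazyCache(size_t size) : size_(size) {}

  // Readers treat a missing array the same as an empty slot: a miss.
  T* Get(size_t idx) {
    T* array = array_.load(std::memory_order_acquire);
    return array == nullptr ? nullptr : &array[idx];
  }

  // The first writer allocates the array; later writers reuse it.
  void Set(size_t idx, const T& value) {
    T* array = array_.load(std::memory_order_acquire);
    if (array == nullptr) {
      std::lock_guard<std::mutex> lock(alloc_mutex_);
      array = array_.load(std::memory_order_relaxed);
      if (array == nullptr) {    // Re-check: another thread may have won.
        array = new T[size_]();  // Value-initialize before publishing.
        array_.store(array, std::memory_order_release);
      }
    }
    array[idx] = value;
  }

 private:
  const size_t size_;
  std::mutex alloc_mutex_;
  std::atomic<T*> array_{nullptr};
};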
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index c37aaefade..a8c2a75a61 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -28,6 +28,7 @@
#include "class_linker.h"
#include "dex/dex_file.h"
#include "gc_root-inl.h"
+#include "linear_alloc.h"
#include "mirror/call_site.h"
#include "mirror/class.h"
#include "mirror/method_type.h"
@@ -41,6 +42,39 @@
namespace art {
namespace mirror {
+template<typename DexCachePair>
+static void InitializeArray(std::atomic<DexCachePair>* array) {
+ DexCachePair::Initialize(array);
+}
+
+template<typename T>
+static void InitializeArray(GcRoot<T>*) {
+ // No special initialization is needed.
+}
+
+template<typename T, size_t kMaxCacheSize>
+T* DexCache::AllocArray(MemberOffset obj_offset, MemberOffset num_offset, size_t num) {
+ ReadBarrier::AssertToSpaceInvariant(this);
+ num = std::min<size_t>(num, kMaxCacheSize);
+ if (num == 0) {
+ return nullptr;
+ }
+ Thread* self = Thread::Current();
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ LinearAlloc* alloc = linker->GetOrCreateAllocatorForClassLoader(GetClassLoader());
+ MutexLock mu(self, *Locks::dex_cache_lock_); // Avoid allocation by multiple threads.
+ T* array = GetFieldPtr64<T*>(obj_offset);
+ if (array != nullptr) {
+ DCHECK(alloc->Contains(array));
+ return array; // Another thread just allocated the array.
+ }
+ array = reinterpret_cast<T*>(alloc->AllocAlign16(self, RoundUp(num * sizeof(T), 16)));
+ InitializeArray(array); // Ensure other threads see the array initialized.
+ SetField32Volatile<false, false>(num_offset, num);
+ SetField64Volatile<false, false>(obj_offset, reinterpret_cast64<uint64_t>(array));
+ return array;
+}
+
template <typename T>
inline DexCachePair<T>::DexCachePair(ObjPtr<T> object, uint32_t index)
: object(object), index(index) {}
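
A note on the ordering inside AllocArray: the slots are initialized before the pointer is published, so any thread that observes the pointer also observes initialized slots. A sketch of that requirement under portable std::atomic semantics (an assumption for illustration; the actual code relies on the volatile field setters):

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr uint32_t kInvalidIndex = 0xffffffffu;

struct Slot {
  std::atomic<uint32_t> index{kInvalidIndex};
};

std::atomic<Slot*> g_published{nullptr};

void PublishArray(Slot* array, size_t num) {
  // 1. Mark every slot "empty" while the array is still private.
  for (size_t i = 0; i < num; ++i) {
    array[i].index.store(kInvalidIndex, std::memory_order_relaxed);
  }
  // 2. The release store pairs with the acquire load below, so a thread
  //    that sees the pointer also sees the initialized slots.
  g_published.store(array, std::memory_order_release);
}

Slot* AcquireArray() {
  return g_published.load(std::memory_order_acquire);
}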
@@ -83,13 +117,22 @@ inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) {
}
inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
- return GetStrings()[StringSlotIndex(string_idx)].load(
+ StringDexCacheType* strings = GetStrings();
+ if (UNLIKELY(strings == nullptr)) {
+ return nullptr;
+ }
+ return strings[StringSlotIndex(string_idx)].load(
std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
}
inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) {
DCHECK(resolved != nullptr);
- GetStrings()[StringSlotIndex(string_idx)].store(
+ StringDexCacheType* strings = GetStrings();
+ if (UNLIKELY(strings == nullptr)) {
+ strings = AllocArray<StringDexCacheType, kDexCacheStringCacheSize>(
+ StringsOffset(), NumStringsOffset(), GetDexFile()->NumStringIds());
+ }
+ strings[StringSlotIndex(string_idx)].store(
StringDexCachePair(resolved, string_idx.index_), std::memory_order_relaxed);
Runtime* const runtime = Runtime::Current();
if (UNLIKELY(runtime->IsActiveTransaction())) {
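
GetObjectForIndex resolves hits and misses uniformly because each slot stores an (object, index) pair: the fixed-size cache maps many dex indices onto one slot, and a lookup only counts as a hit when the stored index matches. A minimal sketch of the scheme (hypothetical types; the real code additionally disambiguates index 0 via InvalidIndexForSlot):

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr size_t kCacheSize = 1024;  // Power of two, like kDexCacheStringCacheSize.

struct Pair {
  void* object;
  uint32_t index;
};

std::atomic<Pair> g_cache[kCacheSize];

void* Lookup(uint32_t dex_index) {
  Pair p = g_cache[dex_index & (kCacheSize - 1)].load(std::memory_order_relaxed);
  // Only a hit if the slot currently holds our index; otherwise the slot
  // belongs to some other dex index and we report a miss.
  return p.index == dex_index ? p.object : nullptr;
}

void Store(uint32_t dex_index, void* object) {
  g_cache[dex_index & (kCacheSize - 1)].store(
      Pair{object, dex_index}, std::memory_order_relaxed);
}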
@@ -103,7 +146,11 @@ inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<Stri
inline void DexCache::ClearString(dex::StringIndex string_idx) {
DCHECK(Runtime::Current()->IsAotCompiler());
uint32_t slot_idx = StringSlotIndex(string_idx);
- StringDexCacheType* slot = &GetStrings()[slot_idx];
+ StringDexCacheType* strings = GetStrings();
+ if (UNLIKELY(strings == nullptr)) {
+ return;
+ }
+ StringDexCacheType* slot = &strings[slot_idx];
// This is racy but should only be called from the transactional interpreter.
if (slot->load(std::memory_order_relaxed).index == string_idx.index_) {
StringDexCachePair cleared(nullptr, StringDexCachePair::InvalidIndexForSlot(slot_idx));
@@ -121,18 +168,27 @@ inline uint32_t DexCache::TypeSlotIndex(dex::TypeIndex type_idx) {
inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) {
// It is theorized that a load acquire is not required since obtaining the resolved class will
// always have an address dependency or a lock.
- return GetResolvedTypes()[TypeSlotIndex(type_idx)].load(
+ TypeDexCacheType* resolved_types = GetResolvedTypes();
+ if (UNLIKELY(resolved_types == nullptr)) {
+ return nullptr;
+ }
+ return resolved_types[TypeSlotIndex(type_idx)].load(
std::memory_order_relaxed).GetObjectForIndex(type_idx.index_);
}
inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) {
DCHECK(resolved != nullptr);
DCHECK(resolved->IsResolved()) << resolved->GetStatus();
+ TypeDexCacheType* resolved_types = GetResolvedTypes();
+ if (UNLIKELY(resolved_types == nullptr)) {
+ resolved_types = AllocArray<TypeDexCacheType, kDexCacheTypeCacheSize>(
+ ResolvedTypesOffset(), NumResolvedTypesOffset(), GetDexFile()->NumTypeIds());
+ }
// TODO default transaction support.
// Use a release store for SetResolvedType. This is done to prevent other threads from seeing a
// class but not necessarily seeing the loaded members like the static fields array.
// See b/32075261.
- GetResolvedTypes()[TypeSlotIndex(type_idx)].store(
+ resolved_types[TypeSlotIndex(type_idx)].store(
TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
WriteBarrier::ForEveryFieldWrite(this);
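
The b/32075261 comment above captures the standard publish-with-release idiom: initialize the class's members first, then publish the class with a release store so a thread that obtains it also sees those members. A portable sketch (the ART reader uses a relaxed load and relies on the address dependency noted in GetResolvedType; this sketch uses acquire instead):

#include <atomic>

struct Klass {
  int* static_fields = nullptr;  // Written before publication.
};

std::atomic<Klass*> g_resolved{nullptr};

void PublishClass(Klass* k, int* fields) {
  k->static_fields = fields;  // 1. Initialize members.
  // 2. Release: a reader that sees the class also sees static_fields.
  g_resolved.store(k, std::memory_order_release);
}

Klass* GetClass() {
  return g_resolved.load(std::memory_order_acquire);
}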
@@ -140,8 +196,12 @@ inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> res
inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
DCHECK(Runtime::Current()->IsAotCompiler());
+ TypeDexCacheType* resolved_types = GetResolvedTypes();
+ if (UNLIKELY(resolved_types == nullptr)) {
+ return;
+ }
uint32_t slot_idx = TypeSlotIndex(type_idx);
- TypeDexCacheType* slot = &GetResolvedTypes()[slot_idx];
+ TypeDexCacheType* slot = &resolved_types[slot_idx];
// This is racy but should only be called from the single-threaded ImageWriter and tests.
if (slot->load(std::memory_order_relaxed).index == type_idx.index_) {
TypeDexCachePair cleared(nullptr, TypeDexCachePair::InvalidIndexForSlot(slot_idx));
@@ -158,13 +218,22 @@ inline uint32_t DexCache::MethodTypeSlotIndex(dex::ProtoIndex proto_idx) {
}
inline MethodType* DexCache::GetResolvedMethodType(dex::ProtoIndex proto_idx) {
- return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load(
+ MethodTypeDexCacheType* methods = GetResolvedMethodTypes();
+ if (UNLIKELY(methods == nullptr)) {
+ return nullptr;
+ }
+ return methods[MethodTypeSlotIndex(proto_idx)].load(
std::memory_order_relaxed).GetObjectForIndex(proto_idx.index_);
}
inline void DexCache::SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved) {
DCHECK(resolved != nullptr);
- GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
+ MethodTypeDexCacheType* methods = GetResolvedMethodTypes();
+ if (UNLIKELY(methods == nullptr)) {
+ methods = AllocArray<MethodTypeDexCacheType, kDexCacheMethodTypeCacheSize>(
+ ResolvedMethodTypesOffset(), NumResolvedMethodTypesOffset(), GetDexFile()->NumProtoIds());
+ }
+ methods[MethodTypeSlotIndex(proto_idx)].store(
MethodTypeDexCachePair(resolved, proto_idx.index_), std::memory_order_relaxed);
Runtime* const runtime = Runtime::Current();
if (UNLIKELY(runtime->IsActiveTransaction())) {
@@ -190,7 +259,11 @@ inline void DexCache::ClearMethodType(dex::ProtoIndex proto_idx) {
inline CallSite* DexCache::GetResolvedCallSite(uint32_t call_site_idx) {
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
DCHECK_LT(call_site_idx, GetDexFile()->NumCallSiteIds());
- GcRoot<mirror::CallSite>& target = GetResolvedCallSites()[call_site_idx];
+ GcRoot<CallSite>* call_sites = GetResolvedCallSites();
+ if (UNLIKELY(call_sites == nullptr)) {
+ return nullptr;
+ }
+ GcRoot<mirror::CallSite>& target = call_sites[call_site_idx];
Atomic<GcRoot<mirror::CallSite>>& ref =
reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
return ref.load(std::memory_order_seq_cst).Read();
@@ -203,7 +276,12 @@ inline ObjPtr<CallSite> DexCache::SetResolvedCallSite(uint32_t call_site_idx,
GcRoot<mirror::CallSite> null_call_site(nullptr);
GcRoot<mirror::CallSite> candidate(call_site);
- GcRoot<mirror::CallSite>& target = GetResolvedCallSites()[call_site_idx];
+ GcRoot<CallSite>* call_sites = GetResolvedCallSites();
+ if (UNLIKELY(call_sites == nullptr)) {
+ call_sites = AllocArray<GcRoot<CallSite>, std::numeric_limits<size_t>::max()>(
+ ResolvedCallSitesOffset(), NumResolvedCallSitesOffset(), GetDexFile()->NumCallSiteIds());
+ }
+ GcRoot<mirror::CallSite>& target = call_sites[call_site_idx];
// The first assignment for a given call site wins.
Atomic<GcRoot<mirror::CallSite>>& ref =
@@ -225,14 +303,23 @@ inline uint32_t DexCache::FieldSlotIndex(uint32_t field_idx) {
}
inline ArtField* DexCache::GetResolvedField(uint32_t field_idx) {
- auto pair = GetNativePair(GetResolvedFields(), FieldSlotIndex(field_idx));
+ FieldDexCacheType* fields = GetResolvedFields();
+ if (UNLIKELY(fields == nullptr)) {
+ return nullptr;
+ }
+ auto pair = GetNativePair(fields, FieldSlotIndex(field_idx));
return pair.GetObjectForIndex(field_idx);
}
inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field) {
DCHECK(field != nullptr);
FieldDexCachePair pair(field, field_idx);
- SetNativePair(GetResolvedFields(), FieldSlotIndex(field_idx), pair);
+ FieldDexCacheType* fields = GetResolvedFields();
+ if (UNLIKELY(fields == nullptr)) {
+ fields = AllocArray<FieldDexCacheType, kDexCacheFieldCacheSize>(
+ ResolvedFieldsOffset(), NumResolvedFieldsOffset(), GetDexFile()->NumFieldIds());
+ }
+ SetNativePair(fields, FieldSlotIndex(field_idx), pair);
}
inline uint32_t DexCache::MethodSlotIndex(uint32_t method_idx) {
@@ -243,14 +330,23 @@ inline uint32_t DexCache::MethodSlotIndex(uint32_t method_idx) {
}
inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx) {
- auto pair = GetNativePair(GetResolvedMethods(), MethodSlotIndex(method_idx));
+ MethodDexCacheType* methods = GetResolvedMethods();
+ if (UNLIKELY(methods == nullptr)) {
+ return nullptr;
+ }
+ auto pair = GetNativePair(methods, MethodSlotIndex(method_idx));
return pair.GetObjectForIndex(method_idx);
}
inline void DexCache::SetResolvedMethod(uint32_t method_idx, ArtMethod* method) {
DCHECK(method != nullptr);
MethodDexCachePair pair(method, method_idx);
- SetNativePair(GetResolvedMethods(), MethodSlotIndex(method_idx), pair);
+ MethodDexCacheType* methods = GetResolvedMethods();
+ if (UNLIKELY(methods == nullptr)) {
+ methods = AllocArray<MethodDexCacheType, kDexCacheMethodCacheSize>(
+ ResolvedMethodsOffset(), NumResolvedMethodsOffset(), GetDexFile()->NumMethodIds());
+ }
+ SetNativePair(methods, MethodSlotIndex(method_idx), pair);
}
template <typename T>
@@ -291,7 +387,9 @@ inline void VisitDexCachePairs(std::atomic<DexCachePair<T>>* pairs,
size_t num_pairs,
const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
- for (size_t i = 0; i < num_pairs; ++i) {
+ // Check both the data pointer and count since the array might be initialized
+ // concurrently on another thread, and we might observe just one of the values.
+ for (size_t i = 0; pairs != nullptr && i < num_pairs; ++i) {
DexCachePair<T> source = pairs[i].load(std::memory_order_relaxed);
// NOTE: We need the "template" keyword here to avoid a compilation
// failure. GcRoot<T> is a template argument-dependent type and we need to
@@ -326,7 +424,7 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
size_t num_call_sites = NumResolvedCallSites<kVerifyFlags>();
- for (size_t i = 0; i != num_call_sites; ++i) {
+ for (size_t i = 0; resolved_call_sites != nullptr && i != num_call_sites; ++i) {
visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
}
}
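
Why both visitor loops guard on the pointer as well as the count: the two live in separate fields, so a concurrent visitor can observe any mix of pre- and post-allocation values, and the nullptr check makes every combination safe. A sketch of the possible interleavings (standalone illustration, not the ART fields):

#include <atomic>
#include <cstddef>

std::atomic<int*> g_data{nullptr};  // Set second by the allocator.
std::atomic<size_t> g_count{0};     // Set first by the allocator.

void Visit(void (*visitor)(int)) {
  int* data = g_data.load(std::memory_order_relaxed);
  size_t count = g_count.load(std::memory_order_relaxed);
  // Possible torn observations: (nullptr, 0), (nullptr, n), (array, 0),
  // (array, n). Guarding on the pointer keeps (nullptr, n) from
  // dereferencing null, and (array, 0) simply visits nothing.
  for (size_t i = 0; data != nullptr && i < count; ++i) {
    visitor(data[i]);
  }
}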