Revert^4 "Lazily allocate DexCache arrays."
We rarely need the DexCache for compiled code.
Delay the allocation in the hope that we never need it.
This reduces DexCache memory usage by ~25% at startup.
Bug: b/181097963
Test: test.py -b --host
Change-Id: I1f654aeb538dfed013705a61b1955af1f6b94fe7
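
For context, the change relies on the usual allocate-on-first-use idiom: the DexCache's native arrays start out null and are only created the first time a lookup needs them. The sketch below is a minimal, generic illustration of that idiom, not the actual mirror::DexCache API; the LazyArray type and GetOrAlloc helper are hypothetical, and the sketch assumes a compare-and-swap is enough to resolve a racing allocation, whereas the hunks below additionally hold Locks::dex_lock_ while calling Initialize.

#include <atomic>
#include <cstddef>

// Allocate-on-first-use sketch (illustrative names only). Losing the CAS
// race simply frees the redundant allocation and uses the winner's array.
template <typename T>
class LazyArray {
 public:
  explicit LazyArray(size_t size) : size_(size) {}
  ~LazyArray() { delete[] data_.load(std::memory_order_relaxed); }

  // Returns the backing array, allocating it on first use.
  T* GetOrAlloc() {
    T* data = data_.load(std::memory_order_acquire);
    if (data != nullptr) {
      return data;  // Fast path: already allocated.
    }
    T* fresh = new T[size_]();  // Zero-initialized, mirroring the nulled entries.
    if (data_.compare_exchange_strong(data, fresh, std::memory_order_acq_rel)) {
      return fresh;  // This thread published the array.
    }
    delete[] fresh;  // Another thread won the race; use its array.
    return data;     // Updated by compare_exchange_strong on failure.
  }

 private:
  const size_t size_;
  std::atomic<T*> data_{nullptr};
};
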
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4d1589b..c85faf5 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1905,13 +1905,10 @@
return false;
}
- LinearAlloc* linear_alloc = GetOrCreateAllocatorForClassLoader(class_loader.Get());
- DCHECK(linear_alloc != nullptr);
- DCHECK_EQ(linear_alloc == Runtime::Current()->GetLinearAlloc(), !app_image);
{
- // Native fields are all null. Initialize them and allocate native memory.
+ // Native fields are all null. Initialize them.
WriterMutexLock mu(self, *Locks::dex_lock_);
- dex_cache->InitializeNativeFields(dex_file.get(), linear_alloc);
+ dex_cache->Initialize(dex_file.get(), class_loader.Get());
}
if (!app_image) {
// Register dex files, keep track of existing ones that are conflicts.
@@ -2367,13 +2364,14 @@
return dex_cache.Get();
}
-ObjPtr<mirror::DexCache> ClassLinker::AllocAndInitializeDexCache(Thread* self,
- const DexFile& dex_file,
- LinearAlloc* linear_alloc) {
+ObjPtr<mirror::DexCache> ClassLinker::AllocAndInitializeDexCache(
+ Thread* self, const DexFile& dex_file, ObjPtr<mirror::ClassLoader> class_loader) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
ObjPtr<mirror::DexCache> dex_cache = AllocDexCache(self, dex_file);
if (dex_cache != nullptr) {
WriterMutexLock mu(self, *Locks::dex_lock_);
- dex_cache->InitializeNativeFields(&dex_file, linear_alloc);
+ dex_cache->Initialize(&dex_file, h_class_loader.Get());
}
return dex_cache;
}
@@ -3844,10 +3842,8 @@
}
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile* dex_file) {
- ObjPtr<mirror::DexCache> dex_cache = AllocAndInitializeDexCache(
- self,
- *dex_file,
- Runtime::Current()->GetLinearAlloc());
+ ObjPtr<mirror::DexCache> dex_cache =
+ AllocAndInitializeDexCache(self, *dex_file, /* class_loader= */ nullptr);
CHECK(dex_cache != nullptr) << "Failed to allocate dex cache for " << dex_file->GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
}
@@ -4037,10 +4033,10 @@
const DexCacheData* old_data = FindDexCacheDataLocked(dex_file);
old_dex_cache = DecodeDexCacheLocked(self, old_data);
if (old_dex_cache == nullptr && h_dex_cache != nullptr) {
- // Do InitializeNativeFields while holding dex lock to make sure two threads don't call it
+ // Do Initialize while holding dex lock to make sure two threads don't call it
// at the same time with the same dex cache. Since the .bss is shared this can cause failing
// DCHECK that the arrays are null.
- h_dex_cache->InitializeNativeFields(&dex_file, linear_alloc);
+ h_dex_cache->Initialize(&dex_file, h_class_loader.Get());
RegisterDexFileLocked(dex_file, h_dex_cache.Get(), h_class_loader.Get());
}
if (old_dex_cache != nullptr) {
@@ -7610,8 +7606,10 @@
if (kIsDebugBuild) {
PointerSize pointer_size = class_linker_->GetImagePointerSize();
// Check that there are no stale methods in the dex cache array.
- auto* resolved_methods = klass_->GetDexCache()->GetResolvedMethods();
- for (size_t i = 0, count = klass_->GetDexCache()->NumResolvedMethods(); i < count; ++i) {
+ ObjPtr<mirror::DexCache> dex_cache = klass_->GetDexCache();
+ auto* resolved_methods = dex_cache->GetResolvedMethods();
+ size_t num_methods = dex_cache->NumResolvedMethods();
+ for (size_t i = 0; resolved_methods != nullptr && i < num_methods; ++i) {
auto pair = mirror::DexCache::GetNativePair(resolved_methods, i);
ArtMethod* m = pair.object;
CHECK(move_table_.find(m) == move_table_.end() ||