 runtime/class_linker.cc           |  19
 runtime/mirror/dex_cache.cc       |   5
 runtime/runtime.cc                |   5
 runtime/runtime.h                 |   8
 runtime/startup_completed_task.cc | 134
 runtime/startup_completed_task.h  |   2
 6 files changed, 107 insertions(+), 66 deletions(-)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 19f79481f5..ecb2fcf472 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -145,6 +145,7 @@
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
+#include "startup_completed_task.h"
#include "thread-inl.h"
#include "thread.h"
#include "thread_list.h"
@@ -10789,9 +10790,12 @@ void ClassLinker::CleanupClassLoaders() {
}
}
}
+ if (to_delete.empty()) {
+ return;
+ }
std::set<const OatFile*> unregistered_oat_files;
- if (!to_delete.empty()) {
- JavaVMExt* vm = self->GetJniEnv()->GetVm();
+ JavaVMExt* vm = self->GetJniEnv()->GetVm();
+ {
WriterMutexLock mu(self, *Locks::dex_lock_);
for (auto it = dex_caches_.begin(), end = dex_caches_.end(); it != end; ) {
const DexFile* dex_file = it->first;
@@ -10820,6 +10824,7 @@ void ClassLinker::CleanupClassLoaders() {
DeleteClassLoader(self, data, /*cleanup_cha=*/ true);
}
}
+ Runtime* runtime = Runtime::Current();
if (!unregistered_oat_files.empty()) {
for (const OatFile* oat_file : unregistered_oat_files) {
// Notify the fault handler about removal of the executable code range if needed.
@@ -10828,10 +10833,18 @@ void ClassLinker::CleanupClassLoaders() {
DCHECK_LE(exec_offset, oat_file->Size());
size_t exec_size = oat_file->Size() - exec_offset;
if (exec_size != 0u) {
- Runtime::Current()->RemoveGeneratedCodeRange(oat_file->Begin() + exec_offset, exec_size);
+ runtime->RemoveGeneratedCodeRange(oat_file->Begin() + exec_offset, exec_size);
}
}
}
+
+ if (runtime->GetStartupLinearAlloc() != nullptr) {
+ // Because the startup linear alloc can contain dex cache arrays associated
+ // with class loaders that got unloaded, we need to delete those arrays
+ // (see the sketch after this file's diff for why `called_by_gc` matters).
+ StartupCompletedTask::DeleteStartupDexCaches(self, /* called_by_gc= */ true);
+ DCHECK_EQ(runtime->GetStartupLinearAlloc(), nullptr);
+ }
}
class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
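
A note on the `called_by_gc` flag passed above: `CleanupClassLoaders` runs as part of class unloading, which happens during garbage collection, so this path plausibly must not wait for the GC to finish before freeing the caches (the non-GC path does wait, as seen later in startup_completed_task.cc). Below is a minimal sketch of that shape, with a plain std::mutex standing in for ART's "wait for GC to complete" machinery; all names here are illustrative, not ART's.

    #include <mutex>

    std::mutex gc_complete_lock;  // stand-in: held while a collection runs

    void DeleteStartupDexCaches(bool called_by_gc) {
      if (!called_by_gc) {
        // Startup-task path: wait until any in-flight GC is done, so it
        // cannot observe the arrays we are about to free.
        std::lock_guard<std::mutex> wait_for_gc(gc_complete_lock);
      }
      // ... unlink and free the startup dex cache arrays ...
    }

    void CleanupDuringGc() {
      std::lock_guard<std::mutex> gc(gc_complete_lock);  // we ARE the GC here
      DeleteStartupDexCaches(/*called_by_gc=*/ true);    // waiting again would deadlock
    }

    int main() {
      CleanupDuringGc();                                // GC-driven cleanup, no wait
      DeleteStartupDexCaches(/*called_by_gc=*/ false);  // startup-task path
    }
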
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index d1ddb79b5b..d724315a19 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -167,6 +167,11 @@ bool DexCache::ShouldAllocateFullArrayAtStartup() {
return false;
}
+ if (runtime->IsZygote()) {
+ // Zygote doesn't have a notion of startup.
+ return false;
+ }
+
if (runtime->GetStartupCompleted()) {
// We only allocate full arrays during app startup.
return false;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2f5cd539cd..36d7d7ed7a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -531,7 +531,7 @@ Runtime::~Runtime() {
// Destroy allocators before shutting down the MemMap because they may use it.
java_vm_.reset();
linear_alloc_.reset();
- startup_linear_alloc_.reset();
+ delete ReleaseStartupLinearAlloc();
linear_alloc_arena_pool_.reset();
arena_pool_.reset();
jit_arena_pool_.reset();
@@ -1734,7 +1734,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
linear_alloc_arena_pool_.reset(new MemMapArenaPool(low_4gb));
}
linear_alloc_.reset(CreateLinearAlloc());
- startup_linear_alloc_.reset(CreateLinearAlloc());
+ startup_linear_alloc_.store(CreateLinearAlloc(), std::memory_order_relaxed);
small_lrt_allocator_ = new jni::SmallLrtAllocator();
@@ -3366,6 +3366,7 @@ void Runtime::ResetStartupCompleted() {
}
bool Runtime::NotifyStartupCompleted() {
+ DCHECK(!IsZygote());
bool expected = false;
if (!startup_completed_.compare_exchange_strong(expected, true, std::memory_order_seq_cst)) {
// Right now NotifyStartupCompleted will be called up to twice, once from profiler and up to
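
The `compare_exchange_strong` in `NotifyStartupCompleted` above is a one-shot latch: exactly one caller wins the false-to-true transition, so the completion work runs once even though, per the comment, the notification can arrive from more than one place. A minimal standalone sketch of that pattern (illustrative names, not ART code):

    #include <atomic>
    #include <cstdio>

    // One-shot latch: the CAS from false to true succeeds for exactly one
    // caller; later or concurrent callers observe the exchange failing.
    std::atomic<bool> startup_completed{false};

    bool NotifyOnce() {
      bool expected = false;
      return startup_completed.compare_exchange_strong(
          expected, true, std::memory_order_seq_cst);
    }

    int main() {
      std::printf("%d\n", NotifyOnce());  // 1: first caller does the work
      std::printf("%d\n", NotifyOnce());  // 0: duplicate notification bails out
    }
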
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9ec9937f03..fc8c050cf1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -824,7 +824,7 @@ class Runtime {
}
LinearAlloc* GetStartupLinearAlloc() {
- return startup_linear_alloc_.get();
+ return startup_linear_alloc_.load(std::memory_order_relaxed);
}
jit::JitOptions* GetJITOptions() {
@@ -1063,7 +1063,7 @@ class Runtime {
};
LinearAlloc* ReleaseStartupLinearAlloc() {
- return startup_linear_alloc_.release();
+ return startup_linear_alloc_.exchange(nullptr, std::memory_order_relaxed);
}
bool LoadAppImageStartupCache() const {
@@ -1303,8 +1303,8 @@ class Runtime {
std::unique_ptr<LinearAlloc> linear_alloc_;
// Linear alloc used for allocations during startup. Will be deleted after
- // startup.
- std::unique_ptr<LinearAlloc> startup_linear_alloc_;
+ // startup. Atomic because the pointer can be concurrently updated to null.
+ std::atomic<LinearAlloc*> startup_linear_alloc_;
// The number of spins that are done before thread suspension is used to forcibly inflate.
size_t max_spins_before_thin_lock_inflation_;
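
The switch from std::unique_ptr to std::atomic<LinearAlloc*> above turns `ReleaseStartupLinearAlloc` into an atomic ownership handoff: whichever caller exchanges the field to null first becomes the sole owner and may delete the allocator. This is also why `delete ReleaseStartupLinearAlloc()` in the destructor is safe even if the startup task already released it, since deleting a null pointer is a no-op. A minimal sketch of the pattern, with illustrative names:

    #include <atomic>

    struct LinearAllocStub { /* placeholder for the real LinearAlloc */ };

    std::atomic<LinearAllocStub*> startup_alloc{new LinearAllocStub()};

    // Exactly one caller receives the non-null pointer and owns deletion;
    // everyone else gets nullptr. Relaxed ordering suffices here because the
    // surrounding code (a thread checkpoint, in the diff) provides the
    // synchronization for the memory the allocator backs.
    LinearAllocStub* Release() {
      return startup_alloc.exchange(nullptr, std::memory_order_relaxed);
    }

    int main() {
      delete Release();  // owner path: frees the allocator
      delete Release();  // returns nullptr; delete nullptr is a no-op
    }
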
diff --git a/runtime/startup_completed_task.cc b/runtime/startup_completed_task.cc
index a9a06bb0e1..d29889af75 100644
--- a/runtime/startup_completed_task.cc
+++ b/runtime/startup_completed_task.cc
@@ -45,75 +45,95 @@ class UnlinkStartupDexCacheVisitor : public DexCacheVisitor {
};
void StartupCompletedTask::Run(Thread* self) {
- VLOG(startup) << "StartupCompletedTask running";
Runtime* const runtime = Runtime::Current();
- if (!runtime->NotifyStartupCompleted()) {
- return;
- }
-
- // Maybe generate a runtime app image. If the runtime is debuggable, boot
- // classpath classes can be dynamically changed, so don't bother generating an
- // image.
- if (!runtime->IsJavaDebuggable()) {
- std::string compiler_filter;
- std::string compilation_reason;
- runtime->GetAppInfo()->GetPrimaryApkOptimizationStatus(&compiler_filter, &compilation_reason);
- CompilerFilter::Filter filter;
- if (CompilerFilter::ParseCompilerFilter(compiler_filter.c_str(), &filter) &&
- !CompilerFilter::IsAotCompilationEnabled(filter)) {
- std::string error_msg;
- if (!RuntimeImage::WriteImageToDisk(&error_msg)) {
- LOG(DEBUG) << "Could not write temporary image to disk " << error_msg;
+ if (runtime->NotifyStartupCompleted()) {
+ // Maybe generate a runtime app image. If the runtime is debuggable, boot
+ // classpath classes can be dynamically changed, so don't bother generating an
+ // image.
+ if (!runtime->IsJavaDebuggable()) {
+ std::string compiler_filter;
+ std::string compilation_reason;
+ runtime->GetAppInfo()->GetPrimaryApkOptimizationStatus(&compiler_filter, &compilation_reason);
+ CompilerFilter::Filter filter;
+ if (CompilerFilter::ParseCompilerFilter(compiler_filter.c_str(), &filter) &&
+ !CompilerFilter::IsAotCompilationEnabled(filter)) {
+ std::string error_msg;
+ if (!RuntimeImage::WriteImageToDisk(&error_msg)) {
+ LOG(DEBUG) << "Could not write temporary image to disk " << error_msg;
+ }
}
}
+
+ ScopedObjectAccess soa(self);
+ DeleteStartupDexCaches(self, /* called_by_gc= */ false);
}
+ // Delete the thread pool used for app image loading since startup is assumed to be completed.
+ ScopedTrace trace2("Delete thread pool");
+ runtime->DeleteThreadPool();
+}
+
+void StartupCompletedTask::DeleteStartupDexCaches(Thread* self, bool called_by_gc) {
+ VLOG(startup) << "Deleting startup dex caches";
+ Runtime* const runtime = Runtime::Current();
+
+ ScopedTrace trace("Releasing dex caches and app image spaces metadata");
+
+ static struct EmptyClosure : Closure {
+ void Run([[maybe_unused]] Thread* thread) override {}
+ } closure;
+
+ // Fetch the startup linear alloc so no other thread tries to allocate there.
+ std::unique_ptr<LinearAlloc> startup_linear_alloc(runtime->ReleaseStartupLinearAlloc());
+
+ // Request a checkpoint to make sure all threads see we have started up and
+ // won't allocate in the startup linear alloc. Without this checkpoint, the
+ // following could happen (T0 == self):
+ // 1) T1 fetches the startup alloc and allocates an array there.
+ // 2) T0 goes over the dex caches and clears dex cache arrays in the startup alloc.
+ // 3) T1 installs the array it allocated into a dex cache.
+ // 4) T0 frees the startup alloc, leaving that dex cache pointing at freed memory.
+ //
+ // With this checkpoint, 3) cannot happen, as T0 waits for T1 to reach the
+ // checkpoint. (A standalone sketch of this handoff follows this file's diff.)
+ runtime->GetThreadList()->RunCheckpoint(&closure);
+
{
- ScopedTrace trace("Releasing dex caches and app image spaces metadata");
- ScopedObjectAccess soa(Thread::Current());
+ UnlinkStartupDexCacheVisitor visitor;
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
+ runtime->GetClassLinker()->VisitDexCaches(&visitor);
+ }
- {
- UnlinkStartupDexCacheVisitor visitor;
- ReaderMutexLock mu(self, *Locks::dex_lock_);
- runtime->GetClassLinker()->VisitDexCaches(&visitor);
- }
- // Request a checkpoint to make sure no threads are:
- // - accessing the image space metadata section when we madvise it
- // - accessing dex caches when we free them
- static struct EmptyClosure : Closure {
- void Run([[maybe_unused]] Thread* thread) override {}
- } closure;
-
- runtime->GetThreadList()->RunCheckpoint(&closure);
-
- // Now delete dex cache arrays from both images and startup linear alloc in
- // a critical section. The critical section is to ensure there is no
- // possibility the GC can temporarily see those arrays.
- gc::ScopedGCCriticalSection sgcs(soa.Self(),
- gc::kGcCauseDeletingDexCacheArrays,
- gc::kCollectorTypeCriticalSection);
- for (gc::space::ContinuousSpace* space : runtime->GetHeap()->GetContinuousSpaces()) {
- if (space->IsImageSpace()) {
- gc::space::ImageSpace* image_space = space->AsImageSpace();
- if (image_space->GetImageHeader().IsAppImage()) {
- image_space->ReleaseMetadata();
- }
- }
- }
+ // Request a checkpoint to make sure no threads are:
+ // - accessing the image space metadata section when we madvise it
+ // - accessing dex caches when we free them
+ runtime->GetThreadList()->RunCheckpoint(&closure);
- std::unique_ptr<LinearAlloc> startup_linear_alloc(runtime->ReleaseStartupLinearAlloc());
- if (startup_linear_alloc != nullptr) {
- ScopedTrace trace2("Delete startup linear alloc");
- ArenaPool* arena_pool = startup_linear_alloc->GetArenaPool();
- startup_linear_alloc.reset();
- arena_pool->TrimMaps();
+ // If this isn't the GC calling `DeleteStartupDexCaches` and a GC may be
+ // running, wait for it to be complete. We don't want it to see these dex
+ // caches.
+ if (!called_by_gc) {
+ runtime->GetHeap()->WaitForGcToComplete(gc::kGcCauseDeletingDexCacheArrays, self);
+ }
+
+ // At this point, neither other threads nor the GC can observe the arrays,
+ // so we can safely release them.
+ for (gc::space::ContinuousSpace* space : runtime->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ gc::space::ImageSpace* image_space = space->AsImageSpace();
+ if (image_space->GetImageHeader().IsAppImage()) {
+ image_space->ReleaseMetadata();
+ }
}
}
- // Delete the thread pool used for app image loading since startup is assumed to be completed.
- ScopedTrace trace2("Delete thread pool");
- runtime->DeleteThreadPool();
+ if (startup_linear_alloc != nullptr) {
+ ScopedTrace trace2("Delete startup linear alloc");
+ ArenaPool* arena_pool = startup_linear_alloc->GetArenaPool();
+ startup_linear_alloc.reset();
+ arena_pool->TrimMaps();
+ }
}
} // namespace art
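
The checkpoint ordering above is the subtle part, so here is the standalone analogue referenced from the race comment in the diff. It is a loose sketch: a C++20 std::latch stands in for `RunCheckpoint`, and a single worker stands in for all mutator threads; names and structure are illustrative, not ART's.

    #include <atomic>
    #include <latch>
    #include <thread>

    struct Alloc { int storage[16]; };     // stand-in for the startup LinearAlloc

    std::atomic<Alloc*> startup_alloc{new Alloc()};
    std::atomic<int*> published{nullptr};  // stand-in for a dex cache slot
    std::latch checkpoint{1};              // one worker to wait for

    void Worker() {
      // T1 in the diff's comment: may fetch the alloc and publish an array.
      Alloc* a = startup_alloc.load(std::memory_order_relaxed);
      if (a != nullptr) {
        published.store(a->storage, std::memory_order_release);
      }
      checkpoint.count_down();  // "safe point": past any use of the old alloc
    }

    int main() {
      std::thread t(Worker);
      // T0: take sole ownership, then wait for every worker to pass the
      // checkpoint; after that, no one can still publish from the old alloc.
      Alloc* mine = startup_alloc.exchange(nullptr, std::memory_order_relaxed);
      checkpoint.wait();                                    // ~ RunCheckpoint
      published.store(nullptr, std::memory_order_relaxed);  // unlink arrays
      delete mine;  // safe: nothing can still reference the allocator
      t.join();
    }
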
diff --git a/runtime/startup_completed_task.h b/runtime/startup_completed_task.h
index 54f3631842..8077561833 100644
--- a/runtime/startup_completed_task.h
+++ b/runtime/startup_completed_task.h
@@ -28,6 +28,8 @@ class StartupCompletedTask : public gc::HeapTask {
explicit StartupCompletedTask(uint64_t target_run_time) : gc::HeapTask(target_run_time) {}
void Run(Thread* self) override;
+ static void DeleteStartupDexCaches(Thread* self, bool called_by_gc)
+ REQUIRES_SHARED(Locks::mutator_lock_);
};
} // namespace art
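
The `REQUIRES_SHARED(Locks::mutator_lock_)` annotation on the new declaration is enforced statically. A minimal sketch of how such an annotation behaves, written with raw Clang thread-safety attributes rather than ART's macros (compile with clang and -Wthread-safety; the names are illustrative):

    // A fake lock carrying Clang's "capability" attribute, so the analyzer
    // can track who holds it; ART's Locks::mutator_lock_ plays this role.
    struct __attribute__((capability("mutex"))) FakeMutatorLock {
      void AcquireShared() __attribute__((acquire_shared_capability(this))) {}
      void ReleaseShared() __attribute__((release_shared_capability(this))) {}
    };

    FakeMutatorLock mutator_lock;

    // Analogous to REQUIRES_SHARED: callers must hold the lock in shared
    // mode, or -Wthread-safety reports a warning at the call site.
    void DeleteStartupDexCaches()
        __attribute__((requires_shared_capability(mutator_lock))) {
      // May touch managed-heap state: valid only while the lock is held.
    }

    int main() {
      mutator_lock.AcquireShared();
      DeleteStartupDexCaches();  // OK: shared capability held
      mutator_lock.ReleaseShared();
    }
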