Diffstat (limited to 'runtime/gc/heap.cc')
-rw-r--r-- runtime/gc/heap.cc | 42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f039f6beb3..aeab7d80b4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -110,6 +110,9 @@ static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
sizeof(mirror::HeapReference<mirror::Object>);
+// System.runFinalization can deadlock with native allocations. To deal with this, we have a
+// timeout on how long we wait for finalizers to run. b/21544853
+static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, double foreground_heap_growth_multiplier,
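
The constant added above caps how long a native allocation will wait for finalizers: MsToNs(250u) is 250 ms expressed in nanoseconds. The bounded wait itself happens on the managed side via VMRuntime.runFinalization (see the final hunks below); as a minimal sketch of the general timed-wait pattern, with all names hypothetical and not part of this patch:

#include <chrono>
#include <condition_variable>
#include <mutex>

// Hypothetical sketch of a bounded wait on finalizers. ART delegates the
// actual bounded wait to VMRuntime.runFinalization(long) on the Java side;
// this only illustrates the timed-wait pattern that avoids blocking forever.
bool WaitForFinalizersWithTimeout(std::mutex& mu,
                                  std::condition_variable& cv,
                                  const bool& finalizers_done,
                                  uint64_t timeout_ns) {
  std::unique_lock<std::mutex> lock(mu);
  // Returns false if the deadline passes before finalizers_done becomes true,
  // which is how the caller avoids the deadlock described in b/21544853.
  return cv.wait_for(lock, std::chrono::nanoseconds(timeout_ns),
                     [&] { return finalizers_done; });
}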
@@ -232,10 +235,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
}
if (!image_file_name.empty()) {
+ ATRACE_BEGIN("ImageSpace::Create");
std::string error_msg;
- space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
- image_instruction_set,
- &error_msg);
+ auto* image_space = space::ImageSpace::Create(image_file_name.c_str(), image_instruction_set,
+ &error_msg);
+ ATRACE_END();
if (image_space != nullptr) {
AddSpace(image_space);
// Oat files referenced by image files immediately follow them in memory, ensure alloc space
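
This hunk, and several below, bracket each expensive start-up phase with ATRACE_BEGIN/ATRACE_END from <cutils/trace.h> so the sections show up in systrace. With manual pairs, an early return can leave a section open; a common remedy is an RAII guard. A minimal sketch, assuming only the cutils macros (the guard class itself is hypothetical, not part of this patch):

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

// Hypothetical RAII guard: begins a named trace section on construction and
// guarantees the matching ATRACE_END when the scope exits, even on early return.
class ScopedTraceSection {
 public:
  explicit ScopedTraceSection(const char* name) { ATRACE_BEGIN(name); }
  ~ScopedTraceSection() { ATRACE_END(); }
  ScopedTraceSection(const ScopedTraceSection&) = delete;
  ScopedTraceSection& operator=(const ScopedTraceSection&) = delete;
};

// Usage: ScopedTraceSection trace("ImageSpace::Create");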
@@ -287,6 +291,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
std::string error_str;
std::unique_ptr<MemMap> non_moving_space_mem_map;
+ ATRACE_BEGIN("Create heap maps");
if (separate_non_moving_space) {
// If we are the zygote, the non moving space becomes the zygote space when we run
// PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
@@ -323,6 +328,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
capacity_, &error_str));
CHECK(main_mem_map_2.get() != nullptr) << error_str;
}
+ ATRACE_END();
+ ATRACE_BEGIN("Create spaces");
// Create the non moving space first so that bitmaps don't take up the address range.
if (separate_non_moving_space) {
// Non moving space is always dlmalloc since we currently don't have support for multiple
@@ -340,7 +347,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
if (foreground_collector_type_ == kCollectorTypeCC) {
region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
AddSpace(region_space_);
- } else if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
+ } else if (IsMovingGc(foreground_collector_type_) &&
+ foreground_collector_type_ != kCollectorTypeGSS) {
// Create bump pointer spaces.
// We only create the bump pointer spaces if the foreground collector is a compacting GC.
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
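
For readers unfamiliar with the term: a bump-pointer space satisfies each allocation by advancing a single cursor, which is why it only makes sense under a compacting collector that can reclaim the whole region at once. A toy illustration of the idea (single-threaded for clarity; not ART's space::BumpPointerSpace):

#include <cstddef>
#include <cstdint>

// Toy bump-pointer allocator: every allocation is just a cursor increment,
// which is what makes bump-pointer spaces cheap to manage. Illustration only.
class ToyBumpPointerSpace {
 public:
  ToyBumpPointerSpace(uint8_t* begin, size_t capacity)
      : begin_(begin), cursor_(begin), end_(begin + capacity) {}

  void* Alloc(size_t bytes) {
    if (static_cast<size_t>(end_ - cursor_) < bytes) {
      return nullptr;  // exhausted: the collector must compact or grow
    }
    void* result = cursor_;
    cursor_ += bytes;
    return result;
  }

  // After a compacting GC everything live has been moved out, so "freeing"
  // the whole space is a single cursor reset.
  void Reset() { cursor_ = begin_; }

 private:
  uint8_t* const begin_;
  uint8_t* cursor_;
  uint8_t* const end_;
};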
@@ -411,10 +419,12 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
if (main_space_backup_.get() != nullptr) {
RemoveSpace(main_space_backup_.get());
}
+ ATRACE_END();
// Allocate the card table.
+ ATRACE_BEGIN("Create card table");
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
CHECK(card_table_.get() != nullptr) << "Failed to create card table";
-
+ ATRACE_END();
if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
rb_table_.reset(new accounting::ReadBarrierTable());
DCHECK(rb_table_->IsAllCleared());
@@ -2252,8 +2262,8 @@ void Heap::PreZygoteFork() {
// Set all the cards in the mod-union table since we don't know which objects contain references
// to large objects.
mod_union_table->SetCards();
- large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
AddModUnionTable(mod_union_table);
+ large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
if (collector::SemiSpace::kUseRememberedSet) {
// Add a new remembered set for the post-zygote non-moving space.
accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
@@ -3531,22 +3541,16 @@ bool Heap::IsGCRequestPending() const {
return concurrent_gc_pending_.LoadRelaxed();
}
-void Heap::RunFinalization(JNIEnv* env) {
- // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
- if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
- CHECK(WellKnownClasses::java_lang_System != nullptr);
- WellKnownClasses::java_lang_System_runFinalization =
- CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
- CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
- }
- env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
- WellKnownClasses::java_lang_System_runFinalization);
+void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
+ env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
+ WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
+ static_cast<jlong>(timeout));
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
Thread* self = ThreadForEnv(env);
if (native_need_to_run_finalization_) {
- RunFinalization(env);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
UpdateMaxNativeFootprint();
native_need_to_run_finalization_ = false;
}
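
The rewritten RunFinalization assumes the method id for VMRuntime.runFinalization(long) has already been cached in WellKnownClasses, replacing the lazy caching the removed lines did for System.runFinalization. A sketch of that caching, modeled on the CacheMethod call visible in the removed code (the exact init site is an assumption, not shown in this diff):

// Hypothetical placement inside WellKnownClasses initialization; modeled on
// the CacheMethod pattern from the removed code. Note the descriptor "(J)V":
// the method now takes a jlong timeout instead of no arguments.
dalvik_system_VMRuntime_runFinalization =
    CacheMethod(env, dalvik_system_VMRuntime, /* is_static= */ true,
                "runFinalization", "(J)V");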
@@ -3562,7 +3566,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
if (new_native_bytes_allocated > growth_limit_) {
if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
// Just finished a GC, attempt to run finalizers.
- RunFinalization(env);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
CHECK(!env->ExceptionCheck());
// Native bytes allocated may be updated by finalization; refresh it.
new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
@@ -3570,7 +3574,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
if (new_native_bytes_allocated > growth_limit_) {
CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
- RunFinalization(env);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
native_need_to_run_finalization_ = false;
CHECK(!env->ExceptionCheck());
}
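
Taken together, the last three hunks bound every finalization wait on the native-allocation path. A condensed restatement of the control flow, with all helper names hypothetical stand-ins (this is not the literal ART code):

#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for the ART internals used below.
enum GcType { kGcTypeNone, kGcTypeFull };
GcType WaitForGcToComplete();
size_t AddNativeBytes(size_t bytes);
size_t CurrentNativeBytes();
void CollectGarbageForNativeAlloc();
void RunFinalizationWithTimeout(uint64_t timeout_ns);

constexpr uint64_t kTimeoutNs = 250ull * 1000 * 1000;  // 250 ms, as in the patch
constexpr size_t kNativeWatermark = 64 * 1024 * 1024;  // illustrative value

void RegisterNativeAllocationSketch(size_t bytes) {
  size_t allocated = AddNativeBytes(bytes);
  if (allocated > kNativeWatermark) {
    // Step 1: if a GC just finished, give finalizers a bounded chance to run.
    if (WaitForGcToComplete() != kGcTypeNone) {
      RunFinalizationWithTimeout(kTimeoutNs);
      allocated = CurrentNativeBytes();  // finalizers may have freed memory
    }
    // Step 2: still over the watermark -- force a GC, then finalize again,
    // each time waiting at most kTimeoutNs rather than indefinitely.
    if (allocated > kNativeWatermark) {
      CollectGarbageForNativeAlloc();
      RunFinalizationWithTimeout(kTimeoutNs);
    }
  }
}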