Diffstat (limited to 'runtime/gc/heap.cc'):
 runtime/gc/heap.cc | 104
 1 file changed, 37 insertions(+), 67 deletions(-)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 37963e49e7..f04bc896f1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -18,13 +18,13 @@
#include <limits>
#include <memory>
-#include <unwind.h> // For GC verification.
#include <vector>
#include "android-base/stringprintf.h"
#include "allocation_listener.h"
#include "art_field-inl.h"
+#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
@@ -133,6 +133,17 @@ static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
// config.
static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
+static const char* kRegionSpaceName = "main space (region space)";
+
+#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
+// 300 MB (0x12c00000) - (default non-moving space capacity).
+static uint8_t* const kPreferredAllocSpaceBegin =
+ reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
+#else
+// For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
+static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
+#endif
+
static inline bool CareAboutPauseTimes() {
return Runtime::Current()->InJankPerceptibleProcessState();
}
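
The new constant folds what were previously two copies of the same ifdef into one place. A minimal sketch of the address arithmetic, assuming kDefaultNonMovingSpaceCapacity is 64 MB (its value in heap.h at the time; treat that as an assumption to verify against your tree):

    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uintptr_t kMB = 1024 * 1024;
      // Assumption: Heap::kDefaultNonMovingSpaceCapacity == 64 MB.
      constexpr uintptr_t kNonMovingCapacity = 64 * kMB;
      // The non-moving space starts here and ends exactly at 300 MB
      // (0x12c00000), so the main (region) space can start at 300 MB.
      constexpr uintptr_t kBegin = 300 * kMB - kNonMovingCapacity;
      std::printf("kPreferredAllocSpaceBegin = 0x%llx\n",
                  static_cast<unsigned long long>(kBegin));  // prints 0xec00000
    }
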
@@ -286,15 +297,9 @@ Heap::Heap(size_t initial_size,
// Requested begin for the alloc space, to follow the mapped image and oat files
uint8_t* requested_alloc_space_begin = nullptr;
if (foreground_collector_type_ == kCollectorTypeCC) {
- // Need to use a low address so that we can allocate a contiguous
- // 2 * Xmx space when there's no image (dex2oat for target).
-#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
- CHECK_GE(300 * MB, non_moving_space_capacity);
- requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
-#else
- // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
- requested_alloc_space_begin = reinterpret_cast<uint8_t*>(0x20000000);
-#endif
+ // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
+ // image (dex2oat for target).
+ requested_alloc_space_begin = kPreferredAllocSpaceBegin;
}
// Load image space(s).
@@ -369,12 +374,7 @@ Heap::Heap(size_t initial_size,
&error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
-#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
- request_begin = reinterpret_cast<uint8_t*>(300 * MB);
-#else
- // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
- request_begin = reinterpret_cast<uint8_t*>(0x20000000) + non_moving_space_capacity;
-#endif
+ request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
}
// Attempt to create 2 mem maps at or after the requested begin.
if (foreground_collector_type_ != kCollectorTypeCC) {
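
Both request addresses are only hints to the OS. A small standalone sketch of hinted-reservation behavior using plain POSIX mmap rather than ART's MemMap machinery (the 64 MB length is arbitrary):

    #include <sys/mman.h>
    #include <cstdio>

    int main() {
      void* hint = reinterpret_cast<void*>(300u * 1024 * 1024);  // 300 MB, as above
      const size_t length = 64 * 1024 * 1024;
      // Without MAP_FIXED, the kernel treats the address purely as a hint and
      // falls back to any free range if [hint, hint + length) is unavailable.
      void* p = mmap(hint, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) { std::perror("mmap"); return 1; }
      std::printf("requested %p, got %p\n", hint, p);
      munmap(p, length);
    }
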
@@ -419,7 +419,12 @@ Heap::Heap(size_t initial_size,
}
// Create other spaces based on whether or not we have a moving GC.
if (foreground_collector_type_ == kCollectorTypeCC) {
- region_space_ = space::RegionSpace::Create("main space (region space)", capacity_ * 2, request_begin);
+ CHECK(separate_non_moving_space);
+ MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
+ capacity_ * 2,
+ request_begin);
+ CHECK(region_space_mem_map != nullptr) << "No region space mem map";
+ region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
AddSpace(region_space_);
} else if (IsMovingGc(foreground_collector_type_) &&
foreground_collector_type_ != kCollectorTypeGSS) {
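
Splitting RegionSpace::Create into CreateMemMap plus Create follows a two-phase construction pattern: reserve address space first, CHECK the reservation with a precise message, then build the space on top of it. A hedged sketch of the shape, with hypothetical stand-in types (Reservation, Space, and ReserveAt are illustrative, not ART's API):

    #include <cassert>
    #include <cstddef>
    #include <memory>

    // Hypothetical stand-ins for MemMap / RegionSpace; only the shape matters.
    struct Reservation { size_t capacity; };

    struct Space {
      explicit Space(std::unique_ptr<Reservation> map) : map_(std::move(map)) {}
      std::unique_ptr<Reservation> map_;
    };

    // Phase 1: reserve address space. Returning nullptr on failure lets the
    // caller fail fast before any construction happens.
    std::unique_ptr<Reservation> ReserveAt(const char* /*name*/, size_t capacity,
                                           void* /*request_begin*/) {
      return std::make_unique<Reservation>(Reservation{capacity});
    }

    int main() {
      auto map = ReserveAt("main space (region space)", 512u << 20, nullptr);
      assert(map != nullptr && "No region space mem map");  // mirrors the CHECK above
      Space region_space(std::move(map));                   // Phase 2: build on the map
    }
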
@@ -1830,6 +1835,11 @@ void Heap::SetTargetHeapUtilization(float target) {
size_t Heap::GetObjectsAllocated() const {
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
+ // Prevent GC from running during GetObjectsAllocated, since we may get a checkpoint request that
+ // tells us to suspend while we are doing SuspendAll. b/35232978
+ gc::ScopedGCCriticalSection gcs(Thread::Current(),
+ gc::kGcCauseGetObjectsAllocated,
+ gc::kCollectorTypeGetObjectsAllocated);
// Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
ScopedSuspendAll ssa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
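
The new ScopedGCCriticalSection establishes an ordering: block GC before suspending all threads. A minimal analogy of that ordering with plain mutexes (gc_critical and suspend_all are hypothetical stand-ins, not ART's actual locks):

    #include <cstddef>
    #include <mutex>

    std::mutex gc_critical;  // stands in for ScopedGCCriticalSection
    std::mutex suspend_all;  // stands in for ScopedSuspendAll

    size_t GetObjectsAllocated() {
      // Order matters: enter the GC critical section first so a concurrent GC
      // cannot issue a checkpoint that suspends this thread while it is itself
      // trying to suspend everyone (the deadlock in b/35232978).
      std::lock_guard<std::mutex> gcs(gc_critical);
      std::lock_guard<std::mutex> ssa(suspend_all);
      return 0;  // walk the spaces here
    }

    int main() { return static_cast<int>(GetObjectsAllocated()); }
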
@@ -2327,7 +2337,9 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
size_t bin_size = object_addr - context->prev_;
// Add the bin consisting of the end of the previous object to the start of the current object.
collector->AddBin(bin_size, context->prev_);
- context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
+ // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build).
+ context->prev_ = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>(),
+ kObjectAlignment);
}
void AddBin(size_t size, uintptr_t position) {
@@ -2347,7 +2359,8 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- size_t obj_size = obj->SizeOf();
+ // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build).
+ size_t obj_size = obj->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
mirror::Object* forward_address;
// Find the smallest bin which we can move obj in.
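
SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier> selects a read-barrier-free class-pointer load at compile time. A simplified sketch of that flag-dispatch pattern (Object, Class, and LoadClassWithBarrier are illustrative, not ART's mirror types):

    #include <cstddef>
    #include <cstdio>

    enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };

    struct Class { size_t instance_size; };

    struct Object {
      Class* klass;

      Class* LoadClassWithBarrier() const {
        // Stub: a real read barrier would fix up a to-space pointer here.
        return klass;
      }

      template <ReadBarrierOption kOption = kWithReadBarrier>
      size_t SizeOf() const {
        // kWithoutReadBarrier makes the class load a plain read, which is safe
        // when no concurrent copying collection can be running.
        Class* k = (kOption == kWithReadBarrier) ? LoadClassWithBarrier() : klass;
        return k->instance_size;
      }
    };

    int main() {
      Class c{sizeof(Object)};
      Object o{&c};
      std::printf("%zu\n", o.SizeOf<kWithoutReadBarrier>());
    }
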
@@ -3551,11 +3564,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
collector::GcType gc_type = collector_ran->GetGcType();
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
- const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
- const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
+ const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
+ const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
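
With the 2.5 MB floor removed, the growth step is straightforward arithmetic. A worked example with illustrative numbers (60 MB live, 75% target utilization, 2.0 foreground multiplier, 512 KB/2 MB min/max free; these are example inputs, not ART defaults, and ART's exact clamp sequencing differs slightly):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint64_t kMB = 1024 * 1024;
      const uint64_t bytes_allocated = 60 * kMB;
      const double target_utilization = 0.75;
      const double multiplier = 2.0;  // grow more aggressively in foreground
      const uint64_t adjusted_min_free = static_cast<uint64_t>(512 * 1024 * multiplier);  // 1 MB
      const uint64_t adjusted_max_free = static_cast<uint64_t>(2 * kMB * multiplier);     // 4 MB
      int64_t delta = static_cast<int64_t>(bytes_allocated / target_utilization) -
                      static_cast<int64_t>(bytes_allocated);  // 20 MB of headroom wanted
      delta = std::min<int64_t>(delta, adjusted_max_free);    // clamped down to 4 MB
      delta = std::max<int64_t>(delta, adjusted_min_free);
      std::printf("grow heap by %lld bytes\n", static_cast<long long>(delta));
    }
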
@@ -4055,42 +4065,6 @@ void Heap::BroadcastForNewAllocationRecords() const {
}
}
-// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
-class StackCrawlState {
- public:
- StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
- : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
- }
- size_t GetFrameCount() const {
- return frame_count_;
- }
- static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
- auto* const state = reinterpret_cast<StackCrawlState*>(arg);
- const uintptr_t ip = _Unwind_GetIP(context);
- // The first stack frame is get_backtrace itself. Skip it.
- if (ip != 0 && state->skip_count_ > 0) {
- --state->skip_count_;
- return _URC_NO_REASON;
- }
- // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
- state->frames_[state->frame_count_] = ip;
- state->frame_count_++;
- return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
- }
-
- private:
- uintptr_t* const frames_;
- size_t frame_count_;
- const size_t max_depth_;
- size_t skip_count_;
-};
-
-static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
- StackCrawlState state(frames, max_depth, 0u);
- _Unwind_Backtrace(&StackCrawlState::Callback, &state);
- return state.GetFrameCount();
-}
-
void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
auto* const runtime = Runtime::Current();
if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
@@ -4099,13 +4073,9 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
bool new_backtrace = false;
{
static constexpr size_t kMaxFrames = 16u;
- uintptr_t backtrace[kMaxFrames];
- const size_t frames = get_backtrace(backtrace, kMaxFrames);
- uint64_t hash = 0;
- for (size_t i = 0; i < frames; ++i) {
- hash = hash * 2654435761 + backtrace[i];
- hash += (hash >> 13) ^ (hash << 6);
- }
+ FixedSizeBacktrace<kMaxFrames> backtrace;
+ backtrace.Collect(/* skip_frames */ 2);
+ uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
if (new_backtrace) {
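
For reference, the hashing the removed loop performed, and which FixedSizeBacktrace::Hash presumably preserves, looks like this as a standalone function (the frame values are placeholders; real collection goes through an unwinder):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Same mixing as the removed loop: multiply-accumulate with the Knuth
    // constant 2654435761, followed by a shift-xor stir.
    uint64_t HashFrames(const uintptr_t* frames, size_t count) {
      uint64_t hash = 0;
      for (size_t i = 0; i < count; ++i) {
        hash = hash * 2654435761u + frames[i];
        hash += (hash >> 13) ^ (hash << 6);
      }
      return hash;
    }

    int main() {
      const uintptr_t frames[] = {0x1000, 0x2040, 0x30a0};  // placeholder PCs
      std::printf("0x%llx\n",
                  static_cast<unsigned long long>(HashFrames(frames, 3)));
    }
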