-rw-r--r--  runtime/gc/accounting/card_table-inl.h      4
-rw-r--r--  runtime/gc/collector/garbage_collector.cc    2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc          10
-rw-r--r--  runtime/gc/heap.cc                          49
-rw-r--r--  runtime/gc/heap.h                           16
-rw-r--r--  runtime/thread_pool.cc                       2
-rw-r--r--  runtime/utils.h                              4
7 files changed, 50 insertions, 37 deletions
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index fa2ab27952..c5e8812b83 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -65,8 +65,8 @@ inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_en
       (reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
   // Now we have the words, we can send these to be processed in parallel.
-  auto* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
-  auto* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
+  uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
+  uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
   for (;;) {
     while (LIKELY(*word_cur == 0)) {
       ++word_cur;
     }
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 039415eaa0..b7641a4162 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -80,7 +80,7 @@ void GarbageCollector::Run() {
     uint64_t pause_end = NanoTime();
     pause_times_.push_back(pause_end - pause_start);
   } else {
-    auto* self = Thread::Current();
+    Thread* self = Thread::Current();
     {
       ReaderMutexLock mu(self, *Locks::mutator_lock_);
       MarkingPhase();
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5c31eb1078..9c69fdf271 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -268,7 +268,7 @@ void MarkSweep::MarkReachableObjects() {
 
 void MarkSweep::ReclaimPhase() {
   base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
-  auto* self = Thread::Current();
+  Thread* self = Thread::Current();
 
   if (!IsConcurrent()) {
     base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
@@ -745,14 +745,14 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
   const bool parallel = kParallelCardScan && thread_pool != nullptr;
   if (parallel) {
-    auto* self = Thread::Current();
+    Thread* self = Thread::Current();
     // Can't have a different split for each space since multiple spaces can have their cards being
     // scanned at the same time.
     timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
     // Try to take some of the mark stack since we can pass this off to the worker tasks.
     const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
     const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
-    const auto mark_stack_size = mark_stack_end - mark_stack_begin;
+    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
     const size_t thread_count = thread_pool->GetThreadCount() + 1;
     // Estimated number of work tasks we will create.
     const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
@@ -1209,8 +1209,8 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
       sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
     }
     if (sweep_space) {
-      auto begin = reinterpret_cast<uintptr_t>(space->Begin());
-      auto end = reinterpret_cast<uintptr_t>(space->End());
+      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
       scc.space = space->AsDlMallocSpace();
       accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
       accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
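Note that the mark_sweep.cc change from `const auto mark_stack_size` to `const size_t mark_stack_size` is more than stylistic: `mark_stack_end - mark_stack_begin` is a pointer subtraction, so `auto` deduces the signed `std::ptrdiff_t`, while the explicit declaration converts the count to the unsigned `size_t` that the task-sizing arithmetic around it uses. A minimal standalone sketch of the deduction (the names here are illustrative, not from this CL):

    #include <cstddef>
    #include <type_traits>

    int main() {
      int buf[8];
      int* begin = buf;
      int* end = buf + 8;
      auto diff = end - begin;  // deduced as std::ptrdiff_t (signed)
      static_assert(std::is_same<decltype(diff), std::ptrdiff_t>::value,
                    "auto hides the signedness of pointer subtraction");
      std::size_t size = end - begin;  // explicit type converts to the intended unsigned width
      return static_cast<int>(diff) - static_cast<int>(size);  // 0
    }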
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5b011047e6..e89dd66894 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -59,17 +59,16 @@ namespace art {
 namespace gc {
 
 // When to create a log message about a slow GC, 100ms.
-static const uint64_t kSlowGcThreshold = MsToNs(100);
+static constexpr uint64_t kSlowGcThreshold = MsToNs(100);
 // When to create a log message about a long pause, 5ms.
-static const uint64_t kLongGcPauseThreshold = MsToNs(5);
-static const bool kGCALotMode = false;
-static const size_t kGcAlotInterval = KB;
-static const bool kDumpGcPerformanceOnShutdown = false;
+static constexpr uint64_t kLongGcPauseThreshold = MsToNs(5);
+static constexpr bool kGCALotMode = false;
+static constexpr size_t kGcAlotInterval = KB;
+static constexpr bool kDumpGcPerformanceOnShutdown = false;
 // Minimum amount of remaining bytes before a concurrent GC is triggered.
-static const size_t kMinConcurrentRemainingBytes = 128 * KB;
-const double Heap::kDefaultTargetUtilization = 0.5;
+static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
 // If true, measure the total allocation time.
-static const bool kMeasureAllocationTime = false;
+static constexpr bool kMeasureAllocationTime = false;
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, size_t capacity, const std::string& original_image_file_name,
@@ -100,8 +99,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       // Initially care about pauses in case we never get notified of process states, or if the JNI
       // code becomes broken.
       care_about_pause_times_(true),
-      concurrent_start_bytes_(concurrent_gc ? initial_size - kMinConcurrentRemainingBytes
-                                            : std::numeric_limits<size_t>::max()),
+      concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes
+                                             : std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       large_object_threshold_(3 * kPageSize),
@@ -793,13 +792,26 @@ void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
   }
 }
 
-inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size) {
-  return num_bytes_allocated_ + alloc_size > growth_limit_;
+inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
+  size_t new_footprint = num_bytes_allocated_ + alloc_size;
+  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
+    if (UNLIKELY(new_footprint > growth_limit_)) {
+      return true;
+    }
+    if (!concurrent_gc_) {
+      if (!grow) {
+        return true;
+      } else {
+        max_allowed_footprint_ = new_footprint;
+      }
+    }
+  }
+  return false;
 }
 
 inline mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                            bool grow, size_t* bytes_allocated) {
-  if (IsOutOfMemoryOnAllocation(alloc_size)) {
+  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
     return NULL;
   }
   return space->Alloc(self, alloc_size, bytes_allocated);
@@ -808,10 +820,10 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* spac
 // DlMallocSpace-specific version.
 inline mirror::Object* Heap::TryToAllocate(Thread* self, space::DlMallocSpace* space,
                                            size_t alloc_size, bool grow, size_t* bytes_allocated) {
-  if (IsOutOfMemoryOnAllocation(alloc_size)) {
+  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
     return NULL;
   }
-  if (!running_on_valgrind_) {
+  if (LIKELY(!running_on_valgrind_)) {
     return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
   } else {
     return space->Alloc(self, alloc_size, bytes_allocated);
@@ -819,7 +831,8 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, space::DlMallocSpace* s
 }
 
 template <class T>
-inline mirror::Object* Heap::Allocate(Thread* self, T* space, size_t alloc_size, size_t* bytes_allocated) {
+inline mirror::Object* Heap::Allocate(Thread* self, T* space, size_t alloc_size,
+                                      size_t* bytes_allocated) {
   // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
   // done in the runnable state where suspension is expected.
   DCHECK_EQ(self->GetState(), kRunnable);
@@ -832,8 +845,8 @@ inline mirror::Object* Heap::Allocate(Thread* self, T* space, size_t alloc_size,
   return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
 }
 
-mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t alloc_size,
-                                             size_t* bytes_allocated) {
+mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space,
+                                             size_t alloc_size, size_t* bytes_allocated) {
   mirror::Object* ptr;
 
   // The allocation failed. If the GC is running, block until it completes, and then retry the
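The substantive change in heap.cc is IsOutOfMemoryOnAllocation: instead of checking only the hard growth_limit_, the allocation path now checks the soft max_allowed_footprint_ first and, for the non-concurrent GC, lets a retry after garbage collection (grow == true) raise that soft limit. A self-contained sketch of the decision logic (field names simplified, stand-in values hypothetical):

    #include <cstddef>
    #include <iostream>

    // Hypothetical stand-ins for the Heap fields the check reads and writes.
    struct HeapLimits {
      std::size_t num_bytes_allocated = 0;
      std::size_t max_allowed_footprint = 2 * 1024 * 1024;  // soft limit, may grow
      std::size_t growth_limit = 32 * 1024 * 1024;          // hard cap
      bool concurrent_gc = false;

      // Mirrors the shape of Heap::IsOutOfMemoryOnAllocation(alloc_size, grow).
      bool IsOutOfMemoryOnAllocation(std::size_t alloc_size, bool grow) {
        std::size_t new_footprint = num_bytes_allocated + alloc_size;
        if (new_footprint > max_allowed_footprint) {
          if (new_footprint > growth_limit) {
            return true;  // over the hard cap: always out of memory
          }
          if (!concurrent_gc) {
            if (!grow) {
              return true;  // first attempt fails; caller GCs, then retries with grow=true
            }
            max_allowed_footprint = new_footprint;  // retry after GC grows the soft limit
          }
        }
        return false;
      }
    };

    int main() {
      HeapLimits heap;
      heap.num_bytes_allocated = heap.max_allowed_footprint;  // soft limit already reached
      std::cout << heap.IsOutOfMemoryOnAllocation(4096, /*grow=*/false) << "\n";  // 1
      std::cout << heap.IsOutOfMemoryOnAllocation(4096, /*grow=*/true) << "\n";   // 0
      return 0;
    }

The LIKELY/UNLIKELY annotations added in the same hunks are branch-prediction hints; in ART they wrap GCC's __builtin_expect, telling the compiler to lay out the no-OOM and non-valgrind paths as the fall-through case.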
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 72e6e43c21..cda252e81c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -99,20 +99,20 @@ enum HeapVerificationMode {
   kVerifyAllFast,  // Sanity check all heap accesses with quick(er) tests.
   kVerifyAll  // Sanity check all heap accesses.
 };
-const HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
+static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
 
 class Heap {
  public:
-  static const size_t kDefaultInitialSize = 2 * MB;
-  static const size_t kDefaultMaximumSize = 32 * MB;
-  static const size_t kDefaultMaxFree = 2 * MB;
-  static const size_t kDefaultMinFree = kDefaultMaxFree / 4;
+  static constexpr size_t kDefaultInitialSize = 2 * MB;
+  static constexpr size_t kDefaultMaximumSize = 32 * MB;
+  static constexpr size_t kDefaultMaxFree = 2 * MB;
+  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
 
   // Default target utilization.
-  static const double kDefaultTargetUtilization;
+  static constexpr double kDefaultTargetUtilization = 0.5;
 
   // Used so that we don't overflow the allocation time atomic integer.
-  static const size_t kTimeAdjust = 1024;
+  static constexpr size_t kTimeAdjust = 1024;
 
   // Create a heap with the requested sizes. The possible empty
   // image_file_names names specify Spaces to load based on
@@ -434,7 +434,7 @@ class Heap {
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsOutOfMemoryOnAllocation(size_t alloc_size);
+  bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow);
 
   // Pushes a list of cleared references out to the managed heap.
   void EnqueueClearedReferences(mirror::Object** cleared_references);
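The in-class initializer for kDefaultTargetUtilization is also why the heap.cc hunk above deletes `const double Heap::kDefaultTargetUtilization = 0.5;`: pre-C++11, only integral static const members could be initialized inside the class, so the double needed exactly that out-of-line definition, while C++11's `constexpr` lets the value live in the header. A minimal before/after sketch (hypothetical classes):

    // Before: a static const double needs a definition in some .cc file.
    struct OldHeap {
      static const double kDefaultTargetUtilization;  // declaration only
    };
    const double OldHeap::kDefaultTargetUtilization = 0.5;

    // After: constexpr permits the in-class initializer, even for double.
    struct NewHeap {
      static constexpr double kDefaultTargetUtilization = 0.5;
    };

    int main() {
      return OldHeap::kDefaultTargetUtilization == NewHeap::kDefaultTargetUtilization ? 0 : 1;
    }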
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index f7fdcfb25c..39d30bb24d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -23,7 +23,7 @@
 
 namespace art {
 
-static const bool kMeasureWaitTime = false;
+static constexpr bool kMeasureWaitTime = false;
 
 ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name,
                                    size_t stack_size)
diff --git a/runtime/utils.h b/runtime/utils.h
index bd8111454f..fcbb992bf4 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -289,12 +289,12 @@ uint64_t NanoTime();
 uint64_t ThreadCpuNanoTime();
 
 // Converts the given number of nanoseconds to milliseconds.
-static inline uint64_t NsToMs(uint64_t ns) {
+static constexpr inline uint64_t NsToMs(uint64_t ns) {
   return ns / 1000 / 1000;
 }
 
 // Converts the given number of milliseconds to nanoseconds
-static inline uint64_t MsToNs(uint64_t ns) {
+static constexpr inline uint64_t MsToNs(uint64_t ns) {
   return ns * 1000 * 1000;
 }
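The utils.h change is what makes the heap.cc constants above legal: `static constexpr uint64_t kSlowGcThreshold = MsToNs(100);` requires its initializer to be a constant expression, which a plain inline function cannot provide. (The CL keeps the pre-existing parameter name `ns` in MsToNs, although the argument is in milliseconds.) A short sketch of the same pattern:

    #include <cstdint>

    // Same shape as the utils.h helper after this change.
    static constexpr inline uint64_t MsToNs(uint64_t ms) {
      return ms * 1000 * 1000;
    }

    // Now evaluated entirely at compile time, as in heap.cc.
    static constexpr uint64_t kSlowGcThreshold = MsToNs(100);
    static_assert(kSlowGcThreshold == 100000000u, "100 ms in nanoseconds");

    int main() {
      return kSlowGcThreshold == MsToNs(100) ? 0 : 1;
    }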