-rw-r--r--  runtime/gc/collector/garbage_collector.cc   | 13
-rw-r--r--  runtime/gc/collector/garbage_collector.h    |  4
-rw-r--r--  runtime/gc/heap.cc                           |  6
-rw-r--r--  runtime/gc/heap.h                            |  2
-rw-r--r--  runtime/jit/jit_code_cache.cc                | 20
-rw-r--r--  runtime/jit/jit_code_cache.h                 | 25
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc  | 14
-rw-r--r--  runtime/runtime.cc                           | 10
-rw-r--r--  runtime/runtime.h                            |  3
-rw-r--r--  test/688-shared-library/check                | 21
10 files changed, 82 insertions(+), 36 deletions(-)
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 5e3692ea9a..0294db7b7e 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -77,8 +77,9 @@ void GarbageCollector::RegisterPause(uint64_t nano_length) {
void GarbageCollector::ResetCumulativeStatistics() {
cumulative_timings_.Reset();
- total_time_ns_ = 0;
- total_freed_objects_ = 0;
+ total_thread_cpu_time_ns_ = 0u;
+ total_time_ns_ = 0u;
+ total_freed_objects_ = 0u;
total_freed_bytes_ = 0;
MutexLock mu(Thread::Current(), pause_histogram_lock_);
pause_histogram_.Reset();
@@ -88,6 +89,7 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
ScopedTrace trace(android::base::StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()));
Thread* self = Thread::Current();
uint64_t start_time = NanoTime();
+ uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
Iteration* current_iteration = GetCurrentIteration();
current_iteration->Reset(gc_cause, clear_soft_references);
// Note transaction mode is single-threaded and there's no asynchronous GC and this flag doesn't
@@ -102,6 +104,8 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
total_freed_bytes_ += current_iteration->GetFreedBytes() +
current_iteration->GetFreedLargeObjectBytes();
uint64_t end_time = NanoTime();
+ uint64_t thread_cpu_end_time = ThreadCpuNanoTime();
+ total_thread_cpu_time_ns_ += thread_cpu_end_time - thread_cpu_start_time;
current_iteration->SetDurationNs(end_time - start_time);
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
// The entire GC was paused, clear the fake pauses which might be in the pause times and add
@@ -159,8 +163,9 @@ void GarbageCollector::ResetMeasurements() {
pause_histogram_.Reset();
}
cumulative_timings_.Reset();
- total_time_ns_ = 0;
- total_freed_objects_ = 0;
+ total_thread_cpu_time_ns_ = 0u;
+ total_time_ns_ = 0u;
+ total_freed_objects_ = 0u;
total_freed_bytes_ = 0;
}
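
The new total_thread_cpu_time_ns_ counter is maintained by bracketing each collection with two per-thread CPU clock readings. Below is a minimal standalone sketch of that pattern (not part of the patch); it assumes ART's ThreadCpuNanoTime() is a wrapper over the POSIX per-thread CPU clock, and the names ThreadCpuNanosSketch/RunAndAccount are hypothetical.

#include <cstdint>
#include <ctime>

// Assumption: ThreadCpuNanoTime() corresponds to CLOCK_THREAD_CPUTIME_ID,
// i.e. CPU time consumed by the calling thread only, in nanoseconds.
static uint64_t ThreadCpuNanosSketch() {
  timespec ts;
  clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
  return static_cast<uint64_t>(ts.tv_sec) * UINT64_C(1000000000) + ts.tv_nsec;
}

// Usage mirroring GarbageCollector::Run(): read the clock before and after the
// collection and accumulate the delta into the cumulative counter.
void RunAndAccount(uint64_t* total_thread_cpu_time_ns) {
  const uint64_t start = ThreadCpuNanosSketch();
  // ... perform the collection on this thread ...
  *total_thread_cpu_time_ns += ThreadCpuNanosSketch() - start;
}

Because the per-thread clock accounts only for the calling thread, CPU consumed by helper threads during a concurrent collection would not show up in this delta.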
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index f722e8d855..2857881456 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -81,6 +81,9 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
void SwapBitmaps()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ uint64_t GetTotalCpuTime() const {
+ return total_thread_cpu_time_ns_;
+ }
uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
@@ -146,6 +149,7 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
std::string name_;
// Cumulative statistics.
Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
+ uint64_t total_thread_cpu_time_ns_;
uint64_t total_time_ns_;
uint64_t total_freed_objects_;
int64_t total_freed_bytes_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f0f81fc67e..71c23926d3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1062,6 +1062,12 @@ void Heap::RemoveSpace(space::Space* space) {
}
}
+uint64_t Heap::GetTotalGcCpuTime() {
+ uint64_t sum = 0;
+ sum += young_concurrent_copying_collector_->GetTotalCpuTime();
+ sum += concurrent_copying_collector_->GetTotalCpuTime();
+ return sum;
+}
void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative timings.
os << "Dumping cumulative Gc timings\n";
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index c3ee5267b5..a43f3156f5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -395,6 +395,8 @@ class Heap {
REQUIRES(!Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
+ uint64_t GetTotalGcCpuTime();
+
// Set target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.setTargetHeapUtilization.
void SetTargetHeapUtilization(float target);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 359f97e705..1701ca8a78 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1274,10 +1274,10 @@ size_t JitCodeCache::ReserveData(Thread* self,
class MarkCodeVisitor final : public StackVisitor {
public:
- MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
+ MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in, CodeCacheBitmap* bitmap)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
code_cache_(code_cache_in),
- bitmap_(code_cache_->GetLiveBitmap()) {}
+ bitmap_(bitmap) {}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
@@ -1299,13 +1299,13 @@ class MarkCodeVisitor final : public StackVisitor {
class MarkCodeClosure final : public Closure {
public:
- MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
- : code_cache_(code_cache), barrier_(barrier) {}
+ MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
+ : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {}
void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
- MarkCodeVisitor visitor(thread, code_cache_);
+ MarkCodeVisitor visitor(thread, code_cache_, bitmap_);
visitor.WalkStack();
if (kIsDebugBuild) {
// The stack walking code queries the side instrumentation stack if it
@@ -1320,7 +1320,7 @@ class MarkCodeClosure final : public Closure {
code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
if (method_header != nullptr) {
const void* code = method_header->GetCode();
- CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
+ CHECK(bitmap_->Test(FromCodeToAllocation(code)));
}
}
}
@@ -1329,6 +1329,7 @@ class MarkCodeClosure final : public Closure {
private:
JitCodeCache* const code_cache_;
+ CodeCacheBitmap* const bitmap_;
Barrier* const barrier_;
};
@@ -1374,7 +1375,7 @@ bool JitCodeCache::IncreaseCodeCacheCapacity() {
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
Barrier barrier(0);
size_t threads_running_checkpoint = 0;
- MarkCodeClosure closure(this, &barrier);
+ MarkCodeClosure closure(this, GetLiveBitmap(), &barrier);
threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
// Now that we have run our checkpoint, move to a suspended state and wait
// for other threads to run the checkpoint.
@@ -1987,11 +1988,6 @@ void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
}
}
-size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
- MutexLock mu(Thread::Current(), lock_);
- return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
-}
-
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
DCHECK(!method->IsNative());
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 126fd441db..a5075638f2 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,7 +71,6 @@ template<class T> class ObjectArray;
namespace jit {
-class JitInstrumentationCache;
class ScopedCodeCacheWrite;
// Alignment in bits that will suit all architectures.
@@ -97,12 +96,6 @@ class JitCodeCache {
std::string* error_msg);
~JitCodeCache();
- // Number of bytes allocated in the code cache.
- size_t CodeCacheSize() REQUIRES(!lock_);
-
- // Number of bytes allocated in the data cache.
- size_t DataCacheSize() REQUIRES(!lock_);
-
bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -177,10 +170,6 @@ class JitCodeCache {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
- CodeCacheBitmap* GetLiveBitmap() const {
- return live_bitmap_.get();
- }
-
// Perform a collection on the code cache.
void GarbageCollectCache(Thread* self)
REQUIRES(!lock_)
@@ -234,10 +223,6 @@ class JitCodeCache {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- uint64_t GetLastUpdateTimeNs() const;
-
- size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);
-
void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -339,6 +324,12 @@ class JitCodeCache {
void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
// Number of bytes allocated in the code cache.
+ size_t CodeCacheSize() REQUIRES(!lock_);
+
+ // Number of bytes allocated in the data cache.
+ size_t DataCacheSize() REQUIRES(!lock_);
+
+ // Number of bytes allocated in the code cache.
size_t CodeCacheSizeLocked() REQUIRES(lock_);
// Number of bytes allocated in the data cache.
@@ -375,6 +366,10 @@ class JitCodeCache {
REQUIRES(lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ CodeCacheBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
+
uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
void FreeCode(uint8_t* code) REQUIRES(lock_);
uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
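
Taken together, the jit_code_cache.cc and jit_code_cache.h hunks are an API-narrowing refactor: the live bitmap is handed to MarkCodeVisitor/MarkCodeClosure through their constructors instead of being fetched via a public GetLiveBitmap(), which lets that accessor (and CodeCacheSize()/DataCacheSize()) move to the private section and lets unused entry points such as GetMemorySizeOfCodePointer() be removed. A minimal sketch of the injection pattern, using hypothetical names (Bitmap, MarkClosure, Cache) rather than the real ART types:

#include <cstdint>

// Hypothetical stand-ins for CodeCacheBitmap and the closure that marks it.
class Bitmap {
 public:
  void Set(uintptr_t allocation) { last_ = allocation; }
  bool Test(uintptr_t allocation) const { return allocation == last_; }
 private:
  uintptr_t last_ = 0;
};

class MarkClosure {
 public:
  explicit MarkClosure(Bitmap* bitmap) : bitmap_(bitmap) {}  // dependency injected
  void Run(uintptr_t allocation) { bitmap_->Set(allocation); }
 private:
  Bitmap* const bitmap_;
};

class Cache {
 public:
  void MarkOnThreadStacks(uintptr_t allocation) {
    MarkClosure closure(GetLiveBitmap());  // only the cache itself needs the accessor
    closure.Run(allocation);
  }
 private:
  Bitmap* GetLiveBitmap() { return &live_bitmap_; }  // no longer part of the public API
  Bitmap live_bitmap_;
};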
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e4bc1b77a2..56e9094983 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -76,12 +76,16 @@ static void EnableDebugger() {
}
}
#endif
- // We don't want core dumps, though, so set the core dump size to 0.
+ // We don't want core dumps, though, so set the soft limit on core dump size
+ // to 0 without changing the hard limit.
rlimit rl;
- rl.rlim_cur = 0;
- rl.rlim_max = RLIM_INFINITY;
- if (setrlimit(RLIMIT_CORE, &rl) == -1) {
- PLOG(ERROR) << "setrlimit(RLIMIT_CORE) failed for pid " << getpid();
+ if (getrlimit(RLIMIT_CORE, &rl) == -1) {
+ PLOG(ERROR) << "getrlimit(RLIMIT_CORE) failed for pid " << getpid();
+ } else {
+ rl.rlim_cur = 0;
+ if (setrlimit(RLIMIT_CORE, &rl) == -1) {
+ PLOG(ERROR) << "setrlimit(RLIMIT_CORE) failed for pid " << getpid();
+ }
}
}
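
The old code overwrote both limits and set rlim_max to RLIM_INFINITY, which setrlimit() rejects with EPERM for an unprivileged process whose hard limit has already been lowered; the new code reads the current limits first and lowers only the soft limit. A standalone sketch of the same pattern on a POSIX system (the function name DisableCoreDumps is hypothetical):

#include <sys/resource.h>
#include <cstdio>

// Lower only the soft RLIMIT_CORE to 0, preserving whatever hard limit the
// process inherited (an unprivileged process cannot raise the hard limit back).
void DisableCoreDumps() {
  rlimit rl;
  if (getrlimit(RLIMIT_CORE, &rl) == -1) {
    std::perror("getrlimit(RLIMIT_CORE)");
    return;
  }
  rl.rlim_cur = 0;  // soft limit: no core files from now on
  // rl.rlim_max is left exactly as returned by getrlimit().
  if (setrlimit(RLIMIT_CORE, &rl) == -1) {
    std::perror("setrlimit(RLIMIT_CORE)");
  }
}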
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 19c1623d1f..f016e874ca 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -278,6 +278,7 @@ Runtime::Runtime()
// Initially assume we perceive jank in case the process state is never updated.
process_state_(kProcessStateJankPerceptible),
zygote_no_threads_(false),
+ process_cpu_start_time_(ProcessCpuNanoTime()),
verifier_logging_threshold_ms_(100) {
static_assert(Runtime::kCalleeSaveSize ==
static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
@@ -321,11 +322,20 @@ Runtime::~Runtime() {
}
if (dump_gc_performance_on_shutdown_) {
+ process_cpu_end_time_ = ProcessCpuNanoTime();
ScopedLogSeverity sls(LogSeverity::INFO);
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
// to be still alive.
heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
+
+ uint64_t process_cpu_time = process_cpu_end_time_ - process_cpu_start_time_;
+ uint64_t gc_cpu_time = heap_->GetTotalGcCpuTime();
+ float ratio = static_cast<float>(gc_cpu_time) / process_cpu_time;
+ LOG_STREAM(INFO) << "GC CPU time " << PrettyDuration(gc_cpu_time)
+ << " out of process CPU time " << PrettyDuration(process_cpu_time)
+ << " (" << ratio << ")"
+ << "\n";
}
if (jit_ != nullptr) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a696c2845e..3c057f3c41 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -1109,6 +1109,9 @@ class Runtime {
MemMap protected_fault_page_;
+ uint64_t process_cpu_start_time_;
+ uint64_t process_cpu_end_time_;
+
uint32_t verifier_logging_threshold_ms_;
DISALLOW_COPY_AND_ASSIGN(Runtime);
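
With the runtime.cc and runtime.h changes, shutdown now reports how much of the process's total CPU time was spent in the collectors. A minimal sketch of that calculation (not the patch's code); it assumes ART's ProcessCpuNanoTime() wraps the process-wide CPU clock, and the names ProcessCpuNanosSketch/ReportGcCpuShare are hypothetical:

#include <cstdint>
#include <cstdio>
#include <ctime>

// Assumption: ProcessCpuNanoTime() corresponds to CLOCK_PROCESS_CPUTIME_ID,
// i.e. CPU time consumed by every thread in the process.
static uint64_t ProcessCpuNanosSketch() {
  timespec ts;
  clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  return static_cast<uint64_t>(ts.tv_sec) * UINT64_C(1000000000) + ts.tv_nsec;
}

// gc_cpu_time would come from Heap::GetTotalGcCpuTime(); the reported ratio is
// GC CPU time divided by process CPU time over the runtime's lifetime.
void ReportGcCpuShare(uint64_t process_cpu_start_time, uint64_t gc_cpu_time) {
  const uint64_t process_cpu_time = ProcessCpuNanosSketch() - process_cpu_start_time;
  const double ratio = process_cpu_time == 0  // guard added for the standalone sketch
                           ? 0.0
                           : static_cast<double>(gc_cpu_time) / process_cpu_time;
  std::printf("GC CPU time %llu ns out of process CPU time %llu ns (%.3f)\n",
              static_cast<unsigned long long>(gc_cpu_time),
              static_cast<unsigned long long>(process_cpu_time), ratio);
}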
diff --git a/test/688-shared-library/check b/test/688-shared-library/check
new file mode 100644
index 0000000000..0b6c9e4d2f
--- /dev/null
+++ b/test/688-shared-library/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Finalizers of DexFile will complain about not being able to close
+# the main dex file, as it's still open. That's OK to ignore.
+sed -e '/^E\/System/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null