summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author Hiroshi Yamauchi <yamauchi@google.com> 2015-04-02 10:18:12 -0700
committer Hiroshi Yamauchi <yamauchi@google.com> 2015-04-13 15:37:40 -0700
commita1c9f013c034fbddb9337cc5c7ecf0e5a8b77547 (patch)
tree77a722cae515f6b3c7ea54f5b0fd2435682ae55b
parent67592a44cd5600b3c007b9215e3e5296a61118e8 (diff)
getRuntimeStat() support (ART).
Export some runtime stats (currently GC stats) via VMDebug.getRuntimeStat(). Added several new GC stats such as blocking GC counts and GC count histograms. Bug: 19825248 Change-Id: I8ece9ed241dc3982dfd983d7159090ba82940dce
-rw-r--r--runtime/base/histogram-inl.h18
-rw-r--r--runtime/base/histogram.h1
-rw-r--r--runtime/gc/heap.cc137
-rw-r--r--runtime/gc/heap.h32
-rw-r--r--runtime/gc/task_processor.cc10
-rw-r--r--runtime/gc/task_processor.h2
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc122
-rw-r--r--test/099-vmdebug/src/Main.java108
8 files changed, 422 insertions, 8 deletions
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index 812ed86e40..0f969b9626 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -165,6 +165,23 @@ inline void Histogram<Value>::PrintBins(std::ostream& os, const CumulativeData&
}
template <class Value>
+inline void Histogram<Value>::DumpBins(std::ostream& os) const {
+ DCHECK_GT(sample_size_, 0ull);
+ bool dumped_one = false;
+ for (size_t bin_idx = 0; bin_idx < frequency_.size(); ++bin_idx) {
+ if (frequency_[bin_idx] != 0U) {
+ if (dumped_one) {
+ // Prepend a comma if not the first bin.
+ os << ",";
+ } else {
+ dumped_one = true;
+ }
+ os << GetRange(bin_idx) << ":" << frequency_[bin_idx];
+ }
+ }
+}
+
+template <class Value>
inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double interval,
const CumulativeData& data) const {
static constexpr size_t kFractionalDigits = 3;
@@ -249,4 +266,3 @@ inline double Histogram<Value>::Percentile(double per, const CumulativeData& dat
} // namespace art
#endif // ART_RUNTIME_BASE_HISTOGRAM_INL_H_
-
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index 78f6e1c7c7..c312fb252c 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -61,6 +61,7 @@ template <class Value> class Histogram {
void PrintConfidenceIntervals(std::ostream& os, double interval,
const CumulativeData& data) const;
void PrintBins(std::ostream& os, const CumulativeData& data) const;
+ void DumpBins(std::ostream& os) const;
Value GetRange(size_t bucket_idx) const;
size_t GetBucketCount() const;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 83da5a8fbd..beaf067236 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -195,7 +195,17 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
pending_collector_transition_(nullptr),
pending_heap_trim_(nullptr),
- use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
+ use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
+ running_collection_is_blocking_(false),
+ blocking_gc_count_(0U),
+ blocking_gc_time_(0U),
+ last_update_time_gc_count_rate_histograms_( // Round down by the window duration.
+ (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
+ gc_count_last_window_(0U),
+ blocking_gc_count_last_window_(0U),
+ gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
+ blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
+ kGcCountRateMaxBucketCount) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -926,7 +936,6 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
total_duration += collector->GetCumulativeTimings().GetTotalNs();
total_paused_time += collector->GetTotalPausedTimeNs();
collector->DumpPerformanceInfo(os);
- collector->ResetMeasurements();
}
uint64_t allocation_time =
static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
@@ -940,8 +949,8 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
}
uint64_t total_objects_allocated = GetObjectsAllocatedEver();
os << "Total number of allocations " << total_objects_allocated << "\n";
- uint64_t total_bytes_allocated = GetBytesAllocatedEver();
- os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
+ os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
+ os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
@@ -956,10 +965,68 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
}
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
- os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_);
+ os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
+ os << "Total GC count: " << GetGcCount() << "\n";
+ os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
+ os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
+ os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
+
+ {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (gc_count_rate_histogram_.SampleSize() > 0U) {
+ os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+ gc_count_rate_histogram_.DumpBins(os);
+ os << "\n";
+ }
+ if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+ os << "Histogram of blocking GC count per "
+ << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+ blocking_gc_count_rate_histogram_.DumpBins(os);
+ os << "\n";
+ }
+ }
+
BaseMutex::DumpAll(os);
}
+uint64_t Heap::GetGcCount() const {
+ uint64_t gc_count = 0U;
+ for (auto& collector : garbage_collectors_) {
+ gc_count += collector->GetCumulativeTimings().GetIterations();
+ }
+ return gc_count;
+}
+
+uint64_t Heap::GetGcTime() const {
+ uint64_t gc_time = 0U;
+ for (auto& collector : garbage_collectors_) {
+ gc_time += collector->GetCumulativeTimings().GetTotalNs();
+ }
+ return gc_time;
+}
+
+uint64_t Heap::GetBlockingGcCount() const {
+ return blocking_gc_count_;
+}
+
+uint64_t Heap::GetBlockingGcTime() const {
+ return blocking_gc_time_;
+}
+
+void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (gc_count_rate_histogram_.SampleSize() > 0U) {
+ gc_count_rate_histogram_.DumpBins(os);
+ }
+}
+
+void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+ blocking_gc_count_rate_histogram_.DumpBins(os);
+ }
+}
+
Heap::~Heap() {
VLOG(heap) << "Starting ~Heap()";
STLDeleteElements(&garbage_collectors_);
@@ -2274,7 +2341,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
}
collector_type_running_ = collector_type_;
}
-
if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
++runtime->GetStats()->gc_for_alloc_count;
++self->GetStats()->gc_for_alloc_count;
@@ -2389,11 +2455,55 @@ void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
collector_type_running_ = kCollectorTypeNone;
if (gc_type != collector::kGcTypeNone) {
last_gc_type_ = gc_type;
+
+ // Update stats.
+ ++gc_count_last_window_;
+ if (running_collection_is_blocking_) {
+ // If the currently running collection was a blocking one,
+ // increment the counters and reset the flag.
+ ++blocking_gc_count_;
+ blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
+ ++blocking_gc_count_last_window_;
+ }
+ // Update the gc count rate histograms if due.
+ UpdateGcCountRateHistograms();
}
+ // Reset.
+ running_collection_is_blocking_ = false;
// Wake anyone who may have been waiting for the GC to complete.
gc_complete_cond_->Broadcast(self);
}
+void Heap::UpdateGcCountRateHistograms() {
+ // Invariant: if the time since the last update spans more than
+ // one window, all the GC runs (if > 0) must have happened in the
+ // first window, because otherwise the update would already have
+ // taken place at an earlier GC run. So, we report the non-first
+ // windows with zero counts to the histograms.
+ DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+ uint64_t now = NanoTime();
+ DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
+ uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
+ uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
+ if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
+ // Record the first window.
+ gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
+ blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
+ blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
+ // Record the other windows (with zero counts).
+ for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
+ gc_count_rate_histogram_.AddValue(0);
+ blocking_gc_count_rate_histogram_.AddValue(0);
+ }
+ // Update the last update time and reset the counters.
+ last_update_time_gc_count_rate_histograms_ =
+ (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
+ gc_count_last_window_ = 1; // Include the current run.
+ blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
+ }
+ DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+}
+
class RootMatchesObjectVisitor : public SingleRootVisitor {
public:
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
@@ -3003,6 +3113,14 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
collector::GcType last_gc_type = collector::kGcTypeNone;
uint64_t wait_start = NanoTime();
while (collector_type_running_ != kCollectorTypeNone) {
+ if (self != task_processor_->GetRunningThread()) {
+ // The current thread is about to wait for a currently running
+ // collection to finish. If the waiting thread is not the heap
+ // task daemon thread, the currently running collection is
+ // considered as a blocking GC.
+ running_collection_is_blocking_ = true;
+ VLOG(gc) << "Waiting for a blocking GC " << cause;
+ }
ATRACE_BEGIN("GC: Wait For Completion");
// We must wait, change thread state then sleep on gc_complete_cond_;
gc_complete_cond_->Wait(self);
@@ -3015,6 +3133,13 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
<< " for cause " << cause;
}
+ if (self != task_processor_->GetRunningThread()) {
+ // The current thread is about to run a collection. If the thread
+ // is not the heap task daemon thread, it's considered as a
+ // blocking GC (i.e., blocking itself).
+ running_collection_is_blocking_ = true;
+ VLOG(gc) << "Starting a blocking GC " << cause;
+ }
return last_gc_type;
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 603cbfd42a..2f627985a1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -672,6 +672,14 @@ class Heap {
min_interval_homogeneous_space_compaction_by_oom_ = interval;
}
+ // Helpers for android.os.Debug.getRuntimeStat().
+ uint64_t GetGcCount() const;
+ uint64_t GetGcTime() const;
+ uint64_t GetBlockingGcCount() const;
+ uint64_t GetBlockingGcTime() const;
+ void DumpGcCountRateHistogram(std::ostream& os) const;
+ void DumpBlockingGcCountRateHistogram(std::ostream& os) const;
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -873,6 +881,8 @@ class Heap {
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
@@ -1156,6 +1166,28 @@ class Heap {
// Whether or not we use homogeneous space compaction to avoid OOM errors.
bool use_homogeneous_space_compaction_for_oom_;
+ // True if the currently running collection has made some thread wait.
+ bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
+ // The number of blocking GC runs.
+ uint64_t blocking_gc_count_;
+ // The total duration of blocking GC runs.
+ uint64_t blocking_gc_time_;
+ // The duration of the window for the GC count rate histograms.
+ static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000); // 10s.
+ // The last time when the GC count rate histograms were updated.
+ // This is rounded down to a multiple of kGcCountRateHistogramWindowDuration (i.e. 10s).
+ uint64_t last_update_time_gc_count_rate_histograms_;
+ // The running count of GC runs in the last window.
+ uint64_t gc_count_last_window_;
+ // The running count of blocking GC runs in the last window.
+ uint64_t blocking_gc_count_last_window_;
+ // The maximum number of buckets in the GC count rate histograms.
+ static constexpr size_t kGcCountRateMaxBucketCount = 200;
+ // The histogram of the number of GC invocations per window duration.
+ Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+ // The histogram of the number of blocking GC invocations per window duration.
+ Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::MarkCompact;
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index 2ca4b3f922..ef34c68cc4 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -22,7 +22,8 @@ namespace art {
namespace gc {
TaskProcessor::TaskProcessor()
- : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false) {
+ : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false),
+ running_thread_(nullptr) {
// Piggyback off the reference processor lock level.
cond_.reset(new ConditionVariable("Task processor condition", *lock_));
}
@@ -96,15 +97,22 @@ bool TaskProcessor::IsRunning() const {
return is_running_;
}
+Thread* TaskProcessor::GetRunningThread() const {
+ MutexLock mu(Thread::Current(), *lock_);
+ return running_thread_;
+}
+
void TaskProcessor::Stop(Thread* self) {
MutexLock mu(self, *lock_);
is_running_ = false;
+ running_thread_ = nullptr;
cond_->Broadcast(self);
}
void TaskProcessor::Start(Thread* self) {
MutexLock mu(self, *lock_);
is_running_ = true;
+ running_thread_ = self;
}
void TaskProcessor::RunAllTasks(Thread* self) {
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 765f03557e..67e3a549a9 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -63,6 +63,7 @@ class TaskProcessor {
bool IsRunning() const LOCKS_EXCLUDED(lock_);
void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time)
LOCKS_EXCLUDED(lock_);
+ Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_);
private:
class CompareByTargetRunTime {
@@ -76,6 +77,7 @@ class TaskProcessor {
bool is_running_ GUARDED_BY(lock_);
std::unique_ptr<ConditionVariable> cond_ GUARDED_BY(lock_);
std::multiset<HeapTask*, CompareByTargetRunTime> tasks_ GUARDED_BY(lock_);
+ Thread* running_thread_ GUARDED_BY(lock_);
};
} // namespace gc
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 2724d91f6b..876e29aa77 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -19,6 +19,9 @@
#include <string.h>
#include <unistd.h>
+#include <sstream>
+
+#include "base/histogram-inl.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
@@ -329,6 +332,123 @@ static void VMDebug_getHeapSpaceStats(JNIEnv* env, jclass, jlongArray data) {
env->ReleasePrimitiveArrayCritical(data, arr, 0);
}
+// The runtime stat names for VMDebug.getRuntimeStat().
+enum class VMDebugRuntimeStatId {
+ kArtGcGcCount = 0,
+ kArtGcGcTime,
+ kArtGcBytesAllocated,
+ kArtGcBytesFreed,
+ kArtGcBlockingGcCount,
+ kArtGcBlockingGcTime,
+ kArtGcGcCountRateHistogram,
+ kArtGcBlockingGcCountRateHistogram,
+ kNumRuntimeStats,
+};
+
+static jobject VMDebug_getRuntimeStatInternal(JNIEnv* env, jclass, jint statId) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ switch (static_cast<VMDebugRuntimeStatId>(statId)) {
+ case VMDebugRuntimeStatId::kArtGcGcCount: {
+ std::string output = std::to_string(heap->GetGcCount());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcGcTime: {
+ std::string output = std::to_string(NsToMs(heap->GetGcTime()));
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBytesAllocated: {
+ std::string output = std::to_string(heap->GetBytesAllocatedEver());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBytesFreed: {
+ std::string output = std::to_string(heap->GetBytesFreedEver());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcCount: {
+ std::string output = std::to_string(heap->GetBlockingGcCount());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcTime: {
+ std::string output = std::to_string(NsToMs(heap->GetBlockingGcTime()));
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcGcCountRateHistogram: {
+ std::ostringstream output;
+ heap->DumpGcCountRateHistogram(output);
+ return env->NewStringUTF(output.str().c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram: {
+ std::ostringstream output;
+ heap->DumpBlockingGcCountRateHistogram(output);
+ return env->NewStringUTF(output.str().c_str());
+ }
+ default:
+ return nullptr;
+ }
+}
+
+static bool SetRuntimeStatValue(JNIEnv* env, jobjectArray result, VMDebugRuntimeStatId id,
+ std::string value) {
+ ScopedLocalRef<jstring> jvalue(env, env->NewStringUTF(value.c_str()));
+ if (jvalue.get() == nullptr) {
+ return false;
+ }
+ env->SetObjectArrayElement(result, static_cast<jint>(id), jvalue.get());
+ return true;
+}
+
+static jobjectArray VMDebug_getRuntimeStatsInternal(JNIEnv* env, jclass) {
+ jobjectArray result = env->NewObjectArray(
+ static_cast<jint>(VMDebugRuntimeStatId::kNumRuntimeStats),
+ WellKnownClasses::java_lang_String,
+ nullptr);
+ if (result == nullptr) {
+ return nullptr;
+ }
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCount,
+ std::to_string(heap->GetGcCount()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcTime,
+ std::to_string(NsToMs(heap->GetGcTime())))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesAllocated,
+ std::to_string(heap->GetBytesAllocatedEver()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesFreed,
+ std::to_string(heap->GetBytesFreedEver()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCount,
+ std::to_string(heap->GetBlockingGcCount()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcTime,
+ std::to_string(NsToMs(heap->GetBlockingGcTime())))) {
+ return nullptr;
+ }
+ {
+ std::ostringstream output;
+ heap->DumpGcCountRateHistogram(output);
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCountRateHistogram,
+ output.str())) {
+ return nullptr;
+ }
+ }
+ {
+ std::ostringstream output;
+ heap->DumpBlockingGcCountRateHistogram(output);
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram,
+ output.str())) {
+ return nullptr;
+ }
+ }
+ return result;
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
@@ -359,6 +479,8 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, stopInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, stopMethodTracing, "()V"),
NATIVE_METHOD(VMDebug, threadCpuTimeNanos, "!()J"),
+ NATIVE_METHOD(VMDebug, getRuntimeStatInternal, "(I)Ljava/lang/String;"),
+ NATIVE_METHOD(VMDebug, getRuntimeStatsInternal, "()[Ljava/lang/String;")
};
void register_dalvik_system_VMDebug(JNIEnv* env) {
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index 4d781c3fc2..a8db0699ee 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -17,6 +17,7 @@
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
+import java.util.Map;
public class Main {
public static void main(String[] args) throws Exception {
@@ -26,6 +27,7 @@ public class Main {
return;
}
testMethodTracing();
+ testRuntimeStat();
}
private static File createTempFile() throws Exception {
@@ -109,10 +111,108 @@ public class Main {
tempFile.delete();
}
+ private static void checkNumber(String s) throws Exception {
+ if (s == null) {
+ System.out.println("Got null string");
+ return;
+ }
+ long n = Long.valueOf(s);
+ if (n < 0) {
+ System.out.println("Got negative number " + n);
+ }
+ }
+
+ private static void checkHistogram(String s) throws Exception {
+ if (s == null || s.length() == 0) {
+ System.out.println("Got null or empty string");
+ return;
+ }
+ String[] buckets = s.split(",");
+ long last_key = 0;
+ for (int i = 0; i < buckets.length; ++i) {
+ String bucket = buckets[i];
+ if (bucket.length() == 0) {
+ System.out.println("Got empty bucket");
+ continue;
+ }
+ String[] kv = bucket.split(":");
+ if (kv.length != 2 || kv[0].length() == 0 || kv[1].length() == 0) {
+ System.out.println("Got bad bucket " + bucket);
+ continue;
+ }
+ long key = Long.valueOf(kv[0]);
+ long value = Long.valueOf(kv[1]);
+ if (key < 0 || value < 0) {
+ System.out.println("Got negative key or value " + bucket);
+ continue;
+ }
+ if (key < last_key) {
+ System.out.println("Got decreasing key " + bucket);
+ continue;
+ }
+ last_key = key;
+ }
+ }
+
+ private static void testRuntimeStat() throws Exception {
+ // Invoke at least one GC and wait for 20 seconds or so, so that we
+ // get at least one bucket in the histograms.
+ for (int i = 0; i < 20; ++i) {
+ Runtime.getRuntime().gc();
+ Thread.sleep(1000L);
+ }
+ String gc_count = VMDebug.getRuntimeStat("art.gc.gc-count");
+ String gc_time = VMDebug.getRuntimeStat("art.gc.gc-time");
+ String bytes_allocated = VMDebug.getRuntimeStat("art.gc.bytes-allocated");
+ String bytes_freed = VMDebug.getRuntimeStat("art.gc.bytes-freed");
+ String blocking_gc_count = VMDebug.getRuntimeStat("art.gc.blocking-gc-count");
+ String blocking_gc_time = VMDebug.getRuntimeStat("art.gc.blocking-gc-time");
+ String gc_count_rate_histogram = VMDebug.getRuntimeStat("art.gc.gc-count-rate-histogram");
+ String blocking_gc_count_rate_histogram =
+ VMDebug.getRuntimeStat("art.gc.blocking-gc-count-rate-histogram");
+ checkNumber(gc_count);
+ checkNumber(gc_time);
+ checkNumber(bytes_allocated);
+ checkNumber(bytes_freed);
+ checkNumber(blocking_gc_count);
+ checkNumber(blocking_gc_time);
+ checkHistogram(gc_count_rate_histogram);
+ checkHistogram(blocking_gc_count_rate_histogram);
+ }
+
+ private static void testRuntimeStats() throws Exception {
+ // Invoke at least one GC and wait for 20 seconds or so, so that we
+ // get at least one bucket in the histograms.
+ for (int i = 0; i < 20; ++i) {
+ Runtime.getRuntime().gc();
+ Thread.sleep(1000L);
+ }
+ Map<String, String> map = VMDebug.getRuntimeStats();
+ String gc_count = map.get("art.gc.gc-count");
+ String gc_time = map.get("art.gc.gc-time");
+ String bytes_allocated = map.get("art.gc.bytes-allocated");
+ String bytes_freed = map.get("art.gc.bytes-freed");
+ String blocking_gc_count = map.get("art.gc.blocking-gc-count");
+ String blocking_gc_time = map.get("art.gc.blocking-gc-time");
+ String gc_count_rate_histogram = map.get("art.gc.gc-count-rate-histogram");
+ String blocking_gc_count_rate_histogram =
+ map.get("art.gc.blocking-gc-count-rate-histogram");
+ checkNumber(gc_count);
+ checkNumber(gc_time);
+ checkNumber(bytes_allocated);
+ checkNumber(bytes_freed);
+ checkNumber(blocking_gc_count);
+ checkNumber(blocking_gc_time);
+ checkHistogram(gc_count_rate_histogram);
+ checkHistogram(blocking_gc_count_rate_histogram);
+ }
+
private static class VMDebug {
private static final Method startMethodTracingMethod;
private static final Method stopMethodTracingMethod;
private static final Method getMethodTracingModeMethod;
+ private static final Method getRuntimeStatMethod;
+ private static final Method getRuntimeStatsMethod;
static {
try {
Class c = Class.forName("dalvik.system.VMDebug");
@@ -120,6 +220,8 @@ public class Main {
Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
+ getRuntimeStatMethod = c.getDeclaredMethod("getRuntimeStat", String.class);
+ getRuntimeStatsMethod = c.getDeclaredMethod("getRuntimeStats");
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -136,5 +238,11 @@ public class Main {
public static int getMethodTracingMode() throws Exception {
return (int) getMethodTracingModeMethod.invoke(null);
}
+ public static String getRuntimeStat(String statName) throws Exception {
+ return (String) getRuntimeStatMethod.invoke(null, statName);
+ }
+ public static Map<String, String> getRuntimeStats() throws Exception {
+ return (Map<String, String>) getRuntimeStatsMethod.invoke(null);
+ }
}
}