-rw-r--r--  runtime/thread.cc        | 13
-rw-r--r--  runtime/thread.h         |  3
-rw-r--r--  runtime/trace.cc         | 85
-rw-r--r--  runtime/trace.h          | 19
-rw-r--r--  runtime/trace_common.h   | 40
-rw-r--r--  runtime/trace_profile.cc |  6
6 files changed, 100 insertions(+), 66 deletions(-)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 50f6bfc499..89465979da 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1485,12 +1485,21 @@ void Thread::GetThreadName(std::string& name) const {
 
 uint64_t Thread::GetCpuMicroTime() const {
 #if defined(__linux__)
+  return Thread::GetCpuNanoTime() / 1000;
+#else  // __APPLE__
+  UNIMPLEMENTED(WARNING);
+  return -1;
+#endif
+}
+
+uint64_t Thread::GetCpuNanoTime() const {
+#if defined(__linux__)
   clockid_t cpu_clock_id;
   pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
   timespec now;
   clock_gettime(cpu_clock_id, &now);
-  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) +
-         static_cast<uint64_t>(now.tv_nsec) / UINT64_C(1000);
+  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) +
+         static_cast<uint64_t>(now.tv_nsec);
 #else  // __APPLE__
   UNIMPLEMENTED(WARNING);
   return -1;
diff --git a/runtime/thread.h b/runtime/thread.h
index be4ffd0bc2..4a675d4c99 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -643,6 +643,9 @@ class EXPORT Thread {
   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
   uint64_t GetCpuMicroTime() const;
 
+  // Returns the thread-specific CPU-time clock in nanoseconds or -1 if unavailable.
+  uint64_t GetCpuNanoTime() const;
+
   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
     CHECK(tlsPtr_.jpeer == nullptr);
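Note: GetCpuNanoTime keeps the existing pthread_getcpuclockid + clock_gettime pattern and only stops dividing down to microseconds. A minimal standalone sketch of that per-thread CPU clock read, assuming a POSIX system (the helper name and main below are illustrative, not ART code):

// Sketch: read a thread's CPU-time clock at nanosecond precision, as
// Thread::GetCpuNanoTime now does. CpuNanoTimeOf is a hypothetical helper.
#include <pthread.h>
#include <time.h>
#include <cstdint>
#include <cstdio>

uint64_t CpuNanoTimeOf(pthread_t thread) {
  clockid_t cpu_clock_id;
  if (pthread_getcpuclockid(thread, &cpu_clock_id) != 0) {
    return static_cast<uint64_t>(-1);  // Clock unavailable.
  }
  timespec now;
  clock_gettime(cpu_clock_id, &now);
  // Keep full nanosecond precision; the old microsecond version divided
  // tv_nsec by 1000, discarding sub-microsecond resolution.
  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) +
         static_cast<uint64_t>(now.tv_nsec);
}

int main() {
  printf("self cpu time: %llu ns\n",
         static_cast<unsigned long long>(CpuNanoTimeOf(pthread_self())));
  return 0;
}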
diff --git a/runtime/trace.cc b/runtime/trace.cc
index c08a13b6dc..e003c6b3a5 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -61,8 +61,8 @@ namespace art HIDDEN {
 struct MethodTraceRecord {
   ArtMethod* method;
   TraceAction action;
-  uint32_t wall_clock_time;
-  uint32_t thread_cpu_time;
+  uint64_t wall_clock_time;
+  uint64_t thread_cpu_time;
 };
 
 using android::base::StringPrintf;
@@ -95,7 +95,7 @@ static constexpr size_t kScalingFactorEncodedEntries = 6;
 // The key identifying the tracer to update instrumentation.
 static constexpr const char* kTracerInstrumentationKey = "Tracer";
 
-double TimestampCounter::tsc_to_microsec_scaling_factor = -1;
+double TimestampCounter::tsc_to_nanosec_scaling_factor = -1;
 
 Trace* Trace::the_trace_ = nullptr;
 pthread_t Trace::sampling_pthread_ = 0U;
@@ -288,7 +288,7 @@ bool UseFastTraceListeners(TraceClockSource clock_source) {
 
 void Trace::MeasureClockOverhead() {
   if (UseThreadCpuClock(clock_source_)) {
-    Thread::Current()->GetCpuMicroTime();
+    Thread::Current()->GetCpuNanoTime();
  }
   if (UseWallClock(clock_source_)) {
     TimestampCounter::GetTimestamp();
@@ -296,11 +296,12 @@ void Trace::MeasureClockOverhead() {
 }
 
 // Compute an average time taken to measure clocks.
-uint32_t Trace::GetClockOverheadNanoSeconds() {
+uint64_t Trace::GetClockOverheadNanoSeconds() {
   Thread* self = Thread::Current();
-  uint64_t start = self->GetCpuMicroTime();
+  uint64_t start = self->GetCpuNanoTime();
 
-  for (int i = 4000; i > 0; i--) {
+  const uint64_t numIter = 4000;
+  for (int i = numIter; i > 0; i--) {
     MeasureClockOverhead();
     MeasureClockOverhead();
     MeasureClockOverhead();
@@ -311,8 +312,8 @@ uint32_t Trace::GetClockOverheadNanoSeconds() {
     MeasureClockOverhead();
   }
 
-  uint64_t elapsed_us = self->GetCpuMicroTime() - start;
-  return static_cast<uint32_t>(elapsed_us / 32);
+  uint64_t elapsed_ns = self->GetCpuNanoTime() - start;
+  return elapsed_ns / (numIter * 8);
 }
 
 static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -347,7 +348,7 @@ void Trace::CompareAndUpdateStackTrace(Thread* thread,
   // Update the thread's stack trace sample.
   thread->SetStackTraceSample(stack_trace);
   // Read timer clocks to use for all events in this trace.
-  uint32_t thread_clock_diff = 0;
+  uint64_t thread_clock_diff = 0;
   uint64_t timestamp_counter = 0;
   ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
   if (old_stack_trace == nullptr) {
@@ -896,14 +897,14 @@ TraceWriter::TraceWriter(File* trace_file,
                          size_t buffer_size,
                          int num_trace_buffers,
                          int trace_format_version,
-                         uint32_t clock_overhead_ns)
+                         uint64_t clock_overhead_ns)
     : trace_file_(trace_file),
       trace_output_mode_(output_mode),
       clock_source_(clock_source),
       buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()),
       buffer_size_(std::max(kMinBufSize, buffer_size)),
       trace_format_version_(trace_format_version),
-      start_time_(TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp())),
+      start_time_(TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp())),
       overflow_(false),
       num_records_(0),
       clock_overhead_ns_(clock_overhead_ns),
@@ -920,8 +921,7 @@ TraceWriter::TraceWriter(File* trace_file,
   // fetches the monotonic timer from other places and matches these times to
   // construct a cpu profile. See b/318052824 for more context.
   uint64_t start_time_monotonic =
-      start_time_ +
-      (MicroTime() - TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp()));
+      start_time_ + (NanoTime() - TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp()));
   uint16_t trace_version = GetTraceVersion(clock_source_, trace_format_version_);
   if (output_mode == TraceOutputMode::kStreaming) {
     trace_version |= 0xF0U;
@@ -933,7 +933,8 @@ TraceWriter::TraceWriter(File* trace_file,
   Append4LE(buf_.get(), kTraceMagicValue);
   Append2LE(buf_.get() + 4, trace_version);
   Append2LE(buf_.get() + 6, kTraceHeaderLength);
-  Append8LE(buf_.get() + 8, start_time_monotonic);
+  // Use microsecond precision for V1 format.
+  Append8LE(buf_.get() + 8, (start_time_monotonic / 1000));
   if (trace_version >= kTraceVersionDualClock) {
     uint16_t record_size = GetRecordSize(clock_source_, trace_format_version_);
     Append2LE(buf_.get() + 16, record_size);
@@ -1004,7 +1005,7 @@ Trace::Trace(File* trace_file,
 std::string TraceWriter::CreateSummary(int flags) {
   std::ostringstream os;
   // Compute elapsed time.
-  uint64_t elapsed = TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp()) - start_time_;
+  uint64_t elapsed = TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp()) - start_time_;
   os << StringPrintf("%cversion\n", kTraceTokenChar);
   os << StringPrintf("%d\n", GetTraceVersion(clock_source_, trace_format_version_));
   os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
@@ -1017,11 +1018,15 @@ std::string TraceWriter::CreateSummary(int flags) {
   } else {
     os << StringPrintf("clock=wall\n");
   }
-  os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
+  if (trace_format_version_ == Trace::kFormatV1) {
+    os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed / 1000);
+  } else {
+    os << StringPrintf("elapsed-time-nsec=%" PRIu64 "\n", elapsed);
+  }
   if (trace_output_mode_ != TraceOutputMode::kStreaming) {
     os << StringPrintf("num-method-calls=%zd\n", num_records_);
   }
-  os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
+  os << StringPrintf("clock-call-overhead-nsec=%" PRIu64 "\n", clock_overhead_ns_);
   os << StringPrintf("vm=art\n");
   os << StringPrintf("pid=%d\n", getpid());
   if ((flags & Trace::kTraceCountAllocs) != 0) {
@@ -1173,7 +1178,7 @@ void Trace::FieldWritten([[maybe_unused]] Thread* thread,
 }
 
 void Trace::MethodEntered(Thread* thread, ArtMethod* method) {
-  uint32_t thread_clock_diff = 0;
+  uint64_t thread_clock_diff = 0;
   uint64_t timestamp_counter = 0;
   ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
   LogMethodTraceEvent(thread, method, kTraceMethodEnter, thread_clock_diff, timestamp_counter);
@@ -1183,14 +1188,14 @@ void Trace::MethodExited(Thread* thread,
                          ArtMethod* method,
                          [[maybe_unused]] instrumentation::OptionalFrame frame,
                          [[maybe_unused]] JValue& return_value) {
-  uint32_t thread_clock_diff = 0;
+  uint64_t thread_clock_diff = 0;
   uint64_t timestamp_counter = 0;
   ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
   LogMethodTraceEvent(thread, method, kTraceMethodExit, thread_clock_diff, timestamp_counter);
 }
 
 void Trace::MethodUnwind(Thread* thread, ArtMethod* method, [[maybe_unused]] uint32_t dex_pc) {
-  uint32_t thread_clock_diff = 0;
+  uint64_t thread_clock_diff = 0;
   uint64_t timestamp_counter = 0;
   ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
   LogMethodTraceEvent(thread, method, kTraceUnroll, thread_clock_diff, timestamp_counter);
@@ -1219,15 +1224,15 @@ void Trace::WatchedFramePop([[maybe_unused]] Thread* self,
   LOG(ERROR) << "Unexpected WatchedFramePop event in tracing";
 }
 
-void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint64_t* timestamp_counter) {
+void Trace::ReadClocks(Thread* thread, uint64_t* thread_clock_diff, uint64_t* timestamp_counter) {
   if (UseThreadCpuClock(clock_source_)) {
     uint64_t clock_base = thread->GetTraceClockBase();
     if (UNLIKELY(clock_base == 0)) {
       // First event, record the base time in the map.
-      uint64_t time = thread->GetCpuMicroTime();
+      uint64_t time = thread->GetCpuNanoTime();
       thread->SetTraceClockBase(time);
     } else {
-      *thread_clock_diff = thread->GetCpuMicroTime() - clock_base;
+      *thread_clock_diff = thread->GetCpuNanoTime() - clock_base;
     }
   }
   if (UseWallClock(clock_source_)) {
@@ -1542,6 +1547,11 @@ void TraceWriter::ReadValuesFromRecord(uintptr_t* method_trace_entries,
   record.wall_clock_time = 0;
   if (has_thread_cpu_clock) {
     record.thread_cpu_time = method_trace_entries[record_index++];
+    if (art::kRuntimePointerSize == PointerSize::k32) {
+      // On 32-bit architectures the thread cpu time is stored as two 32-bit values.
+      uint64_t high_bits = method_trace_entries[record_index++];
+      record.thread_cpu_time = (high_bits << 32 | record.thread_cpu_time);
+    }
   }
   if (has_wall_clock) {
     uint64_t timestamp = method_trace_entries[record_index++];
@@ -1550,7 +1560,7 @@ void TraceWriter::ReadValuesFromRecord(uintptr_t* method_trace_entries,
       uint64_t high_timestamp = method_trace_entries[record_index++];
       timestamp = (high_timestamp << 32 | timestamp);
     }
-    record.wall_clock_time = TimestampCounter::GetMicroTime(timestamp) - start_time_;
+    record.wall_clock_time = TimestampCounter::GetNanoTime(timestamp) - start_time_;
   }
 }
@@ -1635,8 +1645,8 @@ size_t TraceWriter::FlushEntriesFormatV2(uintptr_t* method_trace_entries,
   bool has_thread_cpu_clock = UseThreadCpuClock(clock_source_);
   bool has_wall_clock = UseWallClock(clock_source_);
   size_t num_entries = GetNumEntries(clock_source_);
-  uint32_t prev_wall_timestamp = 0;
-  uint32_t prev_thread_timestamp = 0;
+  uint64_t prev_wall_timestamp = 0;
+  uint64_t prev_thread_timestamp = 0;
   uint64_t prev_method_action_encoding = 0;
   size_t entry_index = kPerThreadBufSize;
   size_t curr_record_index = 0;
@@ -1727,7 +1737,7 @@ void TraceWriter::FlushBuffer(uintptr_t* method_trace_entries,
 void Trace::LogMethodTraceEvent(Thread* thread,
                                 ArtMethod* method,
                                 TraceAction action,
-                                uint32_t thread_clock_diff,
+                                uint64_t thread_clock_diff,
                                 uint64_t timestamp_counter) {
   // This method is called in both tracing modes (method and sampling). In sampling mode, this
   // method is only called by the sampling thread. In method tracing mode, it can be called
@@ -1770,7 +1780,13 @@ void Trace::LogMethodTraceEvent(Thread* thread,
   method = method->GetNonObsoleteMethod();
   current_entry[entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
   if (UseThreadCpuClock(clock_source_)) {
-    current_entry[entry_index++] = thread_clock_diff;
+    if (art::kRuntimePointerSize == PointerSize::k32) {
+      // On 32-bit architectures store the thread cpu time as two 32-bit values.
+      current_entry[entry_index++] = static_cast<uint32_t>(thread_clock_diff);
+      current_entry[entry_index++] = thread_clock_diff >> 32;
+    } else {
+      current_entry[entry_index++] = thread_clock_diff;
+    }
   }
   if (UseWallClock(clock_source_)) {
     if (art::kRuntimePointerSize == PointerSize::k32) {
@@ -1787,8 +1803,8 @@ void TraceWriter::EncodeEventEntry(uint8_t* ptr,
                                    uint16_t thread_id,
                                    uint32_t method_index,
                                    TraceAction action,
-                                   uint32_t thread_clock_diff,
-                                   uint32_t wall_clock_diff) {
+                                   uint64_t thread_clock_diff,
+                                   uint64_t wall_clock_diff) {
   static constexpr size_t kPacketSize = 14U;  // The maximum size of data in a packet.
   DCHECK(method_index < (1 << (32 - TraceActionBits)));
   uint32_t method_value = (method_index << TraceActionBits) | action;
@@ -1796,12 +1812,15 @@ void TraceWriter::EncodeEventEntry(uint8_t* ptr,
   Append4LE(ptr + 2, method_value);
   ptr += 6;
+  static constexpr uint64_t ns_to_us = 1000;
+  uint32_t thread_clock_diff_us = thread_clock_diff / ns_to_us;
+  uint32_t wall_clock_diff_us = wall_clock_diff / ns_to_us;
   if (UseThreadCpuClock(clock_source_)) {
-    Append4LE(ptr, thread_clock_diff);
+    Append4LE(ptr, thread_clock_diff_us);
     ptr += 4;
   }
   if (UseWallClock(clock_source_)) {
-    Append4LE(ptr, wall_clock_diff);
+    Append4LE(ptr, wall_clock_diff_us);
   }
   static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");
 }
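Note on GetClockOverheadNanoSeconds above: it amortizes one clock read over 4000 loop iterations of 8 MeasureClockOverhead calls each, so dividing the elapsed nanoseconds by numIter * 8 yields nanoseconds per call (the old code divided microseconds by 32 to the same effect). A standalone sketch of that batched-timing idea, using std::chrono in place of the runtime clocks (all names below are illustrative):

// Sketch: estimate the per-call cost of a clock read by timing a large
// batch and dividing by the total number of calls.
#include <chrono>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t kIterations = 4000;
  constexpr uint64_t kCallsPerIteration = 8;
  auto start = std::chrono::steady_clock::now();
  for (uint64_t i = 0; i < kIterations; i++) {
    for (uint64_t j = 0; j < kCallsPerIteration; j++) {
      // The measured operation: one clock read.
      (void)std::chrono::steady_clock::now();
    }
  }
  auto elapsed_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
                        std::chrono::steady_clock::now() - start)
                        .count();
  printf("~%llu ns per clock read\n",
         static_cast<unsigned long long>(
             elapsed_ns / (kIterations * kCallsPerIteration)));
  return 0;
}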
diff --git a/runtime/trace.h b/runtime/trace.h
index b3b9f75c7f..952c996d63 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -111,7 +111,10 @@ enum class TraceOutputMode {
 // We need 3 entries to store 64-bit timestamp counter as two 32-bit values on 32-bit architectures.
 static constexpr uint32_t kNumEntriesForWallClock =
     (kRuntimePointerSize == PointerSize::k64) ? 2 : 3;
-static constexpr uint32_t kNumEntriesForDualClock = kNumEntriesForWallClock + 1;
+// Timestamps are stored as two 32-bit values on 32-bit architectures.
+static constexpr uint32_t kNumEntriesForDualClock = (kRuntimePointerSize == PointerSize::k64)
+    ? kNumEntriesForWallClock + 1
+    : kNumEntriesForWallClock + 2;
 
 // These define offsets in bytes for the individual fields of a trace entry. These are used by the
 // JITed code when storing a trace entry.
@@ -203,7 +206,7 @@ class TraceWriter {
               size_t buffer_size,
               int num_trace_buffers,
               int trace_format_version,
-              uint32_t clock_overhead_ns);
+              uint64_t clock_overhead_ns);
 
   // This encodes all the events in the per-thread trace buffer and writes it to the trace file /
   // buffer. This acquires streaming lock to prevent any other threads writing concurrently. It is
@@ -341,8 +344,8 @@ class TraceWriter {
                         uint16_t thread_id,
                         uint32_t method_index,
                         TraceAction action,
-                        uint32_t thread_clock_diff,
-                        uint32_t wall_clock_diff) REQUIRES(trace_writer_lock_);
+                        uint64_t thread_clock_diff,
+                        uint64_t wall_clock_diff) REQUIRES(trace_writer_lock_);
 
   // Encodes the header for the events block. This assumes that there is enough space reserved to
   // encode the entry.
@@ -416,7 +419,7 @@ class TraceWriter {
   size_t num_records_;
 
   // Clock overhead.
-  const uint32_t clock_overhead_ns_;
+  const uint64_t clock_overhead_ns_;
 
   std::vector<std::atomic<size_t>> owner_tids_;
   std::unique_ptr<uintptr_t[]> trace_buffer_;
@@ -518,7 +521,7 @@ class Trace final : public instrumentation::InstrumentationListener, public ClassLoadCallback {
   static void RemoveListeners() REQUIRES(Locks::mutator_lock_);
 
   void MeasureClockOverhead();
-  uint32_t GetClockOverheadNanoSeconds();
+  uint64_t GetClockOverheadNanoSeconds();
 
   void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -604,12 +607,12 @@ class Trace final : public instrumentation::InstrumentationListener, public ClassLoadCallback {
   // how to annotate this.
       NO_THREAD_SAFETY_ANALYSIS;
 
-  void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint64_t* timestamp_counter);
+  void ReadClocks(Thread* thread, uint64_t* thread_clock_diff, uint64_t* timestamp_counter);
 
   void LogMethodTraceEvent(Thread* thread,
                            ArtMethod* method,
                            TraceAction action,
-                           uint32_t thread_clock_diff,
+                           uint64_t thread_clock_diff,
                            uint64_t timestamp_counter) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Singleton instance of the Trace or null when no method tracing is active.
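Note on kNumEntriesForDualClock above: on 32-bit targets a uintptr_t slot cannot hold a 64-bit timestamp, so each clock now costs one extra buffer entry. A standalone sketch of the low-word/high-word convention that LogMethodTraceEvent and ReadValuesFromRecord use (illustrative helpers, not ART code):

// Sketch: store a 64-bit timestamp into word-sized buffer slots, low 32 bits
// first, then the high 32 bits on 32-bit targets; one slot on 64-bit targets.
#include <cassert>
#include <cstddef>
#include <cstdint>

size_t StoreTimestamp(uintptr_t* buf, size_t index, uint64_t value) {
  if (sizeof(uintptr_t) == 4) {
    buf[index++] = static_cast<uint32_t>(value);        // Low bits.
    buf[index++] = static_cast<uint32_t>(value >> 32);  // High bits.
  } else {
    buf[index++] = value;  // Fits in a single 64-bit slot.
  }
  return index;
}

uint64_t LoadTimestamp(const uintptr_t* buf, size_t* index) {
  uint64_t value = buf[(*index)++];
  if (sizeof(uintptr_t) == 4) {
    uint64_t high_bits = buf[(*index)++];
    value = (high_bits << 32) | value;
  }
  return value;
}

int main() {
  uintptr_t buf[4] = {};
  size_t write_index = StoreTimestamp(buf, 0, UINT64_C(0x123456789ABCDEF0));
  size_t read_index = 0;
  uint64_t roundtrip = LoadTimestamp(buf, &read_index);
  // Holds on both 32- and 64-bit hosts: both halves survive the round trip.
  assert(read_index == write_index);
  assert(roundtrip == UINT64_C(0x123456789ABCDEF0));
  return 0;
}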
diff --git a/runtime/trace_common.h b/runtime/trace_common.h
index 759a0bbc49..f29b347462 100644
--- a/runtime/trace_common.h
+++ b/runtime/trace_common.h
@@ -47,7 +47,7 @@ class TimestampCounter {
     // access is disabled only for 32-bit processes even when 64-bit processes can access the
     // timer from user space. These are not reflected in the HWCAP_EVTSTRM capability. So just
     // fallback to clock_gettime on these processes. See b/289178149 for more discussion.
-    t = MicroTime();
+    t = NanoTime();
 #elif defined(__aarch64__)
     // See Arm Architecture Registers Armv8 section System Registers
     asm volatile("mrs %0, cntvct_el0" : "=r"(t));
@@ -59,7 +59,7 @@ class TimestampCounter {
 #elif defined(__riscv)
     asm volatile("rdtime %0" : "=r"(t));
 #else
-    t = MicroTime();
+    t = NanoTime();
 #endif
     return t;
   }
@@ -67,36 +67,36 @@ class TimestampCounter {
   static void InitializeTimestampCounters() {
     // It is sufficient to initialize this once for the entire execution. Just return if it is
     // already initialized.
-    if (tsc_to_microsec_scaling_factor > 0.0) {
+    if (tsc_to_nanosec_scaling_factor > 0.0) {
       return;
     }
 #if defined(__arm__)
     // On ARM 32 bit, we don't always have access to the timestamp counters from
     // user space. See comment in GetTimestamp for more details.
-    tsc_to_microsec_scaling_factor = 1.0;
+    tsc_to_nanosec_scaling_factor = 1.0;
 #elif defined(__aarch64__)
-    double seconds_to_microseconds = 1000 * 1000;
+    double seconds_to_nanoseconds = 1000 * 1000 * 1000;
     uint64_t freq = 0;
     // See Arm Architecture Registers Armv8 section System Registers
     asm volatile("mrs %0, cntfrq_el0" : "=r"(freq));
     if (freq == 0) {
       // It is expected that cntfrq_el0 is correctly setup during system initialization but some
       // devices don't do this. In such cases fall back to computing the frequency. See b/315139000.
-      tsc_to_microsec_scaling_factor = computeScalingFactor();
+      tsc_to_nanosec_scaling_factor = computeScalingFactor();
     } else {
-      tsc_to_microsec_scaling_factor = seconds_to_microseconds / static_cast<double>(freq);
+      tsc_to_nanosec_scaling_factor = seconds_to_nanoseconds / static_cast<double>(freq);
     }
 #elif defined(__i386__) || defined(__x86_64__)
-    tsc_to_microsec_scaling_factor = GetScalingFactorForX86();
+    tsc_to_nanosec_scaling_factor = GetScalingFactorForX86();
 #else
-    tsc_to_microsec_scaling_factor = 1.0;
+    tsc_to_nanosec_scaling_factor = 1.0;
 #endif
   }
 
-  static ALWAYS_INLINE uint64_t GetMicroTime(uint64_t counter) {
-    DCHECK(tsc_to_microsec_scaling_factor > 0.0) << tsc_to_microsec_scaling_factor;
-    return tsc_to_microsec_scaling_factor * counter;
+  static ALWAYS_INLINE uint64_t GetNanoTime(uint64_t counter) {
+    DCHECK(tsc_to_nanosec_scaling_factor > 0.0) << tsc_to_nanosec_scaling_factor;
+    return tsc_to_nanosec_scaling_factor * counter;
   }
 
  private:
@@ -107,12 +107,12 @@ class TimestampCounter {
   // step using these two samples. However, that would require a change in Android Studio which is
   // the main consumer of these profiles. For now, just compute the frequency of tsc updates here.
   static double computeScalingFactor() {
-    uint64_t start = MicroTime();
+    uint64_t start = NanoTime();
     uint64_t start_tsc = GetTimestamp();
     // Sleep for one millisecond.
     usleep(1000);
    uint64_t diff_tsc = GetTimestamp() - start_tsc;
-    uint64_t diff_time = MicroTime() - start;
+    uint64_t diff_time = NanoTime() - start;
     double scaling_factor = static_cast<double>(diff_time) / diff_tsc;
     DCHECK(scaling_factor > 0.0) << scaling_factor;
     return scaling_factor;
@@ -150,18 +150,18 @@ class TimestampCounter {
     }
     double coreCrystalFreq = ecx;
     // frequency = coreCrystalFreq * (ebx / eax)
-    // scaling_factor = seconds_to_microseconds / frequency
-    //                = seconds_to_microseconds * eax / (coreCrystalFreq * ebx)
-    double seconds_to_microseconds = 1000 * 1000;
-    double scaling_factor = (seconds_to_microseconds * eax) / (coreCrystalFreq * ebx);
+    // scaling_factor = seconds_to_nanoseconds / frequency
+    //                = seconds_to_nanoseconds * eax / (coreCrystalFreq * ebx)
+    double seconds_to_nanoseconds = 1000 * 1000 * 1000;
+    double scaling_factor = (seconds_to_nanoseconds * eax) / (coreCrystalFreq * ebx);
     return scaling_factor;
   }
 #endif
 
-  // Scaling factor to convert timestamp counter into wall clock time reported in micro seconds.
+  // Scaling factor to convert the timestamp counter into wall clock time reported in nanoseconds.
   // This is initialized at the start of tracing using the timestamp counter update frequency.
   // See InitializeTimestampCounters for more details.
-  static double tsc_to_microsec_scaling_factor;
+  static double tsc_to_nanosec_scaling_factor;
 };
 
 }  // namespace art
diff --git a/runtime/trace_profile.cc b/runtime/trace_profile.cc
index d0d234c476..19e0a56bcc 100644
--- a/runtime/trace_profile.cc
+++ b/runtime/trace_profile.cc
@@ -140,7 +140,7 @@ void RecordMethodsOnThreadStack(Thread* thread, uintptr_t* method_trace_buffer)
   visitor.WalkStack(true);
 
   // Create method entry events for all methods currently on the thread's stack.
-  uint64_t init_time = TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp());
+  uint64_t init_time = TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp());
   // Set the lsb to 0 to indicate method entry.
   init_time = init_time & ~1;
   std::ostringstream os;
@@ -492,13 +492,13 @@ void TraceProfiler::DumpLongRunningMethodBuffer(uint32_t thread_id,
       // start of the trace. Just ignore this entry.
     } else if (entry & 0x1) {
       // Method exit
-      os << "<-" << TimestampCounter::GetMicroTime(entry & ~1) << "\n";
+      os << "<-" << TimestampCounter::GetNanoTime(entry & ~1) << "\n";
     } else {
       // Method entry
       ArtMethod* method = reinterpret_cast<ArtMethod*>(entry);
       ptr--;
       CHECK(ptr >= end_trace_entries);
-      os << "->" << method << " " << TimestampCounter::GetMicroTime(*ptr) << "\n";
+      os << "->" << method << " " << TimestampCounter::GetNanoTime(*ptr) << "\n";
       methods.insert(method);
     }
     ptr--;
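Note on computeScalingFactor above: the fallback pairs a raw counter sample with a monotonic clock sample around a 1 ms sleep and takes the ratio, giving nanoseconds per counter tick. The same calibration can be reproduced outside the runtime; a hedged sketch for an x86-64 host with GCC or Clang, using __rdtsc from <x86intrin.h> (the helper names are illustrative):

// Sketch: calibrate a raw-counter-to-nanoseconds factor from two paired
// samples around a short sleep, mirroring computeScalingFactor.
#include <x86intrin.h>  // __rdtsc
#include <time.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>

static uint64_t NowNs() {
  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<uint64_t>(ts.tv_sec) * UINT64_C(1000000000) + ts.tv_nsec;
}

int main() {
  uint64_t start_ns = NowNs();
  uint64_t start_tsc = __rdtsc();
  usleep(1000);  // Sleep for one millisecond.
  uint64_t diff_tsc = __rdtsc() - start_tsc;
  uint64_t diff_ns = NowNs() - start_ns;
  double tsc_to_ns = static_cast<double>(diff_ns) / diff_tsc;
  // ns per tick; 1000 / (ns per tick) gives the counter frequency in MHz.
  printf("scaling factor: %f ns/tick (~%.0f MHz)\n", tsc_to_ns, 1000.0 / tsc_to_ns);
  return 0;
}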