summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--runtime/thread.cc13
-rw-r--r--runtime/thread.h3
-rw-r--r--runtime/trace.cc85
-rw-r--r--runtime/trace.h19
-rw-r--r--runtime/trace_common.h40
-rw-r--r--runtime/trace_profile.cc4
6 files changed, 65 insertions, 99 deletions
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 5fc0ce6790..1c2e95c84d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1485,21 +1485,12 @@ void Thread::GetThreadName(std::string& name) const {
uint64_t Thread::GetCpuMicroTime() const {
#if defined(__linux__)
- return Thread::GetCpuNanoTime() / 1000;
-#else // __APPLE__
- UNIMPLEMENTED(WARNING);
- return -1;
-#endif
-}
-
-uint64_t Thread::GetCpuNanoTime() const {
-#if defined(__linux__)
clockid_t cpu_clock_id;
pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
timespec now;
clock_gettime(cpu_clock_id, &now);
- return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) +
- static_cast<uint64_t>(now.tv_nsec);
+ return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) +
+ static_cast<uint64_t>(now.tv_nsec) / UINT64_C(1000);
#else // __APPLE__
UNIMPLEMENTED(WARNING);
return -1;
diff --git a/runtime/thread.h b/runtime/thread.h
index 4a675d4c99..be4ffd0bc2 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -643,9 +643,6 @@ class EXPORT Thread {
// Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
uint64_t GetCpuMicroTime() const;
- // Returns the thread-specific CPU-time clock in nanoseconds or -1 if unavailable.
- uint64_t GetCpuNanoTime() const;
-
mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
CHECK(tlsPtr_.jpeer == nullptr);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index e003c6b3a5..c08a13b6dc 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -61,8 +61,8 @@ namespace art HIDDEN {
struct MethodTraceRecord {
ArtMethod* method;
TraceAction action;
- uint64_t wall_clock_time;
- uint64_t thread_cpu_time;
+ uint32_t wall_clock_time;
+ uint32_t thread_cpu_time;
};
using android::base::StringPrintf;
@@ -95,7 +95,7 @@ static constexpr size_t kScalingFactorEncodedEntries = 6;
// The key identifying the tracer to update instrumentation.
static constexpr const char* kTracerInstrumentationKey = "Tracer";
-double TimestampCounter::tsc_to_nanosec_scaling_factor = -1;
+double TimestampCounter::tsc_to_microsec_scaling_factor = -1;
Trace* Trace::the_trace_ = nullptr;
pthread_t Trace::sampling_pthread_ = 0U;
@@ -288,7 +288,7 @@ bool UseFastTraceListeners(TraceClockSource clock_source) {
void Trace::MeasureClockOverhead() {
if (UseThreadCpuClock(clock_source_)) {
- Thread::Current()->GetCpuNanoTime();
+ Thread::Current()->GetCpuMicroTime();
}
if (UseWallClock(clock_source_)) {
TimestampCounter::GetTimestamp();
@@ -296,12 +296,11 @@ void Trace::MeasureClockOverhead() {
}
// Compute an average time taken to measure clocks.
-uint64_t Trace::GetClockOverheadNanoSeconds() {
+uint32_t Trace::GetClockOverheadNanoSeconds() {
Thread* self = Thread::Current();
- uint64_t start = self->GetCpuNanoTime();
+ uint64_t start = self->GetCpuMicroTime();
- const uint64_t numIter = 4000;
- for (int i = numIter; i > 0; i--) {
+ for (int i = 4000; i > 0; i--) {
MeasureClockOverhead();
MeasureClockOverhead();
MeasureClockOverhead();
@@ -312,8 +311,8 @@ uint64_t Trace::GetClockOverheadNanoSeconds() {
MeasureClockOverhead();
}
- uint64_t elapsed_ns = self->GetCpuNanoTime() - start;
- return elapsed_ns / (numIter * 8);
+ uint64_t elapsed_us = self->GetCpuMicroTime() - start;
+ return static_cast<uint32_t>(elapsed_us / 32);
}
static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -348,7 +347,7 @@ void Trace::CompareAndUpdateStackTrace(Thread* thread,
// Update the thread's stack trace sample.
thread->SetStackTraceSample(stack_trace);
// Read timer clocks to use for all events in this trace.
- uint64_t thread_clock_diff = 0;
+ uint32_t thread_clock_diff = 0;
uint64_t timestamp_counter = 0;
ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
if (old_stack_trace == nullptr) {
@@ -897,14 +896,14 @@ TraceWriter::TraceWriter(File* trace_file,
size_t buffer_size,
int num_trace_buffers,
int trace_format_version,
- uint64_t clock_overhead_ns)
+ uint32_t clock_overhead_ns)
: trace_file_(trace_file),
trace_output_mode_(output_mode),
clock_source_(clock_source),
buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()),
buffer_size_(std::max(kMinBufSize, buffer_size)),
trace_format_version_(trace_format_version),
- start_time_(TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp())),
+ start_time_(TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp())),
overflow_(false),
num_records_(0),
clock_overhead_ns_(clock_overhead_ns),
@@ -921,7 +920,8 @@ TraceWriter::TraceWriter(File* trace_file,
// fetches the monotonic timer from other places and matches these times to
// construct a cpu profile. See b/318052824 for more context.
uint64_t start_time_monotonic =
- start_time_ + (NanoTime() - TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp()));
+ start_time_ +
+ (MicroTime() - TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp()));
uint16_t trace_version = GetTraceVersion(clock_source_, trace_format_version_);
if (output_mode == TraceOutputMode::kStreaming) {
trace_version |= 0xF0U;
@@ -933,8 +933,7 @@ TraceWriter::TraceWriter(File* trace_file,
Append4LE(buf_.get(), kTraceMagicValue);
Append2LE(buf_.get() + 4, trace_version);
Append2LE(buf_.get() + 6, kTraceHeaderLength);
- // Use microsecond precision for V1 format.
- Append8LE(buf_.get() + 8, (start_time_monotonic / 1000));
+ Append8LE(buf_.get() + 8, start_time_monotonic);
if (trace_version >= kTraceVersionDualClock) {
uint16_t record_size = GetRecordSize(clock_source_, trace_format_version_);
Append2LE(buf_.get() + 16, record_size);
@@ -1005,7 +1004,7 @@ Trace::Trace(File* trace_file,
std::string TraceWriter::CreateSummary(int flags) {
std::ostringstream os;
// Compute elapsed time.
- uint64_t elapsed = TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp()) - start_time_;
+ uint64_t elapsed = TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp()) - start_time_;
os << StringPrintf("%cversion\n", kTraceTokenChar);
os << StringPrintf("%d\n", GetTraceVersion(clock_source_, trace_format_version_));
os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
@@ -1018,15 +1017,11 @@ std::string TraceWriter::CreateSummary(int flags) {
} else {
os << StringPrintf("clock=wall\n");
}
- if (trace_format_version_ == Trace::kFormatV1) {
- os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed / 1000);
- } else {
- os << StringPrintf("elapsed-time-nsec=%" PRIu64 "\n", elapsed);
- }
+ os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
if (trace_output_mode_ != TraceOutputMode::kStreaming) {
os << StringPrintf("num-method-calls=%zd\n", num_records_);
}
- os << StringPrintf("clock-call-overhead-nsec=%" PRIu64 "\n", clock_overhead_ns_);
+ os << StringPrintf("clock-call-overhead-nsec=%u\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
os << StringPrintf("pid=%d\n", getpid());
if ((flags & Trace::kTraceCountAllocs) != 0) {
@@ -1178,7 +1173,7 @@ void Trace::FieldWritten([[maybe_unused]] Thread* thread,
}
void Trace::MethodEntered(Thread* thread, ArtMethod* method) {
- uint64_t thread_clock_diff = 0;
+ uint32_t thread_clock_diff = 0;
uint64_t timestamp_counter = 0;
ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
LogMethodTraceEvent(thread, method, kTraceMethodEnter, thread_clock_diff, timestamp_counter);
@@ -1188,14 +1183,14 @@ void Trace::MethodExited(Thread* thread,
ArtMethod* method,
[[maybe_unused]] instrumentation::OptionalFrame frame,
[[maybe_unused]] JValue& return_value) {
- uint64_t thread_clock_diff = 0;
+ uint32_t thread_clock_diff = 0;
uint64_t timestamp_counter = 0;
ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
LogMethodTraceEvent(thread, method, kTraceMethodExit, thread_clock_diff, timestamp_counter);
}
void Trace::MethodUnwind(Thread* thread, ArtMethod* method, [[maybe_unused]] uint32_t dex_pc) {
- uint64_t thread_clock_diff = 0;
+ uint32_t thread_clock_diff = 0;
uint64_t timestamp_counter = 0;
ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
LogMethodTraceEvent(thread, method, kTraceUnroll, thread_clock_diff, timestamp_counter);
@@ -1224,15 +1219,15 @@ void Trace::WatchedFramePop([[maybe_unused]] Thread* self,
LOG(ERROR) << "Unexpected WatchedFramePop event in tracing";
}
-void Trace::ReadClocks(Thread* thread, uint64_t* thread_clock_diff, uint64_t* timestamp_counter) {
+void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint64_t* timestamp_counter) {
if (UseThreadCpuClock(clock_source_)) {
uint64_t clock_base = thread->GetTraceClockBase();
if (UNLIKELY(clock_base == 0)) {
// First event, record the base time in the map.
- uint64_t time = thread->GetCpuNanoTime();
+ uint64_t time = thread->GetCpuMicroTime();
thread->SetTraceClockBase(time);
} else {
- *thread_clock_diff = thread->GetCpuNanoTime() - clock_base;
+ *thread_clock_diff = thread->GetCpuMicroTime() - clock_base;
}
}
if (UseWallClock(clock_source_)) {
@@ -1547,11 +1542,6 @@ void TraceWriter::ReadValuesFromRecord(uintptr_t* method_trace_entries,
record.wall_clock_time = 0;
if (has_thread_cpu_clock) {
record.thread_cpu_time = method_trace_entries[record_index++];
- if (art::kRuntimePointerSize == PointerSize::k32) {
- // On 32-bit architectures threadcputime is stored as two 32-bit values.
- uint64_t high_bits = method_trace_entries[record_index++];
- record.thread_cpu_time = (high_bits << 32 | record.thread_cpu_time);
- }
}
if (has_wall_clock) {
uint64_t timestamp = method_trace_entries[record_index++];
@@ -1560,7 +1550,7 @@ void TraceWriter::ReadValuesFromRecord(uintptr_t* method_trace_entries,
uint64_t high_timestamp = method_trace_entries[record_index++];
timestamp = (high_timestamp << 32 | timestamp);
}
- record.wall_clock_time = TimestampCounter::GetNanoTime(timestamp) - start_time_;
+ record.wall_clock_time = TimestampCounter::GetMicroTime(timestamp) - start_time_;
}
}
@@ -1645,8 +1635,8 @@ size_t TraceWriter::FlushEntriesFormatV2(uintptr_t* method_trace_entries,
bool has_thread_cpu_clock = UseThreadCpuClock(clock_source_);
bool has_wall_clock = UseWallClock(clock_source_);
size_t num_entries = GetNumEntries(clock_source_);
- uint64_t prev_wall_timestamp = 0;
- uint64_t prev_thread_timestamp = 0;
+ uint32_t prev_wall_timestamp = 0;
+ uint32_t prev_thread_timestamp = 0;
uint64_t prev_method_action_encoding = 0;
size_t entry_index = kPerThreadBufSize;
size_t curr_record_index = 0;
@@ -1737,7 +1727,7 @@ void TraceWriter::FlushBuffer(uintptr_t* method_trace_entries,
void Trace::LogMethodTraceEvent(Thread* thread,
ArtMethod* method,
TraceAction action,
- uint64_t thread_clock_diff,
+ uint32_t thread_clock_diff,
uint64_t timestamp_counter) {
// This method is called in both tracing modes (method and sampling). In sampling mode, this
// method is only called by the sampling thread. In method tracing mode, it can be called
@@ -1780,13 +1770,7 @@ void Trace::LogMethodTraceEvent(Thread* thread,
method = method->GetNonObsoleteMethod();
current_entry[entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
if (UseThreadCpuClock(clock_source_)) {
- if (art::kRuntimePointerSize == PointerSize::k32) {
- // On 32-bit architectures store threadcputimer as two 32-bit values.
- current_entry[entry_index++] = static_cast<uint32_t>(thread_clock_diff);
- current_entry[entry_index++] = thread_clock_diff >> 32;
- } else {
- current_entry[entry_index++] = thread_clock_diff;
- }
+ current_entry[entry_index++] = thread_clock_diff;
}
if (UseWallClock(clock_source_)) {
if (art::kRuntimePointerSize == PointerSize::k32) {
@@ -1803,8 +1787,8 @@ void TraceWriter::EncodeEventEntry(uint8_t* ptr,
uint16_t thread_id,
uint32_t method_index,
TraceAction action,
- uint64_t thread_clock_diff,
- uint64_t wall_clock_diff) {
+ uint32_t thread_clock_diff,
+ uint32_t wall_clock_diff) {
static constexpr size_t kPacketSize = 14U; // The maximum size of data in a packet.
DCHECK(method_index < (1 << (32 - TraceActionBits)));
uint32_t method_value = (method_index << TraceActionBits) | action;
@@ -1812,15 +1796,12 @@ void TraceWriter::EncodeEventEntry(uint8_t* ptr,
Append4LE(ptr + 2, method_value);
ptr += 6;
- static constexpr uint64_t ns_to_us = 1000;
- uint32_t thread_clock_diff_us = thread_clock_diff / ns_to_us;
- uint32_t wall_clock_diff_us = wall_clock_diff / ns_to_us;
if (UseThreadCpuClock(clock_source_)) {
- Append4LE(ptr, thread_clock_diff_us);
+ Append4LE(ptr, thread_clock_diff);
ptr += 4;
}
if (UseWallClock(clock_source_)) {
- Append4LE(ptr, wall_clock_diff_us);
+ Append4LE(ptr, wall_clock_diff);
}
static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");
}
diff --git a/runtime/trace.h b/runtime/trace.h
index ca728e93f9..8f37d8328b 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -108,10 +108,7 @@ enum class TraceOutputMode {
// We need 3 entries to store 64-bit timestamp counter as two 32-bit values on 32-bit architectures.
static constexpr uint32_t kNumEntriesForWallClock =
(kRuntimePointerSize == PointerSize::k64) ? 2 : 3;
-// Timestamps are stored as two 32-bit balues on 32-bit architectures.
-static constexpr uint32_t kNumEntriesForDualClock = (kRuntimePointerSize == PointerSize::k64)
- ? kNumEntriesForWallClock + 1
- : kNumEntriesForWallClock + 2;
+static constexpr uint32_t kNumEntriesForDualClock = kNumEntriesForWallClock + 1;
// These define offsets in bytes for the individual fields of a trace entry. These are used by the
// JITed code when storing a trace entry.
@@ -203,7 +200,7 @@ class TraceWriter {
size_t buffer_size,
int num_trace_buffers,
int trace_format_version,
- uint64_t clock_overhead_ns);
+ uint32_t clock_overhead_ns);
// This encodes all the events in the per-thread trace buffer and writes it to the trace file /
// buffer. This acquires streaming lock to prevent any other threads writing concurrently. It is
@@ -341,8 +338,8 @@ class TraceWriter {
uint16_t thread_id,
uint32_t method_index,
TraceAction action,
- uint64_t thread_clock_diff,
- uint64_t wall_clock_diff) REQUIRES(trace_writer_lock_);
+ uint32_t thread_clock_diff,
+ uint32_t wall_clock_diff) REQUIRES(trace_writer_lock_);
// Encodes the header for the events block. This assumes that there is enough space reserved to
// encode the entry.
@@ -416,7 +413,7 @@ class TraceWriter {
size_t num_records_;
// Clock overhead.
- const uint64_t clock_overhead_ns_;
+ const uint32_t clock_overhead_ns_;
std::vector<std::atomic<size_t>> owner_tids_;
std::unique_ptr<uintptr_t[]> trace_buffer_;
@@ -518,7 +515,7 @@ class Trace final : public instrumentation::InstrumentationListener, public Clas
static void RemoveListeners() REQUIRES(Locks::mutator_lock_);
void MeasureClockOverhead();
- uint64_t GetClockOverheadNanoSeconds();
+ uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -604,12 +601,12 @@ class Trace final : public instrumentation::InstrumentationListener, public Clas
// how to annotate this.
NO_THREAD_SAFETY_ANALYSIS;
- void ReadClocks(Thread* thread, uint64_t* thread_clock_diff, uint64_t* timestamp_counter);
+ void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint64_t* timestamp_counter);
void LogMethodTraceEvent(Thread* thread,
ArtMethod* method,
TraceAction action,
- uint64_t thread_clock_diff,
+ uint32_t thread_clock_diff,
uint64_t timestamp_counter) REQUIRES_SHARED(Locks::mutator_lock_);
// Singleton instance of the Trace or null when no method tracing is active.
diff --git a/runtime/trace_common.h b/runtime/trace_common.h
index f29b347462..759a0bbc49 100644
--- a/runtime/trace_common.h
+++ b/runtime/trace_common.h
@@ -47,7 +47,7 @@ class TimestampCounter {
// access is disabled only for 32-bit processes even when 64-bit processes can accesses the
// timer from user space. These are not reflected in the HWCAP_EVTSTRM capability.So just
// fallback to clock_gettime on these processes. See b/289178149 for more discussion.
- t = NanoTime();
+ t = MicroTime();
#elif defined(__aarch64__)
// See Arm Architecture Registers Armv8 section System Registers
asm volatile("mrs %0, cntvct_el0" : "=r"(t));
@@ -59,7 +59,7 @@ class TimestampCounter {
#elif defined(__riscv)
asm volatile("rdtime %0" : "=r"(t));
#else
- t = NanoTime();
+ t = MicroTime();
#endif
return t;
}
@@ -67,36 +67,36 @@ class TimestampCounter {
static void InitializeTimestampCounters() {
// It is sufficient to initialize this once for the entire execution. Just return if it is
// already initialized.
- if (tsc_to_nanosec_scaling_factor > 0.0) {
+ if (tsc_to_microsec_scaling_factor > 0.0) {
return;
}
#if defined(__arm__)
// On ARM 32 bit, we don't always have access to the timestamp counters from
// user space. Seem comment in GetTimestamp for more details.
- tsc_to_nanosec_scaling_factor = 1.0;
+ tsc_to_microsec_scaling_factor = 1.0;
#elif defined(__aarch64__)
- double seconds_to_nanoseconds = 1000 * 1000;
+ double seconds_to_microseconds = 1000 * 1000;
uint64_t freq = 0;
// See Arm Architecture Registers Armv8 section System Registers
asm volatile("mrs %0, cntfrq_el0" : "=r"(freq));
if (freq == 0) {
// It is expected that cntfrq_el0 is correctly setup during system initialization but some
// devices don't do this. In such cases fall back to computing the frequency. See b/315139000.
- tsc_to_nanosec_scaling_factor = computeScalingFactor();
+ tsc_to_microsec_scaling_factor = computeScalingFactor();
} else {
- tsc_to_nanosec_scaling_factor = seconds_to_nanoseconds / static_cast<double>(freq);
+ tsc_to_microsec_scaling_factor = seconds_to_microseconds / static_cast<double>(freq);
}
#elif defined(__i386__) || defined(__x86_64__)
- tsc_to_nanosec_scaling_factor = GetScalingFactorForX86();
+ tsc_to_microsec_scaling_factor = GetScalingFactorForX86();
#else
- tsc_to_nanosec_scaling_factor = 1.0;
+ tsc_to_microsec_scaling_factor = 1.0;
#endif
}
- static ALWAYS_INLINE uint64_t GetNanoTime(uint64_t counter) {
- DCHECK(tsc_to_nanosec_scaling_factor > 0.0) << tsc_to_nanosec_scaling_factor;
- return tsc_to_nanosec_scaling_factor * counter;
+ static ALWAYS_INLINE uint64_t GetMicroTime(uint64_t counter) {
+ DCHECK(tsc_to_microsec_scaling_factor > 0.0) << tsc_to_microsec_scaling_factor;
+ return tsc_to_microsec_scaling_factor * counter;
}
private:
@@ -107,12 +107,12 @@ class TimestampCounter {
// step using these two samples. However, that would require a change in Android Studio which is
// the main consumer of these profiles. For now, just compute the frequency of tsc updates here.
static double computeScalingFactor() {
- uint64_t start = NanoTime();
+ uint64_t start = MicroTime();
uint64_t start_tsc = GetTimestamp();
// Sleep for one millisecond.
usleep(1000);
uint64_t diff_tsc = GetTimestamp() - start_tsc;
- uint64_t diff_time = NanoTime() - start;
+ uint64_t diff_time = MicroTime() - start;
double scaling_factor = static_cast<double>(diff_time) / diff_tsc;
DCHECK(scaling_factor > 0.0) << scaling_factor;
return scaling_factor;
@@ -150,18 +150,18 @@ class TimestampCounter {
}
double coreCrystalFreq = ecx;
// frequency = coreCrystalFreq * (ebx / eax)
- // scaling_factor = seconds_to_nanoseconds / frequency
- // = seconds_to_nanoseconds * eax / (coreCrystalFreq * ebx)
- double seconds_to_nanoseconds = 1000 * 1000;
- double scaling_factor = (seconds_to_nanoseconds * eax) / (coreCrystalFreq * ebx);
+ // scaling_factor = seconds_to_microseconds / frequency
+ // = seconds_to_microseconds * eax / (coreCrystalFreq * ebx)
+ double seconds_to_microseconds = 1000 * 1000;
+ double scaling_factor = (seconds_to_microseconds * eax) / (coreCrystalFreq * ebx);
return scaling_factor;
}
#endif
- // Scaling factor to convert timestamp counter into wall clock time reported in nano seconds.
+ // Scaling factor to convert timestamp counter into wall clock time reported in microseconds.
// This is initialized at the start of tracing using the timestamp counter update frequency.
// See InitializeTimestampCounters for more details.
- static double tsc_to_nanosec_scaling_factor;
+ static double tsc_to_microsec_scaling_factor;
};
} // namespace art
diff --git a/runtime/trace_profile.cc b/runtime/trace_profile.cc
index 6039cd999e..d903033d40 100644
--- a/runtime/trace_profile.cc
+++ b/runtime/trace_profile.cc
@@ -527,11 +527,11 @@ size_t TraceProfiler::DumpLongRunningMethodBuffer(uint32_t thread_id,
uintptr_t method_ptr;
if (is_method_exit) {
// Method exit. We only have timestamp here.
- event_time = TimestampCounter::GetNanoTime(event & ~0x1);
+ event_time = TimestampCounter::GetMicroTime(event & ~0x1);
} else {
// method entry
method_ptr = event;
- event_time = TimestampCounter::GetNanoTime(method_trace_entries[i--] & ~0x1);
+ event_time = TimestampCounter::GetMicroTime(method_trace_entries[i--] & ~0x1);
}
uint64_t time_action_encoding = event_time << 1;