| author | 2024-07-05 09:08:25 +0000 |
|---|---|
| committer | 2024-07-11 14:00:00 +0000 |
| commit | b67495b6aa3f1253383938f2699da6eada1a0ead (patch) |
| tree | 902d6c0e3f61e399b0c2fad7f75c5e63a1f9740c /runtime/trace.cc |
| parent | 3b92bd7e571e3efc0d9afc37f5262cdb16fc6733 (diff) |
Use a current entry pointer instead of index for the method trace buffer
For the thread-local method trace buffers we used to store a pointer to the
buffer together with the index of the next free slot, so the generated JITed
code had to compute the address of that slot when storing a method trace
entry. Storing a pointer to the current entry instead avoids the address
computation in the generated code. This has no noticeable impact on regular
method tracing, but is better for the upcoming changes that introduce an
experimental feature for on-demand method tracing.
Bug: 259258187
Test: art/test.py
Change-Id: I7e39e686bee62ee91c651a1cb3ff242470010cb6
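To make the bookkeeping change concrete, here is a minimal sketch contrasting the two schemes. The names are illustrative only, not the actual ART Thread/TraceWriter API; the real state lives on art::Thread and is updated by the JITed trace stubs. The buffer fills from the high end downward, as the decrements in the diff below show.

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kPerThreadBufSize = 4096;  // slots; illustrative value

// Before: buffer pointer plus the index of the next free slot. Every store
// has to compute buffer + index.
struct IndexedBuffer {
  uintptr_t* buffer;
  size_t next_free;  // counts down from kPerThreadBufSize

  void Record(uintptr_t entry) {
    --next_free;
    buffer[next_free] = entry;  // address computation at every store
  }
};

// After: a pointer straight to the current entry. The store dereferences it
// directly, so the generated code needs no address arithmetic.
struct EntryPointerBuffer {
  uintptr_t* buffer;         // still kept for bounds checks and flushing
  uintptr_t* current_entry;  // starts at buffer + kPerThreadBufSize

  void Record(uintptr_t entry) {
    --current_entry;
    *current_entry = entry;  // a single decrement + store
  }
};
```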
Diffstat (limited to 'runtime/trace.cc')
-rw-r--r-- | runtime/trace.cc | 50
1 file changed, 23 insertions, 27 deletions
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 8e3d8b926d..af2a6befd7 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -1018,7 +1018,7 @@ void Trace::ReleaseThreadBuffer(Thread* self) {
     return;
   }
   the_trace_->trace_writer_->ReleaseBufferForThread(self);
-  self->SetMethodTraceBuffer(nullptr);
+  self->SetMethodTraceBuffer(nullptr, 0);
 }
 
 void Trace::Abort() {
@@ -1661,25 +1661,25 @@ int TraceWriter::GetMethodTraceIndex(uintptr_t* current_buffer) {
 
 void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
   uintptr_t* method_trace_entries = thread->GetMethodTraceBuffer();
-  size_t* current_offset = thread->GetMethodTraceIndexPtr();
+  uintptr_t** current_entry_ptr = thread->GetTraceBufferCurrEntryPtr();
+  size_t current_offset = *current_entry_ptr - method_trace_entries;
   size_t tid = thread->GetTid();
   DCHECK(method_trace_entries != nullptr);
 
   if (is_sync || thread_pool_ == nullptr) {
     std::unordered_map<ArtMethod*, std::string> method_infos;
     if (trace_format_version_ == Trace::kFormatV1) {
-      PreProcessTraceForMethodInfos(method_trace_entries, *current_offset, method_infos);
+      PreProcessTraceForMethodInfos(method_trace_entries, current_offset, method_infos);
     }
-    FlushBuffer(method_trace_entries, *current_offset, tid, method_infos);
+    FlushBuffer(method_trace_entries, current_offset, tid, method_infos);
 
     // This is a synchronous flush, so no need to allocate a new buffer. This is used either
     // when the tracing has finished or in non-streaming mode.
     // Just reset the buffer pointer to the initial value, so we can reuse the same buffer.
     if (release) {
-      thread->SetMethodTraceBuffer(nullptr);
-      *current_offset = 0;
+      thread->SetMethodTraceBuffer(nullptr, 0);
     } else {
-      *current_offset = kPerThreadBufSize;
+      thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
     }
   } else {
     int old_index = GetMethodTraceIndex(method_trace_entries);
@@ -1687,13 +1687,11 @@ void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
     // entries are flushed.
     thread_pool_->AddTask(
         Thread::Current(),
-        new TraceEntriesWriterTask(this, old_index, method_trace_entries, *current_offset, tid));
+        new TraceEntriesWriterTask(this, old_index, method_trace_entries, current_offset, tid));
     if (release) {
-      thread->SetMethodTraceBuffer(nullptr);
-      *current_offset = 0;
+      thread->SetMethodTraceBuffer(nullptr, 0);
     } else {
-      thread->SetMethodTraceBuffer(AcquireTraceBuffer(tid));
-      *current_offset = kPerThreadBufSize;
+      thread->SetMethodTraceBuffer(AcquireTraceBuffer(tid), kPerThreadBufSize);
     }
   }
 
@@ -1882,53 +1880,51 @@ void Trace::LogMethodTraceEvent(Thread* thread,
   // concurrently.
   uintptr_t* method_trace_buffer = thread->GetMethodTraceBuffer();
-  size_t* current_index = thread->GetMethodTraceIndexPtr();
+  uintptr_t** current_entry_ptr = thread->GetTraceBufferCurrEntryPtr();
   // Initialize the buffer lazily. It's just simpler to keep the creation at one place.
   if (method_trace_buffer == nullptr) {
     method_trace_buffer = trace_writer_->AcquireTraceBuffer(thread->GetTid());
     DCHECK(method_trace_buffer != nullptr);
-    thread->SetMethodTraceBuffer(method_trace_buffer);
-    *current_index = kPerThreadBufSize;
+    thread->SetMethodTraceBuffer(method_trace_buffer, kPerThreadBufSize);
     trace_writer_->RecordThreadInfo(thread);
   }
 
   if (trace_writer_->HasOverflow()) {
     // In non-streaming modes, we stop recording events once the buffer is full. Just reset the
     // index, so we don't go to runtime for each method.
-    *current_index = kPerThreadBufSize;
+    thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
     return;
   }
 
   size_t required_entries = GetNumEntries(clock_source_);
-  if (*current_index < required_entries) {
+  if (*current_entry_ptr - required_entries < method_trace_buffer) {
     // This returns nullptr in non-streaming mode if there's an overflow and we cannot record any
     // more entries. In streaming mode, it returns nullptr if it fails to allocate a new buffer.
     method_trace_buffer = trace_writer_->PrepareBufferForNewEntries(thread);
     if (method_trace_buffer == nullptr) {
-      *current_index = kPerThreadBufSize;
+      thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
       return;
     }
   }
+  *current_entry_ptr = *current_entry_ptr - required_entries;
 
   // Record entry in per-thread trace buffer.
-  // Update the offset
-  int new_entry_index = *current_index - required_entries;
-  *current_index = new_entry_index;
-
+  int entry_index = 0;
+  uintptr_t* current_entry = *current_entry_ptr;
   // Ensure we always use the non-obsolete version of the method so that entry/exit events have the
   // same pointer value.
   method = method->GetNonObsoleteMethod();
-  method_trace_buffer[new_entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
+  current_entry[entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
   if (UseThreadCpuClock(clock_source_)) {
-    method_trace_buffer[new_entry_index++] = thread_clock_diff;
+    current_entry[entry_index++] = thread_clock_diff;
   }
   if (UseWallClock(clock_source_)) {
     if (art::kRuntimePointerSize == PointerSize::k32) {
       // On 32-bit architectures store timestamp counter as two 32-bit values.
-      method_trace_buffer[new_entry_index++] = static_cast<uint32_t>(timestamp_counter);
-      method_trace_buffer[new_entry_index++] = timestamp_counter >> 32;
+      current_entry[entry_index++] = static_cast<uint32_t>(timestamp_counter);
+      current_entry[entry_index++] = timestamp_counter >> 32;
     } else {
-      method_trace_buffer[new_entry_index++] = timestamp_counter;
+      current_entry[entry_index++] = timestamp_counter;
     }
   }
 }
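For reference, a hedged sketch of how the new pointer arithmetic in LogMethodTraceEvent maps onto the old index checks, assuming (as the decrements in the diff show) the buffer fills from the high end downward. The helper names are hypothetical, not part of ART:

```cpp
#include <cstddef>
#include <cstdint>

// Old check:  *current_index < required_entries          -> not enough room.
// New check:  current_entry - required_entries < start   -> the same condition,
// since current_entry - buffer_start is the number of free slots remaining
// below the current entry.
bool NeedsNewBuffer(const uintptr_t* current_entry,
                    const uintptr_t* buffer_start,
                    size_t required_entries) {
  return static_cast<size_t>(current_entry - buffer_start) < required_entries;
}

// FlushBuffer() still needs an offset into the buffer (e.g. for
// PreProcessTraceForMethodInfos); pointer subtraction recovers it, so no
// separate index has to be maintained alongside the entry pointer.
size_t CurrentOffset(const uintptr_t* current_entry,
                     const uintptr_t* buffer_start) {
  return static_cast<size_t>(current_entry - buffer_start);
}
```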