author Nicolas Geoffray <ngeoffray@google.com> 2024-07-11 19:08:38 +0000
committer Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com> 2024-07-11 19:08:38 +0000
commit efad19d5493329c8c1c4ee94b609a3c81e02b722 (patch)
tree c26c7e92129bc3691e7fc33cb11a99fc8544cd10
parent 878637c5c71829f9fe0ac48e3703850284bff2ef (diff)
parent 44b5204a81e9263a612af65f426e66395ae9426b (diff)
Revert "Use a current entry pointer instead of index for the method trace buffer" am: 44b5204a81
Original change: https://android-review.googlesource.com/c/platform/art/+/3164241

Change-Id: Ic57db4d88371279fd2c151f6f597ebec903d2025
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     | 27
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  | 25
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc   | 22
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       | 30
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    | 35
-rw-r--r--  runtime/entrypoints_order_test.cc               |  4
-rw-r--r--  runtime/thread.h                                | 27
-rw-r--r--  runtime/trace.cc                                | 50
8 files changed, 112 insertions(+), 108 deletions(-)
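For context, this revert restores index-based bookkeeping for the per-thread method trace buffer: `Thread` keeps the raw buffer pointer (`method_trace_buffer`) plus a count-down index of the next free entry (`method_trace_buffer_index`) instead of a current-entry pointer. A minimal C++ sketch of the restored fast path follows; the struct, helper, and constant values are illustrative assumptions, not ART's actual declarations:

#include <cstddef>
#include <cstdint>

constexpr size_t kPerThreadBufSize = 4096;     // illustrative; counts entries, not bytes
constexpr size_t kNumEntriesForWallClock = 2;  // illustrative: method slot + timestamp slot(s)

struct TraceThreadState {
  uintptr_t* method_trace_buffer = nullptr;  // base of the per-thread buffer
  size_t method_trace_buffer_index = 0;      // next free entry; counts down from kPerThreadBufSize
};

// Mirrors the generated fast path in the backends below: reserve room by
// decrementing the index, then write at base + index. Returns false when
// the buffer is exhausted and the slow path must flush and reset the index.
bool RecordEntry(TraceThreadState* t, uintptr_t method_with_action, uintptr_t timestamp) {
  if (t->method_trace_buffer_index < kNumEntriesForWallClock) {
    return false;  // slow path: flush, then index = kPerThreadBufSize
  }
  t->method_trace_buffer_index -= kNumEntriesForWallClock;
  uintptr_t* entry = t->method_trace_buffer + t->method_trace_buffer_index;
  entry[0] = method_with_action;  // ArtMethod* with the trace action in the low bits
  entry[1] = timestamp;           // cycle counter; split across two slots on 32-bit
  return true;
}

Each code generator below emits this same sequence, differing only in how the entry address is formed (scaled addressing mode, shift-add, or lea).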
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 3ec67afce9..e3a76e807e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1212,8 +1212,8 @@ void InstructionCodeGeneratorARM64::GenerateMethodEntryExitHook(HInstruction* in
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
Register addr = temps.AcquireX();
- Register curr_entry = temps.AcquireX();
- Register value = curr_entry.W();
+ Register index = temps.AcquireX();
+ Register value = index.W();
SlowPathCodeARM64* slow_path =
new (codegen_->GetScopedAllocator()) MethodEntryExitHooksSlowPathARM64(instruction);
@@ -1242,20 +1242,21 @@ void InstructionCodeGeneratorARM64::GenerateMethodEntryExitHook(HInstruction* in
// If yes, just take the slow path.
__ B(gt, slow_path->GetEntryLabel());
- Register init_entry = addr;
// Check if there is place in the buffer to store a new entry, if no, take slow path.
- uint32_t trace_buffer_curr_entry_offset =
- Thread::TraceBufferCurrPtrOffset<kArm64PointerSize>().Int32Value();
- __ Ldr(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
- __ Sub(curr_entry, curr_entry, kNumEntriesForWallClock * sizeof(void*));
- __ Ldr(init_entry, MemOperand(tr, Thread::TraceBufferPtrOffset<kArm64PointerSize>().SizeValue()));
- __ Cmp(curr_entry, init_entry);
+ uint32_t trace_buffer_index_offset =
+ Thread::TraceBufferIndexOffset<kArm64PointerSize>().Int32Value();
+ __ Ldr(index, MemOperand(tr, trace_buffer_index_offset));
+ __ Subs(index, index, kNumEntriesForWallClock);
__ B(lt, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ Str(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
+ __ Str(index, MemOperand(tr, trace_buffer_index_offset));
+ // Calculate the entry address in the buffer.
+ // addr = base_addr + sizeof(void*) * index;
+ __ Ldr(addr, MemOperand(tr, Thread::TraceBufferPtrOffset<kArm64PointerSize>().SizeValue()));
+ __ ComputeAddress(addr, MemOperand(addr, index, LSL, TIMES_8));
- Register tmp = init_entry;
+ Register tmp = index;
// Record method pointer and trace action.
__ Ldr(tmp, MemOperand(sp, 0));
// Use last two bits to encode trace method action. For MethodEntry it is 0
@@ -1266,10 +1267,10 @@ void InstructionCodeGeneratorARM64::GenerateMethodEntryExitHook(HInstruction* in
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ Orr(tmp, tmp, Operand(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ Str(tmp, MemOperand(curr_entry, kMethodOffsetInBytes));
+ __ Str(tmp, MemOperand(addr, kMethodOffsetInBytes));
// Record the timestamp.
__ Mrs(tmp, (SystemRegister)SYS_CNTVCT_EL0);
- __ Str(tmp, MemOperand(curr_entry, kTimestampOffsetInBytes));
+ __ Str(tmp, MemOperand(addr, kTimestampOffsetInBytes));
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 34227a5480..fc2fc34dde 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2220,18 +2220,19 @@ void InstructionCodeGeneratorARMVIXL::GenerateMethodEntryExitHook(HInstruction*
__ B(gt, slow_path->GetEntryLabel());
// Check if there is place in the buffer to store a new entry, if no, take slow path.
- uint32_t trace_buffer_curr_entry_offset =
- Thread::TraceBufferCurrPtrOffset<kArmPointerSize>().Int32Value();
- vixl32::Register curr_entry = value;
- vixl32::Register init_entry = addr;
- __ Ldr(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
- __ Subs(curr_entry, curr_entry, static_cast<uint32_t>(kNumEntriesForWallClock * sizeof(void*)));
- __ Ldr(init_entry, MemOperand(tr, Thread::TraceBufferPtrOffset<kArmPointerSize>().SizeValue()));
- __ Cmp(curr_entry, init_entry);
+ uint32_t trace_buffer_index_offset =
+ Thread::TraceBufferIndexOffset<kArmPointerSize>().Int32Value();
+ vixl32::Register index = value;
+ __ Ldr(index, MemOperand(tr, trace_buffer_index_offset));
+ __ Subs(index, index, kNumEntriesForWallClock);
__ B(lt, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ Str(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
+ __ Str(index, MemOperand(tr, trace_buffer_index_offset));
+ // Calculate the entry address in the buffer.
+ // addr = base_addr + sizeof(void*) * index
+ __ Ldr(addr, MemOperand(tr, Thread::TraceBufferPtrOffset<kArmPointerSize>().SizeValue()));
+ __ Add(addr, addr, Operand(index, LSL, TIMES_4));
// Record method pointer and trace action.
__ Ldr(tmp, MemOperand(sp, 0));
@@ -2243,9 +2244,9 @@ void InstructionCodeGeneratorARMVIXL::GenerateMethodEntryExitHook(HInstruction*
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ Orr(tmp, tmp, Operand(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ Str(tmp, MemOperand(curr_entry, kMethodOffsetInBytes));
+ __ Str(tmp, MemOperand(addr, kMethodOffsetInBytes));
- vixl32::Register tmp1 = init_entry;
+ vixl32::Register tmp1 = index;
// See Architecture Reference Manual ARMv7-A and ARMv7-R edition section B4.1.34.
__ Mrrc(/* lower 32-bit */ tmp,
/* higher 32-bit */ tmp1,
@@ -2254,7 +2255,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMethodEntryExitHook(HInstruction*
/* crm= */ 14);
static_assert(kHighTimestampOffsetInBytes ==
kTimestampOffsetInBytes + static_cast<uint32_t>(kRuntimePointerSize));
- __ Strd(tmp, tmp1, MemOperand(curr_entry, kTimestampOffsetInBytes));
+ __ Strd(tmp, tmp1, MemOperand(addr, kTimestampOffsetInBytes));
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index f6067a5468..8581c38895 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -2640,20 +2640,24 @@ void InstructionCodeGeneratorRISCV64::GenerateMethodEntryExitHook(HInstruction*
__ Addi(tmp, tmp, -1);
__ Bnez(tmp, slow_path->GetEntryLabel());
+ // Check if there is place in the buffer to store a new entry, if no, take the slow path.
+ int32_t trace_buffer_index_offset =
+ Thread::TraceBufferIndexOffset<kRiscv64PointerSize>().Int32Value();
+ __ Loadd(tmp, TR, trace_buffer_index_offset);
+ __ Addi(tmp, tmp, -dchecked_integral_cast<int32_t>(kNumEntriesForWallClock));
+ __ Bltz(tmp, slow_path->GetEntryLabel());
+
+ // Update the index in the `Thread`.
+ __ Stored(tmp, TR, trace_buffer_index_offset);
+
// Allocate second core scratch register. We can no longer use `Stored()`
// and similar macro instructions because there is no core scratch register left.
XRegister tmp2 = temps.AllocateXRegister();
- // Check if there is place in the buffer to store a new entry, if no, take the slow path.
- int32_t trace_buffer_curr_entry_offset =
- Thread::TraceBufferCurrPtrOffset<kRiscv64PointerSize>().Int32Value();
- __ Loadd(tmp, TR, trace_buffer_curr_entry_offset);
+ // Calculate the entry address in the buffer.
+ // /*addr*/ tmp = TR->GetMethodTraceBuffer() + sizeof(void*) * /*index*/ tmp;
__ Loadd(tmp2, TR, Thread::TraceBufferPtrOffset<kRiscv64PointerSize>().SizeValue());
- __ Addi(tmp, tmp, -dchecked_integral_cast<int32_t>(kNumEntriesForWallClock * sizeof(void*)));
- __ Blt(tmp, tmp2, slow_path->GetEntryLabel());
-
- // Update the index in the `Thread`.
- __ Sd(tmp, TR, trace_buffer_curr_entry_offset);
+ __ Sh3Add(tmp, tmp, tmp2);
// Record method pointer and trace action.
__ Ld(tmp2, SP, 0);
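A note on the riscv64 version: `Sh3Add` is a Zba bit-manipulation instruction computing rd = rs2 + (rs1 << 3), so the single instruction both scales the entry index by sizeof(void*) == 8 and adds the buffer base. Illustrative C++ equivalent:

// What the emitted Sh3Add(tmp, tmp, tmp2) computes, with tmp holding the
// index and tmp2 the buffer base:
uintptr_t Sh3Add(uintptr_t index, uintptr_t base) {
  return base + (index << 3);  // base + index * sizeof(void*)
}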
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 91f4a89ced..f549da189f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1274,27 +1274,29 @@ void InstructionCodeGeneratorX86::GenerateMethodEntryExitHook(HInstruction* inst
// If yes, just take the slow path.
__ j(kGreater, slow_path->GetEntryLabel());
- // For curr_entry use the register that isn't EAX or EDX. We need this after
+ // For entry_addr use the first temp that isn't EAX or EDX. We need this after
// rdtsc which returns values in EAX + EDX.
- Register curr_entry = locations->GetTemp(2).AsRegister<Register>();
- Register init_entry = locations->GetTemp(1).AsRegister<Register>();
+ Register entry_addr = locations->GetTemp(2).AsRegister<Register>();
+ Register index = locations->GetTemp(1).AsRegister<Register>();
// Check if there is place in the buffer for a new entry, if no, take slow path.
uint32_t trace_buffer_ptr = Thread::TraceBufferPtrOffset<kX86PointerSize>().Int32Value();
- uint64_t trace_buffer_curr_entry_offset =
- Thread::TraceBufferCurrPtrOffset<kX86PointerSize>().Int32Value();
+ uint64_t trace_buffer_index_offset =
+ Thread::TraceBufferIndexOffset<kX86PointerSize>().Int32Value();
- __ fs()->movl(curr_entry, Address::Absolute(trace_buffer_curr_entry_offset));
- __ subl(curr_entry, Immediate(kNumEntriesForWallClock * sizeof(void*)));
- __ fs()->movl(init_entry, Address::Absolute(trace_buffer_ptr));
- __ cmpl(curr_entry, init_entry);
+ __ fs()->movl(index, Address::Absolute(trace_buffer_index_offset));
+ __ subl(index, Immediate(kNumEntriesForWallClock));
__ j(kLess, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ fs()->movl(Address::Absolute(trace_buffer_curr_entry_offset), curr_entry);
+ __ fs()->movl(Address::Absolute(trace_buffer_index_offset), index);
+ // Calculate the entry address in the buffer.
+ // entry_addr = base_addr + sizeof(void*) * index
+ __ fs()->movl(entry_addr, Address::Absolute(trace_buffer_ptr));
+ __ leal(entry_addr, Address(entry_addr, index, TIMES_4, 0));
// Record method pointer and trace action.
- Register method = init_entry;
+ Register method = index;
__ movl(method, Address(ESP, kCurrentMethodStackOffset));
// Use last two bits to encode trace method action. For MethodEntry it is 0
// so no need to set the bits since they are 0 already.
@@ -1304,11 +1306,11 @@ void InstructionCodeGeneratorX86::GenerateMethodEntryExitHook(HInstruction* inst
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ orl(method, Immediate(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ movl(Address(curr_entry, kMethodOffsetInBytes), method);
+ __ movl(Address(entry_addr, kMethodOffsetInBytes), method);
// Get the timestamp. rdtsc returns timestamp in EAX + EDX.
__ rdtsc();
- __ movl(Address(curr_entry, kTimestampOffsetInBytes), EAX);
- __ movl(Address(curr_entry, kHighTimestampOffsetInBytes), EDX);
+ __ movl(Address(entry_addr, kTimestampOffsetInBytes), EAX);
+ __ movl(Address(entry_addr, kHighTimestampOffsetInBytes), EDX);
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5e213e38e0..f61bb39ccc 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1695,25 +1695,28 @@ void InstructionCodeGeneratorX86_64::GenerateMethodEntryExitHook(HInstruction* i
__ j(kGreater, slow_path->GetEntryLabel());
// Check if there is place in the buffer for a new entry, if no, take slow path.
- CpuRegister curr_entry = locations->GetTemp(0).AsRegister<CpuRegister>();
- CpuRegister init_entry = CpuRegister(TMP);
- uint64_t trace_buffer_curr_entry_offset =
- Thread::TraceBufferCurrPtrOffset<kX86_64PointerSize>().SizeValue();
- __ gs()->movq(CpuRegister(curr_entry),
- Address::Absolute(trace_buffer_curr_entry_offset, /* no_rip= */ true));
- __ subq(CpuRegister(curr_entry), Immediate(kNumEntriesForWallClock * sizeof(void*)));
- __ gs()->movq(init_entry,
- Address::Absolute(Thread::TraceBufferPtrOffset<kX86_64PointerSize>().SizeValue(),
- /* no_rip= */ true));
- __ cmpq(curr_entry, init_entry);
+ CpuRegister index = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister entry_addr = CpuRegister(TMP);
+ uint64_t trace_buffer_index_offset =
+ Thread::TraceBufferIndexOffset<kX86_64PointerSize>().SizeValue();
+ __ gs()->movq(CpuRegister(index),
+ Address::Absolute(trace_buffer_index_offset, /* no_rip= */ true));
+ __ subq(CpuRegister(index), Immediate(kNumEntriesForWallClock));
__ j(kLess, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ gs()->movq(Address::Absolute(trace_buffer_curr_entry_offset, /* no_rip= */ true),
- CpuRegister(curr_entry));
+ __ gs()->movq(Address::Absolute(trace_buffer_index_offset, /* no_rip= */ true),
+ CpuRegister(index));
+ // Calculate the entry address in the buffer.
+ // entry_addr = base_addr + sizeof(void*) * index
+ __ gs()->movq(entry_addr,
+ Address::Absolute(Thread::TraceBufferPtrOffset<kX86_64PointerSize>().SizeValue(),
+ /* no_rip= */ true));
+ __ leaq(CpuRegister(entry_addr),
+ Address(CpuRegister(entry_addr), CpuRegister(index), TIMES_8, 0));
// Record method pointer and action.
- CpuRegister method = init_entry;
+ CpuRegister method = index;
__ movq(CpuRegister(method), Address(CpuRegister(RSP), kCurrentMethodStackOffset));
// Use last two bits to encode trace method action. For MethodEntry it is 0
// so no need to set the bits since they are 0 already.
@@ -1723,12 +1726,12 @@ void InstructionCodeGeneratorX86_64::GenerateMethodEntryExitHook(HInstruction* i
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ orq(method, Immediate(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ movq(Address(curr_entry, kMethodOffsetInBytes), CpuRegister(method));
+ __ movq(Address(entry_addr, kMethodOffsetInBytes), CpuRegister(method));
// Get the timestamp. rdtsc returns timestamp in RAX + RDX even in 64-bit architectures.
__ rdtsc();
__ shlq(CpuRegister(RDX), Immediate(32));
__ orq(CpuRegister(RAX), CpuRegister(RDX));
- __ movq(Address(curr_entry, kTimestampOffsetInBytes), CpuRegister(RAX));
+ __ movq(Address(entry_addr, kTimestampOffsetInBytes), CpuRegister(RAX));
__ Bind(slow_path->GetExitLabel());
}
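Even on x86_64, `rdtsc` returns the timestamp split across EAX (low half) and EDX (high half), which is why the code above widens and merges the halves before the single 64-bit store. Illustrative C++ equivalent of the `shlq`/`orq` pair:

// timestamp = (high << 32) | low, as the emitted code produces in RAX.
uint64_t MergeRdtscHalves(uint32_t eax, uint32_t edx) {
  return (static_cast<uint64_t>(edx) << 32) | eax;
}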
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 090484eabb..b25e01067b 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -134,9 +134,9 @@ class EntrypointsOrderTest : public CommonArtTest {
EXPECT_OFFSET_DIFFP(
Thread, tlsPtr_, top_reflective_handle_scope, method_trace_buffer, sizeof(void*));
EXPECT_OFFSET_DIFFP(
- Thread, tlsPtr_, method_trace_buffer, method_trace_buffer_curr_entry, sizeof(void*));
+ Thread, tlsPtr_, method_trace_buffer, method_trace_buffer_index, sizeof(void*));
EXPECT_OFFSET_DIFFP(
- Thread, tlsPtr_, method_trace_buffer_curr_entry, thread_exit_flags, sizeof(void*));
+ Thread, tlsPtr_, method_trace_buffer_index, thread_exit_flags, sizeof(void*));
EXPECT_OFFSET_DIFFP(
Thread, tlsPtr_, thread_exit_flags, last_no_thread_suspension_cause, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause,
diff --git a/runtime/thread.h b/runtime/thread.h
index fda086adf0..464b343b1e 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1099,9 +1099,9 @@ class EXPORT Thread {
}
template <PointerSize pointer_size>
- static constexpr ThreadOffset<pointer_size> TraceBufferCurrPtrOffset() {
+ static constexpr ThreadOffset<pointer_size> TraceBufferIndexOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer_curr_entry));
+ OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer_index));
}
template <PointerSize pointer_size>
@@ -1364,21 +1364,10 @@ class EXPORT Thread {
uintptr_t* GetMethodTraceBuffer() { return tlsPtr_.method_trace_buffer; }
- uintptr_t** GetTraceBufferCurrEntryPtr() { return &tlsPtr_.method_trace_buffer_curr_entry; }
+ size_t* GetMethodTraceIndexPtr() { return &tlsPtr_.method_trace_buffer_index; }
- void SetMethodTraceBuffer(uintptr_t* buffer, int init_index) {
- tlsPtr_.method_trace_buffer = buffer;
- SetTraceBufferCurrentEntry(init_index);
- }
-
- void SetTraceBufferCurrentEntry(int index) {
- uintptr_t* buffer = tlsPtr_.method_trace_buffer;
- if (buffer == nullptr) {
- tlsPtr_.method_trace_buffer_curr_entry = nullptr;
- } else {
- DCHECK(buffer != nullptr);
- tlsPtr_.method_trace_buffer_curr_entry = buffer + index;
- }
+ uintptr_t* SetMethodTraceBuffer(uintptr_t* buffer) {
+ return tlsPtr_.method_trace_buffer = buffer;
}
uint64_t GetTraceClockBase() const {
@@ -2163,7 +2152,7 @@ class EXPORT Thread {
async_exception(nullptr),
top_reflective_handle_scope(nullptr),
method_trace_buffer(nullptr),
- method_trace_buffer_curr_entry(nullptr),
+ method_trace_buffer_index(0),
thread_exit_flags(nullptr),
last_no_thread_suspension_cause(nullptr),
last_no_transaction_checks_cause(nullptr) {
@@ -2338,8 +2327,8 @@ class EXPORT Thread {
// Pointer to a thread-local buffer for method tracing.
uintptr_t* method_trace_buffer;
- // Pointer to the current entry in the buffer.
- uintptr_t* method_trace_buffer_curr_entry;
+ // The index of the next free entry in method_trace_buffer.
+ size_t method_trace_buffer_index;
// Pointer to the first node of an intrusively doubly-linked list of ThreadExitFlags.
ThreadExitFlag* thread_exit_flags GUARDED_BY(Locks::thread_list_lock_);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index af2a6befd7..8e3d8b926d 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -1018,7 +1018,7 @@ void Trace::ReleaseThreadBuffer(Thread* self) {
return;
}
the_trace_->trace_writer_->ReleaseBufferForThread(self);
- self->SetMethodTraceBuffer(nullptr, 0);
+ self->SetMethodTraceBuffer(nullptr);
}
void Trace::Abort() {
@@ -1661,25 +1661,25 @@ int TraceWriter::GetMethodTraceIndex(uintptr_t* current_buffer) {
void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
uintptr_t* method_trace_entries = thread->GetMethodTraceBuffer();
- uintptr_t** current_entry_ptr = thread->GetTraceBufferCurrEntryPtr();
- size_t current_offset = *current_entry_ptr - method_trace_entries;
+ size_t* current_offset = thread->GetMethodTraceIndexPtr();
size_t tid = thread->GetTid();
DCHECK(method_trace_entries != nullptr);
if (is_sync || thread_pool_ == nullptr) {
std::unordered_map<ArtMethod*, std::string> method_infos;
if (trace_format_version_ == Trace::kFormatV1) {
- PreProcessTraceForMethodInfos(method_trace_entries, current_offset, method_infos);
+ PreProcessTraceForMethodInfos(method_trace_entries, *current_offset, method_infos);
}
- FlushBuffer(method_trace_entries, current_offset, tid, method_infos);
+ FlushBuffer(method_trace_entries, *current_offset, tid, method_infos);
// This is a synchronous flush, so no need to allocate a new buffer. This is used either
// when the tracing has finished or in non-streaming mode.
// Just reset the buffer pointer to the initial value, so we can reuse the same buffer.
if (release) {
- thread->SetMethodTraceBuffer(nullptr, 0);
+ thread->SetMethodTraceBuffer(nullptr);
+ *current_offset = 0;
} else {
- thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
+ *current_offset = kPerThreadBufSize;
}
} else {
int old_index = GetMethodTraceIndex(method_trace_entries);
@@ -1687,11 +1687,13 @@ void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
// entries are flushed.
thread_pool_->AddTask(
Thread::Current(),
- new TraceEntriesWriterTask(this, old_index, method_trace_entries, current_offset, tid));
+ new TraceEntriesWriterTask(this, old_index, method_trace_entries, *current_offset, tid));
if (release) {
- thread->SetMethodTraceBuffer(nullptr, 0);
+ thread->SetMethodTraceBuffer(nullptr);
+ *current_offset = 0;
} else {
- thread->SetMethodTraceBuffer(AcquireTraceBuffer(tid), kPerThreadBufSize);
+ thread->SetMethodTraceBuffer(AcquireTraceBuffer(tid));
+ *current_offset = kPerThreadBufSize;
}
}
@@ -1880,51 +1882,53 @@ void Trace::LogMethodTraceEvent(Thread* thread,
// concurrently.
uintptr_t* method_trace_buffer = thread->GetMethodTraceBuffer();
- uintptr_t** current_entry_ptr = thread->GetTraceBufferCurrEntryPtr();
+ size_t* current_index = thread->GetMethodTraceIndexPtr();
// Initialize the buffer lazily. It's just simpler to keep the creation at one place.
if (method_trace_buffer == nullptr) {
method_trace_buffer = trace_writer_->AcquireTraceBuffer(thread->GetTid());
DCHECK(method_trace_buffer != nullptr);
- thread->SetMethodTraceBuffer(method_trace_buffer, kPerThreadBufSize);
+ thread->SetMethodTraceBuffer(method_trace_buffer);
+ *current_index = kPerThreadBufSize;
trace_writer_->RecordThreadInfo(thread);
}
if (trace_writer_->HasOverflow()) {
// In non-streaming modes, we stop recording events once the buffer is full. Just reset the
// index, so we don't go to runtime for each method.
- thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
+ *current_index = kPerThreadBufSize;
return;
}
size_t required_entries = GetNumEntries(clock_source_);
- if (*current_entry_ptr - required_entries < method_trace_buffer) {
+ if (*current_index < required_entries) {
// This returns nullptr in non-streaming mode if there's an overflow and we cannot record any
// more entries. In streaming mode, it returns nullptr if it fails to allocate a new buffer.
method_trace_buffer = trace_writer_->PrepareBufferForNewEntries(thread);
if (method_trace_buffer == nullptr) {
- thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
+ *current_index = kPerThreadBufSize;
return;
}
}
- *current_entry_ptr = *current_entry_ptr - required_entries;
// Record entry in per-thread trace buffer.
- int entry_index = 0;
- uintptr_t* current_entry = *current_entry_ptr;
+ // Update the offset
+ int new_entry_index = *current_index - required_entries;
+ *current_index = new_entry_index;
+
// Ensure we always use the non-obsolete version of the method so that entry/exit events have the
// same pointer value.
method = method->GetNonObsoleteMethod();
- current_entry[entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
+ method_trace_buffer[new_entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
if (UseThreadCpuClock(clock_source_)) {
- current_entry[entry_index++] = thread_clock_diff;
+ method_trace_buffer[new_entry_index++] = thread_clock_diff;
}
if (UseWallClock(clock_source_)) {
if (art::kRuntimePointerSize == PointerSize::k32) {
// On 32-bit architectures store timestamp counter as two 32-bit values.
- current_entry[entry_index++] = static_cast<uint32_t>(timestamp_counter);
- current_entry[entry_index++] = timestamp_counter >> 32;
+ method_trace_buffer[new_entry_index++] = static_cast<uint32_t>(timestamp_counter);
+ method_trace_buffer[new_entry_index++] = timestamp_counter >> 32;
} else {
- current_entry[entry_index++] = timestamp_counter;
+ method_trace_buffer[new_entry_index++] = timestamp_counter;
}
}
}
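As the trace.cc hunk shows, each event occupies consecutive `uintptr_t` slots starting at the new index, and on 32-bit targets the 64-bit wall-clock counter is split across two adjacent slots (matching the static_assert on `kHighTimestampOffsetInBytes` in the arm_vixl hunk). A hedged sketch of that writer logic; only the identifiers taken from the diff are real, the helper itself is hypothetical:

#include <cstddef>
#include <cstdint>

// Writes one wall-clock entry at `index` slots into `buffer`, mirroring
// Trace::LogMethodTraceEvent above. The caller has already decremented
// the thread-local index by the number of slots the entry needs.
void WriteWallClockEntry(uintptr_t* buffer, size_t index,
                         uintptr_t method_with_action, uint64_t timestamp_counter) {
  size_t i = index;
  buffer[i++] = method_with_action;  // method pointer | trace action
  if (sizeof(void*) == 4) {
    // 32-bit: store the counter as two 32-bit halves in adjacent slots.
    buffer[i++] = static_cast<uint32_t>(timestamp_counter);
    buffer[i++] = static_cast<uintptr_t>(timestamp_counter >> 32);
  } else {
    buffer[i++] = static_cast<uintptr_t>(timestamp_counter);
  }
}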