-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     | 27
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  | 25
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc   | 22
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       | 30
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    | 35
-rw-r--r--  runtime/entrypoints_order_test.cc               |  4
-rw-r--r--  runtime/thread.h                                | 27
-rw-r--r--  runtime/trace.cc                                | 50
8 files changed, 108 insertions(+), 112 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e3a76e807e..3ec67afce9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1212,8 +1212,8 @@ void InstructionCodeGeneratorARM64::GenerateMethodEntryExitHook(HInstruction* in
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
Register addr = temps.AcquireX();
- Register index = temps.AcquireX();
- Register value = index.W();
+ Register curr_entry = temps.AcquireX();
+ Register value = curr_entry.W();
SlowPathCodeARM64* slow_path =
new (codegen_->GetScopedAllocator()) MethodEntryExitHooksSlowPathARM64(instruction);
@@ -1242,21 +1242,20 @@ void InstructionCodeGeneratorARM64::GenerateMethodEntryExitHook(HInstruction* in
// If yes, just take the slow path.
__ B(gt, slow_path->GetEntryLabel());
+ Register init_entry = addr;
// Check if there is place in the buffer to store a new entry, if no, take slow path.
- uint32_t trace_buffer_index_offset =
- Thread::TraceBufferIndexOffset<kArm64PointerSize>().Int32Value();
- __ Ldr(index, MemOperand(tr, trace_buffer_index_offset));
- __ Subs(index, index, kNumEntriesForWallClock);
+ uint32_t trace_buffer_curr_entry_offset =
+ Thread::TraceBufferCurrPtrOffset<kArm64PointerSize>().Int32Value();
+ __ Ldr(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
+ __ Sub(curr_entry, curr_entry, kNumEntriesForWallClock * sizeof(void*));
+ __ Ldr(init_entry, MemOperand(tr, Thread::TraceBufferPtrOffset<kArm64PointerSize>().SizeValue()));
+ __ Cmp(curr_entry, init_entry);
__ B(lt, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ Str(index, MemOperand(tr, trace_buffer_index_offset));
- // Calculate the entry address in the buffer.
- // addr = base_addr + sizeof(void*) * index;
- __ Ldr(addr, MemOperand(tr, Thread::TraceBufferPtrOffset<kArm64PointerSize>().SizeValue()));
- __ ComputeAddress(addr, MemOperand(addr, index, LSL, TIMES_8));
+ __ Str(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
- Register tmp = index;
+ Register tmp = init_entry;
// Record method pointer and trace action.
__ Ldr(tmp, MemOperand(sp, 0));
// Use last two bits to encode trace method action. For MethodEntry it is 0
@@ -1267,10 +1266,10 @@ void InstructionCodeGeneratorARM64::GenerateMethodEntryExitHook(HInstruction* in
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ Orr(tmp, tmp, Operand(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ Str(tmp, MemOperand(addr, kMethodOffsetInBytes));
+ __ Str(tmp, MemOperand(curr_entry, kMethodOffsetInBytes));
// Record the timestamp.
__ Mrs(tmp, (SystemRegister)SYS_CNTVCT_EL0);
- __ Str(tmp, MemOperand(addr, kTimestampOffsetInBytes));
+ __ Str(tmp, MemOperand(curr_entry, kTimestampOffsetInBytes));
__ Bind(slow_path->GetExitLabel());
}
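The shape of the change is the same in every backend: the per-thread free-slot index becomes a cached pointer to the current entry, which removes the base + sizeof(void*) * index address computation from the fast path. A minimal C++ sketch of the before/after logic this hunk emits (names and the entry count are illustrative; entries are claimed from the top of the buffer downward):

  #include <cstddef>
  #include <cstdint>

  constexpr size_t kNumEntriesForWallClock = 3;  // illustrative; the real value is defined in trace.h

  // Before: load the index, bounds-check it, then compute base + index.
  uintptr_t* OldFastPath(uintptr_t* buffer, size_t* index) {
    if (*index < kNumEntriesForWallClock) {
      return nullptr;  // take the slow path
    }
    *index -= kNumEntriesForWallClock;
    return buffer + *index;  // extra address computation per event
  }

  // After: the decremented pointer is itself the entry address. The check is
  // phrased as "remaining slots" to avoid forming an out-of-bounds pointer;
  // the emitted code subtracts first and then compares against the base.
  uintptr_t* NewFastPath(uintptr_t* buffer, uintptr_t** curr_entry) {
    if (static_cast<size_t>(*curr_entry - buffer) < kNumEntriesForWallClock) {
      return nullptr;  // take the slow path
    }
    *curr_entry -= kNumEntriesForWallClock;
    return *curr_entry;
  }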
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index fc2fc34dde..34227a5480 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2220,19 +2220,18 @@ void InstructionCodeGeneratorARMVIXL::GenerateMethodEntryExitHook(HInstruction*
__ B(gt, slow_path->GetEntryLabel());
// Check if there is place in the buffer to store a new entry, if no, take slow path.
- uint32_t trace_buffer_index_offset =
- Thread::TraceBufferIndexOffset<kArmPointerSize>().Int32Value();
- vixl32::Register index = value;
- __ Ldr(index, MemOperand(tr, trace_buffer_index_offset));
- __ Subs(index, index, kNumEntriesForWallClock);
+ uint32_t trace_buffer_curr_entry_offset =
+ Thread::TraceBufferCurrPtrOffset<kArmPointerSize>().Int32Value();
+ vixl32::Register curr_entry = value;
+ vixl32::Register init_entry = addr;
+ __ Ldr(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
+ __ Subs(curr_entry, curr_entry, static_cast<uint32_t>(kNumEntriesForWallClock * sizeof(void*)));
+ __ Ldr(init_entry, MemOperand(tr, Thread::TraceBufferPtrOffset<kArmPointerSize>().SizeValue()));
+ __ Cmp(curr_entry, init_entry);
__ B(lt, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ Str(index, MemOperand(tr, trace_buffer_index_offset));
- // Calculate the entry address in the buffer.
- // addr = base_addr + sizeof(void*) * index
- __ Ldr(addr, MemOperand(tr, Thread::TraceBufferPtrOffset<kArmPointerSize>().SizeValue()));
- __ Add(addr, addr, Operand(index, LSL, TIMES_4));
+ __ Str(curr_entry, MemOperand(tr, trace_buffer_curr_entry_offset));
// Record method pointer and trace action.
__ Ldr(tmp, MemOperand(sp, 0));
@@ -2244,9 +2243,9 @@ void InstructionCodeGeneratorARMVIXL::GenerateMethodEntryExitHook(HInstruction*
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ Orr(tmp, tmp, Operand(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ Str(tmp, MemOperand(addr, kMethodOffsetInBytes));
+ __ Str(tmp, MemOperand(curr_entry, kMethodOffsetInBytes));
- vixl32::Register tmp1 = index;
+ vixl32::Register tmp1 = init_entry;
// See Architecture Reference Manual ARMv7-A and ARMv7-R edition section B4.1.34.
__ Mrrc(/* lower 32-bit */ tmp,
/* higher 32-bit */ tmp1,
@@ -2255,7 +2254,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMethodEntryExitHook(HInstruction*
/* crm= */ 14);
static_assert(kHighTimestampOffsetInBytes ==
kTimestampOffsetInBytes + static_cast<uint32_t>(kRuntimePointerSize));
- __ Strd(tmp, tmp1, MemOperand(addr, kTimestampOffsetInBytes));
+ __ Strd(tmp, tmp1, MemOperand(curr_entry, kTimestampOffsetInBytes));
__ Bind(slow_path->GetExitLabel());
}
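On 32-bit ARM each buffer slot is 4 bytes, so the 64-bit counter read by Mrrc lands in two registers and Strd stores them into two adjacent slots; the static_assert above pins kHighTimestampOffsetInBytes to exactly one pointer past kTimestampOffsetInBytes. A sketch of the same split in C++ (helper name hypothetical):

  #include <cstdint>

  // Store a 64-bit counter as two consecutive 32-bit entries, low word first,
  // mirroring the Mrrc + Strd pair above.
  void StoreTimestamp32(uint32_t* timestamp_slot, uint64_t counter) {
    timestamp_slot[0] = static_cast<uint32_t>(counter);        // kTimestampOffsetInBytes
    timestamp_slot[1] = static_cast<uint32_t>(counter >> 32);  // kHighTimestampOffsetInBytes
  }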
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 8581c38895..f6067a5468 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -2640,24 +2640,20 @@ void InstructionCodeGeneratorRISCV64::GenerateMethodEntryExitHook(HInstruction*
__ Addi(tmp, tmp, -1);
__ Bnez(tmp, slow_path->GetEntryLabel());
- // Check if there is place in the buffer to store a new entry, if no, take the slow path.
- int32_t trace_buffer_index_offset =
- Thread::TraceBufferIndexOffset<kRiscv64PointerSize>().Int32Value();
- __ Loadd(tmp, TR, trace_buffer_index_offset);
- __ Addi(tmp, tmp, -dchecked_integral_cast<int32_t>(kNumEntriesForWallClock));
- __ Bltz(tmp, slow_path->GetEntryLabel());
-
- // Update the index in the `Thread`.
- __ Stored(tmp, TR, trace_buffer_index_offset);
-
// Allocate second core scratch register. We can no longer use `Stored()`
// and similar macro instructions because there is no core scratch register left.
XRegister tmp2 = temps.AllocateXRegister();
- // Calculate the entry address in the buffer.
- // /*addr*/ tmp = TR->GetMethodTraceBuffer() + sizeof(void*) * /*index*/ tmp;
+ // Check if there is place in the buffer to store a new entry, if no, take the slow path.
+ int32_t trace_buffer_curr_entry_offset =
+ Thread::TraceBufferCurrPtrOffset<kRiscv64PointerSize>().Int32Value();
+ __ Loadd(tmp, TR, trace_buffer_curr_entry_offset);
__ Loadd(tmp2, TR, Thread::TraceBufferPtrOffset<kRiscv64PointerSize>().SizeValue());
- __ Sh3Add(tmp, tmp, tmp2);
+ __ Addi(tmp, tmp, -dchecked_integral_cast<int32_t>(kNumEntriesForWallClock * sizeof(void*)));
+ __ Blt(tmp, tmp2, slow_path->GetEntryLabel());
+
+ // Update the current entry pointer in the `Thread`.
+ __ Sd(tmp, TR, trace_buffer_curr_entry_offset);
// Record method pointer and trace action.
__ Ld(tmp2, SP, 0);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f549da189f..91f4a89ced 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1274,29 +1274,27 @@ void InstructionCodeGeneratorX86::GenerateMethodEntryExitHook(HInstruction* inst
// If yes, just take the slow path.
__ j(kGreater, slow_path->GetEntryLabel());
- // For entry_addr use the first temp that isn't EAX or EDX. We need this after
+ // For curr_entry use the register that isn't EAX or EDX. We need this after
// rdtsc which returns values in EAX + EDX.
- Register entry_addr = locations->GetTemp(2).AsRegister<Register>();
- Register index = locations->GetTemp(1).AsRegister<Register>();
+ Register curr_entry = locations->GetTemp(2).AsRegister<Register>();
+ Register init_entry = locations->GetTemp(1).AsRegister<Register>();
// Check if there is place in the buffer for a new entry, if no, take slow path.
uint32_t trace_buffer_ptr = Thread::TraceBufferPtrOffset<kX86PointerSize>().Int32Value();
- uint64_t trace_buffer_index_offset =
- Thread::TraceBufferIndexOffset<kX86PointerSize>().Int32Value();
+ uint64_t trace_buffer_curr_entry_offset =
+ Thread::TraceBufferCurrPtrOffset<kX86PointerSize>().Int32Value();
- __ fs()->movl(index, Address::Absolute(trace_buffer_index_offset));
- __ subl(index, Immediate(kNumEntriesForWallClock));
+ __ fs()->movl(curr_entry, Address::Absolute(trace_buffer_curr_entry_offset));
+ __ subl(curr_entry, Immediate(kNumEntriesForWallClock * sizeof(void*)));
+ __ fs()->movl(init_entry, Address::Absolute(trace_buffer_ptr));
+ __ cmpl(curr_entry, init_entry);
__ j(kLess, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ fs()->movl(Address::Absolute(trace_buffer_index_offset), index);
- // Calculate the entry address in the buffer.
- // entry_addr = base_addr + sizeof(void*) * index
- __ fs()->movl(entry_addr, Address::Absolute(trace_buffer_ptr));
- __ leal(entry_addr, Address(entry_addr, index, TIMES_4, 0));
+ __ fs()->movl(Address::Absolute(trace_buffer_curr_entry_offset), curr_entry);
// Record method pointer and trace action.
- Register method = index;
+ Register method = init_entry;
__ movl(method, Address(ESP, kCurrentMethodStackOffset));
// Use last two bits to encode trace method action. For MethodEntry it is 0
// so no need to set the bits since they are 0 already.
@@ -1306,11 +1304,11 @@ void InstructionCodeGeneratorX86::GenerateMethodEntryExitHook(HInstruction* inst
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ orl(method, Immediate(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ movl(Address(entry_addr, kMethodOffsetInBytes), method);
+ __ movl(Address(curr_entry, kMethodOffsetInBytes), method);
// Get the timestamp. rdtsc returns timestamp in EAX + EDX.
__ rdtsc();
- __ movl(Address(entry_addr, kTimestampOffsetInBytes), EAX);
- __ movl(Address(entry_addr, kHighTimestampOffsetInBytes), EDX);
+ __ movl(Address(curr_entry, kTimestampOffsetInBytes), EAX);
+ __ movl(Address(curr_entry, kHighTimestampOffsetInBytes), EDX);
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f61bb39ccc..5e213e38e0 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1695,28 +1695,25 @@ void InstructionCodeGeneratorX86_64::GenerateMethodEntryExitHook(HInstruction* i
__ j(kGreater, slow_path->GetEntryLabel());
// Check if there is place in the buffer for a new entry, if no, take slow path.
- CpuRegister index = locations->GetTemp(0).AsRegister<CpuRegister>();
- CpuRegister entry_addr = CpuRegister(TMP);
- uint64_t trace_buffer_index_offset =
- Thread::TraceBufferIndexOffset<kX86_64PointerSize>().SizeValue();
- __ gs()->movq(CpuRegister(index),
- Address::Absolute(trace_buffer_index_offset, /* no_rip= */ true));
- __ subq(CpuRegister(index), Immediate(kNumEntriesForWallClock));
+ CpuRegister curr_entry = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister init_entry = CpuRegister(TMP);
+ uint64_t trace_buffer_curr_entry_offset =
+ Thread::TraceBufferCurrPtrOffset<kX86_64PointerSize>().SizeValue();
+ __ gs()->movq(CpuRegister(curr_entry),
+ Address::Absolute(trace_buffer_curr_entry_offset, /* no_rip= */ true));
+ __ subq(CpuRegister(curr_entry), Immediate(kNumEntriesForWallClock * sizeof(void*)));
+ __ gs()->movq(init_entry,
+ Address::Absolute(Thread::TraceBufferPtrOffset<kX86_64PointerSize>().SizeValue(),
+ /* no_rip= */ true));
+ __ cmpq(curr_entry, init_entry);
__ j(kLess, slow_path->GetEntryLabel());
// Update the index in the `Thread`.
- __ gs()->movq(Address::Absolute(trace_buffer_index_offset, /* no_rip= */ true),
- CpuRegister(index));
- // Calculate the entry address in the buffer.
- // entry_addr = base_addr + sizeof(void*) * index
- __ gs()->movq(entry_addr,
- Address::Absolute(Thread::TraceBufferPtrOffset<kX86_64PointerSize>().SizeValue(),
- /* no_rip= */ true));
- __ leaq(CpuRegister(entry_addr),
- Address(CpuRegister(entry_addr), CpuRegister(index), TIMES_8, 0));
+ __ gs()->movq(Address::Absolute(trace_buffer_curr_entry_offset, /* no_rip= */ true),
+ CpuRegister(curr_entry));
// Record method pointer and action.
- CpuRegister method = index;
+ CpuRegister method = init_entry;
__ movq(CpuRegister(method), Address(CpuRegister(RSP), kCurrentMethodStackOffset));
// Use last two bits to encode trace method action. For MethodEntry it is 0
// so no need to set the bits since they are 0 already.
@@ -1726,12 +1723,12 @@ void InstructionCodeGeneratorX86_64::GenerateMethodEntryExitHook(HInstruction* i
static_assert(enum_cast<int32_t>(TraceAction::kTraceMethodExit) == 1);
__ orq(method, Immediate(enum_cast<int32_t>(TraceAction::kTraceMethodExit)));
}
- __ movq(Address(entry_addr, kMethodOffsetInBytes), CpuRegister(method));
+ __ movq(Address(curr_entry, kMethodOffsetInBytes), CpuRegister(method));
// Get the timestamp. rdtsc returns timestamp in RAX + RDX even in 64-bit architectures.
__ rdtsc();
__ shlq(CpuRegister(RDX), Immediate(32));
__ orq(CpuRegister(RAX), CpuRegister(RDX));
- __ movq(Address(entry_addr, kTimestampOffsetInBytes), CpuRegister(RAX));
+ __ movq(Address(curr_entry, kTimestampOffsetInBytes), CpuRegister(RAX));
__ Bind(slow_path->GetExitLabel());
}
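rdtsc deposits the low and high halves of the counter in EAX and EDX even in 64-bit mode, hence the shlq/orq pair before the single 64-bit store. The same combine in C++, for reference (on GCC/Clang the <x86intrin.h> intrinsic performs the read and the merge in one call):

  #include <cstdint>
  #include <x86intrin.h>

  // Mirrors the generated sequence: RDX << 32 | RAX.
  uint64_t CombineTsc(uint32_t eax, uint32_t edx) {
    return (static_cast<uint64_t>(edx) << 32) | eax;
  }

  uint64_t ReadTsc() {
    return __rdtsc();  // rdtsc plus the same shift/or combine
  }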
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index b25e01067b..090484eabb 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -134,9 +134,9 @@ class EntrypointsOrderTest : public CommonArtTest {
EXPECT_OFFSET_DIFFP(
Thread, tlsPtr_, top_reflective_handle_scope, method_trace_buffer, sizeof(void*));
EXPECT_OFFSET_DIFFP(
- Thread, tlsPtr_, method_trace_buffer, method_trace_buffer_index, sizeof(void*));
+ Thread, tlsPtr_, method_trace_buffer, method_trace_buffer_curr_entry, sizeof(void*));
EXPECT_OFFSET_DIFFP(
- Thread, tlsPtr_, method_trace_buffer_index, thread_exit_flags, sizeof(void*));
+ Thread, tlsPtr_, method_trace_buffer_curr_entry, thread_exit_flags, sizeof(void*));
EXPECT_OFFSET_DIFFP(
Thread, tlsPtr_, thread_exit_flags, last_no_thread_suspension_cause, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause,
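This test pins the tlsPtr_ layout that the generated code above addresses by compile-time offset, so renaming the slot has to be mirrored here. A standalone sketch of the invariant being asserted (the struct is a stand-in, not the real Thread layout):

  #include <cstddef>
  #include <cstdint>

  struct TlsStandIn {
    uintptr_t* method_trace_buffer;
    uintptr_t* method_trace_buffer_curr_entry;  // replaces size_t method_trace_buffer_index
    void* thread_exit_flags;
  };

  // Adjacent members must be exactly one pointer apart, as EXPECT_OFFSET_DIFFP
  // checks for Thread::tlsPtr_ above.
  static_assert(offsetof(TlsStandIn, method_trace_buffer_curr_entry) ==
                offsetof(TlsStandIn, method_trace_buffer) + sizeof(void*));
  static_assert(offsetof(TlsStandIn, thread_exit_flags) ==
                offsetof(TlsStandIn, method_trace_buffer_curr_entry) + sizeof(void*));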
diff --git a/runtime/thread.h b/runtime/thread.h
index 464b343b1e..fda086adf0 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1099,9 +1099,9 @@ class EXPORT Thread {
}
template <PointerSize pointer_size>
- static constexpr ThreadOffset<pointer_size> TraceBufferIndexOffset() {
+ static constexpr ThreadOffset<pointer_size> TraceBufferCurrPtrOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer_index));
+ OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer_curr_entry));
}
template <PointerSize pointer_size>
@@ -1364,10 +1364,21 @@ class EXPORT Thread {
uintptr_t* GetMethodTraceBuffer() { return tlsPtr_.method_trace_buffer; }
- size_t* GetMethodTraceIndexPtr() { return &tlsPtr_.method_trace_buffer_index; }
+ uintptr_t** GetTraceBufferCurrEntryPtr() { return &tlsPtr_.method_trace_buffer_curr_entry; }
- uintptr_t* SetMethodTraceBuffer(uintptr_t* buffer) {
- return tlsPtr_.method_trace_buffer = buffer;
+ void SetMethodTraceBuffer(uintptr_t* buffer, int init_index) {
+ tlsPtr_.method_trace_buffer = buffer;
+ SetTraceBufferCurrentEntry(init_index);
+ }
+
+ void SetTraceBufferCurrentEntry(int index) {
+ uintptr_t* buffer = tlsPtr_.method_trace_buffer;
+ if (buffer == nullptr) {
+ tlsPtr_.method_trace_buffer_curr_entry = nullptr;
+ } else {
+ tlsPtr_.method_trace_buffer_curr_entry = buffer + index;
+ }
}
uint64_t GetTraceClockBase() const {
@@ -2152,7 +2163,7 @@ class EXPORT Thread {
async_exception(nullptr),
top_reflective_handle_scope(nullptr),
method_trace_buffer(nullptr),
- method_trace_buffer_index(0),
+ method_trace_buffer_curr_entry(nullptr),
thread_exit_flags(nullptr),
last_no_thread_suspension_cause(nullptr),
last_no_transaction_checks_cause(nullptr) {
@@ -2327,8 +2338,8 @@ class EXPORT Thread {
// Pointer to a thread-local buffer for method tracing.
uintptr_t* method_trace_buffer;
- // The index of the next free entry in method_trace_buffer.
- size_t method_trace_buffer_index;
+ // Pointer to the current entry in the buffer.
+ uintptr_t* method_trace_buffer_curr_entry;
// Pointer to the first node of an intrusively doubly-linked list of ThreadExitFlags.
ThreadExitFlag* thread_exit_flags GUARDED_BY(Locks::thread_list_lock_);
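The invariant behind the new accessors: entries are written from the top of the buffer downward, so curr_entry == buffer + kPerThreadBufSize means an empty buffer and curr_entry == buffer means a full one. A condensed sketch of the two setters (constant value illustrative):

  #include <cstdint>

  constexpr int kPerThreadBufSize = 4096;  // illustrative; the real constant lives in trace.h

  struct TraceTlsSketch {
    uintptr_t* method_trace_buffer = nullptr;
    uintptr_t* method_trace_buffer_curr_entry = nullptr;

    void SetMethodTraceBuffer(uintptr_t* buffer, int init_index) {
      method_trace_buffer = buffer;
      SetTraceBufferCurrentEntry(init_index);
    }

    // A null buffer clears the cached entry pointer; otherwise the pointer
    // starts index slots above the base (kPerThreadBufSize == empty).
    void SetTraceBufferCurrentEntry(int index) {
      method_trace_buffer_curr_entry =
          (method_trace_buffer == nullptr) ? nullptr : method_trace_buffer + index;
    }
  };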
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 8e3d8b926d..af2a6befd7 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -1018,7 +1018,7 @@ void Trace::ReleaseThreadBuffer(Thread* self) {
return;
}
the_trace_->trace_writer_->ReleaseBufferForThread(self);
- self->SetMethodTraceBuffer(nullptr);
+ self->SetMethodTraceBuffer(nullptr, 0);
}
void Trace::Abort() {
@@ -1661,25 +1661,25 @@ int TraceWriter::GetMethodTraceIndex(uintptr_t* current_buffer) {
void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
uintptr_t* method_trace_entries = thread->GetMethodTraceBuffer();
- size_t* current_offset = thread->GetMethodTraceIndexPtr();
+ uintptr_t** current_entry_ptr = thread->GetTraceBufferCurrEntryPtr();
+ size_t current_offset = *current_entry_ptr - method_trace_entries;
size_t tid = thread->GetTid();
DCHECK(method_trace_entries != nullptr);
if (is_sync || thread_pool_ == nullptr) {
std::unordered_map<ArtMethod*, std::string> method_infos;
if (trace_format_version_ == Trace::kFormatV1) {
- PreProcessTraceForMethodInfos(method_trace_entries, *current_offset, method_infos);
+ PreProcessTraceForMethodInfos(method_trace_entries, current_offset, method_infos);
}
- FlushBuffer(method_trace_entries, *current_offset, tid, method_infos);
+ FlushBuffer(method_trace_entries, current_offset, tid, method_infos);
// This is a synchronous flush, so no need to allocate a new buffer. This is used either
// when the tracing has finished or in non-streaming mode.
// Just reset the buffer pointer to the initial value, so we can reuse the same buffer.
if (release) {
- thread->SetMethodTraceBuffer(nullptr);
- *current_offset = 0;
+ thread->SetMethodTraceBuffer(nullptr, 0);
} else {
- *current_offset = kPerThreadBufSize;
+ thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
}
} else {
int old_index = GetMethodTraceIndex(method_trace_entries);
@@ -1687,13 +1687,11 @@ void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
// entries are flushed.
thread_pool_->AddTask(
Thread::Current(),
- new TraceEntriesWriterTask(this, old_index, method_trace_entries, *current_offset, tid));
+ new TraceEntriesWriterTask(this, old_index, method_trace_entries, current_offset, tid));
if (release) {
- thread->SetMethodTraceBuffer(nullptr);
- *current_offset = 0;
+ thread->SetMethodTraceBuffer(nullptr, 0);
} else {
- thread->SetMethodTraceBuffer(AcquireTraceBuffer(tid));
- *current_offset = kPerThreadBufSize;
+ thread->SetMethodTraceBuffer(AcquireTraceBuffer(tid), kPerThreadBufSize);
}
}
@@ -1882,53 +1880,51 @@ void Trace::LogMethodTraceEvent(Thread* thread,
// concurrently.
uintptr_t* method_trace_buffer = thread->GetMethodTraceBuffer();
- size_t* current_index = thread->GetMethodTraceIndexPtr();
+ uintptr_t** current_entry_ptr = thread->GetTraceBufferCurrEntryPtr();
// Initialize the buffer lazily. It's just simpler to keep the creation at one place.
if (method_trace_buffer == nullptr) {
method_trace_buffer = trace_writer_->AcquireTraceBuffer(thread->GetTid());
DCHECK(method_trace_buffer != nullptr);
- thread->SetMethodTraceBuffer(method_trace_buffer);
- *current_index = kPerThreadBufSize;
+ thread->SetMethodTraceBuffer(method_trace_buffer, kPerThreadBufSize);
trace_writer_->RecordThreadInfo(thread);
}
if (trace_writer_->HasOverflow()) {
// In non-streaming modes, we stop recording events once the buffer is full. Just reset the
// index, so we don't go to runtime for each method.
- *current_index = kPerThreadBufSize;
+ thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
return;
}
size_t required_entries = GetNumEntries(clock_source_);
- if (*current_index < required_entries) {
+ if (*current_entry_ptr - required_entries < method_trace_buffer) {
// This returns nullptr in non-streaming mode if there's an overflow and we cannot record any
// more entries. In streaming mode, it returns nullptr if it fails to allocate a new buffer.
method_trace_buffer = trace_writer_->PrepareBufferForNewEntries(thread);
if (method_trace_buffer == nullptr) {
- *current_index = kPerThreadBufSize;
+ thread->SetTraceBufferCurrentEntry(kPerThreadBufSize);
return;
}
}
+ *current_entry_ptr = *current_entry_ptr - required_entries;
// Record entry in per-thread trace buffer.
- // Update the offset
- int new_entry_index = *current_index - required_entries;
- *current_index = new_entry_index;
-
+ int entry_index = 0;
+ uintptr_t* current_entry = *current_entry_ptr;
// Ensure we always use the non-obsolete version of the method so that entry/exit events have the
// same pointer value.
method = method->GetNonObsoleteMethod();
- method_trace_buffer[new_entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
+ current_entry[entry_index++] = reinterpret_cast<uintptr_t>(method) | action;
if (UseThreadCpuClock(clock_source_)) {
- method_trace_buffer[new_entry_index++] = thread_clock_diff;
+ current_entry[entry_index++] = thread_clock_diff;
}
if (UseWallClock(clock_source_)) {
if (art::kRuntimePointerSize == PointerSize::k32) {
// On 32-bit architectures store timestamp counter as two 32-bit values.
- method_trace_buffer[new_entry_index++] = static_cast<uint32_t>(timestamp_counter);
- method_trace_buffer[new_entry_index++] = timestamp_counter >> 32;
+ current_entry[entry_index++] = static_cast<uint32_t>(timestamp_counter);
+ current_entry[entry_index++] = timestamp_counter >> 32;
} else {
- method_trace_buffer[new_entry_index++] = timestamp_counter;
+ current_entry[entry_index++] = timestamp_counter;
}
}
}
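Taken together, the rewritten LogMethodTraceEvent claims space by moving the cached pointer down once and then fills the claimed slots forward through entry_index. A simplified, 64-bit wall-clock-only sketch of that flow (names hypothetical):

  #include <cstddef>
  #include <cstdint>

  // Records one event, or reports that the caller must flush or acquire a new
  // buffer first. 64-bit layout: method word, then one 64-bit timestamp word.
  bool RecordEvent(uintptr_t* buffer, uintptr_t** curr_entry_ptr,
                   uintptr_t method_with_action, uint64_t timestamp) {
    constexpr size_t required_entries = 2;
    if (static_cast<size_t>(*curr_entry_ptr - buffer) < required_entries) {
      return false;  // PrepareBufferForNewEntries() in the real code
    }
    *curr_entry_ptr -= required_entries;  // claim space: pointer moves downward
    uintptr_t* entry = *curr_entry_ptr;
    entry[0] = method_with_action;        // method pointer | TraceAction bits
    entry[1] = static_cast<uintptr_t>(timestamp);
    return true;
  }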