Emit CFI for the x86 & x86_64 JNI compilers.
With this change, host-side x86 & x86_64 ART can produce complete stack traces, even across mixed C/C++ & Java stack frames.
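
The unwind information is emitted through the helpers in utils/dwarf_cfi.h
(WriteFDEHeader, WriteFDEAddressRange, DW_CFA_advance_loc, DW_CFA_def_cfa_offset,
DW_CFA_offset). As a rough mental model only (this is a sketch of the standard
DWARF call-frame-instruction encodings such helpers are expected to append to
cfi_info_, not their actual implementation; the Sketch* and EncodeULEB128 names
are made up for illustration):

#include <cstdint>
#include <vector>

// ULEB128: DWARF's variable-length unsigned integer encoding.
static void EncodeULEB128(std::vector<uint8_t>* buf, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // more bytes follow
    buf->push_back(byte);
  } while (value != 0);
}

// DW_CFA_def_cfa_offset: opcode 0x0e, then the new CFA offset as ULEB128.
static void SketchDefCFAOffset(std::vector<uint8_t>* cfi, uint32_t offset) {
  cfi->push_back(0x0e);
  EncodeULEB128(cfi, offset);
}

// DW_CFA_offset: register number packed into the opcode (0x80 | reg), then
// the ULEB128 offset from the CFA in data-alignment-factor units.
static void SketchRegOffset(std::vector<uint8_t>* cfi, int reg, uint32_t factored_offset) {
  cfi->push_back(0x80 | static_cast<uint8_t>(reg));
  EncodeULEB128(cfi, factored_offset);
}

// DW_CFA_advance_loc: small code deltas are packed into the opcode itself
// (high two bits 0b01); larger deltas need DW_CFA_advance_loc1/2/4.
static void SketchAdvanceLoc(std::vector<uint8_t>* cfi, uint32_t delta) {
  if (delta < 0x40) {
    cfi->push_back(0x40 | static_cast<uint8_t>(delta));
  } else {
    cfi->push_back(0x02);                         // DW_CFA_advance_loc1
    cfi->push_back(static_cast<uint8_t>(delta));  // assumes delta < 256
  }
}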
Testing:
1. art/test/run-test --host --gdb [--64] --no-relocate 005
2. In gdb, run 'b art::Class_classForName' (the implementation of a Java native method), then 'r'.
3. In gdb, run 'bt'. You should see stack frames all the way down to main().
Change-Id: I2d17e9aa0f6d42d374b5362a15ea35a2fce96302
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index b6a5c20..48edb15 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -20,6 +20,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"
+#include "utils/dwarf_cfi.h"
namespace art {
namespace x86 {
@@ -1407,20 +1408,61 @@
EmitOperand(reg_or_opcode, Operand(operand));
}
+void X86Assembler::InitializeFrameDescriptionEntry() {
+ WriteFDEHeader(&cfi_info_);
+}
+
+void X86Assembler::FinalizeFrameDescriptionEntry() {
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ PadCFI(&cfi_info_);
+ WriteCFILength(&cfi_info_);
+}
+
constexpr size_t kFramePointerSize = 4;
void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& spill_regs,
const ManagedRegisterEntrySpills& entry_spills) {
+ cfi_cfa_offset_ = kFramePointerSize; // Only return address on stack
+ cfi_pc_ = buffer_.Size(); // Nothing emitted yet
+ DCHECK_EQ(cfi_pc_, 0U);
+
+ uint32_t reg_offset = 1;
CHECK_ALIGNED(frame_size, kStackAlignment);
for (int i = spill_regs.size() - 1; i >= 0; --i) {
pushl(spill_regs.at(i).AsX86().AsCpuRegister());
+
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+ // DW_CFA_offset reg offset
+ reg_offset++;
+ DW_CFA_offset(&cfi_info_, spill_regs.at(i).AsX86().DWARFRegId(), reg_offset);
}
+
// return address then method on stack
- addl(ESP, Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
- sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
- kFramePointerSize /*return address*/));
+ int32_t adjust = frame_size - (spill_regs.size() * kFramePointerSize) -
+ sizeof(StackReference<mirror::ArtMethod>) /*method*/ -
+ kFramePointerSize /*return address*/;
+ addl(ESP, Immediate(-adjust));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
pushl(method_reg.AsX86().AsCpuRegister());
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
for (size_t i = 0; i < entry_spills.size(); ++i) {
movl(Address(ESP, frame_size + sizeof(StackReference<mirror::ArtMethod>) +
(i * kFramePointerSize)),
@@ -1442,6 +1484,12 @@
void X86Assembler::IncreaseFrameSize(size_t adjust) {
CHECK_ALIGNED(adjust, kStackAlignment);
addl(ESP, Immediate(-adjust));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
}
void X86Assembler::DecreaseFrameSize(size_t adjust) {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index ce20768..5c4e34f 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -571,6 +571,12 @@
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void InitializeFrameDescriptionEntry() OVERRIDE;
+ void FinalizeFrameDescriptionEntry() OVERRIDE;
+ std::vector<uint8_t>* GetFrameDescriptionEntry() OVERRIDE {
+ return &cfi_info_;
+ }
+
private:
inline void EmitUint8(uint8_t value);
inline void EmitInt32(int32_t value);
@@ -589,6 +595,9 @@
void EmitGenericShift(int rm, Register reg, const Immediate& imm);
void EmitGenericShift(int rm, Register operand, Register shifter);
+ std::vector<uint8_t> cfi_info_;
+ uint32_t cfi_cfa_offset_, cfi_pc_;
+
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
};
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index 09d2b49..5d46ee2 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -88,6 +88,14 @@
// There is a one-to-one mapping between ManagedRegister and register id.
class X86ManagedRegister : public ManagedRegister {
public:
+ int DWARFRegId() const {
+ CHECK(IsCpuRegister());
+ // For all the X86 registers we care about:
+ // EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
+ // DWARF register id is the same as id_.
+ return static_cast<int>(id_);
+ }
+
ByteRegister AsByteRegister() const {
CHECK(IsCpuRegister());
CHECK_LT(AsCpuRegister(), ESP); // ESP, EBP, ESI and EDI cannot be encoded as byte registers.
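
For context, the new X86Assembler hooks are intended to be driven by the JNI
compiler in this order. The snippet below is a hypothetical caller shown only
to illustrate the call sequence; EmitStubWithCFI and its parameter names are
placeholders, and the real driver lives in ART's JNI compiler:

#include <cstddef>
#include <cstdint>
#include <vector>

#include "utils/x86/assembler_x86.h"  // include path rooted at compiler/, as in this patch

// Placeholder driver: illustrates the intended call order of the new FDE hooks.
std::vector<uint8_t>* EmitStubWithCFI(
    art::x86::X86Assembler* jni_asm,
    size_t frame_size,
    art::ManagedRegister method_reg,
    const std::vector<art::ManagedRegister>& spill_regs,
    const art::ManagedRegisterEntrySpills& entry_spills) {
  jni_asm->InitializeFrameDescriptionEntry();  // start the FDE (header)
  jni_asm->BuildFrame(frame_size, method_reg, spill_regs, entry_spills);
  // ... emit the body of the JNI stub ...
  jni_asm->FinalizeFrameDescriptionEntry();    // patch address range and total length
  return jni_asm->GetFrameDescriptionEntry();  // CFI bytes for this stub
}

FinalizeFrameDescriptionEntry() has to run after all code has been emitted,
since it records buffer_.Size() as the FDE's address range; the returned bytes
are then attached to the compiled stub so the unwinder (and gdb) can walk
through the JNI frame.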