Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                               1
-rw-r--r--  compiler/compiled_method.cc                       5
-rw-r--r--  compiler/compiled_method.h                        3
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h              1
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc                94
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h          1
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc           116
-rw-r--r--  compiler/dex/quick/codegen_util.cc                4
-rw-r--r--  compiler/dex/quick/mir_to_lir.h                   6
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h              2
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc            144
-rw-r--r--  compiler/elf_writer_quick.cc                     25
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                5
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        10
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     31
-rw-r--r--  compiler/utils/assembler.h                        4
-rw-r--r--  compiler/utils/dwarf_cfi.cc                     114
-rw-r--r--  compiler/utils/dwarf_cfi.h                       91
-rw-r--r--  compiler/utils/x86/assembler_x86.cc              54
-rw-r--r--  compiler/utils/x86/assembler_x86.h                9
-rw-r--r--  compiler/utils/x86/managed_register_x86.h         8
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc        39
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h          9
-rw-r--r--  compiler/utils/x86_64/managed_register_x86_64.h  15
24 files changed, 624 insertions(+), 167 deletions(-)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 7983040722..5c5163d401 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -107,6 +107,7 @@ LIBART_COMPILER_SRC_FILES := \
utils/arm64/assembler_arm64.cc \
utils/arm64/managed_register_arm64.cc \
utils/assembler.cc \
+ utils/dwarf_cfi.cc \
utils/mips/assembler_mips.cc \
utils/mips/managed_register_mips.cc \
utils/x86/assembler_x86.cc \
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f098a34ea7..f2a8d84731 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -165,14 +165,15 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
const std::vector<uint8_t>& code,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask)
+ const uint32_t fp_spill_mask,
+ const std::vector<uint8_t>* cfi_info)
: CompiledCode(driver, instruction_set, code),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())),
- cfi_info_(nullptr) {
+ cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
}
// Constructs a CompiledMethod for the Portable compiler.
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index b8cd851a1f..c98d06a01d 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -120,7 +120,8 @@ class CompiledMethod : public CompiledCode {
const std::vector<uint8_t>& quick_code,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask);
+ const uint32_t fp_spill_mask,
+ const std::vector<uint8_t>* cfi_info);
// Constructs a CompiledMethod for the Portable compiler.
CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set, const std::string& code,
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 072acbeaa7..cd6c9cc1e1 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -108,6 +108,7 @@ class ArmMir2Lir FINAL : public Mir2Lir {
bool GenInlinedSqrt(CallInfo* info);
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
+ bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 6711ab36a1..b9a17cceb9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -949,6 +949,100 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
return true;
}
+bool ArmMir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ constexpr int kLargeArrayThreshold = 256;
+
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_src_pos = info->args[1];
+ RegLocation rl_dst = info->args[2];
+ RegLocation rl_dst_pos = info->args[3];
+ RegLocation rl_length = info->args[4];
+ // Compile-time check: if any argument is a known negative constant, leave the
+ // exception to the non-inline method to reduce the related metadata.
+ if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
+ (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
+ (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
+ return false;
+ }
+
+ ClobberCallerSave();
+ LockCallTemps(); // Prepare for explicit register usage.
+ LockTemp(rs_r12);
+ RegStorage rs_src = rs_r0;
+ RegStorage rs_dst = rs_r1;
+ LoadValueDirectFixed(rl_src, rs_src);
+ LoadValueDirectFixed(rl_dst, rs_dst);
+
+ // Handle null pointer exception in slow-path.
+ LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
+ LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
+ // Handle potentially overlapping copies in the slow path.
+ LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
+ // Handle a negative (exception) or too-large length in the slow path;
+ // the unsigned compare below catches both.
+ RegStorage rs_length = rs_r2;
+ LoadValueDirectFixed(rl_length, rs_length);
+ LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
+ // Src bounds check.
+ RegStorage rs_pos = rs_r3;
+ RegStorage rs_arr_length = rs_r12;
+ LoadValueDirectFixed(rl_src_pos, rs_pos);
+ LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_pos, 0, nullptr);
+ Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_pos);
+ LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+ // Dst bounds check.
+ LoadValueDirectFixed(rl_dst_pos, rs_pos);
+ LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_pos, 0, nullptr);
+ Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_pos);
+ LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+
+ // Everything is checked now.
+ OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
+ OpRegReg(kOpAdd, rs_dst, rs_pos);
+ OpRegReg(kOpAdd, rs_dst, rs_pos);
+ OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
+ LoadValueDirectFixed(rl_src_pos, rs_pos);
+ OpRegReg(kOpAdd, rs_src, rs_pos);
+ OpRegReg(kOpAdd, rs_src, rs_pos);
+
+ RegStorage rs_tmp = rs_pos;
+ OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
+
+ // Copy one element.
+ OpRegRegImm(kOpAnd, rs_tmp, rs_length, 2);
+ LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 2);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
+
+ // Copy two elements.
+ LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
+ LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 4);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
+ OpUnconditionalBranch(begin_loop);
+
+ LIR *check_failed = NewLIR0(kPseudoTargetLabel);
+ LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
+ LIR* return_point = NewLIR0(kPseudoTargetLabel);
+
+ src_check_branch->target = check_failed;
+ dst_check_branch->target = check_failed;
+ src_dst_same->target = check_failed;
+ len_neg_or_too_big->target = check_failed;
+ src_pos_negative->target = check_failed;
+ src_bad_len->target = check_failed;
+ dst_pos_negative->target = check_failed;
+ dst_bad_len->target = check_failed;
+ jmp_to_begin_loop->target = begin_loop;
+ jmp_to_ret->target = return_point;
+
+ AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+
+ return true;
+}
+
LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
}
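
The fast path this intrinsic emits is easiest to follow as ordinary code. A rough C++ model of the generated sequence (illustrative only; the struct layout and names are stand-ins, not ART source):

    #include <cstdint>
    #include <cstring>

    struct CharArray { int32_t length; uint16_t data[1]; };  // stand-in layout

    // Returns false where the emitted code would branch to the slow path.
    bool FastCharArrayCopy(CharArray* src, int32_t src_pos,
                           CharArray* dst, int32_t dst_pos, int32_t len) {
      if (src == nullptr || dst == nullptr || src == dst) return false;
      // kCondHi is an unsigned compare, so a negative len also fails here.
      if (static_cast<uint32_t>(len) > 256u) return false;  // kLargeArrayThreshold
      if (src_pos < 0 || src->length - src_pos < len) return false;
      if (dst_pos < 0 || dst->length - dst_pos < len) return false;
      uint8_t* d = reinterpret_cast<uint8_t*>(dst->data) + 2 * dst_pos;
      uint8_t* s = reinterpret_cast<uint8_t*>(src->data) + 2 * src_pos;
      int32_t bytes = len << 1;        // rs_length is shifted to a byte count
      if (bytes & 2) {                 // copy one odd element first
        bytes -= 2;
        std::memcpy(d + bytes, s + bytes, 2);
      }
      while (bytes != 0) {             // then two elements (4 bytes) per step
        bytes -= 4;
        std::memcpy(d + bytes, s + bytes, 4);
      }
      return true;
    }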
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 2cd24c6874..3e1c18baf4 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -168,6 +168,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
+ bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 147fee8436..d00c57dee9 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -272,6 +272,7 @@ LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_
ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
}
+ // TODO: Use tbz/tbnz for < 0 or >= 0.
}
if (branch == nullptr) {
@@ -788,6 +789,121 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
return true;
}
+bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ constexpr int kLargeArrayThreshold = 512;
+
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_src_pos = info->args[1];
+ RegLocation rl_dst = info->args[2];
+ RegLocation rl_dst_pos = info->args[3];
+ RegLocation rl_length = info->args[4];
+ // Compile-time check: if any argument is a known negative constant, leave the
+ // exception to the non-inline method to reduce the related metadata.
+ if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
+ (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
+ (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
+ return false;
+ }
+
+ ClobberCallerSave();
+ LockCallTemps(); // Prepare for explicit register usage.
+ RegStorage rs_src = rs_x0;
+ RegStorage rs_dst = rs_x1;
+ LoadValueDirectFixed(rl_src, rs_src);
+ LoadValueDirectFixed(rl_dst, rs_dst);
+
+ // Handle null pointer exception in slow-path.
+ LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
+ LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
+ // Handle potentially overlapping copies in the slow path.
+ // TUNING: Support overlapping cases.
+ LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
+ // Handle a negative (exception) or too-large length in the slow path;
+ // the unsigned compare below catches both.
+ RegStorage rs_length = rs_w2;
+ LoadValueDirectFixed(rl_length, rs_length);
+ LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
+ // Src bounds check.
+ RegStorage rs_src_pos = rs_w3;
+ RegStorage rs_arr_length = rs_w4;
+ LoadValueDirectFixed(rl_src_pos, rs_src_pos);
+ LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_src_pos, 0, nullptr);
+ Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_src_pos);
+ LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+ // Dst bounds check.
+ RegStorage rs_dst_pos = rs_w5;
+ LoadValueDirectFixed(rl_dst_pos, rs_dst_pos);
+ LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_dst_pos, 0, nullptr);
+ Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_dst_pos);
+ LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+
+ // Everything is checked now.
+ // Set rs_src to the address of the first element to be copied.
+ rs_src_pos = As64BitReg(rs_src_pos);
+ OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
+ OpRegRegImm(kOpLsl, rs_src_pos, rs_src_pos, 1);
+ OpRegReg(kOpAdd, rs_src, rs_src_pos);
+ // Set rs_dst to the address of the first element to be copied.
+ rs_dst_pos = As64BitReg(rs_dst_pos);
+ OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
+ OpRegRegImm(kOpLsl, rs_dst_pos, rs_dst_pos, 1);
+ OpRegReg(kOpAdd, rs_dst, rs_dst_pos);
+
+ // rs_arr_length is not used anymore; reuse it as a temp.
+ RegStorage rs_tmp = rs_arr_length;
+ // Use 64-bit view since rs_length will be used as index.
+ rs_length = As64BitReg(rs_length);
+ OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
+
+ // Copy one element.
+ OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 2);
+ LIR* jmp_to_copy_two = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 2);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
+
+ // Copy two elements.
+ LIR *copy_two = NewLIR0(kPseudoTargetLabel);
+ OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 4);
+ LIR* jmp_to_copy_four = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 4);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
+
+ // Copy four elements.
+ LIR *copy_four = NewLIR0(kPseudoTargetLabel);
+ LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
+ LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
+ OpRegImm(kOpSub, rs_length, 8);
+ rs_tmp = As64BitReg(rs_tmp);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k64);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k64);
+ LIR* jmp_to_loop = OpCmpImmBranch(kCondNe, rs_length, 0, nullptr);
+ LIR* loop_finished = OpUnconditionalBranch(nullptr);
+
+ LIR *check_failed = NewLIR0(kPseudoTargetLabel);
+ LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
+ LIR* return_point = NewLIR0(kPseudoTargetLabel);
+
+ src_check_branch->target = check_failed;
+ dst_check_branch->target = check_failed;
+ src_dst_same->target = check_failed;
+ len_neg_or_too_big->target = check_failed;
+ src_pos_negative->target = check_failed;
+ src_bad_len->target = check_failed;
+ dst_pos_negative->target = check_failed;
+ dst_bad_len->target = check_failed;
+ jmp_to_copy_two->target = copy_two;
+ jmp_to_copy_four->target = copy_four;
+ jmp_to_ret->target = return_point;
+ jmp_to_loop->target = begin_loop;
+ loop_finished->target = return_point;
+
+ AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+
+ return true;
+}
+
LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
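
The ARM64 copy sequence extends the ARM one by a third tier: after peeling a 2-byte and then a 4-byte residue off the byte count, the main loop moves 8 bytes (four chars) per iteration. A sketch of that ladder, operating on the already-validated byte count (illustrative, not ART source):

    // bytes = len * 2, known here to be >= 0 and <= 1024 (threshold 512 chars).
    static void CopyCharsArm64Style(uint8_t* dst, const uint8_t* src, int32_t bytes) {
      if (bytes & 2) { bytes -= 2; std::memcpy(dst + bytes, src + bytes, 2); }  // one char
      if (bytes & 4) { bytes -= 4; std::memcpy(dst + bytes, src + bytes, 4); }  // two chars
      while (bytes != 0) {                                                      // four chars
        bytes -= 8;
        std::memcpy(dst + bytes, src + bytes, 8);
      }
    }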
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 511297c713..be79b63931 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1085,7 +1085,7 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
vmap_encoder.PushBackUnsigned(0u); // Size is 0.
}
- std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnCallFrameInformation());
+ std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnFrameDescriptionEntry());
CompiledMethod* result =
new CompiledMethod(cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
@@ -1250,7 +1250,7 @@ void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
AppendLIR(load_pc_rel);
}
-std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
+std::vector<uint8_t>* Mir2Lir::ReturnFrameDescriptionEntry() {
// Default case is to do nothing.
return nullptr;
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 2221bb5407..4b8f794e1e 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1528,10 +1528,10 @@ class Mir2Lir : public Backend {
uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
/*
- * @brief Generate the debug_frame FDE information if possible.
- * @returns pointer to vector containg CFE information, or NULL.
+ * @brief Generate the eh_frame FDE information if possible.
+ * @returns pointer to vector containing FDE information, or NULL.
*/
- virtual std::vector<uint8_t>* ReturnCallFrameInformation();
+ virtual std::vector<uint8_t>* ReturnFrameDescriptionEntry();
/**
* @brief Used to insert marker that can be used to associate MIR with LIR.
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index d3ed48d07e..24a3fe3656 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -360,7 +360,7 @@ class X86Mir2Lir : public Mir2Lir {
* @brief Generate the debug_frame FDE information.
* @returns pointer to vector containing CFE information
*/
- std::vector<uint8_t>* ReturnCallFrameInformation() OVERRIDE;
+ std::vector<uint8_t>* ReturnFrameDescriptionEntry() OVERRIDE;
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 69f3e67513..fb68f45b64 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -24,6 +24,7 @@
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"
+#include "utils/dwarf_cfi.h"
namespace art {
@@ -880,9 +881,13 @@ RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
// First load the pointer in fs:[suspend-trigger] into eax
// Then use a test instruction to indirect via that address.
- NewLIR2(kX86Mov32RT, rs_rAX.GetReg(), cu_->target64 ?
- Thread::ThreadSuspendTriggerOffset<8>().Int32Value() :
- Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
+ if (cu_->target64) {
+ NewLIR2(kX86Mov64RT, rs_rAX.GetReg(),
+ Thread::ThreadSuspendTriggerOffset<8>().Int32Value());
+ } else {
+ NewLIR2(kX86Mov32RT, rs_rAX.GetReg(),
+ Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
+ }
return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
}
@@ -1009,19 +1014,6 @@ LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, Invok
return call;
}
-/*
- * @brief Enter a 32 bit quantity into a buffer
- * @param buf buffer.
- * @param data Data value.
- */
-
-static void PushWord(std::vector<uint8_t>&buf, int32_t data) {
- buf.push_back(data & 0xff);
- buf.push_back((data >> 8) & 0xff);
- buf.push_back((data >> 16) & 0xff);
- buf.push_back((data >> 24) & 0xff);
-}
-
void X86Mir2Lir::InstallLiteralPools() {
// These are handled differently for x86.
DCHECK(code_literal_list_ == nullptr);
@@ -1042,10 +1034,10 @@ void X86Mir2Lir::InstallLiteralPools() {
align_size--;
}
for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
- PushWord(code_buffer_, p->operands[0]);
- PushWord(code_buffer_, p->operands[1]);
- PushWord(code_buffer_, p->operands[2]);
- PushWord(code_buffer_, p->operands[3]);
+ PushWord(&code_buffer_, p->operands[0]);
+ PushWord(&code_buffer_, p->operands[1]);
+ PushWord(&code_buffer_, p->operands[2]);
+ PushWord(&code_buffer_, p->operands[3]);
}
}
@@ -1293,14 +1285,14 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Compute the number of words to search in to rCX.
Load32Disp(rs_rDX, count_offset, rs_rCX);
- if (!cu_->target64) {
- // Possible signal here due to null pointer dereference.
- // Note that the signal handler will expect the top word of
- // the stack to be the ArtMethod*. If the PUSH edi instruction
- // below is ahead of the load above then this will not be true
- // and the signal handler will not work.
- MarkPossibleNullPointerException(0);
+ // Possible signal here due to null pointer dereference.
+ // Note that the signal handler will expect the top word of
+ // the stack to be the ArtMethod*. If the PUSH edi instruction
+ // below is ahead of the load above then this will not be true
+ // and the signal handler will not work.
+ MarkPossibleNullPointerException(0);
+ if (!cu_->target64) {
// EDI is callee-save register in 32-bit mode.
NewLIR1(kX86Push32R, rs_rDI.GetReg());
}
@@ -1418,47 +1410,6 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
return true;
}
-/*
- * @brief Enter an 'advance LOC' into the FDE buffer
- * @param buf FDE buffer.
- * @param increment Amount by which to increase the current location.
- */
-static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) {
- if (increment < 64) {
- // Encoding in opcode.
- buf.push_back(0x1 << 6 | increment);
- } else if (increment < 256) {
- // Single byte delta.
- buf.push_back(0x02);
- buf.push_back(increment);
- } else if (increment < 256 * 256) {
- // Two byte delta.
- buf.push_back(0x03);
- buf.push_back(increment & 0xff);
- buf.push_back((increment >> 8) & 0xff);
- } else {
- // Four byte delta.
- buf.push_back(0x04);
- PushWord(buf, increment);
- }
-}
-
-static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
- uint8_t buffer[12];
- uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
- for (uint8_t *p = buffer; p < ptr; p++) {
- buf.push_back(*p);
- }
-}
-
-static void EncodeSignedLeb128(std::vector<uint8_t>& buf, int32_t value) {
- uint8_t buffer[12];
- uint8_t *ptr = EncodeSignedLeb128(buffer, value);
- for (uint8_t *p = buffer; p < ptr; p++) {
- buf.push_back(*p);
- }
-}
-
static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_id) {
if (is_x86_64) {
switch (art_reg_id) {
@@ -1481,36 +1432,23 @@ static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_
}
}
-std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
- std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
+std::vector<uint8_t>* X86Mir2Lir::ReturnFrameDescriptionEntry() {
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
// Generate the FDE for the method.
DCHECK_NE(data_offset_, 0U);
- // Length (will be filled in later in this routine).
- PushWord(*cfi_info, 0);
-
- // 'CIE_pointer' (filled in by linker).
- PushWord(*cfi_info, 0);
-
- // 'initial_location' (filled in by linker).
- PushWord(*cfi_info, 0);
-
- // 'address_range' (number of bytes in the method).
- PushWord(*cfi_info, data_offset_);
-
- // Augmentation length: 0
- cfi_info->push_back(0);
+ WriteFDEHeader(cfi_info);
+ WriteFDEAddressRange(cfi_info, data_offset_);
// The instructions in the FDE.
if (stack_decrement_ != nullptr) {
// Advance LOC to just past the stack decrement.
uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
- AdvanceLoc(*cfi_info, pc);
+ DW_CFA_advance_loc(cfi_info, pc);
// Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
- cfi_info->push_back(0x0e);
- EncodeUnsignedLeb128(*cfi_info, frame_size_);
+ DW_CFA_def_cfa_offset(cfi_info, frame_size_);
// Handle register spills
const uint32_t kSpillInstLen = (cu_->target64) ? 5 : 4;
@@ -1522,14 +1460,12 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
pc += kSpillInstLen;
// Advance LOC to pass this instruction
- AdvanceLoc(*cfi_info, kSpillInstLen);
+ DW_CFA_advance_loc(cfi_info, kSpillInstLen);
int dwarf_reg_id;
if (ARTRegIDToDWARFRegID(cu_->target64, reg, &dwarf_reg_id)) {
- // DW_CFA_offset_extended_sf reg_no offset
- cfi_info->push_back(0x11);
- EncodeUnsignedLeb128(*cfi_info, dwarf_reg_id);
- EncodeSignedLeb128(*cfi_info, offset / kDataAlignmentFactor);
+ // DW_CFA_offset_extended_sf reg offset
+ DW_CFA_offset_extended_sf(cfi_info, dwarf_reg_id, offset / kDataAlignmentFactor);
}
offset += GetInstructionSetPointerSize(cu_->instruction_set);
@@ -1539,16 +1475,15 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
// We continue with that stack until the epilogue.
if (stack_increment_ != nullptr) {
uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
- AdvanceLoc(*cfi_info, new_pc - pc);
+ DW_CFA_advance_loc(cfi_info, new_pc - pc);
// We probably have code snippets after the epilogue, so save the
// current state: DW_CFA_remember_state.
- cfi_info->push_back(0x0a);
+ DW_CFA_remember_state(cfi_info);
// We have now popped the stack: DW_CFA_def_cfa_offset 4/8.
// There is only the return PC on the stack now.
- cfi_info->push_back(0x0e);
- EncodeUnsignedLeb128(*cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
+ DW_CFA_def_cfa_offset(cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
// Everything after that is the same as before the epilogue.
// Stack bump was followed by RET instruction.
@@ -1556,25 +1491,16 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
if (post_ret_insn != nullptr) {
pc = new_pc;
new_pc = post_ret_insn->offset;
- AdvanceLoc(*cfi_info, new_pc - pc);
+ DW_CFA_advance_loc(cfi_info, new_pc - pc);
// Restore the state: DW_CFA_restore_state.
- cfi_info->push_back(0x0b);
+ DW_CFA_restore_state(cfi_info);
}
}
}
- // Padding to a multiple of 4
- while ((cfi_info->size() & 3) != 0) {
- // DW_CFA_nop is encoded as 0.
- cfi_info->push_back(0);
- }
+ PadCFI(cfi_info);
+ WriteCFILength(cfi_info);
- // Set the length of the FDE inside the generated bytes.
- uint32_t length = cfi_info->size() - 4;
- (*cfi_info)[0] = length;
- (*cfi_info)[1] = length >> 8;
- (*cfi_info)[2] = length >> 16;
- (*cfi_info)[3] = length >> 24;
return cfi_info;
}
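
Read together, the new helpers give ReturnFrameDescriptionEntry a fixed shape: emit the header, patch in the address range, append the CFA program, pad, then back-patch the length. A condensed sketch of that shape (parameter names are illustrative):

    std::vector<uint8_t>* BuildMethodFde(uint32_t code_size,
                                         uint32_t prologue_end_pc,
                                         uint32_t frame_size) {
      std::vector<uint8_t>* fde = new std::vector<uint8_t>;
      WriteFDEHeader(fde);                     // length/CIE_pointer/initial_location = 0
      WriteFDEAddressRange(fde, code_size);    // patches the 'address_range' word
      DW_CFA_advance_loc(fde, prologue_end_pc);
      DW_CFA_def_cfa_offset(fde, frame_size);  // CFA = SP + frame_size
      PadCFI(fde);                             // DW_CFA_nop (0x00) to a 4-byte boundary
      WriteCFILength(fde);                     // 'length' excludes the length word itself
      return fde;
    }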
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 12e9401107..bb5f7e0f9d 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -46,13 +46,6 @@ static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
(*buf)[offset+3] = data >> 24;
}
-static void PushWord(std::vector<uint8_t>* buf, int data) {
- buf->push_back(data & 0xff);
- buf->push_back((data >> 8) & 0xff);
- buf->push_back((data >> 16) & 0xff);
- buf->push_back((data >> 24) & 0xff);
-}
-
static void PushHalf(std::vector<uint8_t>* buf, int data) {
buf->push_back(data & 0xff);
buf->push_back((data >> 8) & 0xff);
@@ -842,24 +835,6 @@ void ElfWriterQuick::ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug)
}
}
-static void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
- size_t encoded_size = UnsignedLeb128Size(data);
- size_t cur_index = dst->size();
- dst->resize(dst->size() + encoded_size);
- uint8_t* write_pos = &((*dst)[cur_index]);
- uint8_t* write_pos_after = EncodeUnsignedLeb128(write_pos, data);
- DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
-}
-
-static void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* dst) {
- size_t encoded_size = SignedLeb128Size(data);
- size_t cur_index = dst->size();
- dst->resize(dst->size() + encoded_size);
- uint8_t* write_pos = &((*dst)[cur_index]);
- uint8_t* write_pos_after = EncodeSignedLeb128(write_pos, data);
- DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
-}
-
std::vector<uint8_t>* ConstructCIEFrameX86(bool is_x86_64) {
std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index c38cfaf746..1a35da071c 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -90,6 +90,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// Assembler that holds generated instructions
std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set));
+ jni_asm->InitializeFrameDescriptionEntry();
// Offsets into data structures
// TODO: if cross compiling these offsets are for the host not the target
@@ -432,12 +433,14 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
std::vector<uint8_t> managed_code(cs);
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
+ jni_asm->FinalizeFrameDescriptionEntry();
return new CompiledMethod(driver,
instruction_set,
managed_code,
frame_size,
main_jni_conv->CoreSpillMask(),
- main_jni_conv->FpSpillMask());
+ main_jni_conv->FpSpillMask(),
+ jni_asm->GetFrameDescriptionEntry());
}
// Copy a single parameter from the managed to the JNI calling convention
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 2264638110..ab53b17636 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -15,16 +15,16 @@
*/
#include "code_generator_x86.h"
-#include "gc/accounting/card_table.h"
-#include "utils/assembler.h"
-#include "utils/stack_checks.h"
-#include "utils/x86/assembler_x86.h"
-#include "utils/x86/managed_register_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
#include "thread.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+#include "utils/x86/assembler_x86.h"
+#include "utils/x86/managed_register_x86.h"
namespace art {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2ff2a1710e..e4259f51b4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -35,7 +35,7 @@ x86_64::X86_64ManagedRegister Location::AsX86_64() const {
namespace x86_64 {
-static constexpr bool kExplicitStackOverflowCheck = true;
+static constexpr bool kExplicitStackOverflowCheck = false;
// Some x86_64 instructions require a register to be available as temp.
static constexpr Register TMP = R11;
@@ -208,25 +208,26 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
static const int kFakeReturnRegister = 16;
core_spill_mask_ |= (1 << kFakeReturnRegister);
+ bool skip_overflow_check = IsLeafMethod()
+ && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86_64);
+
+ if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
+ __ testq(CpuRegister(RAX), Address(
+ CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+ RecordPcInfo(0);
+ }
+
// The return PC has already been pushed on the stack.
__ subq(CpuRegister(RSP),
Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
- bool skip_overflow_check = IsLeafMethod()
- && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86_64);
-
- if (!skip_overflow_check) {
- if (kExplicitStackOverflowCheck) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
- AddSlowPath(slow_path);
+ if (!skip_overflow_check && kExplicitStackOverflowCheck) {
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
+ AddSlowPath(slow_path);
- __ gs()->cmpq(CpuRegister(RSP),
- Address::Absolute(Thread::StackEndOffset<kX86_64WordSize>(), true));
- __ j(kLess, slow_path->GetEntryLabel());
- } else {
- __ testq(CpuRegister(RAX), Address(
- CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
- }
+ __ gs()->cmpq(CpuRegister(RSP),
+ Address::Absolute(Thread::StackEndOffset<kX86_64WordSize>(), true));
+ __ j(kLess, slow_path->GetEntryLabel());
}
__ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
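
With kExplicitStackOverflowCheck now false, the default prologue performs the implicit test before the frame is claimed. A minimal model of the resulting entry order, condensed from the hunk above (not a verbatim quote):

    // 1. Probe: read GetStackOverflowReservedBytes(kX86_64) below the incoming
    //    RSP. On an exhausted stack this faults in the guard region, and
    //    RecordPcInfo(0) lets the runtime attribute the fault to this method.
    __ testq(CpuRegister(RAX), Address(
        CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
    RecordPcInfo(0);
    // 2. Only then allocate the frame (the return PC is already pushed).
    __ subq(CpuRegister(RSP),
            Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));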
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index f72f5e55ed..4addfa0946 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -499,6 +499,10 @@ class Assembler {
// and branch to a ExceptionSlowPath if it is.
virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+ virtual void InitializeFrameDescriptionEntry() {}
+ virtual void FinalizeFrameDescriptionEntry() {}
+ virtual std::vector<uint8_t>* GetFrameDescriptionEntry() { return nullptr; }
+
virtual ~Assembler() {}
protected:
diff --git a/compiler/utils/dwarf_cfi.cc b/compiler/utils/dwarf_cfi.cc
new file mode 100644
index 0000000000..b3d1a47b80
--- /dev/null
+++ b/compiler/utils/dwarf_cfi.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "leb128.h"
+#include "utils.h"
+
+#include "dwarf_cfi.h"
+
+namespace art {
+
+void DW_CFA_advance_loc(std::vector<uint8_t>* buf, uint32_t increment) {
+ if (increment < 64) {
+ // Encoding in opcode.
+ buf->push_back(0x1 << 6 | increment);
+ } else if (increment < 256) {
+ // Single byte delta.
+ buf->push_back(0x02);
+ buf->push_back(increment);
+ } else if (increment < 256 * 256) {
+ // Two byte delta.
+ buf->push_back(0x03);
+ buf->push_back(increment & 0xff);
+ buf->push_back((increment >> 8) & 0xff);
+ } else {
+ // Four byte delta.
+ buf->push_back(0x04);
+ PushWord(buf, increment);
+ }
+}
+
+void DW_CFA_offset_extended_sf(std::vector<uint8_t>* buf, int reg, int32_t offset) {
+ buf->push_back(0x11);
+ EncodeUnsignedLeb128(reg, buf);
+ EncodeSignedLeb128(offset, buf);
+}
+
+void DW_CFA_offset(std::vector<uint8_t>* buf, int reg, uint32_t offset) {
+ buf->push_back((0x2 << 6) | reg);
+ EncodeUnsignedLeb128(offset, buf);
+}
+
+void DW_CFA_def_cfa_offset(std::vector<uint8_t>* buf, int32_t offset) {
+ buf->push_back(0x0e);
+ EncodeUnsignedLeb128(offset, buf);
+}
+
+void DW_CFA_remember_state(std::vector<uint8_t>* buf) {
+ buf->push_back(0x0a);
+}
+
+void DW_CFA_restore_state(std::vector<uint8_t>* buf) {
+ buf->push_back(0x0b);
+}
+
+void WriteFDEHeader(std::vector<uint8_t>* buf) {
+ // 'length' (filled in by other functions).
+ PushWord(buf, 0);
+
+ // 'CIE_pointer' (filled in by linker).
+ PushWord(buf, 0);
+
+ // 'initial_location' (filled in by linker).
+ PushWord(buf, 0);
+
+ // 'address_range' (filled in by other functions).
+ PushWord(buf, 0);
+
+ // Augmentation length: 0
+ buf->push_back(0);
+}
+
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data) {
+ const int kOffsetOfAddressRange = 12;
+ CHECK(buf->size() >= kOffsetOfAddressRange + sizeof(uint32_t));
+
+ uint8_t *p = buf->data() + kOffsetOfAddressRange;
+ p[0] = data;
+ p[1] = data >> 8;
+ p[2] = data >> 16;
+ p[3] = data >> 24;
+}
+
+void WriteCFILength(std::vector<uint8_t>* buf) {
+ uint32_t length = buf->size() - 4;
+ DCHECK_EQ((length & 0x3), 0U);
+ DCHECK_GT(length, 4U);
+
+ uint8_t *p = buf->data();
+ p[0] = length;
+ p[1] = length >> 8;
+ p[2] = length >> 16;
+ p[3] = length >> 24;
+}
+
+void PadCFI(std::vector<uint8_t>* buf) {
+ while (buf->size() & 0x3) {
+ buf->push_back(0);
+ }
+}
+
+} // namespace art
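
A quick concrete check on these encodings — the expected bytes follow directly from the functions above and the standard DWARF call-frame opcode values:

    std::vector<uint8_t> buf;
    DW_CFA_advance_loc(&buf, 5);      // 0x45        -- 0x40 | delta (delta < 64)
    DW_CFA_advance_loc(&buf, 100);    // 0x02 0x64   -- one-byte delta form
    DW_CFA_def_cfa_offset(&buf, 16);  // 0x0e 0x10   -- opcode + ULEB128 offset
    DW_CFA_offset(&buf, 3, 2);        // 0x83 0x02   -- (0x2 << 6) | reg, ULEB128
    DW_CFA_remember_state(&buf);      // 0x0a
    DW_CFA_restore_state(&buf);       // 0x0b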
diff --git a/compiler/utils/dwarf_cfi.h b/compiler/utils/dwarf_cfi.h
new file mode 100644
index 0000000000..e5acc0eb3a
--- /dev/null
+++ b/compiler/utils/dwarf_cfi.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_DWARF_CFI_H_
+#define ART_COMPILER_UTILS_DWARF_CFI_H_
+
+#include <vector>
+
+namespace art {
+
+/**
+ * @brief Enter a 'DW_CFA_advance_loc' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param increment Amount by which to increase the current location.
+ */
+void DW_CFA_advance_loc(std::vector<uint8_t>* buf, uint32_t increment);
+
+/**
+ * @brief Enter a 'DW_CFA_offset_extended_sf' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param reg Register number.
+ * @param offset Offset of register address from CFA.
+ */
+void DW_CFA_offset_extended_sf(std::vector<uint8_t>* buf, int reg, int32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_offset' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param reg Register number.
+ * @param offset Offset of register address from CFA.
+ */
+void DW_CFA_offset(std::vector<uint8_t>* buf, int reg, uint32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_def_cfa_offset' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param offset New offset of CFA.
+ */
+void DW_CFA_def_cfa_offset(std::vector<uint8_t>* buf, int32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_remember_state' into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void DW_CFA_remember_state(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Enter a 'DW_CFA_restore_state' into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void DW_CFA_restore_state(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Write FDE header into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void WriteFDEHeader(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Set 'address_range' field of an FDE buffer
+ * @param buf FDE buffer.
+ */
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data);
+
+/**
+ * @brief Set 'length' field of an FDE buffer
+ * @param buf FDE buffer.
+ */
+void WriteCFILength(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Pad an FDE buffer with 0 until its size is a multiple of 4
+ * @param buf FDE buffer.
+ */
+void PadCFI(std::vector<uint8_t>* buf);
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_DWARF_CFI_H_
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index b6a5c20cb8..48edb157f6 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -20,6 +20,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"
+#include "utils/dwarf_cfi.h"
namespace art {
namespace x86 {
@@ -1407,20 +1408,61 @@ void X86Assembler::EmitGenericShift(int reg_or_opcode,
EmitOperand(reg_or_opcode, Operand(operand));
}
+void X86Assembler::InitializeFrameDescriptionEntry() {
+ WriteFDEHeader(&cfi_info_);
+}
+
+void X86Assembler::FinalizeFrameDescriptionEntry() {
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ PadCFI(&cfi_info_);
+ WriteCFILength(&cfi_info_);
+}
+
constexpr size_t kFramePointerSize = 4;
void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& spill_regs,
const ManagedRegisterEntrySpills& entry_spills) {
+ cfi_cfa_offset_ = kFramePointerSize; // Only return address on stack
+ cfi_pc_ = buffer_.Size(); // Nothing emitted yet
+ DCHECK_EQ(cfi_pc_, 0U);
+
+ uint32_t reg_offset = 1;
CHECK_ALIGNED(frame_size, kStackAlignment);
for (int i = spill_regs.size() - 1; i >= 0; --i) {
pushl(spill_regs.at(i).AsX86().AsCpuRegister());
+
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+ // DW_CFA_offset reg offset
+ reg_offset++;
+ DW_CFA_offset(&cfi_info_, spill_regs.at(i).AsX86().DWARFRegId(), reg_offset);
}
+
// return address then method on stack
- addl(ESP, Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
- sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
- kFramePointerSize /*return address*/));
+ int32_t adjust = frame_size - (spill_regs.size() * kFramePointerSize) -
+ sizeof(StackReference<mirror::ArtMethod>) /*method*/ -
+ kFramePointerSize /*return address*/;
+ addl(ESP, Immediate(-adjust));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
pushl(method_reg.AsX86().AsCpuRegister());
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
for (size_t i = 0; i < entry_spills.size(); ++i) {
movl(Address(ESP, frame_size + sizeof(StackReference<mirror::ArtMethod>) +
(i * kFramePointerSize)),
@@ -1442,6 +1484,12 @@ void X86Assembler::RemoveFrame(size_t frame_size,
void X86Assembler::IncreaseFrameSize(size_t adjust) {
CHECK_ALIGNED(adjust, kStackAlignment);
addl(ESP, Immediate(-adjust));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
}
void X86Assembler::DecreaseFrameSize(size_t adjust) {
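
The bookkeeping pattern repeated above — advance the location by the bytes just emitted, then restate the CFA offset — yields a CFA program like the following for a frame with two spills (a worked trace; instruction boundaries are illustrative, and cfi_cfa_offset_ starts at 4 because only the return PC is on the stack):

    // pushl <spill0>  -> DW_CFA_advance_loc; def_cfa_offset 8;  DW_CFA_offset(reg0, 2)
    // pushl <spill1>  -> DW_CFA_advance_loc; def_cfa_offset 12; DW_CFA_offset(reg1, 3)
    // addl $-a, %esp  -> DW_CFA_advance_loc; def_cfa_offset 12 + a
    // pushl method    -> DW_CFA_advance_loc; def_cfa_offset 16 + a
    //
    // With the CIE's (negative) data alignment factor, DW_CFA_offset(reg, n)
    // places the saved register at CFA - 4 * n, i.e. just below the slots
    // already accounted for (offset 1 being the return address).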
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index ce20768c26..5c4e34fc8b 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -571,6 +571,12 @@ class X86Assembler FINAL : public Assembler {
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void InitializeFrameDescriptionEntry() OVERRIDE;
+ void FinalizeFrameDescriptionEntry() OVERRIDE;
+ std::vector<uint8_t>* GetFrameDescriptionEntry() OVERRIDE {
+ return &cfi_info_;
+ }
+
private:
inline void EmitUint8(uint8_t value);
inline void EmitInt32(int32_t value);
@@ -589,6 +595,9 @@ class X86Assembler FINAL : public Assembler {
void EmitGenericShift(int rm, Register reg, const Immediate& imm);
void EmitGenericShift(int rm, Register operand, Register shifter);
+ std::vector<uint8_t> cfi_info_;
+ uint32_t cfi_cfa_offset_, cfi_pc_;
+
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
};
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index 09d2b4919d..5d46ee25cd 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -88,6 +88,14 @@ const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
// There is a one-to-one mapping between ManagedRegister and register id.
class X86ManagedRegister : public ManagedRegister {
public:
+ int DWARFRegId() const {
+ CHECK(IsCpuRegister());
+ // For all the X86 registers we care about:
+ // EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
+ // DWARF register id is the same as id_.
+ return static_cast<int>(id_);
+ }
+
ByteRegister AsByteRegister() const {
CHECK(IsCpuRegister());
CHECK_LT(AsCpuRegister(), ESP); // ESP, EBP, ESI and EDI cannot be encoded as byte registers.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 76842711dd..62b72c234a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -20,6 +20,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"
+#include "utils/dwarf_cfi.h"
namespace art {
namespace x86_64 {
@@ -1714,11 +1715,26 @@ void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const
}
}
+void X86_64Assembler::InitializeFrameDescriptionEntry() {
+ WriteFDEHeader(&cfi_info_);
+}
+
+void X86_64Assembler::FinalizeFrameDescriptionEntry() {
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ PadCFI(&cfi_info_);
+ WriteCFILength(&cfi_info_);
+}
+
constexpr size_t kFramePointerSize = 8;
void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& spill_regs,
const ManagedRegisterEntrySpills& entry_spills) {
+ cfi_cfa_offset_ = kFramePointerSize; // Only return address on stack
+ cfi_pc_ = buffer_.Size(); // Nothing emitted yet
+ DCHECK_EQ(cfi_pc_, 0U);
+
+ uint32_t reg_offset = 1;
CHECK_ALIGNED(frame_size, kStackAlignment);
int gpr_count = 0;
for (int i = spill_regs.size() - 1; i >= 0; --i) {
@@ -1726,6 +1742,16 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
if (spill.IsCpuRegister()) {
pushq(spill.AsCpuRegister());
gpr_count++;
+
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+ // DW_CFA_offset reg offset
+ reg_offset++;
+ DW_CFA_offset(&cfi_info_, spill.DWARFRegId(), reg_offset);
}
}
// return address then method on stack
@@ -1733,6 +1759,13 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
- (gpr_count * kFramePointerSize)
- kFramePointerSize /*return address*/;
subq(CpuRegister(RSP), Immediate(rest_of_frame));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += rest_of_frame;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
// spill xmms
int64_t offset = rest_of_frame;
for (int i = spill_regs.size() - 1; i >= 0; --i) {
@@ -1796,6 +1829,12 @@ void X86_64Assembler::RemoveFrame(size_t frame_size,
void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
CHECK_ALIGNED(adjust, kStackAlignment);
addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
}
void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 2f814dfaf4..ee1157520f 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -614,6 +614,12 @@ class X86_64Assembler FINAL : public Assembler {
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void InitializeFrameDescriptionEntry() OVERRIDE;
+ void FinalizeFrameDescriptionEntry() OVERRIDE;
+ std::vector<uint8_t>* GetFrameDescriptionEntry() OVERRIDE {
+ return &cfi_info_;
+ }
+
private:
void EmitUint8(uint8_t value);
void EmitInt32(int32_t value);
@@ -655,6 +661,9 @@ class X86_64Assembler FINAL : public Assembler {
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
+ std::vector<uint8_t> cfi_info_;
+ uint32_t cfi_cfa_offset_, cfi_pc_;
+
DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
};
diff --git a/compiler/utils/x86_64/managed_register_x86_64.h b/compiler/utils/x86_64/managed_register_x86_64.h
index 822659fffc..3a96ad0b51 100644
--- a/compiler/utils/x86_64/managed_register_x86_64.h
+++ b/compiler/utils/x86_64/managed_register_x86_64.h
@@ -87,6 +87,21 @@ const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
// There is a one-to-one mapping between ManagedRegister and register id.
class X86_64ManagedRegister : public ManagedRegister {
public:
+ int DWARFRegId() const {
+ CHECK(IsCpuRegister());
+ switch (id_) {
+ case RAX: return 0;
+ case RDX: return 1;
+ case RCX: return 2;
+ case RBX: return 3;
+ case RSI: return 4;
+ case RDI: return 5;
+ case RBP: return 6;
+ case RSP: return 7;
+ default: return static_cast<int>(id_); // R8 ~ R15
+ }
+ }
+
CpuRegister AsCpuRegister() const {
CHECK(IsCpuRegister());
return CpuRegister(static_cast<Register>(id_));
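
The switch above encodes the System V AMD64 DWARF register numbering, which swaps RDX/RCX (and reorders RSI/RDI, RBP/RSP) relative to the instruction-encoding numbers, while R8-R15 keep their ids. A hypothetical spot-check (test-style code, not part of this change):

    // SysV AMD64 psABI DWARF numbers: RAX=0, RDX=1, RCX=2, RBX=3,
    // RSI=4, RDI=5, RBP=6, RSP=7, R8..R15 = 8..15.
    CHECK_EQ(X86_64ManagedRegister::FromCpuRegister(RDX).DWARFRegId(), 1);
    CHECK_EQ(X86_64ManagedRegister::FromCpuRegister(RCX).DWARFRegId(), 2);
    CHECK_EQ(X86_64ManagedRegister::FromCpuRegister(R11).DWARFRegId(), 11);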