Diffstat (limited to 'compiler/utils/assembler.h')
-rw-r--r--  compiler/utils/assembler.h  |  250
 1 file changed, 96 insertions(+), 154 deletions(-)
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index d97a2a40b2..8981776314 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -21,17 +21,21 @@
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "arm/constants_arm.h"
+#include "base/arena_allocator.h"
+#include "base/arena_object.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "arm/constants_arm.h"
+#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "label.h"
#include "managed_register.h"
#include "memory_region.h"
#include "mips/constants_mips.h"
#include "offsets.h"
+#include "utils/array_ref.h"
#include "x86/constants_x86.h"
#include "x86_64/constants_x86_64.h"
-#include "dwarf/debug_frame_opcode_writer.h"
namespace art {
@@ -60,7 +64,7 @@ class AssemblerFixup {
};
// Parent of all queued slow paths, emitted during finalization
-class SlowPath {
+class SlowPath : public DeletableArenaObject<kArenaAllocAssembler> {
public:
SlowPath() : next_(nullptr) {}
virtual ~SlowPath() {}
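With SlowPath now a DeletableArenaObject, queued slow paths are allocated on the assembler's arena rather than the native heap. A minimal sketch of the resulting allocation pattern, assuming a hypothetical MySlowPath subclass and AssemblerBuffer's EnqueueSlowPath hook (declared elsewhere in this header):

    // MySlowPath is a stand-in for a concrete slow path; not part of this change.
    class MySlowPath : public SlowPath {
     public:
      void Emit(Assembler* sp_asm) OVERRIDE { /* emit out-of-line code */ }
    };

    void QueueSlowPath(Assembler* assembler) {
      // DeletableArenaObject overloads operator new to take an ArenaAllocator,
      // so the slow path's storage is reclaimed with the arena, not via free().
      SlowPath* path = new (assembler->GetArena()) MySlowPath();
      assembler->GetBuffer()->EnqueueSlowPath(path);
    }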
@@ -85,9 +89,13 @@ class SlowPath {
class AssemblerBuffer {
public:
- AssemblerBuffer();
+ explicit AssemblerBuffer(ArenaAllocator* arena);
~AssemblerBuffer();
+ ArenaAllocator* GetArena() {
+ return arena_;
+ }
+
// Basic support for emitting, loading, and storing.
template<typename T> void Emit(T value) {
CHECK(HasEnsuredCapacity());
@@ -172,8 +180,8 @@ class AssemblerBuffer {
class EnsureCapacity {
public:
explicit EnsureCapacity(AssemblerBuffer* buffer) {
- if (buffer->cursor() >= buffer->limit()) {
- buffer->ExtendCapacity();
+ if (buffer->cursor() > buffer->limit()) {
+ buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
}
// In debug mode, we save the assembler buffer along with the gap
// size before we start emitting to the buffer. This allows us to
@@ -213,7 +221,9 @@ class AssemblerBuffer {
class EnsureCapacity {
public:
explicit EnsureCapacity(AssemblerBuffer* buffer) {
- if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
+ if (buffer->cursor() > buffer->limit()) {
+ buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
+ }
}
};
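Both EnsureCapacity variants implement the same contract: one check (and possible growth) per instruction, relying on the kMinimumGap slack defined below. A usage sketch of the RAII guard as an emitter would use it (EmitSomething is hypothetical):

    void EmitSomething(AssemblerBuffer* buffer, uint32_t encoding) {
      // A single capacity check covers the whole instruction; the guard grows
      // the buffer when fewer than kMinimumGap bytes remain, so the Emit<>
      // calls below need no per-write checks.
      AssemblerBuffer::EnsureCapacity ensured(buffer);
      buffer->Emit<uint32_t>(encoding);
    }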
@@ -227,12 +237,22 @@ class AssemblerBuffer {
// Returns the position in the instruction stream.
int GetPosition() { return cursor_ - contents_; }
+ size_t Capacity() const {
+ CHECK_GE(limit_, contents_);
+ return (limit_ - contents_) + kMinimumGap;
+ }
+
+ // Unconditionally increase the capacity.
+ // The provided `min_capacity` must be higher than current `Capacity()`.
+ void ExtendCapacity(size_t min_capacity);
+
private:
// The limit is set to kMinimumGap bytes before the end of the data area.
// This leaves enough space for the longest possible instruction and allows
// for a single, fast space check per instruction.
static const int kMinimumGap = 32;
+ ArenaAllocator* arena_;
uint8_t* contents_;
uint8_t* cursor_;
uint8_t* limit_;
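The growth policy itself lives in assembler.cc; a minimal sketch of what an arena-backed ExtendCapacity could look like, assuming ArenaAllocator::Realloc and geometric growth (the actual implementation may differ):

    void AssemblerBuffer::ExtendCapacity(size_t min_capacity) {
      CHECK_GT(min_capacity, Capacity());
      size_t old_size = Size();
      size_t old_capacity = Capacity();
      // Grow geometrically so repeated emits stay amortized O(1).
      size_t new_capacity = std::max(min_capacity, old_capacity * 2);
      // Move the contents to a larger arena block; the arena owns both allocations.
      contents_ = reinterpret_cast<uint8_t*>(
          arena_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler));
      cursor_ = contents_ + old_size;
      limit_ = ComputeLimit(contents_, new_capacity);
    }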
@@ -246,10 +266,6 @@ class AssemblerBuffer {
uint8_t* cursor() const { return cursor_; }
uint8_t* limit() const { return limit_; }
- size_t Capacity() const {
- CHECK_GE(limit_, contents_);
- return (limit_ - contents_) + kMinimumGap;
- }
// Process the fixup chain starting at the given fixup. The offset is
// non-zero for fixups in the body if the preamble is non-empty.
@@ -261,8 +277,6 @@ class AssemblerBuffer {
return data + capacity - kMinimumGap;
}
- void ExtendCapacity(size_t min_capacity = 0u);
-
friend class AssemblerFixup;
};
@@ -271,23 +285,77 @@ class AssemblerBuffer {
class DebugFrameOpCodeWriterForAssembler FINAL
: public dwarf::DebugFrameOpCodeWriter<> {
public:
+ struct DelayedAdvancePC {
+ uint32_t stream_pos;
+ uint32_t pc;
+ };
+
// This method is called by the opcode writers.
virtual void ImplicitlyAdvancePC() FINAL;
explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
- : dwarf::DebugFrameOpCodeWriter<>(),
- assembler_(buffer) {
+ : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
+ assembler_(buffer),
+ delay_emitting_advance_pc_(false),
+ delayed_advance_pcs_() {
+ }
+
+ ~DebugFrameOpCodeWriterForAssembler() {
+ DCHECK(delayed_advance_pcs_.empty());
+ }
+
+ // Tell the writer to delay emitting advance PC info.
+ // The assembler must explicitly process all the delayed advances.
+ void DelayEmittingAdvancePCs() {
+ delay_emitting_advance_pc_ = true;
+ }
+
+ // Override the last delayed PC. The new PC can be out of order.
+ void OverrideDelayedPC(size_t pc) {
+ DCHECK(delay_emitting_advance_pc_);
+ if (enabled_) {
+ DCHECK(!delayed_advance_pcs_.empty());
+ delayed_advance_pcs_.back().pc = pc;
+ }
+ }
+
+ // Return the number of delayed advance PC entries.
+ size_t NumberOfDelayedAdvancePCs() const {
+ return delayed_advance_pcs_.size();
+ }
+
+ // Release the CFI stream and the delayed advance-PC entries so that the assembler can patch them.
+ std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>>
+ ReleaseStreamAndPrepareForDelayedAdvancePC() {
+ DCHECK(delay_emitting_advance_pc_);
+ delay_emitting_advance_pc_ = false;
+ std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>> result;
+ result.first.swap(opcodes_);
+ result.second.swap(delayed_advance_pcs_);
+ return result;
+ }
+
+ // Reserve space for the CFI stream.
+ void ReserveCFIStream(size_t capacity) {
+ opcodes_.reserve(capacity);
+ }
+
+ // Append raw data to the CFI stream.
+ void AppendRawData(const std::vector<uint8_t>& raw_data, size_t first, size_t last) {
+ DCHECK_LE(0u, first);
+ DCHECK_LE(first, last);
+ DCHECK_LE(last, raw_data.size());
+ opcodes_.insert(opcodes_.end(), raw_data.begin() + first, raw_data.begin() + last);
}
private:
Assembler* assembler_;
+ bool delay_emitting_advance_pc_;
+ std::vector<DelayedAdvancePC> delayed_advance_pcs_;
};
-class Assembler {
+class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
public:
- static Assembler* Create(InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features = nullptr);
-
// Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }
@@ -303,140 +371,6 @@ class Assembler {
// TODO: Implement with disassembler.
virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}
- // Emit code that will create an activation on the stack
- virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
- const std::vector<ManagedRegister>& callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) = 0;
-
- // Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size,
- const std::vector<ManagedRegister>& callee_save_regs) = 0;
-
- virtual void IncreaseFrameSize(size_t adjust) = 0;
- virtual void DecreaseFrameSize(size_t adjust) = 0;
-
- // Store routines
- virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
- virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
- virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
-
- virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister scratch) = 0;
-
- virtual void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister scratch);
- virtual void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister scratch);
-
- virtual void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
- virtual void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
-
- virtual void StoreStackPointerToThread32(ThreadOffset<4> thr_offs);
- virtual void StoreStackPointerToThread64(ThreadOffset<8> thr_offs);
-
- virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
- FrameOffset in_off, ManagedRegister scratch) = 0;
-
- // Load routines
- virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
-
- virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
- virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);
-
- virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
- // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
- virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) = 0;
-
- virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
-
- virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs);
- virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs);
-
- // Copying routines
- virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
-
- virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
- ManagedRegister scratch);
- virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
- ManagedRegister scratch);
-
- virtual void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch);
- virtual void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch);
-
- virtual void CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister scratch) = 0;
-
- virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void MemoryBarrier(ManagedRegister scratch) = 0;
-
- // Sign extension
- virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;
-
- // Zero extension
- virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;
-
- // Exploit fast access in managed code to Thread::Current()
- virtual void GetCurrentThread(ManagedRegister tr) = 0;
- virtual void GetCurrentThread(FrameOffset dest_offset,
- ManagedRegister scratch) = 0;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) = 0;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) = 0;
-
- // src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src) = 0;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
- virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
-
- // Call to address held at [base+offset]
- virtual void Call(ManagedRegister base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void Call(FrameOffset base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch);
- virtual void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch);
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
-
virtual void Bind(Label* label) = 0;
virtual void Jump(Label* label) = 0;
@@ -448,8 +382,16 @@ class Assembler {
*/
DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }
+ ArenaAllocator* GetArena() {
+ return buffer_.GetArena();
+ }
+
+ AssemblerBuffer* GetBuffer() {
+ return &buffer_;
+ }
+
protected:
- Assembler() : buffer_(), cfi_(this) {}
+ explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
AssemblerBuffer buffer_;
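With the static Create factory gone from this class, callers construct a concrete backend against an explicit arena; a usage sketch assuming the x86 backend (any architecture-specific assembler taking an ArenaAllocator* would do):

    #include "base/arena_allocator.h"
    #include "utils/x86/assembler_x86.h"  // concrete backend, assumed for the example

    void AssembleSomething() {
      ArenaPool pool;
      ArenaAllocator arena(&pool);
      x86::X86Assembler assembler(&arena);  // buffer_ storage comes from `arena`
      // ... emit instructions ...
      assembler.FinalizeCode();
      // No explicit delete: buffer storage and slow paths die with the arena.
    }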