Diffstat (limited to 'compiler/utils')
-rw-r--r--   compiler/utils/arm/assembler_arm.cc                 2
-rw-r--r--   compiler/utils/arm/assembler_arm.h                  7
-rw-r--r--   compiler/utils/arm/assembler_arm32.h                3
-rw-r--r--   compiler/utils/arm/assembler_thumb2.cc              4
-rw-r--r--   compiler/utils/arm/assembler_thumb2.h              27
-rw-r--r--   compiler/utils/arm64/assembler_arm64.cc             6
-rw-r--r--   compiler/utils/arm64/assembler_arm64.h              8
-rw-r--r--   compiler/utils/assembler.cc                        55
-rw-r--r--   compiler/utils/assembler.h                         25
-rw-r--r--   compiler/utils/assembler_test.h                     7
-rw-r--r--   compiler/utils/assembler_thumb_test.cc            204
-rw-r--r--   compiler/utils/mips/assembler_mips.h                6
-rw-r--r--   compiler/utils/mips64/assembler_mips64.h            5
-rw-r--r--   compiler/utils/x86/assembler_x86.cc                 4
-rw-r--r--   compiler/utils/x86/assembler_x86.h                 13
-rw-r--r--   compiler/utils/x86/assembler_x86_test.cc            5
-rw-r--r--   compiler/utils/x86_64/assembler_x86_64.cc           4
-rw-r--r--   compiler/utils/x86_64/assembler_x86_64.h           12
-rw-r--r--   compiler/utils/x86_64/assembler_x86_64_test.cc      4
19 files changed, 174 insertions, 227 deletions
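The diffs below move every assembler and its AssemblerBuffer onto an ArenaAllocator: Assembler::Create() gains an ArenaAllocator* parameter and returns a std::unique_ptr<Assembler>, the concrete assemblers are placement-new'ed into the same arena, and internal containers become ArenaVector/ArenaDeque. The C++ sketch below shows the calling pattern those new signatures imply; the BuildCode() wrapper and its surrounding context are illustrative only and are not part of the patch.

```cpp
#include <memory>

#include "base/arena_allocator.h"
#include "utils/assembler.h"

namespace art {

// Hypothetical caller, not part of this change: shows how an assembler is
// obtained once Create() takes an ArenaAllocator and returns a unique_ptr.
void BuildCode(ArenaPool* pool) {
  // All assembler memory (code buffer, fixups, literals, slow paths) now
  // comes out of this arena and is released when it goes out of scope.
  ArenaAllocator arena(pool);

  // Create() placement-news the concrete assembler (e.g. Thumb2Assembler)
  // into the arena and hands ownership back through a unique_ptr.
  std::unique_ptr<Assembler> assembler = Assembler::Create(&arena, kThumb2);

  // ... emit instructions through the concrete assembler ...

  assembler->FinalizeCode();  // Emits queued slow paths, resolves fixups.
}

}  // namespace art
```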
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index dead8fd9a8..e5f91dc8ca 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -845,7 +845,7 @@ void ArmAssembler::GetCurrentThread(FrameOffset offset, void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) { ArmManagedRegister scratch = mscratch.AsArm(); - ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust); + ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust); buffer_.EnqueueSlowPath(slow); LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), TR, Thread::ExceptionOffset<4>().Int32Value()); diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h index a894565425..ffbe786bf4 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -20,6 +20,8 @@ #include <type_traits> #include <vector> +#include "base/arena_allocator.h" +#include "base/arena_containers.h" #include "base/bit_utils.h" #include "base/logging.h" #include "base/stl_util.h" @@ -1078,6 +1080,9 @@ class ArmAssembler : public Assembler { } protected: + explicit ArmAssembler(ArenaAllocator* arena) + : Assembler(arena), tracked_labels_(arena->Adapter(kArenaAllocAssembler)) {} + // Returns whether or not the given register is used for passing parameters. static int RegisterCompare(const Register* reg1, const Register* reg2) { return *reg1 - *reg2; @@ -1086,7 +1091,7 @@ class ArmAssembler : public Assembler { void FinalizeTrackedLabels(); // Tracked labels. Use a vector, as we need to sort before adjusting. - std::vector<Label*> tracked_labels_; + ArenaVector<Label*> tracked_labels_; }; // Slowpath entered when Thread::Current()->_exception is non-null diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h index e3e05caf92..bc6020e008 100644 --- a/compiler/utils/arm/assembler_arm32.h +++ b/compiler/utils/arm/assembler_arm32.h @@ -30,8 +30,7 @@ namespace arm { class Arm32Assembler FINAL : public ArmAssembler { public: - Arm32Assembler() { - } + explicit Arm32Assembler(ArenaAllocator* arena) : ArmAssembler(arena) {} virtual ~Arm32Assembler() {} bool IsThumb() const OVERRIDE { diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index 15298b390b..26f7d0dfcb 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -59,8 +59,8 @@ void Thumb2Assembler::Fixup::PrepareDependents(Thumb2Assembler* assembler) { return; } // Create and fill in the fixup_dependents_. 
- assembler->fixup_dependents_.reset(new FixupId[number_of_dependents]); - FixupId* dependents = assembler->fixup_dependents_.get(); + assembler->fixup_dependents_.resize(number_of_dependents); + FixupId* dependents = assembler->fixup_dependents_.data(); for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) { uint32_t target = fixups[fixup_id].target_; if (target > fixups[fixup_id].location_) { diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h index 6b61acafac..111a6b09d7 100644 --- a/compiler/utils/arm/assembler_thumb2.h +++ b/compiler/utils/arm/assembler_thumb2.h @@ -21,6 +21,7 @@ #include <utility> #include <vector> +#include "base/arena_containers.h" #include "base/logging.h" #include "constants_arm.h" #include "utils/arm/managed_register_arm.h" @@ -33,14 +34,16 @@ namespace arm { class Thumb2Assembler FINAL : public ArmAssembler { public: - explicit Thumb2Assembler(bool can_relocate_branches = true) - : can_relocate_branches_(can_relocate_branches), + explicit Thumb2Assembler(ArenaAllocator* arena, bool can_relocate_branches = true) + : ArmAssembler(arena), + can_relocate_branches_(can_relocate_branches), force_32bit_(false), it_cond_index_(kNoItCondition), next_condition_(AL), - fixups_(), - fixup_dependents_(), - literals_(), + fixups_(arena->Adapter(kArenaAllocAssembler)), + fixup_dependents_(arena->Adapter(kArenaAllocAssembler)), + literals_(arena->Adapter(kArenaAllocAssembler)), + jump_tables_(arena->Adapter(kArenaAllocAssembler)), last_position_adjustment_(0u), last_old_position_(0u), last_fixup_id_(0u) { @@ -558,9 +561,9 @@ class Thumb2Assembler FINAL : public ArmAssembler { // Prepare the assembler->fixup_dependents_ and each Fixup's dependents_start_/count_. static void PrepareDependents(Thumb2Assembler* assembler); - ArrayRef<FixupId> Dependents(const Thumb2Assembler& assembler) const { - return ArrayRef<FixupId>(assembler.fixup_dependents_.get() + dependents_start_, - dependents_count_); + ArrayRef<const FixupId> Dependents(const Thumb2Assembler& assembler) const { + return ArrayRef<const FixupId>(assembler.fixup_dependents_).SubArray(dependents_start_, + dependents_count_); } // Resolve a branch when the target is known. @@ -839,15 +842,15 @@ class Thumb2Assembler FINAL : public ArmAssembler { static int16_t AdrEncoding16(Register rd, int32_t offset); static int32_t AdrEncoding32(Register rd, int32_t offset); - std::vector<Fixup> fixups_; - std::unique_ptr<FixupId[]> fixup_dependents_; + ArenaVector<Fixup> fixups_; + ArenaVector<FixupId> fixup_dependents_; // Use std::deque<> for literal labels to allow insertions at the end // without invalidating pointers and references to existing elements. - std::deque<Literal> literals_; + ArenaDeque<Literal> literals_; // Jump table list. - std::deque<JumpTable> jump_tables_; + ArenaDeque<JumpTable> jump_tables_; // Data for AdjustedPosition(), see the description there. 
uint32_t last_position_adjustment_; diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc index 0e17512041..eb851f9534 100644 --- a/compiler/utils/arm64/assembler_arm64.cc +++ b/compiler/utils/arm64/assembler_arm64.cc @@ -32,10 +32,8 @@ namespace arm64 { #endif void Arm64Assembler::FinalizeCode() { - if (!exception_blocks_.empty()) { - for (size_t i = 0; i < exception_blocks_.size(); i++) { - EmitExceptionPoll(exception_blocks_.at(i)); - } + for (Arm64Exception* exception : exception_blocks_) { + EmitExceptionPoll(exception); } ___ FinalizeCode(); } diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h index 7b25b8f3f5..03ae996952 100644 --- a/compiler/utils/arm64/assembler_arm64.h +++ b/compiler/utils/arm64/assembler_arm64.h @@ -21,6 +21,7 @@ #include <memory> #include <vector> +#include "base/arena_containers.h" #include "base/logging.h" #include "constants_arm64.h" #include "utils/arm64/managed_register_arm64.h" @@ -67,7 +68,10 @@ class Arm64Assembler FINAL : public Assembler { public: // We indicate the size of the initial code generation buffer to the VIXL // assembler. From there we it will automatically manage the buffer. - Arm64Assembler() : vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {} + explicit Arm64Assembler(ArenaAllocator* arena) + : Assembler(arena), + exception_blocks_(arena->Adapter(kArenaAllocAssembler)), + vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {} virtual ~Arm64Assembler() { delete vixl_masm_; @@ -249,7 +253,7 @@ class Arm64Assembler FINAL : public Assembler { void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al); // List of exception blocks to generate at the end of the code cache. - std::vector<Arm64Exception*> exception_blocks_; + ArenaVector<Arm64Exception*> exception_blocks_; public: // Vixl assembler. diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc index f784d2c3f8..c2aa574f76 100644 --- a/compiler/utils/assembler.cc +++ b/compiler/utils/assembler.cc @@ -44,14 +44,10 @@ namespace art { -static uint8_t* NewContents(size_t capacity) { - return new uint8_t[capacity]; -} - - -AssemblerBuffer::AssemblerBuffer() { +AssemblerBuffer::AssemblerBuffer(ArenaAllocator* arena) + : arena_(arena) { static const size_t kInitialBufferCapacity = 4 * KB; - contents_ = NewContents(kInitialBufferCapacity); + contents_ = arena_->AllocArray<uint8_t>(kInitialBufferCapacity); cursor_ = contents_; limit_ = ComputeLimit(contents_, kInitialBufferCapacity); fixup_ = nullptr; @@ -68,7 +64,9 @@ AssemblerBuffer::AssemblerBuffer() { AssemblerBuffer::~AssemblerBuffer() { - delete[] contents_; + if (arena_->IsRunningOnMemoryTool()) { + arena_->MakeInaccessible(contents_, Capacity()); + } } @@ -100,19 +98,12 @@ void AssemblerBuffer::ExtendCapacity(size_t min_capacity) { new_capacity = std::max(new_capacity, min_capacity); // Allocate the new data area and copy contents of the old one to it. - uint8_t* new_contents = NewContents(new_capacity); - memmove(reinterpret_cast<void*>(new_contents), - reinterpret_cast<void*>(contents_), - old_size); - - // Compute the relocation delta and switch to the new contents area. - ptrdiff_t delta = new_contents - contents_; - delete[] contents_; - contents_ = new_contents; + contents_ = reinterpret_cast<uint8_t*>( + arena_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler)); // Update the cursor and recompute the limit. 
- cursor_ += delta; - limit_ = ComputeLimit(new_contents, new_capacity); + cursor_ = contents_ + old_size; + limit_ = ComputeLimit(contents_, new_capacity); // Verify internal state. CHECK_EQ(Capacity(), new_capacity); @@ -129,36 +120,40 @@ void DebugFrameOpCodeWriterForAssembler::ImplicitlyAdvancePC() { } } -Assembler* Assembler::Create(InstructionSet instruction_set, - const InstructionSetFeatures* instruction_set_features) { +std::unique_ptr<Assembler> Assembler::Create( + ArenaAllocator* arena, + InstructionSet instruction_set, + const InstructionSetFeatures* instruction_set_features) { switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm case kArm: - return new arm::Arm32Assembler(); + return std::unique_ptr<Assembler>(new (arena) arm::Arm32Assembler(arena)); case kThumb2: - return new arm::Thumb2Assembler(); + return std::unique_ptr<Assembler>(new (arena) arm::Thumb2Assembler(arena)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: - return new arm64::Arm64Assembler(); + return std::unique_ptr<Assembler>(new (arena) arm64::Arm64Assembler(arena)); #endif #ifdef ART_ENABLE_CODEGEN_mips case kMips: - return new mips::MipsAssembler(instruction_set_features != nullptr - ? instruction_set_features->AsMipsInstructionSetFeatures() - : nullptr); + return std::unique_ptr<Assembler>(new (arena) mips::MipsAssembler( + arena, + instruction_set_features != nullptr + ? instruction_set_features->AsMipsInstructionSetFeatures() + : nullptr)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: - return new mips64::Mips64Assembler(); + return std::unique_ptr<Assembler>(new (arena) mips64::Mips64Assembler(arena)); #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: - return new x86::X86Assembler(); + return std::unique_ptr<Assembler>(new (arena) x86::X86Assembler(arena)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: - return new x86_64::X86_64Assembler(); + return std::unique_ptr<Assembler>(new (arena) x86_64::X86_64Assembler(arena)); #endif default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h index 414ea7e660..4ea85a2c18 100644 --- a/compiler/utils/assembler.h +++ b/compiler/utils/assembler.h @@ -22,6 +22,8 @@ #include "arch/instruction_set.h" #include "arch/instruction_set_features.h" #include "arm/constants_arm.h" +#include "base/arena_allocator.h" +#include "base/arena_object.h" #include "base/logging.h" #include "base/macros.h" #include "debug/dwarf/debug_frame_opcode_writer.h" @@ -60,7 +62,7 @@ class AssemblerFixup { }; // Parent of all queued slow paths, emitted during finalization -class SlowPath { +class SlowPath : public DeletableArenaObject<kArenaAllocAssembler> { public: SlowPath() : next_(nullptr) {} virtual ~SlowPath() {} @@ -85,9 +87,13 @@ class SlowPath { class AssemblerBuffer { public: - AssemblerBuffer(); + explicit AssemblerBuffer(ArenaAllocator* arena); ~AssemblerBuffer(); + ArenaAllocator* GetArena() { + return arena_; + } + // Basic support for emitting, loading, and storing. template<typename T> void Emit(T value) { CHECK(HasEnsuredCapacity()); @@ -235,6 +241,7 @@ class AssemblerBuffer { // for a single, fast space check per instruction. 
static const int kMinimumGap = 32; + ArenaAllocator* arena_; uint8_t* contents_; uint8_t* cursor_; uint8_t* limit_; @@ -338,10 +345,12 @@ class DebugFrameOpCodeWriterForAssembler FINAL std::vector<DelayedAdvancePC> delayed_advance_pcs_; }; -class Assembler { +class Assembler : public DeletableArenaObject<kArenaAllocAssembler> { public: - static Assembler* Create(InstructionSet instruction_set, - const InstructionSetFeatures* instruction_set_features = nullptr); + static std::unique_ptr<Assembler> Create( + ArenaAllocator* arena, + InstructionSet instruction_set, + const InstructionSetFeatures* instruction_set_features = nullptr); // Finalize the code; emit slow paths, fixup branches, add literal pool, etc. virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); } @@ -504,7 +513,11 @@ class Assembler { DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; } protected: - Assembler() : buffer_(), cfi_(this) {} + explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {} + + ArenaAllocator* GetArena() { + return buffer_.GetArena(); + } AssemblerBuffer buffer_; diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h index 2579ddb52e..084e9011ba 100644 --- a/compiler/utils/assembler_test.h +++ b/compiler/utils/assembler_test.h @@ -460,7 +460,8 @@ class AssemblerTest : public testing::Test { explicit AssemblerTest() {} void SetUp() OVERRIDE { - assembler_.reset(new Ass()); + arena_.reset(new ArenaAllocator(&pool_)); + assembler_.reset(new (arena_.get()) Ass(arena_.get())); test_helper_.reset( new AssemblerTestInfrastructure(GetArchitectureString(), GetAssemblerCmdName(), @@ -476,6 +477,8 @@ class AssemblerTest : public testing::Test { void TearDown() OVERRIDE { test_helper_.reset(); // Clean up the helper. + assembler_.reset(); + arena_.reset(); } // Override this to set up any architecture-specific things, e.g., register vectors. @@ -919,6 +922,8 @@ class AssemblerTest : public testing::Test { static constexpr size_t kWarnManyCombinationsThreshold = 500; + ArenaPool pool_; + std::unique_ptr<ArenaAllocator> arena_; std::unique_ptr<Ass> assembler_; std::unique_ptr<AssemblerTestInfrastructure> test_helper_; diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc index 2df9b177bf..c67cb5a563 100644 --- a/compiler/utils/assembler_thumb_test.cc +++ b/compiler/utils/assembler_thumb_test.cc @@ -195,11 +195,18 @@ void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) { #undef __ -#define __ assembler. +class Thumb2AssemblerTest : public ::testing::Test { + public: + Thumb2AssemblerTest() : pool(), arena(&pool), assembler(&arena) { } -TEST(Thumb2AssemblerTest, SimpleMov) { + ArenaPool pool; + ArenaAllocator arena; arm::Thumb2Assembler assembler; +}; + +#define __ assembler. 
+TEST_F(Thumb2AssemblerTest, SimpleMov) { __ movs(R0, ShifterOperand(R1)); __ mov(R0, ShifterOperand(R1)); __ mov(R8, ShifterOperand(R9)); @@ -210,8 +217,7 @@ TEST(Thumb2AssemblerTest, SimpleMov) { EmitAndCheck(&assembler, "SimpleMov"); } -TEST(Thumb2AssemblerTest, SimpleMov32) { - arm::Thumb2Assembler assembler; +TEST_F(Thumb2AssemblerTest, SimpleMov32) { __ Force32Bit(); __ mov(R0, ShifterOperand(R1)); @@ -220,9 +226,7 @@ TEST(Thumb2AssemblerTest, SimpleMov32) { EmitAndCheck(&assembler, "SimpleMov32"); } -TEST(Thumb2AssemblerTest, SimpleMovAdd) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, SimpleMovAdd) { __ mov(R0, ShifterOperand(R1)); __ adds(R0, R1, ShifterOperand(R2)); __ add(R0, R1, ShifterOperand(0)); @@ -230,9 +234,7 @@ TEST(Thumb2AssemblerTest, SimpleMovAdd) { EmitAndCheck(&assembler, "SimpleMovAdd"); } -TEST(Thumb2AssemblerTest, DataProcessingRegister) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, DataProcessingRegister) { // 32 bit variants using low registers. __ mvn(R0, ShifterOperand(R1), AL, kCcKeep); __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep); @@ -364,9 +366,7 @@ TEST(Thumb2AssemblerTest, DataProcessingRegister) { EmitAndCheck(&assembler, "DataProcessingRegister"); } -TEST(Thumb2AssemblerTest, DataProcessingImmediate) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, DataProcessingImmediate) { __ mov(R0, ShifterOperand(0x55)); __ mvn(R0, ShifterOperand(0x55)); __ add(R0, R1, ShifterOperand(0x55)); @@ -397,9 +397,7 @@ TEST(Thumb2AssemblerTest, DataProcessingImmediate) { EmitAndCheck(&assembler, "DataProcessingImmediate"); } -TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediate) { __ mov(R0, ShifterOperand(0x550055)); __ mvn(R0, ShifterOperand(0x550055)); __ add(R0, R1, ShifterOperand(0x550055)); @@ -422,9 +420,7 @@ TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) { } -TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediates) { __ mov(R0, ShifterOperand(0x550055)); __ mov(R0, ShifterOperand(0x55005500)); __ mov(R0, ShifterOperand(0x55555555)); @@ -436,9 +432,7 @@ TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) { EmitAndCheck(&assembler, "DataProcessingModifiedImmediates"); } -TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, DataProcessingShiftedRegister) { // 16-bit variants. __ movs(R3, ShifterOperand(R4, LSL, 4)); __ movs(R3, ShifterOperand(R4, LSR, 5)); @@ -467,10 +461,9 @@ TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) { EmitAndCheck(&assembler, "DataProcessingShiftedRegister"); } -TEST(Thumb2AssemblerTest, ShiftImmediate) { +TEST_F(Thumb2AssemblerTest, ShiftImmediate) { // Note: This test produces the same results as DataProcessingShiftedRegister // but it does so using shift functions instead of mov(). - arm::Thumb2Assembler assembler; // 16-bit variants. 
__ Lsl(R3, R4, 4); @@ -500,9 +493,7 @@ TEST(Thumb2AssemblerTest, ShiftImmediate) { EmitAndCheck(&assembler, "ShiftImmediate"); } -TEST(Thumb2AssemblerTest, BasicLoad) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, BasicLoad) { __ ldr(R3, Address(R4, 24)); __ ldrb(R3, Address(R4, 24)); __ ldrh(R3, Address(R4, 24)); @@ -522,9 +513,7 @@ TEST(Thumb2AssemblerTest, BasicLoad) { } -TEST(Thumb2AssemblerTest, BasicStore) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, BasicStore) { __ str(R3, Address(R4, 24)); __ strb(R3, Address(R4, 24)); __ strh(R3, Address(R4, 24)); @@ -539,9 +528,7 @@ TEST(Thumb2AssemblerTest, BasicStore) { EmitAndCheck(&assembler, "BasicStore"); } -TEST(Thumb2AssemblerTest, ComplexLoad) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, ComplexLoad) { __ ldr(R3, Address(R4, 24, Address::Mode::Offset)); __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex)); __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex)); @@ -581,9 +568,7 @@ TEST(Thumb2AssemblerTest, ComplexLoad) { } -TEST(Thumb2AssemblerTest, ComplexStore) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, ComplexStore) { __ str(R3, Address(R4, 24, Address::Mode::Offset)); __ str(R3, Address(R4, 24, Address::Mode::PreIndex)); __ str(R3, Address(R4, 24, Address::Mode::PostIndex)); @@ -608,9 +593,7 @@ TEST(Thumb2AssemblerTest, ComplexStore) { EmitAndCheck(&assembler, "ComplexStore"); } -TEST(Thumb2AssemblerTest, NegativeLoadStore) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, NegativeLoadStore) { __ ldr(R3, Address(R4, -24, Address::Mode::Offset)); __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex)); __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex)); @@ -670,18 +653,14 @@ TEST(Thumb2AssemblerTest, NegativeLoadStore) { EmitAndCheck(&assembler, "NegativeLoadStore"); } -TEST(Thumb2AssemblerTest, SimpleLoadStoreDual) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, SimpleLoadStoreDual) { __ strd(R2, Address(R0, 24, Address::Mode::Offset)); __ ldrd(R2, Address(R0, 24, Address::Mode::Offset)); EmitAndCheck(&assembler, "SimpleLoadStoreDual"); } -TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, ComplexLoadStoreDual) { __ strd(R2, Address(R0, 24, Address::Mode::Offset)); __ strd(R2, Address(R0, 24, Address::Mode::PreIndex)); __ strd(R2, Address(R0, 24, Address::Mode::PostIndex)); @@ -699,9 +678,7 @@ TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) { EmitAndCheck(&assembler, "ComplexLoadStoreDual"); } -TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, NegativeLoadStoreDual) { __ strd(R2, Address(R0, -24, Address::Mode::Offset)); __ strd(R2, Address(R0, -24, Address::Mode::PreIndex)); __ strd(R2, Address(R0, -24, Address::Mode::PostIndex)); @@ -719,9 +696,7 @@ TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) { EmitAndCheck(&assembler, "NegativeLoadStoreDual"); } -TEST(Thumb2AssemblerTest, SimpleBranch) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, SimpleBranch) { Label l1; __ mov(R0, ShifterOperand(2)); __ Bind(&l1); @@ -757,8 +732,7 @@ TEST(Thumb2AssemblerTest, SimpleBranch) { EmitAndCheck(&assembler, "SimpleBranch"); } -TEST(Thumb2AssemblerTest, LongBranch) { - arm::Thumb2Assembler assembler; +TEST_F(Thumb2AssemblerTest, LongBranch) { __ Force32Bit(); // 32 bit branches. 
Label l1; @@ -797,9 +771,7 @@ TEST(Thumb2AssemblerTest, LongBranch) { EmitAndCheck(&assembler, "LongBranch"); } -TEST(Thumb2AssemblerTest, LoadMultiple) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, LoadMultiple) { // 16 bit. __ ldm(DB_W, R4, (1 << R0 | 1 << R3)); @@ -813,9 +785,7 @@ TEST(Thumb2AssemblerTest, LoadMultiple) { EmitAndCheck(&assembler, "LoadMultiple"); } -TEST(Thumb2AssemblerTest, StoreMultiple) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, StoreMultiple) { // 16 bit. __ stm(IA_W, R4, (1 << R0 | 1 << R3)); @@ -830,9 +800,7 @@ TEST(Thumb2AssemblerTest, StoreMultiple) { EmitAndCheck(&assembler, "StoreMultiple"); } -TEST(Thumb2AssemblerTest, MovWMovT) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, MovWMovT) { // Always 32 bit. __ movw(R4, 0); __ movw(R4, 0x34); @@ -848,9 +816,7 @@ TEST(Thumb2AssemblerTest, MovWMovT) { EmitAndCheck(&assembler, "MovWMovT"); } -TEST(Thumb2AssemblerTest, SpecialAddSub) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, SpecialAddSub) { __ add(R2, SP, ShifterOperand(0x50)); // 16 bit. __ add(SP, SP, ShifterOperand(0x50)); // 16 bit. __ add(R8, SP, ShifterOperand(0x50)); // 32 bit. @@ -869,9 +835,7 @@ TEST(Thumb2AssemblerTest, SpecialAddSub) { EmitAndCheck(&assembler, "SpecialAddSub"); } -TEST(Thumb2AssemblerTest, LoadFromOffset) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, LoadFromOffset) { __ LoadFromOffset(kLoadWord, R2, R4, 12); __ LoadFromOffset(kLoadWord, R2, R4, 0xfff); __ LoadFromOffset(kLoadWord, R2, R4, 0x1000); @@ -901,9 +865,7 @@ TEST(Thumb2AssemblerTest, LoadFromOffset) { EmitAndCheck(&assembler, "LoadFromOffset"); } -TEST(Thumb2AssemblerTest, StoreToOffset) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, StoreToOffset) { __ StoreToOffset(kStoreWord, R2, R4, 12); __ StoreToOffset(kStoreWord, R2, R4, 0xfff); __ StoreToOffset(kStoreWord, R2, R4, 0x1000); @@ -931,9 +893,7 @@ TEST(Thumb2AssemblerTest, StoreToOffset) { EmitAndCheck(&assembler, "StoreToOffset"); } -TEST(Thumb2AssemblerTest, IfThen) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, IfThen) { __ it(EQ); __ mov(R1, ShifterOperand(1), EQ); @@ -964,9 +924,7 @@ TEST(Thumb2AssemblerTest, IfThen) { EmitAndCheck(&assembler, "IfThen"); } -TEST(Thumb2AssemblerTest, CbzCbnz) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, CbzCbnz) { Label l1; __ cbz(R2, &l1); __ mov(R1, ShifterOperand(3)); @@ -984,9 +942,7 @@ TEST(Thumb2AssemblerTest, CbzCbnz) { EmitAndCheck(&assembler, "CbzCbnz"); } -TEST(Thumb2AssemblerTest, Multiply) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, Multiply) { __ mul(R0, R1, R0); __ mul(R0, R1, R2); __ mul(R8, R9, R8); @@ -1004,9 +960,7 @@ TEST(Thumb2AssemblerTest, Multiply) { EmitAndCheck(&assembler, "Multiply"); } -TEST(Thumb2AssemblerTest, Divide) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, Divide) { __ sdiv(R0, R1, R2); __ sdiv(R8, R9, R10); @@ -1016,9 +970,7 @@ TEST(Thumb2AssemblerTest, Divide) { EmitAndCheck(&assembler, "Divide"); } -TEST(Thumb2AssemblerTest, VMov) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, VMov) { __ vmovs(S1, 1.0); __ vmovd(D1, 1.0); @@ -1029,9 +981,7 @@ TEST(Thumb2AssemblerTest, VMov) { } -TEST(Thumb2AssemblerTest, BasicFloatingPoint) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, BasicFloatingPoint) { __ vadds(S0, S1, S2); __ vsubs(S0, S1, S2); __ vmuls(S0, S1, S2); @@ -1055,9 +1005,7 @@ 
TEST(Thumb2AssemblerTest, BasicFloatingPoint) { EmitAndCheck(&assembler, "BasicFloatingPoint"); } -TEST(Thumb2AssemblerTest, FloatingPointConversions) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, FloatingPointConversions) { __ vcvtsd(S2, D2); __ vcvtds(D2, S2); @@ -1076,9 +1024,7 @@ TEST(Thumb2AssemblerTest, FloatingPointConversions) { EmitAndCheck(&assembler, "FloatingPointConversions"); } -TEST(Thumb2AssemblerTest, FloatingPointComparisons) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, FloatingPointComparisons) { __ vcmps(S0, S1); __ vcmpd(D0, D1); @@ -1088,35 +1034,27 @@ TEST(Thumb2AssemblerTest, FloatingPointComparisons) { EmitAndCheck(&assembler, "FloatingPointComparisons"); } -TEST(Thumb2AssemblerTest, Calls) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, Calls) { __ blx(LR); __ bx(LR); EmitAndCheck(&assembler, "Calls"); } -TEST(Thumb2AssemblerTest, Breakpoint) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, Breakpoint) { __ bkpt(0); EmitAndCheck(&assembler, "Breakpoint"); } -TEST(Thumb2AssemblerTest, StrR1) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, StrR1) { __ str(R1, Address(SP, 68)); __ str(R1, Address(SP, 1068)); EmitAndCheck(&assembler, "StrR1"); } -TEST(Thumb2AssemblerTest, VPushPop) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, VPushPop) { __ vpushs(S2, 4); __ vpushd(D2, 4); @@ -1126,9 +1064,7 @@ TEST(Thumb2AssemblerTest, VPushPop) { EmitAndCheck(&assembler, "VPushPop"); } -TEST(Thumb2AssemblerTest, Max16BitBranch) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, Max16BitBranch) { Label l1; __ b(&l1); for (int i = 0 ; i < (1 << 11) ; i += 2) { @@ -1140,9 +1076,7 @@ TEST(Thumb2AssemblerTest, Max16BitBranch) { EmitAndCheck(&assembler, "Max16BitBranch"); } -TEST(Thumb2AssemblerTest, Branch32) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, Branch32) { Label l1; __ b(&l1); for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) { @@ -1154,9 +1088,7 @@ TEST(Thumb2AssemblerTest, Branch32) { EmitAndCheck(&assembler, "Branch32"); } -TEST(Thumb2AssemblerTest, CompareAndBranchMax) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, CompareAndBranchMax) { Label l1; __ cbz(R4, &l1); for (int i = 0 ; i < (1 << 7) ; i += 2) { @@ -1168,9 +1100,7 @@ TEST(Thumb2AssemblerTest, CompareAndBranchMax) { EmitAndCheck(&assembler, "CompareAndBranchMax"); } -TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation16) { Label l1; __ cbz(R4, &l1); for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) { @@ -1182,9 +1112,7 @@ TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) { EmitAndCheck(&assembler, "CompareAndBranchRelocation16"); } -TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation32) { Label l1; __ cbz(R4, &l1); for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) { @@ -1196,9 +1124,7 @@ TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) { EmitAndCheck(&assembler, "CompareAndBranchRelocation32"); } -TEST(Thumb2AssemblerTest, MixedBranch32) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, MixedBranch32) { Label l1; Label l2; __ b(&l1); // Forwards. 
@@ -1215,9 +1141,7 @@ TEST(Thumb2AssemblerTest, MixedBranch32) { EmitAndCheck(&assembler, "MixedBranch32"); } -TEST(Thumb2AssemblerTest, Shifts) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, Shifts) { // 16 bit selected for CcDontCare. __ Lsl(R0, R1, 5); __ Lsr(R0, R1, 5); @@ -1292,9 +1216,7 @@ TEST(Thumb2AssemblerTest, Shifts) { EmitAndCheck(&assembler, "Shifts"); } -TEST(Thumb2AssemblerTest, LoadStoreRegOffset) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, LoadStoreRegOffset) { // 16 bit. __ ldr(R0, Address(R1, R2)); __ str(R0, Address(R1, R2)); @@ -1319,9 +1241,7 @@ TEST(Thumb2AssemblerTest, LoadStoreRegOffset) { EmitAndCheck(&assembler, "LoadStoreRegOffset"); } -TEST(Thumb2AssemblerTest, LoadStoreLiteral) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, LoadStoreLiteral) { __ ldr(R0, Address(4)); __ str(R0, Address(4)); @@ -1337,9 +1257,7 @@ TEST(Thumb2AssemblerTest, LoadStoreLiteral) { EmitAndCheck(&assembler, "LoadStoreLiteral"); } -TEST(Thumb2AssemblerTest, LoadStoreLimits) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, LoadStoreLimits) { __ ldr(R0, Address(R4, 124)); // 16 bit. __ ldr(R0, Address(R4, 128)); // 32 bit. @@ -1367,9 +1285,7 @@ TEST(Thumb2AssemblerTest, LoadStoreLimits) { EmitAndCheck(&assembler, "LoadStoreLimits"); } -TEST(Thumb2AssemblerTest, CompareAndBranch) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, CompareAndBranch) { Label label; __ CompareAndBranchIfZero(arm::R0, &label); __ CompareAndBranchIfZero(arm::R11, &label); @@ -1380,9 +1296,7 @@ TEST(Thumb2AssemblerTest, CompareAndBranch) { EmitAndCheck(&assembler, "CompareAndBranch"); } -TEST(Thumb2AssemblerTest, AddConstant) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, AddConstant) { // Low registers, Rd != Rn. __ AddConstant(R0, R1, 0); // MOV. __ AddConstant(R0, R1, 1); // 16-bit ADDS, encoding T1. @@ -1626,9 +1540,7 @@ TEST(Thumb2AssemblerTest, AddConstant) { EmitAndCheck(&assembler, "AddConstant"); } -TEST(Thumb2AssemblerTest, CmpConstant) { - arm::Thumb2Assembler assembler; - +TEST_F(Thumb2AssemblerTest, CmpConstant) { __ CmpConstant(R0, 0); // 16-bit CMP. __ CmpConstant(R1, 1); // 16-bit CMP. __ CmpConstant(R0, 7); // 16-bit CMP. 
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index ffac4c4168..ecb67bd053 100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -102,8 +102,10 @@ class MipsExceptionSlowPath { class MipsAssembler FINAL : public Assembler { public: - explicit MipsAssembler(const MipsInstructionSetFeatures* instruction_set_features = nullptr) - : overwriting_(false), + explicit MipsAssembler(ArenaAllocator* arena, + const MipsInstructionSetFeatures* instruction_set_features = nullptr) + : Assembler(arena), + overwriting_(false), overwrite_location_(0), last_position_adjustment_(0), last_old_position_(0), diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index 71f5e00166..8acc38ac82 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -102,8 +102,9 @@ class Mips64ExceptionSlowPath { class Mips64Assembler FINAL : public Assembler { public: - Mips64Assembler() - : overwriting_(false), + explicit Mips64Assembler(ArenaAllocator* arena) + : Assembler(arena), + overwriting_(false), overwrite_location_(0), last_position_adjustment_(0), last_old_position_(0), diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 3efef70f77..2203646e77 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -2379,7 +2379,7 @@ void X86Assembler::GetCurrentThread(FrameOffset offset, } void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { - X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust); + X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust); buffer_.EnqueueSlowPath(slow); fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0)); j(kNotEqual, slow->Entry()); @@ -2402,7 +2402,7 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) { } void X86Assembler::AddConstantArea() { - const std::vector<int32_t>& area = constant_area_.GetBuffer(); + ArrayRef<const int32_t> area = constant_area_.GetBuffer(); // Generate the data for the literal area. for (size_t i = 0, e = area.size(); i < e; i++) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index 00ff7bdbbd..8567ad2a17 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -18,12 +18,15 @@ #define ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_ #include <vector> + +#include "base/arena_containers.h" #include "base/bit_utils.h" #include "base/macros.h" #include "constants_x86.h" #include "globals.h" #include "managed_register_x86.h" #include "offsets.h" +#include "utils/array_ref.h" #include "utils/assembler.h" namespace art { @@ -260,7 +263,7 @@ class NearLabel : private Label { */ class ConstantArea { public: - ConstantArea() {} + explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {} // Add a double to the constant area, returning the offset into // the constant area where the literal resides. 
@@ -290,18 +293,18 @@ class ConstantArea { return buffer_.size() * elem_size_; } - const std::vector<int32_t>& GetBuffer() const { - return buffer_; + ArrayRef<const int32_t> GetBuffer() const { + return ArrayRef<const int32_t>(buffer_); } private: static constexpr size_t elem_size_ = sizeof(int32_t); - std::vector<int32_t> buffer_; + ArenaVector<int32_t> buffer_; }; class X86Assembler FINAL : public Assembler { public: - X86Assembler() {} + explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {} virtual ~X86Assembler() {} /* diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc index d0d51473fe..1d1df6e447 100644 --- a/compiler/utils/x86/assembler_x86_test.cc +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -16,13 +16,16 @@ #include "assembler_x86.h" +#include "base/arena_allocator.h" #include "base/stl_util.h" #include "utils/assembler_test.h" namespace art { TEST(AssemblerX86, CreateBuffer) { - AssemblerBuffer buffer; + ArenaPool pool; + ArenaAllocator arena(&pool); + AssemblerBuffer buffer(&arena); AssemblerBuffer::EnsureCapacity ensured(&buffer); buffer.Emit<uint8_t>(0x42); ASSERT_EQ(static_cast<size_t>(1), buffer.Size()); diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index d86ad1be5f..32eb4a37bf 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -3144,7 +3144,7 @@ class X86_64ExceptionSlowPath FINAL : public SlowPath { }; void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { - X86_64ExceptionSlowPath* slow = new X86_64ExceptionSlowPath(stack_adjust); + X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust); buffer_.EnqueueSlowPath(slow); gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<8>(), true), Immediate(0)); j(kNotEqual, slow->Entry()); @@ -3167,7 +3167,7 @@ void X86_64ExceptionSlowPath::Emit(Assembler *sasm) { } void X86_64Assembler::AddConstantArea() { - const std::vector<int32_t>& area = constant_area_.GetBuffer(); + ArrayRef<const int32_t> area = constant_area_.GetBuffer(); for (size_t i = 0, e = area.size(); i < e; i++) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitInt32(area[i]); diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index f00cb12413..92c7d0ab99 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -19,12 +19,14 @@ #include <vector> +#include "base/arena_containers.h" #include "base/bit_utils.h" #include "base/macros.h" #include "constants_x86_64.h" #include "globals.h" #include "managed_register_x86_64.h" #include "offsets.h" +#include "utils/array_ref.h" #include "utils/assembler.h" namespace art { @@ -270,7 +272,7 @@ class Address : public Operand { */ class ConstantArea { public: - ConstantArea() {} + explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {} // Add a double to the constant area, returning the offset into // the constant area where the literal resides. 
@@ -296,13 +298,13 @@ class ConstantArea { return buffer_.size() * elem_size_; } - const std::vector<int32_t>& GetBuffer() const { - return buffer_; + ArrayRef<const int32_t> GetBuffer() const { + return ArrayRef<const int32_t>(buffer_); } private: static constexpr size_t elem_size_ = sizeof(int32_t); - std::vector<int32_t> buffer_; + ArenaVector<int32_t> buffer_; }; @@ -332,7 +334,7 @@ class NearLabel : private Label { class X86_64Assembler FINAL : public Assembler { public: - X86_64Assembler() {} + explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {} virtual ~X86_64Assembler() {} /* diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index 4f65709810..b19e616dd6 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -27,7 +27,9 @@ namespace art { TEST(AssemblerX86_64, CreateBuffer) { - AssemblerBuffer buffer; + ArenaPool pool; + ArenaAllocator arena(&pool); + AssemblerBuffer buffer(&arena); AssemblerBuffer::EnsureCapacity ensured(&buffer); buffer.Emit<uint8_t>(0x42); ASSERT_EQ(static_cast<size_t>(1), buffer.Size()); |
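For the buffer itself, the updated x86 and x86_64 CreateBuffer tests above show the new setup: an AssemblerBuffer can no longer be default-constructed, it borrows its backing storage from an ArenaAllocator, and growth goes through ArenaAllocator::Realloc() instead of new[]/delete[]. A minimal standalone sketch of that pattern follows; the EmitOneByte() wrapper is illustrative and not part of the patch.

```cpp
#include "base/arena_allocator.h"
#include "base/logging.h"
#include "utils/assembler.h"

namespace art {

// Mirrors the updated CreateBuffer tests: the buffer's contents_ array is
// arena-allocated, so there is no delete[] in ~AssemblerBuffer().
void EmitOneByte() {
  ArenaPool pool;
  ArenaAllocator arena(&pool);

  AssemblerBuffer buffer(&arena);                  // Storage comes from the arena.
  AssemblerBuffer::EnsureCapacity ensured(&buffer);
  buffer.Emit<uint8_t>(0x42);                      // Cursor advances in arena memory.

  CHECK_EQ(buffer.Size(), static_cast<size_t>(1));
}

}  // namespace art
```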