Diffstat (limited to 'compiler/utils')
-rw-r--r--  compiler/utils/arm/assembler_arm_vixl.cc            | 34
-rw-r--r--  compiler/utils/arm/assembler_arm_vixl.h             | 23
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.cc  | 50
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc             |  4
-rw-r--r--  compiler/utils/assembler_thumb_test.cc              |  6
5 files changed, 106 insertions, 11 deletions
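
A recurring pattern in the hunks below is wrapping explicit register uses in a vixl::aarch32::UseScratchRegisterScope and calling Exclude() on registers that already hold live values, so the VIXL macro assembler neither hands them out as temporaries nor trips its scratch-register checks; Include() returns them to the pool afterwards. A minimal standalone sketch of that pattern (plain VIXL only; the function name and offset are made up for illustration, and the assumption that r12 sits in the default AArch32 scratch pool is mine, not the patch's):

#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl::aarch32;

// Emit "ldr r0, [r12, #12]" while r12 holds a live value.
void LoadThroughLiveR12(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  temps.Exclude(r12);   // r12 must not be picked (or flagged) as a scratch register here.
  masm->Ldr(r0, MemOperand(r12, 12));
  temps.Include(r12);   // hand it back once the explicit use is over
}
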
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index e3b9fb62c8..c35c39328c 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -43,12 +43,12 @@ size_t ArmVIXLAssembler::CodeSize() const {
 }
 
 const uint8_t* ArmVIXLAssembler::CodeBufferBaseAddress() const {
-  return vixl_masm_.GetStartAddress<uint8_t*>();
+  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
 }
 
 void ArmVIXLAssembler::FinalizeInstructions(const MemoryRegion& region) {
   // Copy the instructions from the buffer.
-  MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
   region.CopyFrom(0, from);
 }
 
@@ -365,7 +365,7 @@ void ArmVIXLAssembler::StoreRegisterList(RegList regs, size_t stack_offset) {
     if (stack_offset != 0) {
       base = temps.Acquire();
       DCHECK_EQ(regs & (1u << base.GetCode()), 0u);
-      ___ Add(base, sp, stack_offset);
+      ___ Add(base, sp, Operand::From(stack_offset));
     }
     ___ Stm(base, NO_WRITE_BACK, RegisterList(regs));
   } else {
@@ -385,7 +385,7 @@ void ArmVIXLAssembler::LoadRegisterList(RegList regs, size_t stack_offset) {
     vixl32::Register base = sp;
     if (stack_offset != 0) {
       base = temps.Acquire();
-      ___ Add(base, sp, stack_offset);
+      ___ Add(base, sp, Operand::From(stack_offset));
     }
     ___ Ldm(base, NO_WRITE_BACK, RegisterList(regs));
   } else {
@@ -429,5 +429,31 @@ void ArmVIXLAssembler::AddConstantInIt(vixl32::Register rd,
   }
 }
 
+void ArmVIXLMacroAssembler::CompareAndBranchIfZero(vixl32::Register rn,
+                                                   vixl32::Label* label,
+                                                   bool is_far_target) {
+  if (!is_far_target && rn.IsLow() && !label->IsBound()) {
+    // In T32, Cbz/Cbnz instructions have following limitations:
+    // - There are only 7 bits (i:imm5:0) to encode branch target address (cannot be far target).
+    // - Only low registers (i.e R0 .. R7) can be encoded.
+    // - Only forward branches (unbound labels) are supported.
+    Cbz(rn, label);
+    return;
+  }
+  Cmp(rn, 0);
+  B(eq, label);
+}
+
+void ArmVIXLMacroAssembler::CompareAndBranchIfNonZero(vixl32::Register rn,
+                                                      vixl32::Label* label,
+                                                      bool is_far_target) {
+  if (!is_far_target && rn.IsLow() && !label->IsBound()) {
+    Cbnz(rn, label);
+    return;
+  }
+  Cmp(rn, 0);
+  B(ne, label);
+}
+
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index e0206287eb..b4a4abc87e 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -37,6 +37,25 @@ namespace vixl32 = vixl::aarch32;
 namespace art {
 namespace arm {
 
+class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
+ public:
+  // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
+  // CMP+Bcc are generated by default.
+  // If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
+  // then Cbz/Cbnz is generated.
+  // Prefer following interfaces to using vixl32::MacroAssembler::Cbz/Cbnz.
+  // In T32, Cbz/Cbnz instructions have following limitations:
+  // - Far targets, which are over 126 bytes away, are not supported.
+  // - Only low registers can be encoded.
+  // - Backward branches are not supported.
+  void CompareAndBranchIfZero(vixl32::Register rn,
+                              vixl32::Label* label,
+                              bool is_far_target = true);
+  void CompareAndBranchIfNonZero(vixl32::Register rn,
+                                 vixl32::Label* label,
+                                 bool is_far_target = true);
+};
+
 class ArmVIXLAssembler FINAL : public Assembler {
  private:
   class ArmException;
@@ -48,7 +67,7 @@ class ArmVIXLAssembler FINAL : public Assembler {
   }
   virtual ~ArmVIXLAssembler() {}
 
-  vixl32::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
+  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
 
   void FinalizeCode() OVERRIDE;
 
   // Size of generated code.
@@ -117,7 +136,7 @@ class ArmVIXLAssembler FINAL : public Assembler {
 
  private:
   // VIXL assembler.
-  vixl32::MacroAssembler vixl_masm_;
+  ArmVIXLMacroAssembler vixl_masm_;
 };
 
 // Thread register declaration.
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 23b2774179..f20ed0a0d0 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -168,6 +168,8 @@ void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, si
     CHECK_EQ(0u, size);
   } else if (src.IsCoreRegister()) {
     CHECK_EQ(4u, size);
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    temps.Exclude(src.AsVIXLRegister());
     asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
   } else if (src.IsRegisterPair()) {
     CHECK_EQ(8u, size);
@@ -186,12 +188,16 @@ void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, si
 void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
   ArmManagedRegister src = msrc.AsArm();
   CHECK(src.IsCoreRegister()) << src;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(src.AsVIXLRegister());
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
 }
 
 void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
   ArmManagedRegister src = msrc.AsArm();
   CHECK(src.IsCoreRegister()) << src;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(src.AsVIXLRegister());
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
 }
 
@@ -202,6 +208,8 @@ void ArmVIXLJNIMacroAssembler::StoreSpanning(FrameOffset dest,
   ArmManagedRegister src = msrc.AsArm();
   ArmManagedRegister scratch = mscratch.AsArm();
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, in_off.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value() + 4);
 }
@@ -210,6 +218,8 @@ void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest,
                                        FrameOffset src,
                                        ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, src.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value());
 }
@@ -220,6 +230,8 @@ void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest,
                                        bool unpoison_reference) {
   ArmManagedRegister dst = dest.AsArm();
   CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(dst.AsVIXLRegister(), base.AsArm().AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord,
                       dst.AsVIXLRegister(),
                       base.AsArm().AsVIXLRegister(),
@@ -246,6 +258,8 @@ void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
                                                      ManagedRegister scratch) {
   ArmManagedRegister mscratch = scratch.AsArm();
   CHECK(mscratch.IsCoreRegister()) << mscratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(mscratch.AsVIXLRegister());
   asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm);
   asm_.StoreToOffset(kStoreWord, mscratch.AsVIXLRegister(), sp, dest.Int32Value());
 }
@@ -263,6 +277,8 @@ void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
 
 void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
   ArmManagedRegister dst = m_dst.AsArm();
   CHECK(dst.IsCoreRegister()) << dst;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(dst.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), tr, offs.Int32Value());
 }
 
@@ -271,6 +287,8 @@ void ArmVIXLJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                     ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
 }
@@ -286,6 +304,8 @@ void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                                         ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.AddConstant(scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
 }
@@ -312,6 +332,8 @@ void ArmVIXLJNIMacroAssembler::Move(ManagedRegister m_dst,
   if (!dst.Equals(src)) {
     if (dst.IsCoreRegister()) {
       CHECK(src.IsCoreRegister()) << src;
+      UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+      temps.Exclude(dst.AsVIXLRegister());
       ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
     } else if (dst.IsDRegister()) {
       if (src.IsDRegister()) {
@@ -351,6 +373,8 @@ void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest,
   ArmManagedRegister temp = scratch.AsArm();
   CHECK(temp.IsCoreRegister()) << temp;
   CHECK(size == 4 || size == 8) << size;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(temp.AsVIXLRegister());
   if (size == 4) {
     asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
     asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
@@ -414,6 +438,8 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
   ArmManagedRegister in_reg = min_reg.AsArm();
   CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
   CHECK(out_reg.IsCoreRegister()) << out_reg;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(out_reg.AsVIXLRegister());
   if (null_allowed) {
     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
     // the address in the handle scope holding the reference.
@@ -425,6 +451,8 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                             handle_scope_offset.Int32Value());
       in_reg = out_reg;
     }
+
+    temps.Exclude(in_reg.AsVIXLRegister());
     ___ Cmp(in_reg.AsVIXLRegister(), 0);
 
     if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
@@ -457,6 +485,8 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                       bool null_allowed) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   if (null_allowed) {
     asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
@@ -503,6 +533,8 @@ void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase,
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(base.IsCoreRegister()) << base;
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord,
                       scratch.AsVIXLRegister(),
                       base.AsVIXLRegister(),
@@ -514,6 +546,8 @@ void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase,
 void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   // Call *(*(SP + base) + offset)
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, base.Int32Value());
   asm_.LoadFromOffset(kLoadWord,
@@ -541,6 +575,8 @@ void ArmVIXLJNIMacroAssembler::GetCurrentThread(FrameOffset dest_offset,
 void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
   CHECK_ALIGNED(stack_adjust, kStackAlignment);
   ArmManagedRegister scratch = m_scratch.AsArm();
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   exception_blocks_.emplace_back(
       new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust));
   asm_.LoadFromOffset(kLoadWord,
@@ -574,10 +610,12 @@ void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label,
 
   switch (condition) {
     case JNIMacroUnaryCondition::kZero:
-      ___ Cbz(test.AsArm().AsVIXLRegister(), ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+      ___ CompareAndBranchIfZero(test.AsArm().AsVIXLRegister(),
+                                 ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
       break;
     case JNIMacroUnaryCondition::kNotZero:
-      ___ Cbnz(test.AsArm().AsVIXLRegister(), ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+      ___ CompareAndBranchIfNonZero(test.AsArm().AsVIXLRegister(),
+                                    ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
       break;
     default:
       LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
@@ -596,11 +634,14 @@ void ArmVIXLJNIMacroAssembler::EmitExceptionPoll(
   if (exception->stack_adjust_ != 0) {  // Fix up the frame.
     DecreaseFrameSize(exception->stack_adjust_);
   }
+
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(exception->scratch_.AsVIXLRegister());
   // Pass exception object as argument.
   // Don't care about preserving r0 as this won't return.
   ___ Mov(r0, exception->scratch_.AsVIXLRegister());
+  temps.Include(exception->scratch_.AsVIXLRegister());
   // TODO: check that exception->scratch_ is dead by this point.
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   vixl32::Register temp = temps.Acquire();
   ___ Ldr(temp,
           MemOperand(tr,
@@ -622,6 +663,9 @@ void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister
   } else if (dest.IsCoreRegister()) {
     CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    temps.Exclude(dest.AsVIXLRegister());
+
     if (size == 1u) {
       ___ Ldrb(dest.AsVIXLRegister(), MemOperand(base, offset));
     } else {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f91bcfa92e..6ed0e9b670 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -40,12 +40,12 @@ size_t Arm64Assembler::CodeSize() const {
 }
 
 const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
-  return vixl_masm_.GetStartAddress<uint8_t*>();
+  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
 }
 
 void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
   // Copy the instructions from the buffer.
-  MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
   region.CopyFrom(0, from);
 }
 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 10bed13dad..50a1d9fd98 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -1753,7 +1753,10 @@ TEST_F(ArmVIXLAssemblerTest, VixlLoadFromOffset) {
   __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
   __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
 
+  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
+  temps.Exclude(R12);
   __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
+  temps.Include(R12);
   __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
 
   __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
@@ -1783,7 +1786,10 @@ TEST_F(ArmVIXLAssemblerTest, VixlStoreToOffset) {
   __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
   __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
 
+  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
+  temps.Exclude(R12);
   __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
+  temps.Include(R12);
   __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
 
   __ StoreToOffset(kStoreByte, R2, R4, 12);
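
The new ArmVIXLMacroAssembler helpers introduced above are intended to replace direct Cbz/Cbnz calls such as the ones removed from Jump(). A hedged usage sketch follows; the caller function, label names and include path are assumptions for illustration, and only the helper signatures come from assembler_arm_vixl.h:

#include "utils/arm/assembler_arm_vixl.h"  // assumed include path for ArmVIXLMacroAssembler

// Hypothetical caller: skip a block of code when |reg| is zero.
void EmitSkipIfZero(art::arm::ArmVIXLMacroAssembler* masm, vixl::aarch32::Register reg) {
  vixl::aarch32::Label done;
  // Forward, nearby branch: the is_far_target=false hint lets the helper emit the
  // 16-bit Cbz when |reg| is a low register and |done| is still unbound.
  masm->CompareAndBranchIfZero(reg, &done, /* is_far_target */ false);
  // ... code that only runs when reg != 0 ...
  masm->Bind(&done);
  // Bound (backward) target and no hint: the helper falls back to CMP plus a
  // conditional B, which also covers high registers and targets beyond 126 bytes.
  masm->CompareAndBranchIfNonZero(reg, &done);
}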