author     2016-11-21 19:46:00 +0000
committer  2016-11-25 14:16:31 +0000
commit     b77051ea5718fe017f2fa884b9ca4c8186c95190 (patch)
tree       bb51782f8350be00195becabc3cd8758f15010a0
parent     d0111420a9f924fe560a97132d09ae531852fd69 (diff)
ARM: VIXL32: Fix breaking changes from recent VIXL update.
Test: m test-art-host
Test: m test-art-target
Change-Id: I02a608bf51b889a2bfff43272a3619582bf9cf20
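
Editorial note (not part of the commit): the change adapts ART's ARM/ARM64 back ends to an updated VIXL API. The recurring fixes in the diff below are: the code-buffer scope classes are taken from the shared vixl namespace with the old kCheck policy renamed kReserveBufferSpace, load/store macro instructions take an explicit MemOperand instead of a bare base register, immediates of awkward integral types go through Operand::From(), and registers that VIXL also treats as scratch registers are excluded via UseScratchRegisterScope. A minimal sketch of the scope change, assuming VIXL's usual header layout and a hypothetical EmitNopWithScope helper:

// Hypothetical helper (not from the patch) showing the updated scope usage:
// CodeBufferCheckScope now comes from the shared vixl namespace and the old
// kCheck policy is spelled kReserveBufferSpace.
#include "aarch64/macro-assembler-aarch64.h"

void EmitNopWithScope(vixl::aarch64::MacroAssembler* masm) {
  // Reserve exactly one instruction's worth of buffer space before emitting.
  vixl::CodeBufferCheckScope scope(masm,
                                   vixl::aarch64::kInstructionSize,
                                   vixl::CodeBufferCheckScope::kReserveBufferSpace,
                                   vixl::CodeBufferCheckScope::kExactSize);
  masm->nop();
}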
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc          19
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc        56
-rw-r--r--  compiler/optimizing/common_arm.h                      11
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc            30
-rw-r--r--  compiler/utils/arm/assembler_arm_vixl.cc               8
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.cc    44
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc                4
-rw-r--r--  compiler/utils/assembler_thumb_test.cc                 6
-rw-r--r--  disassembler/disassembler_arm.cc                      35
-rw-r--r--  test/538-checker-embed-constants/src/Main.java         32
10 files changed, 153 insertions, 92 deletions
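
Before the per-file hunks, a minimal sketch of the two most common call-site fixes, built only from calls that appear in the patch itself; the ExampleFixes helper and the include path are illustrative assumptions, not part of the change:

// Hypothetical illustration of the MemOperand and Operand::From fixes,
// assuming VIXL's aarch32 MacroAssembler API as exercised by this change.
#include "aarch32/macro-assembler-aarch32.h"

using vixl::aarch32::MacroAssembler;
using vixl::aarch32::MemOperand;
using vixl::aarch32::Operand;
using vixl::aarch32::Register;

void ExampleFixes(MacroAssembler* masm, Register addr, Register lo,
                  Register hi, size_t stack_offset) {
  // Addresses: the updated macro instructions expect an explicit MemOperand
  // wrapping the base register rather than the bare register.
  masm->Ldrexd(lo, hi, MemOperand(addr));
  // Immediates: values whose type is no longer taken directly (size_t here)
  // are wrapped with Operand::From, as the patch does throughout.
  masm->Add(lo, addr, Operand::From(stack_offset));
}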
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 59e17841e4..a78b3da455 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -572,8 +572,10 @@ void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) { // We are about to use the assembler to place literals directly. Make sure we have enough // underlying code buffer and we have generated the jump table with right size. - CodeBufferCheckScope scope(codegen->GetVIXLAssembler(), num_entries * sizeof(int32_t), - CodeBufferCheckScope::kCheck, CodeBufferCheckScope::kExactSize); + vixl::CodeBufferCheckScope scope(codegen->GetVIXLAssembler(), + num_entries * sizeof(int32_t), + vixl::CodeBufferCheckScope::kReserveBufferSpace, + vixl::CodeBufferCheckScope::kExactSize); __ Bind(&table_start_); const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors(); @@ -2260,10 +2262,10 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize; if (prev->IsLoadOrStore()) { // Make sure we emit only exactly one nop. - vixl::aarch64::CodeBufferCheckScope scope(masm, - kInstructionSize, - vixl::aarch64::CodeBufferCheckScope::kCheck, - vixl::aarch64::CodeBufferCheckScope::kExactSize); + vixl::CodeBufferCheckScope scope(masm, + kInstructionSize, + vixl::CodeBufferCheckScope::kReserveBufferSpace, + vixl::CodeBufferCheckScope::kExactSize); __ nop(); } } @@ -4036,7 +4038,8 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok vixl::aarch64::Label* label = &relative_call_patches_.back().label; SingleEmissionCheckScope guard(GetVIXLAssembler()); __ Bind(label); - __ bl(0); // Branch and link to itself. This will be overriden at link time. + // Branch and link to itself. This will be overriden at link time. 
+ __ bl(static_cast<int64_t>(0)); break; } case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup: @@ -4167,7 +4170,7 @@ void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, DCHECK(reg.IsX()); SingleEmissionCheckScope guard(GetVIXLAssembler()); __ Bind(fixup_label); - __ adrp(reg, /* offset placeholder */ 0); + __ adrp(reg, /* offset placeholder */ static_cast<int64_t>(0)); } void CodeGeneratorARM64::EmitAddPlaceholder(vixl::aarch64::Label* fixup_label, diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 7aea616c7f..e399f3228e 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -47,6 +47,7 @@ using helpers::InputRegister; using helpers::InputRegisterAt; using helpers::InputSRegisterAt; using helpers::InputVRegisterAt; +using helpers::Int32ConstantFrom; using helpers::LocationFrom; using helpers::LowRegisterFrom; using helpers::LowSRegisterFrom; @@ -132,7 +133,7 @@ static size_t SaveContiguousSRegisterList(size_t first, vixl32::Register base = sp; if (stack_offset != 0) { base = temps.Acquire(); - __ Add(base, sp, stack_offset); + __ Add(base, sp, Operand::From(stack_offset)); } __ Vstm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs)); } @@ -180,7 +181,7 @@ static size_t RestoreContiguousSRegisterList(size_t first, vixl32::Register base = sp; if (stack_offset != 0) { base = temps.Acquire(); - __ Add(base, sp, stack_offset); + __ Add(base, sp, Operand::From(stack_offset)); } __ Vldm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs)); } @@ -673,8 +674,8 @@ void JumpTableARMVIXL::EmitTable(CodeGeneratorARMVIXL* codegen) { DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold); // We are about to use the assembler to place literals directly. Make sure we have enough - // underlying code buffer and we have generated the jump table with right size. - codegen->GetVIXLAssembler()->GetBuffer().Align(); + // underlying code buffer and we have generated a jump table of the right size, using + // codegen->GetVIXLAssembler()->GetBuffer().Align(); AssemblerAccurateScope aas(codegen->GetVIXLAssembler(), num_entries * sizeof(int32_t), CodeBufferCheckScope::kMaximumSize); @@ -701,7 +702,7 @@ void JumpTableARMVIXL::FixTable(CodeGeneratorARMVIXL* codegen) { DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min()); DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max()); - bb_addresses_[i].get()->UpdateValue(jump_offset, &codegen->GetVIXLAssembler()->GetBuffer()); + bb_addresses_[i].get()->UpdateValue(jump_offset, codegen->GetVIXLAssembler()->GetBuffer()); } } @@ -1667,7 +1668,20 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv // Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other // instruction from clobbering it as they might use r12 as a scratch register. DCHECK(hidden_reg.Is(r12)); - __ Mov(hidden_reg, invoke->GetDexMethodIndex()); + + { + // The VIXL macro assembler may clobber any of the scratch registers that are available to it, + // so it checks if the application is using them (by passing them to the macro assembler + // methods). The following application of UseScratchRegisterScope corrects VIXL's notion of + // what is available, and is the opposite of the standard usage: Instead of requesting a + // temporary location, it imposes an external constraint (i.e. a specific register is reserved + // for the hidden argument). 
Note that this works even if VIXL needs a scratch register itself + // (to materialize the constant), since the destination register becomes available for such use + // internally for the duration of the macro instruction. + UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(hidden_reg); + __ Mov(hidden_reg, invoke->GetDexMethodIndex()); + } { AssemblerAccurateScope aas(GetVIXLAssembler(), @@ -2458,13 +2472,13 @@ void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOpera vixl32::Register dividend = InputRegisterAt(instruction, 0); vixl32::Register temp1 = RegisterFrom(locations->GetTemp(0)); vixl32::Register temp2 = RegisterFrom(locations->GetTemp(1)); - int64_t imm = second.GetConstant()->AsIntConstant()->GetValue(); + int32_t imm = Int32ConstantFrom(second); int64_t magic; int shift; CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); - __ Mov(temp1, magic); + __ Mov(temp1, Operand::From(magic)); __ Smull(temp2, temp1, dividend, temp1); if (imm > 0 && magic < 0) { @@ -2857,9 +2871,9 @@ void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) { } // Rotate, or mov to out for zero or word size rotations. if (rot != 0u) { - __ Lsr(out_reg_hi, in_reg_hi, rot); + __ Lsr(out_reg_hi, in_reg_hi, Operand::From(rot)); __ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot)); - __ Lsr(out_reg_lo, in_reg_lo, rot); + __ Lsr(out_reg_lo, in_reg_lo, Operand::From(rot)); __ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot)); } else { __ Mov(out_reg_lo, in_reg_lo); @@ -2874,7 +2888,7 @@ void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) { __ And(shift_right, RegisterFrom(rhs), 0x1F); __ Lsrs(shift_left, RegisterFrom(rhs), 6); // TODO(VIXL): Check that flags are kept after "vixl32::LeaveFlags" enabled. - __ Rsb(shift_left, shift_right, kArmBitsPerWord); + __ Rsb(shift_left, shift_right, Operand::From(kArmBitsPerWord)); __ B(cc, &shift_by_32_plus_shift_right); // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right). 
@@ -3040,11 +3054,11 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) { // Shift the high part __ Lsl(o_h, high, o_l); // Shift the low part and `or` what overflew on the high part - __ Rsb(temp, o_l, kArmBitsPerWord); + __ Rsb(temp, o_l, Operand::From(kArmBitsPerWord)); __ Lsr(temp, low, temp); __ Orr(o_h, o_h, temp); // If the shift is > 32 bits, override the high part - __ Subs(temp, o_l, kArmBitsPerWord); + __ Subs(temp, o_l, Operand::From(kArmBitsPerWord)); { AssemblerAccurateScope guard(GetVIXLAssembler(), 3 * kArmInstrMaxSizeInBytes, @@ -3059,11 +3073,11 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) { // Shift the low part __ Lsr(o_l, low, o_h); // Shift the high part and `or` what underflew on the low part - __ Rsb(temp, o_h, kArmBitsPerWord); + __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord)); __ Lsl(temp, high, temp); __ Orr(o_l, o_l, temp); // If the shift is > 32 bits, override the low part - __ Subs(temp, o_h, kArmBitsPerWord); + __ Subs(temp, o_h, Operand::From(kArmBitsPerWord)); { AssemblerAccurateScope guard(GetVIXLAssembler(), 3 * kArmInstrMaxSizeInBytes, @@ -3077,10 +3091,10 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) { __ And(o_h, second_reg, kMaxLongShiftDistance); // same as Shr except we use `Lsr`s and not `Asr`s __ Lsr(o_l, low, o_h); - __ Rsb(temp, o_h, kArmBitsPerWord); + __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord)); __ Lsl(temp, high, temp); __ Orr(o_l, o_l, temp); - __ Subs(temp, o_h, kArmBitsPerWord); + __ Subs(temp, o_h, Operand::From(kArmBitsPerWord)); { AssemblerAccurateScope guard(GetVIXLAssembler(), 3 * kArmInstrMaxSizeInBytes, @@ -3424,7 +3438,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicLoad(vixl32::Register ad __ Add(temp, addr, offset); addr = temp; } - __ Ldrexd(out_lo, out_hi, addr); + __ Ldrexd(out_lo, out_hi, MemOperand(addr)); } void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register addr, @@ -3444,9 +3458,9 @@ void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register a __ Bind(&fail); // We need a load followed by store. (The address used in a STREX instruction must // be the same as the address in the most recently executed LDREX instruction.) 
- __ Ldrexd(temp1, temp2, addr); + __ Ldrexd(temp1, temp2, MemOperand(addr)); codegen_->MaybeRecordImplicitNullCheck(instruction); - __ Strexd(temp1, value_lo, value_hi, addr); + __ Strexd(temp1, value_lo, value_hi, MemOperand(addr)); __ CompareAndBranchIfNonZero(temp1, &fail); } @@ -4648,7 +4662,7 @@ void CodeGeneratorARMVIXL::MarkGCCard(vixl32::Register temp, } GetAssembler()->LoadFromOffset( kLoadWord, card, tr, Thread::CardTableOffset<kArmPointerSize>().Int32Value()); - __ Lsr(temp, object, gc::accounting::CardTable::kCardShift); + __ Lsr(temp, object, Operand::From(gc::accounting::CardTable::kCardShift)); __ Strb(card, MemOperand(card, temp)); if (can_be_null) { __ Bind(&is_null); diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h index 5129dafca1..d3623f17d1 100644 --- a/compiler/optimizing/common_arm.h +++ b/compiler/optimizing/common_arm.h @@ -139,9 +139,14 @@ inline int32_t Int32ConstantFrom(Location location) { HConstant* instr = location.GetConstant(); if (instr->IsIntConstant()) { return instr->AsIntConstant()->GetValue(); - } else { - DCHECK(instr->IsNullConstant()) << instr->DebugName(); + } else if (instr->IsNullConstant()) { return 0; + } else { + DCHECK(instr->IsLongConstant()) << instr->DebugName(); + const int64_t ret = instr->AsLongConstant()->GetValue(); + DCHECK_GE(ret, std::numeric_limits<int32_t>::min()); + DCHECK_LE(ret, std::numeric_limits<int32_t>::max()); + return ret; } } @@ -161,7 +166,7 @@ inline vixl::aarch32::Operand OperandFrom(Location location, Primitive::Type typ if (location.IsRegister()) { return vixl::aarch32::Operand(RegisterFrom(location, type)); } else { - return vixl::aarch32::Operand(Int64ConstantFrom(location)); + return vixl::aarch32::Operand(Int32ConstantFrom(location)); } } diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 7a1ec9f92e..c8e3534164 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -518,7 +518,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) { void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) { ArmVIXLAssembler* assembler = GetAssembler(); // Ignore upper 4B of long address. - __ Ldrsb(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0))); + __ Ldrsb(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0)))); } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -528,7 +528,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) { ArmVIXLAssembler* assembler = GetAssembler(); // Ignore upper 4B of long address. 
- __ Ldr(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0))); + __ Ldr(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0)))); } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -545,9 +545,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) { vixl32::Register hi = HighRegisterFrom(invoke->GetLocations()->Out()); if (addr.Is(lo)) { __ Ldr(hi, MemOperand(addr, 4)); - __ Ldr(lo, addr); + __ Ldr(lo, MemOperand(addr)); } else { - __ Ldr(lo, addr); + __ Ldr(lo, MemOperand(addr)); __ Ldr(hi, MemOperand(addr, 4)); } } @@ -559,7 +559,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekShortNative(HInvoke* invok void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) { ArmVIXLAssembler* assembler = GetAssembler(); // Ignore upper 4B of long address. - __ Ldrsh(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0))); + __ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0)))); } static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -576,7 +576,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) { void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) { ArmVIXLAssembler* assembler = GetAssembler(); - __ Strb(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0))); + __ Strb(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0)))); } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -585,7 +585,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) { ArmVIXLAssembler* assembler = GetAssembler(); - __ Str(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0))); + __ Str(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0)))); } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -598,7 +598,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) { vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0)); // Worst case: Control register bit SCTLR.A = 0. Then unaligned accesses throw a processor // exception. So we can't use ldrd as addr may be unaligned. 
- __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), addr); + __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr)); __ Str(HighRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr, 4)); } @@ -608,7 +608,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeShortNative(HInvoke* invok void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) { ArmVIXLAssembler* assembler = GetAssembler(); - __ Strh(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0))); + __ Strh(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0)))); } void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) { @@ -842,8 +842,8 @@ static void GenUnsafePut(LocationSummary* locations, __ Add(temp_reg, base, offset); vixl32::Label loop_head; __ Bind(&loop_head); - __ Ldrexd(temp_lo, temp_hi, temp_reg); - __ Strexd(temp_lo, value_lo, value_hi, temp_reg); + __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg)); + __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg)); __ Cmp(temp_lo, 0); __ B(ne, &loop_head); } else { @@ -1042,7 +1042,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* vixl32::Label loop_head; __ Bind(&loop_head); - __ Ldrex(tmp, tmp_ptr); + __ Ldrex(tmp, MemOperand(tmp_ptr)); __ Subs(tmp, tmp, expected); @@ -1052,7 +1052,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* CodeBufferCheckScope::kMaximumSize); __ itt(eq); - __ strex(eq, tmp, value, tmp_ptr); + __ strex(eq, tmp, value, MemOperand(tmp_ptr)); __ cmp(eq, tmp, 1); } @@ -1220,7 +1220,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) { static_assert(IsAligned<8>(kObjectAlignment), "String data must be 8-byte aligned for unrolled CompareTo loop."); - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const unsigned char_size = Primitive::ComponentSize(Primitive::kPrimChar); DCHECK_EQ(char_size, 2u); UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); @@ -1469,7 +1469,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { __ Bind(&loop); __ Ldr(out, MemOperand(str, temp1)); __ Ldr(temp2, MemOperand(arg, temp1)); - __ Add(temp1, temp1, sizeof(uint32_t)); + __ Add(temp1, temp1, Operand::From(sizeof(uint32_t))); __ Cmp(out, temp2); __ B(ne, &return_false); // With string compression, we have compared 4 bytes, otherwise 2 chars. diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc index 1aaa231d29..c35c39328c 100644 --- a/compiler/utils/arm/assembler_arm_vixl.cc +++ b/compiler/utils/arm/assembler_arm_vixl.cc @@ -43,12 +43,12 @@ size_t ArmVIXLAssembler::CodeSize() const { } const uint8_t* ArmVIXLAssembler::CodeBufferBaseAddress() const { - return vixl_masm_.GetStartAddress<uint8_t*>(); + return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>(); } void ArmVIXLAssembler::FinalizeInstructions(const MemoryRegion& region) { // Copy the instructions from the buffer. 
- MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize()); + MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize()); region.CopyFrom(0, from); } @@ -365,7 +365,7 @@ void ArmVIXLAssembler::StoreRegisterList(RegList regs, size_t stack_offset) { if (stack_offset != 0) { base = temps.Acquire(); DCHECK_EQ(regs & (1u << base.GetCode()), 0u); - ___ Add(base, sp, stack_offset); + ___ Add(base, sp, Operand::From(stack_offset)); } ___ Stm(base, NO_WRITE_BACK, RegisterList(regs)); } else { @@ -385,7 +385,7 @@ void ArmVIXLAssembler::LoadRegisterList(RegList regs, size_t stack_offset) { vixl32::Register base = sp; if (stack_offset != 0) { base = temps.Acquire(); - ___ Add(base, sp, stack_offset); + ___ Add(base, sp, Operand::From(stack_offset)); } ___ Ldm(base, NO_WRITE_BACK, RegisterList(regs)); } else { diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc index b2bbd721bd..f20ed0a0d0 100644 --- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc +++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc @@ -168,6 +168,8 @@ void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, si CHECK_EQ(0u, size); } else if (src.IsCoreRegister()) { CHECK_EQ(4u, size); + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(src.AsVIXLRegister()); asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value()); } else if (src.IsRegisterPair()) { CHECK_EQ(8u, size); @@ -186,12 +188,16 @@ void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, si void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { ArmManagedRegister src = msrc.AsArm(); CHECK(src.IsCoreRegister()) << src; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(src.AsVIXLRegister()); asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value()); } void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { ArmManagedRegister src = msrc.AsArm(); CHECK(src.IsCoreRegister()) << src; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(src.AsVIXLRegister()); asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value()); } @@ -202,6 +208,8 @@ void ArmVIXLJNIMacroAssembler::StoreSpanning(FrameOffset dest, ArmManagedRegister src = msrc.AsArm(); ArmManagedRegister scratch = mscratch.AsArm(); asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value()); + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, in_off.Int32Value()); asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value() + 4); } @@ -210,6 +218,8 @@ void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) { ArmManagedRegister scratch = mscratch.AsArm(); + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, src.Int32Value()); asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value()); } @@ -220,6 +230,8 @@ void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest, bool unpoison_reference) { ArmManagedRegister dst = dest.AsArm(); CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(dst.AsVIXLRegister(), 
base.AsArm().AsVIXLRegister()); asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), base.AsArm().AsVIXLRegister(), @@ -246,6 +258,8 @@ void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, ManagedRegister scratch) { ArmManagedRegister mscratch = scratch.AsArm(); CHECK(mscratch.IsCoreRegister()) << mscratch; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(mscratch.AsVIXLRegister()); asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm); asm_.StoreToOffset(kStoreWord, mscratch.AsVIXLRegister(), sp, dest.Int32Value()); } @@ -263,6 +277,8 @@ void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst, void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) { ArmManagedRegister dst = m_dst.AsArm(); CHECK(dst.IsCoreRegister()) << dst; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(dst.AsVIXLRegister()); asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), tr, offs.Int32Value()); } @@ -271,6 +287,8 @@ void ArmVIXLJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ManagedRegister mscratch) { ArmManagedRegister scratch = mscratch.AsArm(); CHECK(scratch.IsCoreRegister()) << scratch; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value()); asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, fr_offs.Int32Value()); } @@ -286,6 +304,8 @@ void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, ManagedRegister mscratch) { ArmManagedRegister scratch = mscratch.AsArm(); CHECK(scratch.IsCoreRegister()) << scratch; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); asm_.AddConstant(scratch.AsVIXLRegister(), sp, fr_offs.Int32Value()); asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value()); } @@ -312,6 +332,8 @@ void ArmVIXLJNIMacroAssembler::Move(ManagedRegister m_dst, if (!dst.Equals(src)) { if (dst.IsCoreRegister()) { CHECK(src.IsCoreRegister()) << src; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(dst.AsVIXLRegister()); ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister()); } else if (dst.IsDRegister()) { if (src.IsDRegister()) { @@ -351,6 +373,8 @@ void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest, ArmManagedRegister temp = scratch.AsArm(); CHECK(temp.IsCoreRegister()) << temp; CHECK(size == 4 || size == 8) << size; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(temp.AsVIXLRegister()); if (size == 4) { asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value()); asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value()); @@ -414,6 +438,8 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg, ArmManagedRegister in_reg = min_reg.AsArm(); CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg; CHECK(out_reg.IsCoreRegister()) << out_reg; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(out_reg.AsVIXLRegister()); if (null_allowed) { // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is // the address in the handle scope holding the reference. 
@@ -425,6 +451,8 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg, handle_scope_offset.Int32Value()); in_reg = out_reg; } + + temps.Exclude(in_reg.AsVIXLRegister()); ___ Cmp(in_reg.AsVIXLRegister(), 0); if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) { @@ -457,6 +485,8 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off, bool null_allowed) { ArmManagedRegister scratch = mscratch.AsArm(); CHECK(scratch.IsCoreRegister()) << scratch; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); if (null_allowed) { asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value()); // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is @@ -503,6 +533,8 @@ void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase, ArmManagedRegister scratch = mscratch.AsArm(); CHECK(base.IsCoreRegister()) << base; CHECK(scratch.IsCoreRegister()) << scratch; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), base.AsVIXLRegister(), @@ -514,6 +546,8 @@ void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase, void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { ArmManagedRegister scratch = mscratch.AsArm(); CHECK(scratch.IsCoreRegister()) << scratch; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); // Call *(*(SP + base) + offset) asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, base.Int32Value()); asm_.LoadFromOffset(kLoadWord, @@ -541,6 +575,8 @@ void ArmVIXLJNIMacroAssembler::GetCurrentThread(FrameOffset dest_offset, void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) { CHECK_ALIGNED(stack_adjust, kStackAlignment); ArmManagedRegister scratch = m_scratch.AsArm(); + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(scratch.AsVIXLRegister()); exception_blocks_.emplace_back( new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust)); asm_.LoadFromOffset(kLoadWord, @@ -598,11 +634,14 @@ void ArmVIXLJNIMacroAssembler::EmitExceptionPoll( if (exception->stack_adjust_ != 0) { // Fix up the frame. DecreaseFrameSize(exception->stack_adjust_); } + + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(exception->scratch_.AsVIXLRegister()); // Pass exception object as argument. // Don't care about preserving r0 as this won't return. ___ Mov(r0, exception->scratch_.AsVIXLRegister()); + temps.Include(exception->scratch_.AsVIXLRegister()); // TODO: check that exception->scratch_ is dead by this point. 
- UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); vixl32::Register temp = temps.Acquire(); ___ Ldr(temp, MemOperand(tr, @@ -624,6 +663,9 @@ void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister } else if (dest.IsCoreRegister()) { CHECK(!dest.AsVIXLRegister().Is(sp)) << dest; + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(dest.AsVIXLRegister()); + if (size == 1u) { ___ Ldrb(dest.AsVIXLRegister(), MemOperand(base, offset)); } else { diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc index f91bcfa92e..6ed0e9b670 100644 --- a/compiler/utils/arm64/assembler_arm64.cc +++ b/compiler/utils/arm64/assembler_arm64.cc @@ -40,12 +40,12 @@ size_t Arm64Assembler::CodeSize() const { } const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const { - return vixl_masm_.GetStartAddress<uint8_t*>(); + return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>(); } void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) { // Copy the instructions from the buffer. - MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize()); + MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize()); region.CopyFrom(0, from); } diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc index 10bed13dad..50a1d9fd98 100644 --- a/compiler/utils/assembler_thumb_test.cc +++ b/compiler/utils/assembler_thumb_test.cc @@ -1753,7 +1753,10 @@ TEST_F(ArmVIXLAssemblerTest, VixlLoadFromOffset) { __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400); __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400); + vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler()); + temps.Exclude(R12); __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12. + temps.Include(R12); __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000); __ LoadFromOffset(kLoadSignedByte, R2, R4, 12); @@ -1783,7 +1786,10 @@ TEST_F(ArmVIXLAssemblerTest, VixlStoreToOffset) { __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400); __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400); + vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler()); + temps.Exclude(R12); __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12. + temps.Include(R12); __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000); __ StoreToOffset(kStoreByte, R2, R4, 12); diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc index 30b708c0fc..3347dac535 100644 --- a/disassembler/disassembler_arm.cc +++ b/disassembler/disassembler_arm.cc @@ -63,9 +63,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler { case kVld2Location: case kVld3Location: case kVld4Location: { - const uintptr_t pc_delta = disasm_->IsT32() - ? 
vixl::aarch32::kT32PcDelta - : vixl::aarch32::kA32PcDelta; + const uintptr_t pc_delta = label.GetLabel()->GetPcOffset(); const int32_t offset = label.GetLabel()->GetLocation(); os() << "[pc, #" << offset - pc_delta << "]"; @@ -77,7 +75,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler { } } - DisassemblerStream& operator<<(const vixl::aarch32::Register reg) OVERRIDE { + DisassemblerStream& operator<<(vixl::aarch32::Register reg) OVERRIDE { if (reg.Is(tr)) { os() << "tr"; return *this; @@ -118,20 +116,11 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler { CustomDisassembler(std::ostream& os, const DisassemblerOptions* options) : PrintDisassembler(&disassembler_stream_), disassembler_stream_(os, this, options) {} - void PrintPc(uint32_t prog_ctr) OVERRIDE { + void PrintCodeAddress(uint32_t prog_ctr) OVERRIDE { os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": "; } - bool IsT32() const { - return is_t32_; - } - - void SetT32(bool is_t32) { - is_t32_ = is_t32; - } - private: - bool is_t32_; CustomDisassemblerStream disassembler_stream_; }; @@ -152,7 +141,7 @@ void DisassemblerArm::CustomDisassembler::CustomDisassemblerStream::PrintLiteral sizeof(unaligned_float), sizeof(unaligned_double)}; const uintptr_t begin = reinterpret_cast<uintptr_t>(options_->base_address_); const uintptr_t end = reinterpret_cast<uintptr_t>(options_->end_address_); - uintptr_t literal_addr = RoundDown(disasm_->GetPc(), vixl::aarch32::kRegSizeInBytes) + offset; + uintptr_t literal_addr = RoundDown(disasm_->GetCodeAddress(), vixl::aarch32::kRegSizeInBytes) + offset; if (!options_->absolute_addresses_) { literal_addr += begin; @@ -208,12 +197,14 @@ size_t DisassemblerArm::Dump(std::ostream& os, const uint8_t* begin) { // Remove the Thumb specifier bit; no effect if begin does not point to T32 code. const uintptr_t instr_ptr = reinterpret_cast<uintptr_t>(begin) & ~1; - disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0); - disasm_->JumpToPc(GetPc(instr_ptr)); + const bool is_t32 = (reinterpret_cast<uintptr_t>(begin) & 1) != 0; + disasm_->SetCodeAddress(GetPc(instr_ptr)); - if (disasm_->IsT32()) { + if (is_t32) { const uint16_t* const ip = reinterpret_cast<const uint16_t*>(instr_ptr); - next = reinterpret_cast<uintptr_t>(disasm_->DecodeT32At(ip)); + const uint16_t* const end_address = reinterpret_cast<const uint16_t*>( + GetDisassemblerOptions()->end_address_); + next = reinterpret_cast<uintptr_t>(disasm_->DecodeT32At(ip, end_address)); } else { const uint32_t* const ip = reinterpret_cast<const uint32_t*>(instr_ptr); next = reinterpret_cast<uintptr_t>(disasm_->DecodeA32At(ip)); @@ -230,10 +221,10 @@ void DisassemblerArm::Dump(std::ostream& os, const uint8_t* begin, const uint8_t // Remove the Thumb specifier bit; no effect if begin does not point to T32 code. const uintptr_t base = reinterpret_cast<uintptr_t>(begin) & ~1; - disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0); - disasm_->JumpToPc(GetPc(base)); + const bool is_t32 = (reinterpret_cast<uintptr_t>(begin) & 1) != 0; + disasm_->SetCodeAddress(GetPc(base)); - if (disasm_->IsT32()) { + if (is_t32) { // The Thumb specifier bits cancel each other. 
disasm_->DisassembleT32Buffer(reinterpret_cast<const uint16_t*>(base), end - begin); } else { diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java index 02c609ef7c..6b25747463 100644 --- a/test/538-checker-embed-constants/src/Main.java +++ b/test/538-checker-embed-constants/src/Main.java @@ -30,7 +30,7 @@ public class Main { /// CHECK-START-ARM: int Main.and255(int) disassembly (after) /// CHECK-NOT: movs {{r\d+}}, #255 - /// CHECK: and {{r\d+}}, {{r\d+}}, #255 + /// CHECK: and {{r\d+}}, {{r\d+}}, #0xff public static int and255(int arg) { return arg & 255; @@ -46,7 +46,7 @@ public class Main { /// CHECK-START-ARM: int Main.andNot15(int) disassembly (after) /// CHECK-NOT: mvn {{r\d+}}, #15 - /// CHECK: bic {{r\d+}}, {{r\d+}}, #15 + /// CHECK: bic {{r\d+}}, {{r\d+}}, #0xf public static int andNot15(int arg) { return arg & ~15; @@ -54,7 +54,7 @@ public class Main { /// CHECK-START-ARM: int Main.or255(int) disassembly (after) /// CHECK-NOT: movs {{r\d+}}, #255 - /// CHECK: orr {{r\d+}}, {{r\d+}}, #255 + /// CHECK: orr {{r\d+}}, {{r\d+}}, #0xff public static int or255(int arg) { return arg | 255; @@ -70,7 +70,7 @@ public class Main { /// CHECK-START-ARM: int Main.orNot15(int) disassembly (after) /// CHECK-NOT: mvn {{r\d+}}, #15 - /// CHECK: orn {{r\d+}}, {{r\d+}}, #15 + /// CHECK: orn {{r\d+}}, {{r\d+}}, #0xf public static int orNot15(int arg) { return arg | ~15; @@ -78,7 +78,7 @@ public class Main { /// CHECK-START-ARM: int Main.xor255(int) disassembly (after) /// CHECK-NOT: movs {{r\d+}}, #255 - /// CHECK: eor {{r\d+}}, {{r\d+}}, #255 + /// CHECK: eor {{r\d+}}, {{r\d+}}, #0xff public static int xor255(int arg) { return arg ^ 255; @@ -104,7 +104,7 @@ public class Main { /// CHECK-NOT: movs {{r\d+}}, #255 /// CHECK-NOT: and{{(\.w)?}} /// CHECK-NOT: bic{{(\.w)?}} - /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #255 + /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #0xff /// CHECK-DAG: movs {{r\d+}}, #0 /// CHECK-NOT: and{{(\.w)?}} /// CHECK-NOT: bic{{(\.w)?}} @@ -131,7 +131,7 @@ public class Main { /// CHECK-NOT: mvn {{r\d+}}, #15 /// CHECK-NOT: and{{(\.w)?}} /// CHECK-NOT: bic{{(\.w)?}} - /// CHECK: bic {{r\d+}}, {{r\d+}}, #15 + /// CHECK: bic {{r\d+}}, {{r\d+}}, #0xf /// CHECK-NOT: and{{(\.w)?}} /// CHECK-NOT: bic{{(\.w)?}} @@ -144,8 +144,8 @@ public class Main { /// CHECK-NOT: mvn {{r\d+}}, #15 /// CHECK-NOT: and{{(\.w)?}} /// CHECK-NOT: bic{{(\.w)?}} - /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #15 - /// CHECK-DAG: bic {{r\d+}}, {{r\d+}}, #15 + /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #0xf + /// CHECK-DAG: bic {{r\d+}}, {{r\d+}}, #0xf /// CHECK-NOT: and{{(\.w)?}} /// CHECK-NOT: bic{{(\.w)?}} @@ -157,7 +157,7 @@ public class Main { /// CHECK-NOT: movs {{r\d+}}, #255 /// CHECK-NOT: orr{{(\.w)?}} /// CHECK-NOT: orn - /// CHECK: orr {{r\d+}}, {{r\d+}}, #255 + /// CHECK: orr {{r\d+}}, {{r\d+}}, #0xff /// CHECK-NOT: orr{{(\.w)?}} /// CHECK-NOT: orn @@ -183,7 +183,7 @@ public class Main { /// CHECK-NOT: mvn {{r\d+}}, #15 /// CHECK-NOT: orr{{(\.w)?}} /// CHECK-NOT: orn - /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15 + /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #0xf /// CHECK-DAG: mvn {{r\d+}}, #0 /// CHECK-NOT: orr{{(\.w)?}} /// CHECK-NOT: orn @@ -197,8 +197,8 @@ public class Main { /// CHECK-NOT: mvn {{r\d+}}, #15 /// CHECK-NOT: orr{{(\.w)?}} /// CHECK-NOT: orn - /// CHECK-DAG: orr {{r\d+}}, {{r\d+}}, #15 - /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15 + /// CHECK-DAG: orr {{r\d+}}, {{r\d+}}, #0xf + /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #0xf /// CHECK-NOT: orr{{(\.w)?}} /// 
CHECK-NOT: orn @@ -209,7 +209,7 @@ public class Main { /// CHECK-START-ARM: long Main.xor255(long) disassembly (after) /// CHECK-NOT: movs {{r\d+}}, #255 /// CHECK-NOT: eor{{(\.w)?}} - /// CHECK: eor {{r\d+}}, {{r\d+}}, #255 + /// CHECK: eor {{r\d+}}, {{r\d+}}, #0xff /// CHECK-NOT: eor{{(\.w)?}} public static long xor255(long arg) { @@ -257,8 +257,8 @@ public class Main { /// CHECK-NOT: movs {{r\d+}}, #15 /// CHECK-NOT: mov.w {{r\d+}}, #-268435456 /// CHECK-NOT: eor{{(\.w)?}} - /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #15 - /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #4026531840 + /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #0xf + /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #0xf0000000 /// CHECK-NOT: eor{{(\.w)?}} public static long xor0xf00000000000000f(long arg) { |
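
A closing sketch of the scratch-register bookkeeping added across the JNI macro assembler, code generator, and assembler-test changes above: registers the caller hands to the macro assembler that also sit in VIXL's scratch pool (such as r12) are excluded while in use so VIXL cannot clobber them; the scope destructor, or an explicit temps.Include(), returns them afterwards. The MoveToReservedRegister helper and include path are illustrative assumptions, not ART code:

// Hypothetical illustration of the UseScratchRegisterScope::Exclude pattern.
#include "aarch32/macro-assembler-aarch32.h"

void MoveToReservedRegister(vixl::aarch32::MacroAssembler* masm,
                            vixl::aarch32::Register dest,
                            uint32_t value) {
  vixl::aarch32::UseScratchRegisterScope temps(masm);
  temps.Exclude(dest);     // dest is reserved for the caller, not for VIXL
  masm->Mov(dest, value);  // VIXL may still use dest internally to
                           // materialize the constant, as the patch notes
}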