-rw-r--r--  compiler/optimizing/code_generator_arm64.cc          |  54
-rw-r--r--  compiler/optimizing/code_generator_arm64.h           |   2
-rw-r--r--  compiler/optimizing/common_arm64.h                   |  22
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.cc  |  57
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.h   |   8
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc                |   8
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc              |   8
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc                | 130
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc             | 118
-rw-r--r--  compiler/optimizing/nodes.h                          |  38
-rw-r--r--  compiler/optimizing/nodes_arm64.h                    |  47
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc       |  43
-rw-r--r--  test/004-UnsafeTest/src/Main.java                    |  29
-rw-r--r--  test/527-checker-array-access-split/expected.txt     |   0
-rw-r--r--  test/527-checker-array-access-split/info.txt         |   1
-rw-r--r--  test/527-checker-array-access-split/src/Main.java    | 341
-rw-r--r--  tools/libcore_failures.txt                           |   7
17 files changed, 790 insertions, 123 deletions
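The core of this change is the new HArm64IntermediateAddress node. An ARM64 array access computes `base + data_offset + (index << shift)`, which cannot be encoded in a single load/store; `address + (index << shift)`, however, fits one instruction with a scaled register offset. Splitting out the `base + data_offset` computation lets GVN share it across several accesses to the same array. A minimal C++ sketch of the address arithmetic involved (the helper names here are illustrative, not ART APIs):

    #include <cstdint>

    // Hypothetical model of the split performed by
    // TryExtractArrayAccessAddress() for an int[] access.
    uintptr_t IntermediateAddress(uintptr_t array, uint32_t data_offset) {
      // One ADD instruction. The result points into the middle of the
      // object, so it must never be live across anything that can
      // trigger GC.
      return array + data_offset;
    }

    int32_t ArrayGetInt(uintptr_t address, uint64_t index) {
      // Maps to a single scaled load: ldr w0, [address, index, lsl #2].
      return *reinterpret_cast<const int32_t*>(address + (index << 2));
    }

Before the split, every access in a loop needs its own `add temp, array, #data_offset`; after the split and GVN, accesses to the same array can share one.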
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f68b11b504..1773c06e0b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1580,6 +1580,21 @@ void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
   HandleBinaryOp(instruction);
 }
 
+void LocationsBuilderARM64::VisitArm64IntermediateAddress(HArm64IntermediateAddress* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
+    HArm64IntermediateAddress* instruction) {
+  __ Add(OutputRegister(instruction),
+         InputRegisterAt(instruction, 0),
+         Operand(InputOperandAt(instruction, 1)));
+}
+
 void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -1593,14 +1608,16 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
 }
 
 void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
   Primitive::Type type = instruction->GetType();
   Register obj = InputRegisterAt(instruction, 0);
-  Location index = locations->InAt(1);
+  Location index = instruction->GetLocations()->InAt(1);
   size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
   MemOperand source = HeapOperand(obj);
+  CPURegister dest = OutputCPURegister(instruction);
+
   MacroAssembler* masm = GetVIXLAssembler();
   UseScratchRegisterScope temps(masm);
+  // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
   BlockPoolsScope block_pools(masm);
 
   if (index.IsConstant()) {
@@ -1608,15 +1625,26 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
     source = HeapOperand(obj, offset);
   } else {
     Register temp = temps.AcquireSameSizeAs(obj);
-    __ Add(temp, obj, offset);
+    if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+      // We do not need to compute the intermediate address from the array: the
+      // input instruction has done it already. See the comment in
+      // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+      if (kIsDebugBuild) {
+        HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+        DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+      }
+      temp = obj;
+    } else {
+      __ Add(temp, obj, offset);
+    }
     source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
   }
 
-  codegen_->Load(type, OutputCPURegister(instruction), source);
+  codegen_->Load(type, dest, source);
   codegen_->MaybeRecordImplicitNullCheck(instruction);
-  if (type == Primitive::kPrimNot) {
-    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
+  if (instruction->GetType() == Primitive::kPrimNot) {
+    GetAssembler()->MaybeUnpoisonHeapReference(dest.W());
   }
 }
 
@@ -1670,7 +1698,18 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
     } else {
       UseScratchRegisterScope temps(masm);
       Register temp = temps.AcquireSameSizeAs(array);
-      __ Add(temp, array, offset);
+      if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+        // We do not need to compute the intermediate address from the array: the
+        // input instruction has done it already. See the comment in
+        // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+        if (kIsDebugBuild) {
+          HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+          DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+        }
+        temp = array;
+      } else {
+        __ Add(temp, array, offset);
+      }
       destination = HeapOperand(temp,
                                 XRegisterFrom(index),
                                 LSL,
@@ -1680,6 +1719,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
     codegen_->MaybeRecordImplicitNullCheck(instruction);
   } else {
     DCHECK(needs_write_barrier);
+    DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
     vixl::Label done;
     SlowPathCodeARM64* slow_path = nullptr;
     {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a068b48797..799f1bdcff 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -382,7 +382,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
                      uint32_t dex_pc,
                      SlowPathCode* slow_path);
 
-  ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; }
+  ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
 
   bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
     return false;
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 4abe5e953c..e1a8c9cc0f 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -203,19 +203,23 @@ static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst
 
   int64_t value = CodeGenerator::GetInt64ValueOf(constant);
 
-  if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
-      instr->IsCompare() || instr->IsBoundsCheck()) {
-    // Uses aliases of ADD/SUB instructions.
-    // If `value` does not fit but `-value` does, VIXL will automatically use
-    // the 'opposite' instruction.
-    return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
-  } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
+  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
     // Uses logical operations.
     return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
-  } else {
-    DCHECK(instr->IsNeg());
+  } else if (instr->IsNeg()) {
     // Uses mov -immediate.
     return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+  } else {
+    DCHECK(instr->IsAdd() ||
+           instr->IsArm64IntermediateAddress() ||
+           instr->IsBoundsCheck() ||
+           instr->IsCompare() ||
+           instr->IsCondition() ||
+           instr->IsSub());
+    // Uses aliases of ADD/SUB instructions.
+    // If `value` does not fit but `-value` does, VIXL will automatically use
+    // the 'opposite' instruction.
+    return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
   }
 }
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 4b2d36f443..eb79f469eb 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -16,8 +16,65 @@
 
 #include "instruction_simplifier_arm64.h"
 
+#include "mirror/array-inl.h"
+
 namespace art {
 namespace arm64 {
 
+void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstruction* access,
+                                                                     HInstruction* array,
+                                                                     HInstruction* index,
+                                                                     int access_size) {
+  if (index->IsConstant() ||
+      (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
+    // When the index is a constant all the addressing can be fitted in the
+    // memory access instruction, so do not split the access.
+    return;
+  }
+  if (access->IsArraySet() &&
+      access->AsArraySet()->GetValue()->GetType() == Primitive::kPrimNot) {
+    // The access may require a runtime call or the original array pointer.
+    return;
+  }
+
+  // Proceed to extract the base address computation.
+  ArenaAllocator* arena = GetGraph()->GetArena();
+
+  HIntConstant* offset =
+      GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
+  HArm64IntermediateAddress* address =
+      new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
+  access->GetBlock()->InsertInstructionBefore(address, access);
+  access->ReplaceInput(address, 0);
+  // Both instructions must depend on GC to prevent any instruction that can
+  // trigger GC to be inserted between the two.
+  access->AddSideEffects(SideEffects::DependsOnGC());
+  DCHECK(address->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+  DCHECK(access->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+  // TODO: Code generation for HArrayGet and HArraySet will check whether the input address
+  // is an HArm64IntermediateAddress and generate appropriate code.
+  // We would like to replace the `HArrayGet` and `HArraySet` with custom instructions (maybe
+  // `HArm64Load` and `HArm64Store`). We defer these changes because these new instructions would
+  // not bring any advantages yet.
+  // Also see the comments in
+  // `InstructionCodeGeneratorARM64::VisitArrayGet()` and
+  // `InstructionCodeGeneratorARM64::VisitArraySet()`.
+  RecordSimplification();
+}
+
+void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
+  TryExtractArrayAccessAddress(instruction,
+                               instruction->GetArray(),
+                               instruction->GetIndex(),
+                               Primitive::ComponentSize(instruction->GetType()));
+}
+
+void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
+  TryExtractArrayAccessAddress(instruction,
+                               instruction->GetArray(),
+                               instruction->GetIndex(),
+                               Primitive::ComponentSize(instruction->GetComponentType()));
+}
+
 }  // namespace arm64
 }  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index d7f4eaee80..4b697dba0e 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -35,6 +35,14 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
     }
   }
 
+  void TryExtractArrayAccessAddress(HInstruction* access,
+                                    HInstruction* array,
+                                    HInstruction* index,
+                                    int access_size);
+
+  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+  void VisitArraySet(HArraySet* instruction) OVERRIDE;
+
   OptimizingCompilerStats* stats_;
 };
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 58e479afc7..0a5acc3e64 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -961,6 +961,14 @@ void IntrinsicLocationsBuilderARM::VisitUnsafeCASInt(HInvoke* invoke) {
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
 }
 void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The UnsafeCASObject intrinsic does not always work when heap
+  // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+  // off temporarily as a quick fix.
+  // TODO(rpl): Fix it and turn it back on.
+  if (kPoisonHeapReferences) {
+    return;
+  }
+
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
 }
 void IntrinsicCodeGeneratorARM::VisitUnsafeCASInt(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4da94ee9b3..059abf090d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1087,6 +1087,14 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeCASLong(HInvoke* invoke) {
   CreateIntIntIntIntIntToInt(arena_, invoke);
 }
 void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The UnsafeCASObject intrinsic does not always work when heap
+  // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+  // off temporarily as a quick fix.
+  // TODO(rpl): Fix it and turn it back on.
+  if (kPoisonHeapReferences) {
+    return;
+  }
+
   CreateIntIntIntIntIntToInt(arena_, invoke);
 }
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8a7aded935..040bf6a45e 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -45,7 +45,7 @@ IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* cod
 
 X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
-  return reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  return down_cast<X86Assembler*>(codegen_->GetAssembler());
 }
 
 ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
@@ -1728,7 +1728,7 @@ static void GenUnsafePut(LocationSummary* locations,
                          Primitive::Type type,
                          bool is_volatile,
                          CodeGeneratorX86* codegen) {
-  X86Assembler* assembler = reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
   Register base = locations->InAt(1).AsRegister<Register>();
   Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
   Location value_loc = locations->InAt(3);
@@ -1822,7 +1822,7 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type ty
   locations->SetOut(Location::RegisterLocation(EAX));
   if (type == Primitive::kPrimNot) {
     // Need temp registers for card-marking.
-    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     // Need a byte register for marking.
     locations->AddTemp(Location::RegisterLocation(ECX));
   }
@@ -1837,20 +1837,11 @@ void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
 }
 
 void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic does not always work when heap
-  // poisoning is enabled (it breaks several libcore tests); turn it
-  // off temporarily as a quick fix.
-  // TODO(rpl): Fix it and turn it back on.
-  if (kPoisonHeapReferences) {
-    return;
-  }
-
   CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
 }
 
 static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
-  X86Assembler* assembler =
-      reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register base = locations->InAt(1).AsRegister<Register>();
@@ -1858,47 +1849,92 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code
   Location out = locations->Out();
   DCHECK_EQ(out.AsRegister<Register>(), EAX);
 
-  if (type == Primitive::kPrimLong) {
-    DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
-    DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
-    DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
-    DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
-    __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
-  } else {
-    // Integer or object.
+  if (type == Primitive::kPrimNot) {
     Register expected = locations->InAt(3).AsRegister<Register>();
+    // Ensure `expected` is in EAX (required by the CMPXCHG instruction).
     DCHECK_EQ(expected, EAX);
     Register value = locations->InAt(4).AsRegister<Register>();
-    if (type == Primitive::kPrimNot) {
-      // Mark card for object assuming new value is stored.
-      bool value_can_be_null = true;  // TODO: Worth finding out this information?
-      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
-                          locations->GetTemp(1).AsRegister<Register>(),
-                          base,
-                          value,
-                          value_can_be_null);
-
-      if (kPoisonHeapReferences) {
-        __ PoisonHeapReference(expected);
-        __ PoisonHeapReference(value);
+
+    // Mark card for object assuming new value is stored.
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
+                        locations->GetTemp(1).AsRegister<Register>(),
+                        base,
+                        value,
+                        value_can_be_null);
+
+    bool base_equals_value = (base == value);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value` to a temporary register. This way, poisoning
+        // `value` won't invalidate `base`.
+        value = locations->GetTemp(0).AsRegister<Register>();
+        __ movl(value, base);
       }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (EAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value, expected);
+      DCHECK_NE(base, expected);
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(value);
     }
 
     __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
-  }
 
-  // locked cmpxchg has full barrier semantics, and we don't need scheduling
-  // barriers at this time.
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
 
-  // Convert ZF into the boolean result.
-  __ setb(kZero, out.AsRegister<Register>());
-  __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
+    // Convert ZF into the boolean result.
+    __ setb(kZero, out.AsRegister<Register>());
+    __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
 
-  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    Register value = locations->InAt(4).AsRegister<Register>();
-    __ UnpoisonHeapReference(value);
-    // Do not unpoison the reference contained in register `expected`,
-    // as it is the same as register `out`.
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value` has been moved to a temporary register, no need to
+        // unpoison it.
+      } else {
+        // Ensure `value` is different from `out`, so that unpoisoning
+        // the former does not invalidate the latter.
+        DCHECK_NE(value, out.AsRegister<Register>());
+        __ UnpoisonHeapReference(value);
+      }
+      // Do not unpoison the reference contained in register
+      // `expected`, as it is the same as register `out` (EAX).
+    }
+  } else {
+    if (type == Primitive::kPrimInt) {
+      // Ensure the expected value is in EAX (required by the CMPXCHG
+      // instruction).
+      DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
+      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0),
+                      locations->InAt(4).AsRegister<Register>());
+    } else if (type == Primitive::kPrimLong) {
+      // Ensure the expected value is in EAX:EDX and that the new
+      // value is in EBX:ECX (required by the CMPXCHG8B instruction).
+      DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
+      DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
+      DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
+      DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
+      __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
+    } else {
+      LOG(FATAL) << "Unexpected CAS type " << type;
+    }
+
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
+
+    // Convert ZF into the boolean result.
+    __ setb(kZero, out.AsRegister<Register>());
+    __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
   }
 }
 
@@ -1936,8 +1972,7 @@ static void SwapBits(Register reg, Register temp, int32_t shift, int32_t mask,
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
-  X86Assembler* assembler =
-      reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg = locations->InAt(0).AsRegister<Register>();
@@ -1968,8 +2003,7 @@ void IntrinsicLocationsBuilderX86::VisitLongReverse(HInvoke* invoke) {
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
-  X86Assembler* assembler =
-      reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg_low = locations->InAt(0).AsRegisterPairLow<Register>();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7a1d92d2fe..14c65c9aaf 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -41,7 +41,7 @@ IntrinsicLocationsBuilderX86_64::IntrinsicLocationsBuilderX86_64(CodeGeneratorX8
 
 X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
-  return reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  return down_cast<X86_64Assembler*>(codegen_->GetAssembler());
 }
 
 ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
@@ -1822,7 +1822,7 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke
 // memory model.
 static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool is_volatile,
                          CodeGeneratorX86_64* codegen) {
-  X86_64Assembler* assembler = reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
   CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
   CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
   CpuRegister value = locations->InAt(3).AsRegister<CpuRegister>();
@@ -1895,7 +1895,7 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type ty
   locations->SetOut(Location::RequiresRegister());
   if (type == Primitive::kPrimNot) {
     // Need temp registers for card-marking.
-    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     locations->AddTemp(Location::RequiresRegister());
   }
 }
 
@@ -1909,61 +1909,95 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) {
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic does not always work when heap
-  // poisoning is enabled (it breaks several libcore tests); turn it
-  // off temporarily as a quick fix.
-  // TODO(rpl): Fix it and turn it back on.
-  if (kPoisonHeapReferences) {
-    return;
-  }
-
   CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
 }
 
 static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) {
-  X86_64Assembler* assembler =
-      reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
   CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
   CpuRegister expected = locations->InAt(3).AsRegister<CpuRegister>();
+  // Ensure `expected` is in RAX (required by the CMPXCHG instruction).
   DCHECK_EQ(expected.AsRegister(), RAX);
   CpuRegister value = locations->InAt(4).AsRegister<CpuRegister>();
   CpuRegister out = locations->Out().AsRegister<CpuRegister>();
 
-  if (type == Primitive::kPrimLong) {
-    __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
-  } else {
-    // Integer or object.
-    if (type == Primitive::kPrimNot) {
-      // Mark card for object assuming new value is stored.
-      bool value_can_be_null = true;  // TODO: Worth finding out this information?
-      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
-                          locations->GetTemp(1).AsRegister<CpuRegister>(),
-                          base,
-                          value,
-                          value_can_be_null);
-
-      if (kPoisonHeapReferences) {
-        __ PoisonHeapReference(expected);
-        __ PoisonHeapReference(value);
+  if (type == Primitive::kPrimNot) {
+    // Mark card for object assuming new value is stored.
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
+                        locations->GetTemp(1).AsRegister<CpuRegister>(),
+                        base,
+                        value,
+                        value_can_be_null);
+
+    bool base_equals_value = (base.AsRegister() == value.AsRegister());
+    Register value_reg = value.AsRegister();
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value_reg` to a temporary register. This way, poisoning
+        // `value_reg` won't invalidate `base`.
+        value_reg = locations->GetTemp(0).AsRegister<CpuRegister>().AsRegister();
+        __ movl(CpuRegister(value_reg), base);
       }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (RAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value_reg, expected.AsRegister());
+      DCHECK_NE(base.AsRegister(), expected.AsRegister());
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(CpuRegister(value_reg));
     }
 
-    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
-  }
+    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
 
-  // locked cmpxchg has full barrier semantics, and we don't need scheduling
-  // barriers at this time.
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
 
-  // Convert ZF into the boolean result.
-  __ setcc(kZero, out);
-  __ movzxb(out, out);
+    // Convert ZF into the boolean result.
+    __ setcc(kZero, out);
+    __ movzxb(out, out);
 
-  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    __ UnpoisonHeapReference(value);
-    __ UnpoisonHeapReference(expected);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value_reg` has been moved to a temporary register, no need
+        // to unpoison it.
+      } else {
+        // Ensure `value` is different from `out`, so that unpoisoning
+        // the former does not invalidate the latter.
+        DCHECK_NE(value_reg, out.AsRegister());
+        __ UnpoisonHeapReference(CpuRegister(value_reg));
+      }
+      // Ensure `expected` is different from `out`, so that unpoisoning
+      // the former does not invalidate the latter.
+      DCHECK_NE(expected.AsRegister(), out.AsRegister());
+      __ UnpoisonHeapReference(expected);
+    }
+  } else {
+    if (type == Primitive::kPrimInt) {
+      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
+    } else if (type == Primitive::kPrimLong) {
+      __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
+    } else {
+      LOG(FATAL) << "Unexpected CAS type " << type;
+    }
+
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
+
+    // Convert ZF into the boolean result.
+    __ setcc(kZero, out);
+    __ movzxb(out, out);
   }
 }
 
@@ -2001,8 +2035,7 @@ static void SwapBits(CpuRegister reg, CpuRegister temp, int32_t shift, int32_t m
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerReverse(HInvoke* invoke) {
-  X86_64Assembler* assembler =
-      reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
@@ -2046,8 +2079,7 @@ static void SwapBits64(CpuRegister reg, CpuRegister temp, CpuRegister temp_mask,
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongReverse(HInvoke* invoke) {
-  X86_64Assembler* assembler =
-      reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e95f400099..cadb24c659 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1080,16 +1080,25 @@ class HLoopInformationOutwardIterator : public ValueObject {
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
 
+#ifndef ART_ENABLE_CODEGEN_arm64
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
+  M(Arm64IntermediateAddress, Instruction)
+#endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
 
+#ifndef ART_ENABLE_CODEGEN_x86
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
+#else
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
   M(X86ComputeBaseMethodAddress, Instruction) \
   M(X86LoadFromConstantTable, Instruction) \
   M(X86PackedSwitch, Instruction)
+#endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
 
@@ -1374,6 +1383,10 @@ class SideEffects : public ValueObject {
     return SideEffects(flags_ & ~other.flags_);
   }
 
+  void Add(SideEffects other) {
+    flags_ |= other.flags_;
+  }
+
   bool Includes(SideEffects other) const {
     return (other.flags_ & flags_) == other.flags_;
   }
@@ -1947,6 +1960,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
   }
 
   SideEffects GetSideEffects() const { return side_effects_; }
+  void AddSideEffects(SideEffects other) { side_effects_.Add(other); }
 
   size_t GetLifetimePosition() const { return lifetime_position_; }
   void SetLifetimePosition(size_t position) { lifetime_position_ = position; }
@@ -2016,7 +2030,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
   // order of blocks where this instruction's live interval start.
   size_t lifetime_position_;
 
-  const SideEffects side_effects_;
+  SideEffects side_effects_;
 
   // TODO: for primitive types this should be marked as invalid.
   ReferenceTypeInfo reference_type_info_;
 
@@ -4453,8 +4467,11 @@ class HArrayGet : public HExpression<2> {
   HArrayGet(HInstruction* array,
             HInstruction* index,
             Primitive::Type type,
-            uint32_t dex_pc)
-      : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
+            uint32_t dex_pc,
+            SideEffects additional_side_effects = SideEffects::None())
+      : HExpression(type,
+                    SideEffects::ArrayReadOfType(type).Union(additional_side_effects),
+                    dex_pc) {
     SetRawInputAt(0, array);
     SetRawInputAt(1, index);
   }
@@ -4489,10 +4506,13 @@ class HArraySet : public HTemplateInstruction<3> {
             HInstruction* index,
             HInstruction* value,
             Primitive::Type expected_component_type,
-            uint32_t dex_pc)
+            uint32_t dex_pc,
+            SideEffects additional_side_effects = SideEffects::None())
       : HTemplateInstruction(
             SideEffects::ArrayWriteOfType(expected_component_type).Union(
-                SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
+                SideEffectsForArchRuntimeCalls(value->GetType())).Union(
+                    additional_side_effects),
+            dex_pc),
         expected_component_type_(expected_component_type),
         needs_type_check_(value->GetType() == Primitive::kPrimNot),
         value_can_be_null_(true),
@@ -4547,6 +4567,10 @@ class HArraySet : public HTemplateInstruction<3> {
         : expected_component_type_;
   }
 
+  Primitive::Type GetRawExpectedComponentType() const {
+    return expected_component_type_;
+  }
+
   static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) {
     return (value_type == Primitive::kPrimNot) ? SideEffects::CanTriggerGC() : SideEffects::None();
   }
@@ -4605,6 +4629,7 @@ class HBoundsCheck : public HExpression<2> {
 
   bool CanThrow() const OVERRIDE { return true; }
 
+  HInstruction* GetIndex() const { return InputAt(0); }
 
   DECLARE_INSTRUCTION(BoundsCheck);
 
@@ -5432,6 +5457,9 @@ class HParallelMove : public HTemplateInstruction<0> {
 
 }  // namespace art
 
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "nodes_arm64.h"
+#endif
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "nodes_x86.h"
 #endif
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
new file mode 100644
index 0000000000..885d3a29ee
--- /dev/null
+++ b/compiler/optimizing/nodes_arm64.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+
+namespace art {
+
+// This instruction computes an intermediate address pointing in the 'middle' of an object. The
+// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
+// never used across anything that can trigger GC.
+class HArm64IntermediateAddress : public HExpression<2> {
+ public:
+  HArm64IntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
+      : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
+    SetRawInputAt(0, base_address);
+    SetRawInputAt(1, offset);
+  }
+
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+  HInstruction* GetBaseAddress() const { return InputAt(0); }
+  HInstruction* GetOffset() const { return InputAt(1); }
+
+  DECLARE_INSTRUCTION(Arm64IntermediateAddress);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 16f29b00bc..4413906fd7 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -391,10 +391,30 @@ TEST_F(AssemblerMIPS64Test, Srl) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
 }
 
+TEST_F(AssemblerMIPS64Test, Rotr) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
+}
+
 TEST_F(AssemblerMIPS64Test, Sra) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
 }
 
+TEST_F(AssemblerMIPS64Test, Sllv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "sllv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srlv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "srlv");
+}
+
+TEST_F(AssemblerMIPS64Test, Rotrv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Rotrv, "rotrv ${reg1}, ${reg2}, ${reg3}"), "rotrv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srav) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "srav");
+}
+
 TEST_F(AssemblerMIPS64Test, Dsll) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
 }
@@ -403,20 +423,33 @@ TEST_F(AssemblerMIPS64Test, Dsrl) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
 }
 
+TEST_F(AssemblerMIPS64Test, Drotr) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr, 5, "drotr ${reg1}, ${reg2}, {imm}"),
+            "drotr");
+}
+
 TEST_F(AssemblerMIPS64Test, Dsra) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsll32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"), "dsll32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"),
+            "dsll32");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsrl32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"), "dsrl32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"),
+            "dsrl32");
+}
+
+TEST_F(AssemblerMIPS64Test, Drotr32) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr32, 5, "drotr32 ${reg1}, ${reg2}, {imm}"),
+            "drotr32");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsra32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"), "dsra32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"),
+            "dsra32");
 }
 
 TEST_F(AssemblerMIPS64Test, Sc) {
@@ -435,10 +468,6 @@ TEST_F(AssemblerMIPS64Test, Lld) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lld, -9, "lld ${reg1}, {imm}(${reg2})"), "lld");
 }
 
-TEST_F(AssemblerMIPS64Test, Rotr) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
-}
-
 TEST_F(AssemblerMIPS64Test, Seleqz) {
   DriverStr(RepeatRRR(&mips64::Mips64Assembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
             "seleqz");
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index c93db50ab8..5b22e88014 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -129,13 +129,36 @@ public class Main {
       System.out.println("Unexpectedly not succeeding compareAndSwapLong...");
     }
 
-    if (unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+    // We do not use `null` as argument to sun.misc.Unsafe.compareAndSwapObject
+    // in those tests, as this value is not affected by heap poisoning
+    // (which uses address negation to poison and unpoison heap object
+    // references). This way, when heap poisoning is enabled, we can
+    // better exercise its implementation within that method.
+    if (unsafe.compareAndSwapObject(t, objectOffset, new Object(), new Object())) {
       System.out.println("Unexpectedly succeeding compareAndSwapObject...");
     }
-    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, null)) {
+    Object objectValue2 = new Object();
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, objectValue2)) {
       System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
     }
-    if (!unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+    Object objectValue3 = new Object();
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue2, objectValue3)) {
+      System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj` and `newValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue3, t)) {
+      System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj`, `expectedValue` and `newValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, t, t)) {
+      System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj` and `expectedValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, t, new Object())) {
       System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
     }
   }
diff --git a/test/527-checker-array-access-split/expected.txt b/test/527-checker-array-access-split/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/527-checker-array-access-split/expected.txt
diff --git a/test/527-checker-array-access-split/info.txt b/test/527-checker-array-access-split/info.txt
new file mode 100644
index 0000000000..920680462d
--- /dev/null
+++ b/test/527-checker-array-access-split/info.txt
@@ -0,0 +1 @@
+Test arm64-specific array access optimization.
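The checker tests in the new Main.java below match the array data offset as `IntConstant 12`. For an `int[]`, `mirror::Array::DataOffset(4).Uint32Value()` is 12 because the elements follow an 8-byte object header (class pointer plus lock word) and the 4-byte length field, rounded up to the component alignment. A simplified model of that computation (a sketch assuming the 32-bit compressed-reference object layout; the constant names are made up):

    #include <cstddef>

    // Round x up to a multiple of n (n a power of two).
    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    constexpr size_t kClassPointerSize = 4;  // compressed reference
    constexpr size_t kLockWordSize = 4;
    constexpr size_t kLengthFieldSize = 4;

    // Simplified model of mirror::Array::DataOffset().
    constexpr size_t DataOffset(size_t component_size) {
      return RoundUp(kClassPointerSize + kLockWordSize + kLengthFieldSize,
                     component_size);
    }

    static_assert(DataOffset(4) == 12, "matches the IntConstant 12 in the checks");
    static_assert(DataOffset(8) == 16, "long[]/double[] data starts at 16");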
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
new file mode 100644
index 0000000000..ead94464bf
--- /dev/null
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  /**
+   * Test that HArrayGet with a constant index is not split.
+   */
+
+  /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (before)
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: ArrayGet [<<Array>>,<<Index>>]
+
+  /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (after)
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK-NOT: Arm64IntermediateAddress
+  /// CHECK: ArrayGet [<<Array>>,<<Index>>]
+
+  public static int constantIndexGet(int array[]) {
+    return array[1];
+  }
+
+  /**
+   * Test that HArraySet with a constant index is not split.
+   */
+
+  /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (before)
+  /// CHECK: <<Const2:i\d+>> IntConstant 2
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+  /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (after)
+  /// CHECK: <<Const2:i\d+>> IntConstant 2
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK-NOT: Arm64IntermediateAddress
+  /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+  public static void constantIndexSet(int array[]) {
+    array[1] = 2;
+  }
+
+  /**
+   * Test basic splitting of HArrayGet.
+   */
+
+  /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: ArrayGet [<<Array>>,<<Index>>]
+
+  /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK: <<DataOffset:i\d+>> IntConstant
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: ArrayGet [<<Address>>,<<Index>>]
+
+  public static int get(int array[], int index) {
+    return array[index];
+  }
+
+  /**
+   * Test basic splitting of HArraySet.
+   */
+
+  /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (before)
+  /// CHECK: ParameterValue
+  /// CHECK: ParameterValue
+  /// CHECK: <<Arg:i\d+>> ParameterValue
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Arg>>]
+
+  /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (after)
+  /// CHECK: ParameterValue
+  /// CHECK: ParameterValue
+  /// CHECK: <<Arg:i\d+>> ParameterValue
+  /// CHECK: <<DataOffset:i\d+>> IntConstant
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: ArraySet [<<Address>>,<<Index>>,<<Arg>>]
+
+  public static void set(int array[], int index, int value) {
+    array[index] = value;
+  }
+
+  /**
+   * Check that the intermediate address can be shared after GVN.
+   */
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK: <<Const1:i\d+>> IntConstant 1
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) GVN_after_arch (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK-NOT: Arm64IntermediateAddress
+  /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+  public static void getSet(int array[], int index) {
+    array[index] = array[index] + 1;
+  }
+
+  /**
+   * Check that the intermediate address computation is not reordered or merged
+   * across IRs that can trigger GC.
+   */
+
+  /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK: <<Const1:i\d+>> IntConstant 1
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK: NewArray
+  /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK: NewArray
+  /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) GVN_after_arch (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+  /// CHECK: <<Array:l\d+>> NullCheck
+  /// CHECK: <<Index:i\d+>> BoundsCheck
+  /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK: NewArray
+  /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  public static int[] accrossGC(int array[], int index) {
+    int tmp = array[index] + 1;
+    int[] new_array = new int[1];
+    array[index] = tmp;
+    return new_array;
+  }
+
+  /**
+   * Test that the intermediate address is shared between array accesses after
+   * the bounds checks have been removed by BCE.
+   */
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (before)
+  /// CHECK: <<Const1:i\d+>> IntConstant 1
+  /// CHECK: <<Array:l\d+>> NewArray
+  /// CHECK: <<Index:i\d+>> Phi
+  /// CHECK: If
+  //  -------------- Loop
+  /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  // By the time we reach the architecture-specific instruction simplifier, BCE
+  // has removed the bounds checks in the loop.
+
+  // Note that we do not care that the `DataOffset` is `12`. But if we do not
+  // specify it and any other `IntConstant` appears before that instruction,
+  // checker will match the previous `IntConstant`, and we will thus fail the
+  // check.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+  /// CHECK: <<Array:l\d+>> NewArray
+  /// CHECK: <<Index:i\d+>> Phi
+  /// CHECK: If
+  //  -------------- Loop
+  /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN_after_arch (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+  /// CHECK: <<Array:l\d+>> NewArray
+  /// CHECK: <<Index:i\d+>> Phi
+  /// CHECK: If
+  //  -------------- Loop
+  /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK-NOT: Arm64IntermediateAddress
+  /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+  public static int canMergeAfterBCE1() {
+    int[] array = {0, 1, 2, 3};
+    for (int i = 0; i < array.length; i++) {
+      array[i] = array[i] + 1;
+    }
+    return array[array.length - 1];
+  }
+
+  /**
+   * This test case is similar to `canMergeAfterBCE1`, but with different
+   * indexes for the accesses.
+   */
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (before)
+  /// CHECK: <<Const1:i\d+>> IntConstant 1
+  /// CHECK: <<Array:l\d+>> NewArray
+  /// CHECK: <<Index:i\d+>> Phi
+  /// CHECK: If
+  //  -------------- Loop
+  /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Array>>,<<Index1>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK: ArraySet [<<Array>>,<<Index1>>,<<Add>>]
+
+  // Note that we do not care that the `DataOffset` is `12`. But if we do not
+  // specify it and any other `IntConstant` appears before that instruction,
+  // checker will match the previous `IntConstant`, and we will thus fail the
+  // check.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+  /// CHECK: <<Array:l\d+>> NewArray
+  /// CHECK: <<Index:i\d+>> Phi
+  /// CHECK: If
+  //  -------------- Loop
+  /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK-DAG: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address2>>,<<Index1>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK: <<Address3:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+  /// CHECK: <<Array:l\d+>> NewArray
+  /// CHECK: <<Index:i\d+>> Phi
+  /// CHECK: If
+  //  -------------- Loop
+  /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address>>,<<Index1>>]
+  /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK: ArraySet [<<Address>>,<<Index1>>,<<Add>>]
+
+  // There should be only one intermediate address computation in the loop.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+  /// CHECK: Arm64IntermediateAddress
+  /// CHECK-NOT: Arm64IntermediateAddress
+
+  public static int canMergeAfterBCE2() {
+    int[] array = {0, 1, 2, 3};
+    for (int i = 0; i < array.length - 1; i++) {
+      array[i + 1] = array[i] + array[i + 1];
+    }
+    return array[array.length - 1];
+  }
+
+
+  public static void main(String[] args) {
+    int[] array = {123, 456, 789};
+
+    assertIntEquals(456, constantIndexGet(array));
+
+    constantIndexSet(array);
+    assertIntEquals(2, array[1]);
+
+    assertIntEquals(789, get(array, 2));
+
+    set(array, 1, 456);
+    assertIntEquals(456, array[1]);
+
+    getSet(array, 0);
+    assertIntEquals(124, array[0]);
+
+    accrossGC(array, 0);
+    assertIntEquals(125, array[0]);
+
+    assertIntEquals(4, canMergeAfterBCE1());
+    assertIntEquals(6, canMergeAfterBCE2());
+  }
+}
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index b4f686fde9..81ea79a6e2 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -164,5 +164,12 @@
   names: ["libcore.java.text.NumberFormatTest#test_currencyFromLocale",
           "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits"],
   bug: 25136848
+},
+{
+  description: "Lack of IPv6 on some buildbot slaves",
+  result: EXEC_FAILED,
+  names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
+          "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6"],
+  bug: 25178637
 }
 ]
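As background for the GenCAS changes above: with heap poisoning enabled, ART stores heap references in negated form, so poisoning and unpoisoning are the same operation and poisoning twice is the identity. That is why GenCAS must guarantee that `value` and `expected` never share a register. A standalone sketch of the invariant (assuming 32-bit references poisoned by two's-complement negation, as the comments in the diff describe):

    #include <cassert>
    #include <cstdint>

    // Poisoning and unpoisoning are both negation, each the inverse of
    // the other.
    uint32_t PoisonHeapReference(uint32_t ref) { return 0u - ref; }
    uint32_t UnpoisonHeapReference(uint32_t ref) { return 0u - ref; }

    int main() {
      uint32_t ref = 0x12345678u;
      assert(UnpoisonHeapReference(PoisonHeapReference(ref)) == ref);
      // Negating twice is the identity: if `expected` and `value` were
      // the same register, poisoning "both" would poison that register
      // twice, i.e. not at all. This is the property checked by
      // DCHECK_NE(value, expected) in GenCAS().
      assert(PoisonHeapReference(PoisonHeapReference(ref)) == ref);
      return 0;
    }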