Diffstat (limited to 'compiler/optimizing')
| -rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 31 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.cc | 21 |
| -rw-r--r-- | compiler/optimizing/code_generator_mips.cc | 29 |
| -rw-r--r-- | compiler/optimizing/code_generator_mips64.cc | 6 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 24 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 24 |
| -rw-r--r-- | compiler/optimizing/inliner.cc | 4 |
| -rw-r--r-- | compiler/optimizing/intrinsics_arm.cc | 15 |
| -rw-r--r-- | compiler/optimizing/intrinsics_arm64.cc | 14 |
| -rw-r--r-- | compiler/optimizing/intrinsics_x86_64.cc | 17 |
10 files changed, 121 insertions, 64 deletions
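The change below is the same across all six back ends: interface dispatch used to index an IMT embedded directly in the mirror::Class object (mirror::Class::EmbeddedImTableEntryOffset with mirror::Class::kImtSize), and now goes through a pointer to a separately allocated ImTable (mirror::Class::ImtPtrOffset, then ImTable::OffsetOfElement with ImTable::kSize), at the cost of one extra dependent load per invoke-interface. Here is a minimal C++ sketch of the before/after address arithmetic; the constants and helper names are illustrative stand-ins, not ART's real API.

#include <cstddef>
#include <cstdint>

// Illustrative values -- ART derives both from the runtime's object layout.
constexpr size_t kImtSize = 64;       // stand-in for ImTable::kSize
constexpr size_t kImtPtrOffset = 40;  // stand-in for mirror::Class::ImtPtrOffset()

uintptr_t LoadWord(uintptr_t address) {
  return *reinterpret_cast<uintptr_t*>(address);
}

// Before: one load -- the IMT was an array embedded in the Class object:
//   method = *(klass + EmbeddedImTableEntryOffset(imt_index % kImtSize))
// After: two loads -- the Class object stores only a pointer to the ImTable.
uintptr_t GetImtEntry(uintptr_t klass, uint32_t imt_index, size_t pointer_size) {
  uintptr_t imt = LoadWord(klass + kImtPtrOffset);  // the new, extra load
  size_t method_offset = (imt_index % kImtSize) * pointer_size;  // OffsetOfElement
  return LoadWord(imt + method_offset);             // yields the ArtMethod*
}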
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 197e473473..45e9b5889a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1871,8 +1871,6 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   Register hidden_reg = locations->GetTemp(1).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

@@ -1898,10 +1896,14 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  __ LoadFromOffset(kLoadWord, temp, temp,
+      mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kArmPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
+  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   uint32_t entry_point =
       ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
-  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // LR = temp->GetEntryPoint();
   __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
   // LR();
@@ -6770,18 +6772,25 @@ void LocationsBuilderARM::VisitClassTableGet(HClassTableGet* instruction) {
 void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  uint32_t method_offset = 0;
   if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
-    method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArmPointerSize).SizeValue();
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      method_offset);
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
+    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kArmPointerSize));
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->Out().AsRegister<Register>(),
+                      method_offset);
   }
-  __ LoadFromOffset(kLoadWord,
-                    locations->Out().AsRegister<Register>(),
-                    locations->InAt(0).AsRegister<Register>(),
-                    method_offset);
 }

 #undef __
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9680f2bf45..8e58b15baa 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3527,8 +3527,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   LocationSummary* locations = invoke->GetLocations();
   Register temp = XRegisterFrom(locations->GetTemp(0));
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   Offset class_offset = mirror::Object::ClassOffset();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
@@ -3558,6 +3556,10 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
+  __ Ldr(temp,
+      MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kArm64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
@@ -5180,16 +5182,19 @@ void LocationsBuilderARM64::VisitClassTableGet(HClassTableGet* instruction) {
 void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  uint32_t method_offset = 0;
   if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
-    method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArm64PointerSize).SizeValue();
+    __ Ldr(XRegisterFrom(locations->Out()),
+           MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
+    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kArm64PointerSize));
+    __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
+        mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+    __ Ldr(XRegisterFrom(locations->Out()),
+           MemOperand(XRegisterFrom(locations->Out()), method_offset));
   }
-  __ Ldr(XRegisterFrom(locations->Out()),
-         MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
 }
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 12d1164d03..06248a32b3 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3731,8 +3731,6 @@ void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
@@ -3749,6 +3747,10 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
     __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
+  __ LoadFromOffset(kLoadWord, temp, temp,
+      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kMipsPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
@@ -5187,18 +5189,25 @@ void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
 void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  uint32_t method_offset = 0;
   if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
-    method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kMipsPointerSize).SizeValue();
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      method_offset);
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kMipsPointerSize));
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->Out().AsRegister<Register>(),
+                      method_offset);
   }
-  __ LoadFromOffset(kLoadWord,
-                    locations->Out().AsRegister<Register>(),
-                    locations->InAt(0).AsRegister<Register>(),
-                    method_offset);
 }

 #undef __
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 56ac38ef84..9b405bb53b 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2933,8 +2933,6 @@ void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
@@ -2951,6 +2949,10 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
     __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
+  __ LoadFromOffset(kLoadDoubleword, temp, temp,
+      mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kMips64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c8a510df63..51d9b7cdba 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2010,8 +2010,6 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   XmmRegister hidden_reg = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

@@ -2038,7 +2036,12 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  // temp = temp->GetAddressOfIMT()
+  __ movl(temp,
+      Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
   // temp = temp->GetImtEntryAt(method_offset);
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kX86PointerSize));
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
   __ call(Address(temp,
@@ -4065,16 +4068,21 @@ void LocationsBuilderX86::VisitClassTableGet(HClassTableGet* instruction) {
 void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  uint32_t method_offset = 0;
   if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
-    method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86PointerSize).SizeValue();
+    __ movl(locations->Out().AsRegister<Register>(),
+            Address(locations->InAt(0).AsRegister<Register>(), method_offset));
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
+    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kX86PointerSize));
+    __ movl(locations->Out().AsRegister<Register>(),
+            Address(locations->InAt(0).AsRegister<Register>(),
+                    mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
+    // temp = temp->GetImtEntryAt(method_offset);
+    __ movl(locations->Out().AsRegister<Register>(),
+            Address(locations->Out().AsRegister<Register>(), method_offset));
   }
-  __ movl(locations->Out().AsRegister<Register>(),
-          Address(locations->InAt(0).AsRegister<Register>(), method_offset));
 }

 void LocationsBuilderX86::VisitNot(HNot* not_) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1540ea522c..28b52a1832 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2220,8 +2220,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   LocationSummary* locations = invoke->GetLocations();
   CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
   CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();

@@ -2247,6 +2245,12 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  // temp = temp->GetAddressOfIMT()
+  __ movq(temp,
+      Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+  // temp = temp->GetImtEntryAt(method_offset);
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kX86_64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
@@ -3976,16 +3980,20 @@ void LocationsBuilderX86_64::VisitClassTableGet(HClassTableGet* instruction) {
 void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  uint32_t method_offset = 0;
   if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
-    method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86_64PointerSize).SizeValue();
+    __ movq(locations->Out().AsRegister<CpuRegister>(),
+            Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
+    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kX86_64PointerSize));
+    __ movq(locations->Out().AsRegister<CpuRegister>(),
+            Address(locations->InAt(0).AsRegister<CpuRegister>(),
+                    mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+    __ movq(locations->Out().AsRegister<CpuRegister>(),
+            Address(locations->Out().AsRegister<CpuRegister>(), method_offset));
   }
-  __ movq(locations->Out().AsRegister<CpuRegister>(),
-          Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
 }

 void LocationsBuilderX86_64::VisitNot(HNot* not_) {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 547c9aa2c8..c8a983b2bd 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -649,8 +649,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
     }
     ArtMethod* new_method = nullptr;
     if (invoke_instruction->IsInvokeInterface()) {
-      new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
-          method_index % mirror::Class::kImtSize, pointer_size);
+      new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
+          method_index % ImTable::kSize, pointer_size);
       if (new_method->IsRuntimeMethod()) {
         // Bail out as soon as we see a conflict trampoline in one of the target's
         // interface table.
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 86b7bc138c..6c253adc2d 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1397,20 +1397,26 @@ void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) {
   Label conditions_on_positions_validated;
   SystemArrayCopyOptimizations optimizations(invoke);

-  if (!optimizations.GetDestinationIsSource() &&
-      (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
-    __ cmp(src, ShifterOperand(dest));
-  }

   // If source and destination are the same, we go to slow path if we need to do
   // forward copying.
   if (src_pos.IsConstant()) {
     int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
     if (dest_pos.IsConstant()) {
+      int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+      if (optimizations.GetDestinationIsSource()) {
+        // Checked when building locations.
+        DCHECK_GE(src_pos_constant, dest_pos_constant);
+      } else if (src_pos_constant < dest_pos_constant) {
+        __ cmp(src, ShifterOperand(dest));
+        __ b(slow_path->GetEntryLabel(), EQ);
+      }
+      // Checked when building locations.
       DCHECK(!optimizations.GetDestinationIsSource()
              || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
     } else {
       if (!optimizations.GetDestinationIsSource()) {
+        __ cmp(src, ShifterOperand(dest));
         __ b(&conditions_on_positions_validated, NE);
       }
       __ cmp(dest_pos.AsRegister<Register>(), ShifterOperand(src_pos_constant));
@@ -1418,6 +1424,7 @@ void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) {
     }
   } else {
     if (!optimizations.GetDestinationIsSource()) {
+      __ cmp(src, ShifterOperand(dest));
       __ b(&conditions_on_positions_validated, NE);
     }
     if (dest_pos.IsConstant()) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 04ae3a6732..696fa5254f 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2017,20 +2017,25 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
   vixl::Label conditions_on_positions_validated;
   SystemArrayCopyOptimizations optimizations(invoke);

-  if (!optimizations.GetDestinationIsSource() &&
-      (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
-    __ Cmp(src, dest);
-  }
   // If source and destination are the same, we go to slow path if we need to do
   // forward copying.
   if (src_pos.IsConstant()) {
     int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
     if (dest_pos.IsConstant()) {
+      int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+      if (optimizations.GetDestinationIsSource()) {
+        // Checked when building locations.
+        DCHECK_GE(src_pos_constant, dest_pos_constant);
+      } else if (src_pos_constant < dest_pos_constant) {
+        __ Cmp(src, dest);
+        __ B(slow_path->GetEntryLabel(), eq);
+      }
       // Checked when building locations.
       DCHECK(!optimizations.GetDestinationIsSource()
              || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
     } else {
       if (!optimizations.GetDestinationIsSource()) {
+        __ Cmp(src, dest);
         __ B(&conditions_on_positions_validated, ne);
       }
       __ Cmp(WRegisterFrom(dest_pos), src_pos_constant);
@@ -2038,6 +2043,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
     }
   } else {
     if (!optimizations.GetDestinationIsSource()) {
+      __ Cmp(src, dest);
       __ B(&conditions_on_positions_validated, ne);
     }
     __ Cmp(RegisterFrom(src_pos, invoke->InputAt(1)->GetType()),
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1d32dc7bc5..f726a25fc1 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1153,20 +1153,22 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   NearLabel conditions_on_positions_validated;
   SystemArrayCopyOptimizations optimizations(invoke);

-  if (!optimizations.GetDestinationIsSource() &&
-      (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
-    __ cmpl(src, dest);
-  }
   // If source and destination are the same, we go to slow path if we need to do
   // forward copying.
   if (src_pos.IsConstant()) {
     int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
     if (dest_pos.IsConstant()) {
-      // Checked when building locations.
-      DCHECK(!optimizations.GetDestinationIsSource()
-             || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+      int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+      if (optimizations.GetDestinationIsSource()) {
+        // Checked when building locations.
+        DCHECK_GE(src_pos_constant, dest_pos_constant);
+      } else if (src_pos_constant < dest_pos_constant) {
+        __ cmpl(src, dest);
+        __ j(kEqual, slow_path->GetEntryLabel());
+      }
     } else {
       if (!optimizations.GetDestinationIsSource()) {
+        __ cmpl(src, dest);
        __ j(kNotEqual, &conditions_on_positions_validated);
      }
      __ cmpl(dest_pos.AsRegister<CpuRegister>(), Immediate(src_pos_constant));
@@ -1174,6 +1176,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     }
   } else {
     if (!optimizations.GetDestinationIsSource()) {
+      __ cmpl(src, dest);
       __ j(kNotEqual, &conditions_on_positions_validated);
     }
     if (dest_pos.IsConstant()) {
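The three SystemArrayCopy intrinsics are restructured the same way: the old code hoisted a single cmp of src against dest above the position checks whenever either position was non-constant, keeping the flags live across several instructions, while the new code emits the cmp immediately before each branch that consumes it. When both positions are constants, the old code emitted no runtime check at all (only a DCHECK); the new code adds a compare-and-branch to the slow path for src_pos < dest_pos, the one case where the copy could have to run backward. A hedged C++ sketch of the resulting control flow, with the Emit* functions as illustrative stubs for the per-architecture assembler calls:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Stubs standing in for the per-architecture assembler macros.
void EmitCmpSrcDest()        { std::puts("cmp src, dest"); }
void EmitBranchEqSlow()      { std::puts("b.eq slow_path"); }
void EmitBranchNeValidated() { std::puts("b.ne conditions_on_positions_validated"); }

void EmitOverlapChecks(bool dest_is_source, bool both_positions_constant,
                       int32_t src_pos, int32_t dest_pos) {
  if (both_positions_constant) {
    if (dest_is_source) {
      // Same-array copies with src_pos >= dest_pos were already verified
      // when building locations; a forward copy is always safe.
      assert(src_pos >= dest_pos);
    } else if (src_pos < dest_pos) {
      // Only this case can force a backward copy: take the slow path iff
      // the two references turn out to be the same array at runtime.
      EmitCmpSrcDest();
      EmitBranchEqSlow();
    }
    // For src_pos >= dest_pos a forward copy is safe even when the arrays
    // alias, so nothing is emitted.
  } else if (!dest_is_source) {
    // The cmp now sits directly in front of the branch that reads its
    // flags; if the arrays differ, the position checks are skipped.
    EmitCmpSrcDest();
    EmitBranchNeValidated();
    // Fall through: possibly the same array, so the runtime position
    // checks that follow decide between the inline copy and the slow path.
  }
}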