author Raphael Gault <raphael.gault@linaro.org> 2020-09-30 08:33:10 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2021-09-06 08:58:50 +0000
commit 0700b69cb0c81c3590726be7fbe5b98531cec76b (patch)
tree   9699ae3c78a2c7546918ba03aa43b0306d4f48a5 /compiler/optimizing
parent 6194403a984dd814f01e6f7c6b270342d760388d (diff)
SVE: Extract Intermediate Address for SVE Vector Memory Operations
This patch introduces an optimization that extracts and factorizes the
"base + offset" common part of the address computation when performing
an SVE vector memory operation (VecStore/VecLoad).

With SVE enabled by default:
Test: ./art/test.py --simulate-arm64 --run-test --optimizing \
      (With the VIXL simulator patch)
Test: ./art/test.py --target --64 --optimizing \
      (On Arm FVP with SVE - see steps in test/README.arm_fvp.md)
Test: 527-checker-array-access, 655-checker-simd-arm

Change-Id: Icd49e57d5550d1530445a94e5d49e217a999d06d
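To make the intent concrete: a minimal standalone sketch (illustrative C++ only, not ART code; all names here are hypothetical) of the element-address arithmetic before and after extraction:

#include <cstddef>
#include <cstdint>

// Each vector access needs the element address
//   base + data_offset + (index << shift).
// Before the optimization every access recomputes the full sum; after it,
// the loop-invariant part (base + data_offset) is extracted once into an
// intermediate address that all accesses in the loop share.

uintptr_t ElementAddressNaive(uintptr_t base, size_t data_offset,
                              size_t index, size_t shift) {
  return base + data_offset + (index << shift);  // recomputed per access
}

uintptr_t MakeIntermediateAddress(uintptr_t base, size_t data_offset) {
  return base + data_offset;  // hoisted once, outside the loop
}

uintptr_t ElementAddressShared(uintptr_t intermediate, size_t index,
                               size_t shift) {
  return intermediate + (index << shift);  // one shifted add per access
}

In the compiler this factoring is represented by an HIntermediateAddress node, which the ARM64 backend can then fold straight into a [base, index, LSL #shift] memory operand.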
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc          |  6
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.cc  | 16
2 files changed, 18 insertions, 4 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e1a4718140..7401f0db91 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -6909,9 +6909,7 @@ SVEMemOperand InstructionCodeGeneratorARM64::VecSVEAddress(
   Register base = InputRegisterAt(instruction, 0);
   Location index = locations->InAt(1);
 
-  // TODO: Support intermediate address sharing for SVE accesses.
   DCHECK(!instruction->InputAt(1)->IsIntermediateAddressIndex());
-  DCHECK(!instruction->InputAt(0)->IsIntermediateAddress());
   DCHECK(!index.IsConstant());
 
   uint32_t offset = is_string_char_at
@@ -6919,6 +6917,10 @@
       : mirror::Array::DataOffset(size).Uint32Value();
   size_t shift = ComponentSizeShiftWidth(size);
 
+  if (instruction->InputAt(0)->IsIntermediateAddress()) {
+    return SVEMemOperand(base.X(), XRegisterFrom(index), LSL, shift);
+  }
+
   *scratch = temps_scope->AcquireSameSizeAs(base);
   __ Add(*scratch, base, offset);
   return SVEMemOperand(scratch->X(), XRegisterFrom(index), LSL, shift);
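The hunk above is the codegen half of the change: when input 0 is already an HIntermediateAddress (i.e. base + data_offset was folded by the simplifier), VecSVEAddress can scale the index off that register directly and skip the scratch-register add. A minimal model of that decision (hypothetical types, not the VIXL API):

#include <cstddef>
#include <cstdint>

struct Operand {   // stand-in for SVEMemOperand(reg, index, LSL, shift)
  uintptr_t reg;
  size_t index;
  size_t shift;
};

Operand SelectAddress(uintptr_t base, bool base_is_intermediate,
                      size_t data_offset, size_t index, size_t shift) {
  if (base_is_intermediate) {
    // base already holds array + data_offset; no extra instruction needed.
    return {base, index, shift};
  }
  uintptr_t scratch = base + data_offset;  // models "__ Add(scratch, base, offset)"
  return {scratch, index, shift};
}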
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index ff0859b456..a6ec02012c 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -277,18 +277,30 @@ void InstructionSimplifierArm64Visitor::VisitXor(HXor* instruction) {
 }
 
 void InstructionSimplifierArm64Visitor::VisitVecLoad(HVecLoad* instruction) {
-  // TODO: Extract regular HIntermediateAddress.
   if (!instruction->IsPredicated() && !instruction->IsStringCharAt() &&
       TryExtractVecArrayAccessAddress(instruction, instruction->GetIndex())) {
     RecordSimplification();
+  } else if (instruction->IsPredicated()) {
+    size_t size = DataType::Size(instruction->GetPackedType());
+    size_t offset = mirror::Array::DataOffset(size).Uint32Value();
+    if (TryExtractArrayAccessAddress(
+            instruction, instruction->GetArray(), instruction->GetIndex(), offset)) {
+      RecordSimplification();
+    }
   }
 }
 
 void InstructionSimplifierArm64Visitor::VisitVecStore(HVecStore* instruction) {
-  // TODO: Extract regular HIntermediateAddress.
   if (!instruction->IsPredicated() &&
       TryExtractVecArrayAccessAddress(instruction, instruction->GetIndex())) {
     RecordSimplification();
+  } else if (instruction->IsPredicated()) {
+    size_t size = DataType::Size(instruction->GetPackedType());
+    size_t offset = mirror::Array::DataOffset(size).Uint32Value();
+    if (TryExtractArrayAccessAddress(
+            instruction, instruction->GetArray(), instruction->GetIndex(), offset)) {
+      RecordSimplification();
+    }
   }
 }
 
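On the simplifier side, the new predicated (SVE) branch computes the payload offset from the packed element size and then reuses the existing scalar TryExtractArrayAccessAddress helper. A standalone stand-in for that offset computation (illustrative only: it assumes a hypothetical 12-byte array header, where the real value comes from mirror::Array::DataOffset):

#include <cstddef>

// Offset of the first element: the object header rounded up to the
// element size, so larger elements stay naturally aligned.
constexpr size_t kAssumedArrayHeaderBytes = 12;  // assumption for illustration

constexpr size_t DataOffsetForElementSize(size_t element_size) {
  // element_size is a power of two (1, 2, 4, or 8 bytes).
  return (kAssumedArrayHeaderBytes + element_size - 1) & ~(element_size - 1);
}

static_assert(DataOffsetForElementSize(1) == 12, "byte elements follow the header");
static_assert(DataOffsetForElementSize(8) == 16, "8-byte elements are realigned");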